Diffstat (limited to 'drivers/target')
-rw-r--r--  drivers/target/Kconfig | 1
-rw-r--r--  drivers/target/Makefile | 2
-rw-r--r--  drivers/target/iscsi/Kconfig | 9
-rw-r--r--  drivers/target/iscsi/Makefile | 20
-rw-r--r--  drivers/target/iscsi/iscsi_target.c | 4564
-rw-r--r--  drivers/target/iscsi/iscsi_target.h | 42
-rw-r--r--  drivers/target/iscsi/iscsi_target_auth.c | 490
-rw-r--r--  drivers/target/iscsi/iscsi_target_auth.h | 31
-rw-r--r--  drivers/target/iscsi/iscsi_target_configfs.c | 1882
-rw-r--r--  drivers/target/iscsi/iscsi_target_configfs.h | 7
-rw-r--r--  drivers/target/iscsi/iscsi_target_core.h | 859
-rw-r--r--  drivers/target/iscsi/iscsi_target_datain_values.c | 531
-rw-r--r--  drivers/target/iscsi/iscsi_target_datain_values.h | 12
-rw-r--r--  drivers/target/iscsi/iscsi_target_device.c | 87
-rw-r--r--  drivers/target/iscsi/iscsi_target_device.h | 9
-rw-r--r--  drivers/target/iscsi/iscsi_target_erl0.c | 1004
-rw-r--r--  drivers/target/iscsi/iscsi_target_erl0.h | 15
-rw-r--r--  drivers/target/iscsi/iscsi_target_erl1.c | 1299
-rw-r--r--  drivers/target/iscsi/iscsi_target_erl1.h | 26
-rw-r--r--  drivers/target/iscsi/iscsi_target_erl2.c | 474
-rw-r--r--  drivers/target/iscsi/iscsi_target_erl2.h | 18
-rw-r--r--  drivers/target/iscsi/iscsi_target_login.c | 1232
-rw-r--r--  drivers/target/iscsi/iscsi_target_login.h | 12
-rw-r--r--  drivers/target/iscsi/iscsi_target_nego.c | 1067
-rw-r--r--  drivers/target/iscsi/iscsi_target_nego.h | 17
-rw-r--r--  drivers/target/iscsi/iscsi_target_nodeattrib.c | 263
-rw-r--r--  drivers/target/iscsi/iscsi_target_nodeattrib.h | 14
-rw-r--r--  drivers/target/iscsi/iscsi_target_parameters.c | 1905
-rw-r--r--  drivers/target/iscsi/iscsi_target_parameters.h | 269
-rw-r--r--  drivers/target/iscsi/iscsi_target_seq_pdu_list.c | 664
-rw-r--r--  drivers/target/iscsi/iscsi_target_seq_pdu_list.h | 86
-rw-r--r--  drivers/target/iscsi/iscsi_target_stat.c | 950
-rw-r--r--  drivers/target/iscsi/iscsi_target_stat.h | 64
-rw-r--r--  drivers/target/iscsi/iscsi_target_tmr.c | 849
-rw-r--r--  drivers/target/iscsi/iscsi_target_tmr.h | 14
-rw-r--r--  drivers/target/iscsi/iscsi_target_tpg.c | 759
-rw-r--r--  drivers/target/iscsi/iscsi_target_tpg.h | 41
-rw-r--r--  drivers/target/iscsi/iscsi_target_tq.c | 551
-rw-r--r--  drivers/target/iscsi/iscsi_target_tq.h | 88
-rw-r--r--  drivers/target/iscsi/iscsi_target_util.c | 1819
-rw-r--r--  drivers/target/iscsi/iscsi_target_util.h | 60
-rw-r--r--  drivers/target/loopback/Kconfig | 6
-rw-r--r--  drivers/target/loopback/tcm_loop.c | 220
-rw-r--r--  drivers/target/loopback/tcm_loop.h | 6
-rw-r--r--  drivers/target/target_core_alua.c | 426
-rw-r--r--  drivers/target/target_core_cdb.c | 457
-rw-r--r--  drivers/target/target_core_configfs.c | 677
-rw-r--r--  drivers/target/target_core_device.c | 820
-rw-r--r--  drivers/target/target_core_fabric_configfs.c | 122
-rw-r--r--  drivers/target/target_core_fabric_lib.c | 27
-rw-r--r--  drivers/target/target_core_file.c | 149
-rw-r--r--  drivers/target/target_core_file.h | 4
-rw-r--r--  drivers/target/target_core_hba.c | 37
-rw-r--r--  drivers/target/target_core_iblock.c | 199
-rw-r--r--  drivers/target/target_core_iblock.h | 9
-rw-r--r--  drivers/target/target_core_pr.c | 868
-rw-r--r--  drivers/target/target_core_pr.h | 2
-rw-r--r--  drivers/target/target_core_pscsi.c | 316
-rw-r--r--  drivers/target/target_core_pscsi.h | 4
-rw-r--r--  drivers/target/target_core_rd.c | 483
-rw-r--r--  drivers/target/target_core_rd.h | 4
-rw-r--r--  drivers/target/target_core_scdb.c | 20
-rw-r--r--  drivers/target/target_core_scdb.h | 10
-rw-r--r--  drivers/target/target_core_stat.c | 112
-rw-r--r--  drivers/target/target_core_tmr.c | 191
-rw-r--r--  drivers/target/target_core_tpg.c | 206
-rw-r--r--  drivers/target/target_core_transport.c | 3658
-rw-r--r--  drivers/target/target_core_ua.c | 62
-rw-r--r--  drivers/target/tcm_fc/Makefile | 17
-rw-r--r--  drivers/target/tcm_fc/tcm_fc.h | 32
-rw-r--r--  drivers/target/tcm_fc/tfc_cmd.c | 198
-rw-r--r--  drivers/target/tcm_fc/tfc_conf.c | 48
-rw-r--r--  drivers/target/tcm_fc/tfc_io.c | 250
-rw-r--r--  drivers/target/tcm_fc/tfc_sess.c | 24
74 files changed, 26238 insertions, 5532 deletions
diff --git a/drivers/target/Kconfig b/drivers/target/Kconfig
index 5cb0f0ef6af..b28794b7212 100644
--- a/drivers/target/Kconfig
+++ b/drivers/target/Kconfig
@@ -31,5 +31,6 @@ config TCM_PSCSI
source "drivers/target/loopback/Kconfig"
source "drivers/target/tcm_fc/Kconfig"
+source "drivers/target/iscsi/Kconfig"
endif
diff --git a/drivers/target/Makefile b/drivers/target/Makefile
index 21df808a992..1060c7b7f80 100644
--- a/drivers/target/Makefile
+++ b/drivers/target/Makefile
@@ -24,5 +24,5 @@ obj-$(CONFIG_TCM_PSCSI) += target_core_pscsi.o
# Fabric modules
obj-$(CONFIG_LOOPBACK_TARGET) += loopback/
-
obj-$(CONFIG_TCM_FC) += tcm_fc/
+obj-$(CONFIG_ISCSI_TARGET) += iscsi/
diff --git a/drivers/target/iscsi/Kconfig b/drivers/target/iscsi/Kconfig
new file mode 100644
index 00000000000..8345fb457a4
--- /dev/null
+++ b/drivers/target/iscsi/Kconfig
@@ -0,0 +1,9 @@
+config ISCSI_TARGET
+ tristate "Linux-iSCSI.org iSCSI Target Mode Stack"
+ depends on NET
+ select CRYPTO
+ select CRYPTO_CRC32C
+ select CRYPTO_CRC32C_INTEL if X86
+ help
+ Say M here to enable the ConfigFS enabled Linux-iSCSI.org iSCSI
+ Target Mode Stack.
diff --git a/drivers/target/iscsi/Makefile b/drivers/target/iscsi/Makefile
new file mode 100644
index 00000000000..5b9a2cf7f0a
--- /dev/null
+++ b/drivers/target/iscsi/Makefile
@@ -0,0 +1,20 @@
+iscsi_target_mod-y += iscsi_target_parameters.o \
+ iscsi_target_seq_pdu_list.o \
+ iscsi_target_tq.o \
+ iscsi_target_auth.o \
+ iscsi_target_datain_values.o \
+ iscsi_target_device.o \
+ iscsi_target_erl0.o \
+ iscsi_target_erl1.o \
+ iscsi_target_erl2.o \
+ iscsi_target_login.o \
+ iscsi_target_nego.o \
+ iscsi_target_nodeattrib.o \
+ iscsi_target_tmr.o \
+ iscsi_target_tpg.o \
+ iscsi_target_util.o \
+ iscsi_target.o \
+ iscsi_target_configfs.o \
+ iscsi_target_stat.o
+
+obj-$(CONFIG_ISCSI_TARGET) += iscsi_target_mod.o
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
new file mode 100644
index 00000000000..c24fb10de60
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -0,0 +1,4564 @@
+/*******************************************************************************
+ * This file contains main functions related to the iSCSI Target Core Driver.
+ *
+ * © Copyright 2007-2011 RisingTide Systems LLC.
+ *
+ * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ *
+ * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ ******************************************************************************/
+
+#include <linux/string.h>
+#include <linux/kthread.h>
+#include <linux/crypto.h>
+#include <linux/completion.h>
+#include <asm/unaligned.h>
+#include <scsi/scsi_device.h>
+#include <scsi/iscsi_proto.h>
+#include <target/target_core_base.h>
+#include <target/target_core_tmr.h>
+#include <target/target_core_transport.h>
+
+#include "iscsi_target_core.h"
+#include "iscsi_target_parameters.h"
+#include "iscsi_target_seq_pdu_list.h"
+#include "iscsi_target_tq.h"
+#include "iscsi_target_configfs.h"
+#include "iscsi_target_datain_values.h"
+#include "iscsi_target_erl0.h"
+#include "iscsi_target_erl1.h"
+#include "iscsi_target_erl2.h"
+#include "iscsi_target_login.h"
+#include "iscsi_target_tmr.h"
+#include "iscsi_target_tpg.h"
+#include "iscsi_target_util.h"
+#include "iscsi_target.h"
+#include "iscsi_target_device.h"
+#include "iscsi_target_stat.h"
+
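+/*
+ * Global list of target IQNs and network portals; tiqn_lock protects
+ * g_tiqn_list and tiqn_idr, np_lock protects g_np_list.
+ */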
+static LIST_HEAD(g_tiqn_list);
+static LIST_HEAD(g_np_list);
+static DEFINE_SPINLOCK(tiqn_lock);
+static DEFINE_SPINLOCK(np_lock);
+
+static struct idr tiqn_idr;
+struct idr sess_idr;
+struct mutex auth_id_lock;
+spinlock_t sess_idr_lock;
+
+struct iscsit_global *iscsit_global;
+
+struct kmem_cache *lio_cmd_cache;
+struct kmem_cache *lio_qr_cache;
+struct kmem_cache *lio_dr_cache;
+struct kmem_cache *lio_ooo_cache;
+struct kmem_cache *lio_r2t_cache;
+
+static int iscsit_handle_immediate_data(struct iscsi_cmd *,
+ unsigned char *buf, u32);
+static int iscsit_logout_post_handler(struct iscsi_cmd *, struct iscsi_conn *);
+
+struct iscsi_tiqn *iscsit_get_tiqn_for_login(unsigned char *buf)
+{
+ struct iscsi_tiqn *tiqn = NULL;
+
+ spin_lock(&tiqn_lock);
+ list_for_each_entry(tiqn, &g_tiqn_list, tiqn_list) {
+ if (!strcmp(tiqn->tiqn, buf)) {
+
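+ /*
+ * Only hand out a tiqn in TIQN_STATE_ACTIVE; the access count
+ * taken here is dropped again by iscsit_put_tiqn_for_login().
+ */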
+ spin_lock(&tiqn->tiqn_state_lock);
+ if (tiqn->tiqn_state == TIQN_STATE_ACTIVE) {
+ tiqn->tiqn_access_count++;
+ spin_unlock(&tiqn->tiqn_state_lock);
+ spin_unlock(&tiqn_lock);
+ return tiqn;
+ }
+ spin_unlock(&tiqn->tiqn_state_lock);
+ }
+ }
+ spin_unlock(&tiqn_lock);
+
+ return NULL;
+}
+
+static int iscsit_set_tiqn_shutdown(struct iscsi_tiqn *tiqn)
+{
+ spin_lock(&tiqn->tiqn_state_lock);
+ if (tiqn->tiqn_state == TIQN_STATE_ACTIVE) {
+ tiqn->tiqn_state = TIQN_STATE_SHUTDOWN;
+ spin_unlock(&tiqn->tiqn_state_lock);
+ return 0;
+ }
+ spin_unlock(&tiqn->tiqn_state_lock);
+
+ return -1;
+}
+
+void iscsit_put_tiqn_for_login(struct iscsi_tiqn *tiqn)
+{
+ spin_lock(&tiqn->tiqn_state_lock);
+ tiqn->tiqn_access_count--;
+ spin_unlock(&tiqn->tiqn_state_lock);
+}
+
+/*
+ * Note that IQN formatting is expected to be done in userspace, and
+ * no explicit IQN format checks are done here.
+ */
+struct iscsi_tiqn *iscsit_add_tiqn(unsigned char *buf)
+{
+ struct iscsi_tiqn *tiqn = NULL;
+ int ret;
+
+ if (strlen(buf) >= ISCSI_IQN_LEN) {
+ pr_err("Target IQN exceeds %d bytes\n",
+ ISCSI_IQN_LEN);
+ return ERR_PTR(-EINVAL);
+ }
+
+ tiqn = kzalloc(sizeof(struct iscsi_tiqn), GFP_KERNEL);
+ if (!tiqn) {
+ pr_err("Unable to allocate struct iscsi_tiqn\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ sprintf(tiqn->tiqn, "%s", buf);
+ INIT_LIST_HEAD(&tiqn->tiqn_list);
+ INIT_LIST_HEAD(&tiqn->tiqn_tpg_list);
+ spin_lock_init(&tiqn->tiqn_state_lock);
+ spin_lock_init(&tiqn->tiqn_tpg_lock);
+ spin_lock_init(&tiqn->sess_err_stats.lock);
+ spin_lock_init(&tiqn->login_stats.lock);
+ spin_lock_init(&tiqn->logout_stats.lock);
+
+ if (!idr_pre_get(&tiqn_idr, GFP_KERNEL)) {
+ pr_err("idr_pre_get() for tiqn_idr failed\n");
+ kfree(tiqn);
+ return ERR_PTR(-ENOMEM);
+ }
+ tiqn->tiqn_state = TIQN_STATE_ACTIVE;
+
+ spin_lock(&tiqn_lock);
+ ret = idr_get_new(&tiqn_idr, NULL, &tiqn->tiqn_index);
+ if (ret < 0) {
+ pr_err("idr_get_new() failed for tiqn->tiqn_index\n");
+ spin_unlock(&tiqn_lock);
+ kfree(tiqn);
+ return ERR_PTR(ret);
+ }
+ list_add_tail(&tiqn->tiqn_list, &g_tiqn_list);
+ spin_unlock(&tiqn_lock);
+
+ pr_debug("CORE[0] - Added iSCSI Target IQN: %s\n", tiqn->tiqn);
+
+ return tiqn;
+
+}
+
+static void iscsit_wait_for_tiqn(struct iscsi_tiqn *tiqn)
+{
+ /*
+ * Wait for accesses to said struct iscsi_tiqn to end.
+ */
+ spin_lock(&tiqn->tiqn_state_lock);
+ while (tiqn->tiqn_access_count != 0) {
+ spin_unlock(&tiqn->tiqn_state_lock);
+ msleep(10);
+ spin_lock(&tiqn->tiqn_state_lock);
+ }
+ spin_unlock(&tiqn->tiqn_state_lock);
+}
+
+void iscsit_del_tiqn(struct iscsi_tiqn *tiqn)
+{
+ /*
+ * iscsit_set_tiqn_shutdown sets tiqn->tiqn_state = TIQN_STATE_SHUTDOWN
+ * while holding tiqn->tiqn_state_lock. This means that all subsequent
+ * attempts to access this struct iscsi_tiqn will fail from both transport
+ * fabric and control code paths.
+ */
+ if (iscsit_set_tiqn_shutdown(tiqn) < 0) {
+ pr_err("iscsit_set_tiqn_shutdown() failed\n");
+ return;
+ }
+
+ iscsit_wait_for_tiqn(tiqn);
+
+ spin_lock(&tiqn_lock);
+ list_del(&tiqn->tiqn_list);
+ idr_remove(&tiqn_idr, tiqn->tiqn_index);
+ spin_unlock(&tiqn_lock);
+
+ pr_debug("CORE[0] - Deleted iSCSI Target IQN: %s\n",
+ tiqn->tiqn);
+ kfree(tiqn);
+}
+
+int iscsit_access_np(struct iscsi_np *np, struct iscsi_portal_group *tpg)
+{
+ int ret;
+ /*
+ * Determine if the network portal is accepting storage traffic.
+ */
+ spin_lock_bh(&np->np_thread_lock);
+ if (np->np_thread_state != ISCSI_NP_THREAD_ACTIVE) {
+ spin_unlock_bh(&np->np_thread_lock);
+ return -1;
+ }
+ if (np->np_login_tpg) {
+ pr_err("np->np_login_tpg() is not NULL!\n");
+ spin_unlock_bh(&np->np_thread_lock);
+ return -1;
+ }
+ spin_unlock_bh(&np->np_thread_lock);
+ /*
+ * Determine if the portal group is accepting storage traffic.
+ */
+ spin_lock_bh(&tpg->tpg_state_lock);
+ if (tpg->tpg_state != TPG_STATE_ACTIVE) {
+ spin_unlock_bh(&tpg->tpg_state_lock);
+ return -1;
+ }
+ spin_unlock_bh(&tpg->tpg_state_lock);
+
+ /*
+ * Here we serialize access across the TIQN+TPG Tuple.
+ */
+ ret = mutex_lock_interruptible(&tpg->np_login_lock);
+ if ((ret != 0) || signal_pending(current))
+ return -1;
+
+ spin_lock_bh(&np->np_thread_lock);
+ np->np_login_tpg = tpg;
+ spin_unlock_bh(&np->np_thread_lock);
+
+ return 0;
+}
+
+int iscsit_deaccess_np(struct iscsi_np *np, struct iscsi_portal_group *tpg)
+{
+ struct iscsi_tiqn *tiqn = tpg->tpg_tiqn;
+
+ spin_lock_bh(&np->np_thread_lock);
+ np->np_login_tpg = NULL;
+ spin_unlock_bh(&np->np_thread_lock);
+
+ mutex_unlock(&tpg->np_login_lock);
+
+ if (tiqn)
+ iscsit_put_tiqn_for_login(tiqn);
+
+ return 0;
+}
+
+static struct iscsi_np *iscsit_get_np(
+ struct __kernel_sockaddr_storage *sockaddr,
+ int network_transport)
+{
+ struct sockaddr_in *sock_in, *sock_in_e;
+ struct sockaddr_in6 *sock_in6, *sock_in6_e;
+ struct iscsi_np *np;
+ int ip_match = 0;
+ u16 port;
+
+ spin_lock_bh(&np_lock);
+ list_for_each_entry(np, &g_np_list, np_list) {
+ spin_lock(&np->np_thread_lock);
+ if (np->np_thread_state != ISCSI_NP_THREAD_ACTIVE) {
+ spin_unlock(&np->np_thread_lock);
+ continue;
+ }
+
+ if (sockaddr->ss_family == AF_INET6) {
+ sock_in6 = (struct sockaddr_in6 *)sockaddr;
+ sock_in6_e = (struct sockaddr_in6 *)&np->np_sockaddr;
+
+ if (!memcmp((void *)&sock_in6->sin6_addr.in6_u,
+ (void *)&sock_in6_e->sin6_addr.in6_u,
+ sizeof(struct in6_addr)))
+ ip_match = 1;
+
+ port = ntohs(sock_in6->sin6_port);
+ } else {
+ sock_in = (struct sockaddr_in *)sockaddr;
+ sock_in_e = (struct sockaddr_in *)&np->np_sockaddr;
+
+ if (sock_in->sin_addr.s_addr ==
+ sock_in_e->sin_addr.s_addr)
+ ip_match = 1;
+
+ port = ntohs(sock_in->sin_port);
+ }
+
+ if ((ip_match == 1) && (np->np_port == port) &&
+ (np->np_network_transport == network_transport)) {
+ /*
+ * Increment the np_exports reference count now to
+ * prevent iscsit_del_np() below from being called
+ * while iscsi_tpg_add_network_portal() is called.
+ */
+ np->np_exports++;
+ spin_unlock(&np->np_thread_lock);
+ spin_unlock_bh(&np_lock);
+ return np;
+ }
+ spin_unlock(&np->np_thread_lock);
+ }
+ spin_unlock_bh(&np_lock);
+
+ return NULL;
+}
+
+struct iscsi_np *iscsit_add_np(
+ struct __kernel_sockaddr_storage *sockaddr,
+ char *ip_str,
+ int network_transport)
+{
+ struct sockaddr_in *sock_in;
+ struct sockaddr_in6 *sock_in6;
+ struct iscsi_np *np;
+ int ret;
+ /*
+ * Locate the existing struct iscsi_np if already active.
+ */
+ np = iscsit_get_np(sockaddr, network_transport);
+ if (np)
+ return np;
+
+ np = kzalloc(sizeof(struct iscsi_np), GFP_KERNEL);
+ if (!np) {
+ pr_err("Unable to allocate memory for struct iscsi_np\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ np->np_flags |= NPF_IP_NETWORK;
+ if (sockaddr->ss_family == AF_INET6) {
+ sock_in6 = (struct sockaddr_in6 *)sockaddr;
+ snprintf(np->np_ip, IPV6_ADDRESS_SPACE, "%s", ip_str);
+ np->np_port = ntohs(sock_in6->sin6_port);
+ } else {
+ sock_in = (struct sockaddr_in *)sockaddr;
+ sprintf(np->np_ip, "%s", ip_str);
+ np->np_port = ntohs(sock_in->sin_port);
+ }
+
+ np->np_network_transport = network_transport;
+ spin_lock_init(&np->np_thread_lock);
+ init_completion(&np->np_restart_comp);
+ INIT_LIST_HEAD(&np->np_list);
+
+ ret = iscsi_target_setup_login_socket(np, sockaddr);
+ if (ret != 0) {
+ kfree(np);
+ return ERR_PTR(ret);
+ }
+
+ np->np_thread = kthread_run(iscsi_target_login_thread, np, "iscsi_np");
+ if (IS_ERR(np->np_thread)) {
+ pr_err("Unable to create kthread: iscsi_np\n");
+ ret = PTR_ERR(np->np_thread);
+ kfree(np);
+ return ERR_PTR(ret);
+ }
+ /*
+ * Increment the np_exports reference count now to prevent
+ * iscsit_del_np() below from being run while a new call to
+ * iscsi_tpg_add_network_portal() for a matching iscsi_np is
+ * active. We don't need to hold np->np_thread_lock at this
+ * point because iscsi_np has not been added to g_np_list yet.
+ */
+ np->np_exports = 1;
+
+ spin_lock_bh(&np_lock);
+ list_add_tail(&np->np_list, &g_np_list);
+ spin_unlock_bh(&np_lock);
+
+ pr_debug("CORE[0] - Added Network Portal: %s:%hu on %s\n",
+ np->np_ip, np->np_port, (np->np_network_transport == ISCSI_TCP) ?
+ "TCP" : "SCTP");
+
+ return np;
+}
+
+int iscsit_reset_np_thread(
+ struct iscsi_np *np,
+ struct iscsi_tpg_np *tpg_np,
+ struct iscsi_portal_group *tpg)
+{
+ spin_lock_bh(&np->np_thread_lock);
+ if (tpg && tpg_np) {
+ /*
+ * The reset operation need only be performed when the
+ * passed struct iscsi_portal_group has a login in progress
+ * to one of the network portals.
+ */
+ if (tpg_np->tpg_np->np_login_tpg != tpg) {
+ spin_unlock_bh(&np->np_thread_lock);
+ return 0;
+ }
+ }
+ if (np->np_thread_state == ISCSI_NP_THREAD_INACTIVE) {
+ spin_unlock_bh(&np->np_thread_lock);
+ return 0;
+ }
+ np->np_thread_state = ISCSI_NP_THREAD_RESET;
+
+ if (np->np_thread) {
+ spin_unlock_bh(&np->np_thread_lock);
+ send_sig(SIGINT, np->np_thread, 1);
+ wait_for_completion(&np->np_restart_comp);
+ spin_lock_bh(&np->np_thread_lock);
+ }
+ spin_unlock_bh(&np->np_thread_lock);
+
+ return 0;
+}
+
+int iscsit_del_np_comm(struct iscsi_np *np)
+{
+ if (!np->np_socket)
+ return 0;
+
+ /*
+ * Some network transports allocate their own struct sock->file,
+ * see if we need to free any additional allocated resources.
+ */
+ if (np->np_flags & NPF_SCTP_STRUCT_FILE) {
+ kfree(np->np_socket->file);
+ np->np_socket->file = NULL;
+ }
+
+ sock_release(np->np_socket);
+ return 0;
+}
+
+int iscsit_del_np(struct iscsi_np *np)
+{
+ spin_lock_bh(&np->np_thread_lock);
+ np->np_exports--;
+ if (np->np_exports) {
+ spin_unlock_bh(&np->np_thread_lock);
+ return 0;
+ }
+ np->np_thread_state = ISCSI_NP_THREAD_SHUTDOWN;
+ spin_unlock_bh(&np->np_thread_lock);
+
+ if (np->np_thread) {
+ /*
+ * We need to send the signal to wake up Linux/Net,
+ * which may be sleeping in sock_accept().
+ */
+ send_sig(SIGINT, np->np_thread, 1);
+ kthread_stop(np->np_thread);
+ }
+ iscsit_del_np_comm(np);
+
+ spin_lock_bh(&np_lock);
+ list_del(&np->np_list);
+ spin_unlock_bh(&np_lock);
+
+ pr_debug("CORE[0] - Removed Network Portal: %s:%hu on %s\n",
+ np->np_ip, np->np_port, (np->np_network_transport == ISCSI_TCP) ?
+ "TCP" : "SCTP");
+
+ kfree(np);
+ return 0;
+}
+
+static int __init iscsi_target_init_module(void)
+{
+ int ret = 0;
+
+ pr_debug("iSCSI-Target "ISCSIT_VERSION"\n");
+
+ iscsit_global = kzalloc(sizeof(struct iscsit_global), GFP_KERNEL);
+ if (!iscsit_global) {
+ pr_err("Unable to allocate memory for iscsit_global\n");
+ return -1;
+ }
+ mutex_init(&auth_id_lock);
+ spin_lock_init(&sess_idr_lock);
+ idr_init(&tiqn_idr);
+ idr_init(&sess_idr);
+
+ ret = iscsi_target_register_configfs();
+ if (ret < 0)
+ goto out;
+
+ ret = iscsi_thread_set_init();
+ if (ret < 0)
+ goto configfs_out;
+
+ if (iscsi_allocate_thread_sets(TARGET_THREAD_SET_COUNT) !=
+ TARGET_THREAD_SET_COUNT) {
+ pr_err("iscsi_allocate_thread_sets() returned"
+ " unexpected value!\n");
+ goto ts_out1;
+ }
+
+ lio_cmd_cache = kmem_cache_create("lio_cmd_cache",
+ sizeof(struct iscsi_cmd), __alignof__(struct iscsi_cmd),
+ 0, NULL);
+ if (!lio_cmd_cache) {
+ pr_err("Unable to kmem_cache_create() for"
+ " lio_cmd_cache\n");
+ goto ts_out2;
+ }
+
+ lio_qr_cache = kmem_cache_create("lio_qr_cache",
+ sizeof(struct iscsi_queue_req),
+ __alignof__(struct iscsi_queue_req), 0, NULL);
+ if (!lio_qr_cache) {
+ pr_err("nable to kmem_cache_create() for"
+ " lio_qr_cache\n");
+ goto cmd_out;
+ }
+
+ lio_dr_cache = kmem_cache_create("lio_dr_cache",
+ sizeof(struct iscsi_datain_req),
+ __alignof__(struct iscsi_datain_req), 0, NULL);
+ if (!lio_dr_cache) {
+ pr_err("Unable to kmem_cache_create() for"
+ " lio_dr_cache\n");
+ goto qr_out;
+ }
+
+ lio_ooo_cache = kmem_cache_create("lio_ooo_cache",
+ sizeof(struct iscsi_ooo_cmdsn),
+ __alignof__(struct iscsi_ooo_cmdsn), 0, NULL);
+ if (!lio_ooo_cache) {
+ pr_err("Unable to kmem_cache_create() for"
+ " lio_ooo_cache\n");
+ goto dr_out;
+ }
+
+ lio_r2t_cache = kmem_cache_create("lio_r2t_cache",
+ sizeof(struct iscsi_r2t), __alignof__(struct iscsi_r2t),
+ 0, NULL);
+ if (!lio_r2t_cache) {
+ pr_err("Unable to kmem_cache_create() for"
+ " lio_r2t_cache\n");
+ goto ooo_out;
+ }
+
+ if (iscsit_load_discovery_tpg() < 0)
+ goto r2t_out;
+
+ return ret;
+r2t_out:
+ kmem_cache_destroy(lio_r2t_cache);
+ooo_out:
+ kmem_cache_destroy(lio_ooo_cache);
+dr_out:
+ kmem_cache_destroy(lio_dr_cache);
+qr_out:
+ kmem_cache_destroy(lio_qr_cache);
+cmd_out:
+ kmem_cache_destroy(lio_cmd_cache);
+ts_out2:
+ iscsi_deallocate_thread_sets();
+ts_out1:
+ iscsi_thread_set_free();
+configfs_out:
+ iscsi_target_deregister_configfs();
+out:
+ kfree(iscsit_global);
+ return -ENOMEM;
+}
+
+static void __exit iscsi_target_cleanup_module(void)
+{
+ iscsi_deallocate_thread_sets();
+ iscsi_thread_set_free();
+ iscsit_release_discovery_tpg();
+ kmem_cache_destroy(lio_cmd_cache);
+ kmem_cache_destroy(lio_qr_cache);
+ kmem_cache_destroy(lio_dr_cache);
+ kmem_cache_destroy(lio_ooo_cache);
+ kmem_cache_destroy(lio_r2t_cache);
+
+ iscsi_target_deregister_configfs();
+
+ kfree(iscsit_global);
+}
+
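+/*
+ * Build and queue an ISCSI_OP_REJECT PDU that echoes the offending header
+ * back to the initiator, then wait for it to be sent. Returns -1 when
+ * fail_conn requests that the connection also be failed.
+ */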
+int iscsit_add_reject(
+ u8 reason,
+ int fail_conn,
+ unsigned char *buf,
+ struct iscsi_conn *conn)
+{
+ struct iscsi_cmd *cmd;
+ struct iscsi_reject *hdr;
+ int ret;
+
+ cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
+ if (!cmd)
+ return -1;
+
+ cmd->iscsi_opcode = ISCSI_OP_REJECT;
+ if (fail_conn)
+ cmd->cmd_flags |= ICF_REJECT_FAIL_CONN;
+
+ hdr = (struct iscsi_reject *) cmd->pdu;
+ hdr->reason = reason;
+
+ cmd->buf_ptr = kzalloc(ISCSI_HDR_LEN, GFP_KERNEL);
+ if (!cmd->buf_ptr) {
+ pr_err("Unable to allocate memory for cmd->buf_ptr\n");
+ iscsit_release_cmd(cmd);
+ return -1;
+ }
+ memcpy(cmd->buf_ptr, buf, ISCSI_HDR_LEN);
+
+ spin_lock_bh(&conn->cmd_lock);
+ list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
+ spin_unlock_bh(&conn->cmd_lock);
+
+ cmd->i_state = ISTATE_SEND_REJECT;
+ iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
+
+ ret = wait_for_completion_interruptible(&cmd->reject_comp);
+ if (ret != 0)
+ return -1;
+
+ return (!fail_conn) ? 0 : -1;
+}
+
+int iscsit_add_reject_from_cmd(
+ u8 reason,
+ int fail_conn,
+ int add_to_conn,
+ unsigned char *buf,
+ struct iscsi_cmd *cmd)
+{
+ struct iscsi_conn *conn;
+ struct iscsi_reject *hdr;
+ int ret;
+
+ if (!cmd->conn) {
+ pr_err("cmd->conn is NULL for ITT: 0x%08x\n",
+ cmd->init_task_tag);
+ return -1;
+ }
+ conn = cmd->conn;
+
+ cmd->iscsi_opcode = ISCSI_OP_REJECT;
+ if (fail_conn)
+ cmd->cmd_flags |= ICF_REJECT_FAIL_CONN;
+
+ hdr = (struct iscsi_reject *) cmd->pdu;
+ hdr->reason = reason;
+
+ cmd->buf_ptr = kzalloc(ISCSI_HDR_LEN, GFP_KERNEL);
+ if (!cmd->buf_ptr) {
+ pr_err("Unable to allocate memory for cmd->buf_ptr\n");
+ iscsit_release_cmd(cmd);
+ return -1;
+ }
+ memcpy(cmd->buf_ptr, buf, ISCSI_HDR_LEN);
+
+ if (add_to_conn) {
+ spin_lock_bh(&conn->cmd_lock);
+ list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
+ spin_unlock_bh(&conn->cmd_lock);
+ }
+
+ cmd->i_state = ISTATE_SEND_REJECT;
+ iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
+
+ ret = wait_for_completion_interruptible(&cmd->reject_comp);
+ if (ret != 0)
+ return -1;
+
+ return (!fail_conn) ? 0 : -1;
+}
+
+/*
+ * Map some portion of the allocated scatterlist to an iovec, suitable for
+ * kernel sockets to copy data in/out. This handles both pages and slab-allocated
+ * buffers, since we have been tricky and mapped t_mem_sg to the buffer in
+ * either case (see iscsit_alloc_buffs()).
+ */
+static int iscsit_map_iovec(
+ struct iscsi_cmd *cmd,
+ struct kvec *iov,
+ u32 data_offset,
+ u32 data_length)
+{
+ u32 i = 0;
+ struct scatterlist *sg;
+ unsigned int page_off;
+
+ /*
+ * We have a private mapping of the allocated pages in t_mem_sg.
+ * At this point, we also know each contains a page.
+ */
+ sg = &cmd->t_mem_sg[data_offset / PAGE_SIZE];
+ page_off = (data_offset % PAGE_SIZE);
+
+ cmd->first_data_sg = sg;
+ cmd->first_data_sg_off = page_off;
+
+ while (data_length) {
+ u32 cur_len = min_t(u32, data_length, sg->length - page_off);
+
+ iov[i].iov_base = kmap(sg_page(sg)) + sg->offset + page_off;
+ iov[i].iov_len = cur_len;
+
+ data_length -= cur_len;
+ page_off = 0;
+ sg = sg_next(sg);
+ i++;
+ }
+
+ cmd->kmapped_nents = i;
+
+ return i;
+}
+
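+/*
+ * Typical usage, as in iscsit_handle_data_out() below: map the payload
+ * window, hand the kvec array to rx_data(), then undo the kmap()s:
+ *
+ *	iov_ret = iscsit_map_iovec(cmd, iov, hdr->offset, payload_length);
+ *	rx_got = rx_data(conn, &cmd->iov_data[0], iov_count, rx_size);
+ *	iscsit_unmap_iovec(cmd);
+ */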
+static void iscsit_unmap_iovec(struct iscsi_cmd *cmd)
+{
+ u32 i;
+ struct scatterlist *sg;
+
+ sg = cmd->first_data_sg;
+
+ for (i = 0; i < cmd->kmapped_nents; i++)
+ kunmap(sg_page(&sg[i]));
+}
+
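+/*
+ * An ExpStatSN from the initiator acknowledges every status with a lower
+ * StatSN, so any command already in ISTATE_SENT_STATUS below that sequence
+ * number can be scheduled for removal.
+ */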
+static void iscsit_ack_from_expstatsn(struct iscsi_conn *conn, u32 exp_statsn)
+{
+ struct iscsi_cmd *cmd;
+
+ conn->exp_statsn = exp_statsn;
+
+ spin_lock_bh(&conn->cmd_lock);
+ list_for_each_entry(cmd, &conn->conn_cmd_list, i_list) {
+ spin_lock(&cmd->istate_lock);
+ if ((cmd->i_state == ISTATE_SENT_STATUS) &&
+ (cmd->stat_sn < exp_statsn)) {
+ cmd->i_state = ISTATE_REMOVE;
+ spin_unlock(&cmd->istate_lock);
+ iscsit_add_cmd_to_immediate_queue(cmd, conn,
+ cmd->i_state);
+ continue;
+ }
+ spin_unlock(&cmd->istate_lock);
+ }
+ spin_unlock_bh(&conn->cmd_lock);
+}
+
+static int iscsit_allocate_iovecs(struct iscsi_cmd *cmd)
+{
+ u32 iov_count = (cmd->se_cmd.t_data_nents == 0) ? 1 :
+ cmd->se_cmd.t_data_nents;
+
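+ /*
+ * TRANSPORT_IOV_DATA_BUFFER adds slack beyond the SG count for the
+ * extra entries (e.g. padding and DataDigest) appended in the data paths.
+ */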
+ iov_count += TRANSPORT_IOV_DATA_BUFFER;
+
+ cmd->iov_data = kzalloc(iov_count * sizeof(struct kvec), GFP_KERNEL);
+ if (!cmd->iov_data) {
+ pr_err("Unable to allocate cmd->iov_data\n");
+ return -ENOMEM;
+ }
+
+ cmd->orig_iov_data_count = iov_count;
+ return 0;
+}
+
+static int iscsit_alloc_buffs(struct iscsi_cmd *cmd)
+{
+ struct scatterlist *sgl;
+ u32 length = cmd->se_cmd.data_length;
+ int nents = DIV_ROUND_UP(length, PAGE_SIZE);
+ int i = 0, ret;
+ /*
+ * If no SCSI payload is present, allocate the default iovecs used for
+ * iSCSI PDU Header
+ */
+ if (!length)
+ return iscsit_allocate_iovecs(cmd);
+
+ sgl = kzalloc(sizeof(*sgl) * nents, GFP_KERNEL);
+ if (!sgl)
+ return -ENOMEM;
+
+ sg_init_table(sgl, nents);
+
+ while (length) {
+ int buf_size = min_t(int, length, PAGE_SIZE);
+ struct page *page;
+
+ page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (!page)
+ goto page_alloc_failed;
+
+ sg_set_page(&sgl[i], page, buf_size, 0);
+
+ length -= buf_size;
+ i++;
+ }
+
+ cmd->t_mem_sg = sgl;
+ cmd->t_mem_sg_nents = nents;
+
+ /* BIDI ops not supported */
+
+ /* Tell the core about our preallocated memory */
+ transport_generic_map_mem_to_cmd(&cmd->se_cmd, sgl, nents, NULL, 0);
+ /*
+ * Allocate iovecs for SCSI payload after transport_generic_map_mem_to_cmd
+ * so that cmd->se_cmd.t_tasks_se_num has been set.
+ */
+ ret = iscsit_allocate_iovecs(cmd);
+ if (ret < 0)
+ goto page_alloc_failed;
+
+ return 0;
+
+page_alloc_failed:
+ while (--i >= 0)
+ __free_page(sg_page(&sgl[i]));
+ kfree(cmd->t_mem_sg);
+ cmd->t_mem_sg = NULL;
+ return -ENOMEM;
+}
+
+static int iscsit_handle_scsi_cmd(
+ struct iscsi_conn *conn,
+ unsigned char *buf)
+{
+ int data_direction, cmdsn_ret = 0, immed_ret, ret, transport_ret;
+ int dump_immediate_data = 0, send_check_condition = 0, payload_length;
+ struct iscsi_cmd *cmd = NULL;
+ struct iscsi_scsi_req *hdr;
+
+ spin_lock_bh(&conn->sess->session_stats_lock);
+ conn->sess->cmd_pdus++;
+ if (conn->sess->se_sess->se_node_acl) {
+ spin_lock(&conn->sess->se_sess->se_node_acl->stats_lock);
+ conn->sess->se_sess->se_node_acl->num_cmds++;
+ spin_unlock(&conn->sess->se_sess->se_node_acl->stats_lock);
+ }
+ spin_unlock_bh(&conn->sess->session_stats_lock);
+
+ hdr = (struct iscsi_scsi_req *) buf;
+ payload_length = ntoh24(hdr->dlength);
+ hdr->itt = be32_to_cpu(hdr->itt);
+ hdr->data_length = be32_to_cpu(hdr->data_length);
+ hdr->cmdsn = be32_to_cpu(hdr->cmdsn);
+ hdr->exp_statsn = be32_to_cpu(hdr->exp_statsn);
+
+ /* FIXME: Add checks for AdditionalHeaderSegment */
+
+ if (!(hdr->flags & ISCSI_FLAG_CMD_WRITE) &&
+ !(hdr->flags & ISCSI_FLAG_CMD_FINAL)) {
+ pr_err("ISCSI_FLAG_CMD_WRITE & ISCSI_FLAG_CMD_FINAL"
+ " not set. Bad iSCSI Initiator.\n");
+ return iscsit_add_reject(ISCSI_REASON_BOOKMARK_INVALID, 1,
+ buf, conn);
+ }
+
+ if (((hdr->flags & ISCSI_FLAG_CMD_READ) ||
+ (hdr->flags & ISCSI_FLAG_CMD_WRITE)) && !hdr->data_length) {
+ /*
+ * VMware ESX v3.0 uses a modified Cisco Initiator (v3.4.2)
+ * that adds support for RESERVE/RELEASE. There is a bug in
+ * this new functionality that sets the R/W bits when neither
+ * CDB carries any READ or WRITE data payloads.
+ */
+ if ((hdr->cdb[0] == 0x16) || (hdr->cdb[0] == 0x17)) {
+ hdr->flags &= ~ISCSI_FLAG_CMD_READ;
+ hdr->flags &= ~ISCSI_FLAG_CMD_WRITE;
+ goto done;
+ }
+
+ pr_err("ISCSI_FLAG_CMD_READ or ISCSI_FLAG_CMD_WRITE"
+ " set when Expected Data Transfer Length is 0 for"
+ " CDB: 0x%02x. Bad iSCSI Initiator.\n", hdr->cdb[0]);
+ return iscsit_add_reject(ISCSI_REASON_BOOKMARK_INVALID, 1,
+ buf, conn);
+ }
+done:
+
+ if (!(hdr->flags & ISCSI_FLAG_CMD_READ) &&
+ !(hdr->flags & ISCSI_FLAG_CMD_WRITE) && (hdr->data_length != 0)) {
+ pr_err("ISCSI_FLAG_CMD_READ and/or ISCSI_FLAG_CMD_WRITE"
+ " MUST be set if Expected Data Transfer Length is not 0."
+ " Bad iSCSI Initiator\n");
+ return iscsit_add_reject(ISCSI_REASON_BOOKMARK_INVALID, 1,
+ buf, conn);
+ }
+
+ if ((hdr->flags & ISCSI_FLAG_CMD_READ) &&
+ (hdr->flags & ISCSI_FLAG_CMD_WRITE)) {
+ pr_err("Bidirectional operations not supported!\n");
+ return iscsit_add_reject(ISCSI_REASON_BOOKMARK_INVALID, 1,
+ buf, conn);
+ }
+
+ if (hdr->opcode & ISCSI_OP_IMMEDIATE) {
+ pr_err("Illegally set Immediate Bit in iSCSI Initiator"
+ " Scsi Command PDU.\n");
+ return iscsit_add_reject(ISCSI_REASON_BOOKMARK_INVALID, 1,
+ buf, conn);
+ }
+
+ if (payload_length && !conn->sess->sess_ops->ImmediateData) {
+ pr_err("ImmediateData=No but DataSegmentLength=%u,"
+ " protocol error.\n", payload_length);
+ return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
+ buf, conn);
+ }
+
+ if ((hdr->data_length == payload_length) &&
+ (!(hdr->flags & ISCSI_FLAG_CMD_FINAL))) {
+ pr_err("Expected Data Transfer Length and Length of"
+ " Immediate Data are the same, but ISCSI_FLAG_CMD_FINAL"
+ " bit is not set protocol error\n");
+ return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
+ buf, conn);
+ }
+
+ if (payload_length > hdr->data_length) {
+ pr_err("DataSegmentLength: %u is greater than"
+ " EDTL: %u, protocol error.\n", payload_length,
+ hdr->data_length);
+ return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
+ buf, conn);
+ }
+
+ if (payload_length > conn->conn_ops->MaxRecvDataSegmentLength) {
+ pr_err("DataSegmentLength: %u is greater than"
+ " MaxRecvDataSegmentLength: %u, protocol error.\n",
+ payload_length, conn->conn_ops->MaxRecvDataSegmentLength);
+ return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
+ buf, conn);
+ }
+
+ if (payload_length > conn->sess->sess_ops->FirstBurstLength) {
+ pr_err("DataSegmentLength: %u is greater than"
+ " FirstBurstLength: %u, protocol error.\n",
+ payload_length, conn->sess->sess_ops->FirstBurstLength);
+ return iscsit_add_reject(ISCSI_REASON_BOOKMARK_INVALID, 1,
+ buf, conn);
+ }
+
+ data_direction = (hdr->flags & ISCSI_FLAG_CMD_WRITE) ? DMA_TO_DEVICE :
+ (hdr->flags & ISCSI_FLAG_CMD_READ) ? DMA_FROM_DEVICE :
+ DMA_NONE;
+
+ cmd = iscsit_allocate_se_cmd(conn, hdr->data_length, data_direction,
+ (hdr->flags & ISCSI_FLAG_CMD_ATTR_MASK));
+ if (!cmd)
+ return iscsit_add_reject(ISCSI_REASON_BOOKMARK_NO_RESOURCES, 1,
+ buf, conn);
+
+ pr_debug("Got SCSI Command, ITT: 0x%08x, CmdSN: 0x%08x,"
+ " ExpXferLen: %u, Length: %u, CID: %hu\n", hdr->itt,
+ hdr->cmdsn, hdr->data_length, payload_length, conn->cid);
+
+ cmd->iscsi_opcode = ISCSI_OP_SCSI_CMD;
+ cmd->i_state = ISTATE_NEW_CMD;
+ cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0);
+ cmd->immediate_data = (payload_length) ? 1 : 0;
+ cmd->unsolicited_data = ((!(hdr->flags & ISCSI_FLAG_CMD_FINAL) &&
+ (hdr->flags & ISCSI_FLAG_CMD_WRITE)) ? 1 : 0);
+ if (cmd->unsolicited_data)
+ cmd->cmd_flags |= ICF_NON_IMMEDIATE_UNSOLICITED_DATA;
+
+ conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt;
+ if (hdr->flags & ISCSI_FLAG_CMD_READ) {
+ spin_lock_bh(&conn->sess->ttt_lock);
+ cmd->targ_xfer_tag = conn->sess->targ_xfer_tag++;
+ if (cmd->targ_xfer_tag == 0xFFFFFFFF)
+ cmd->targ_xfer_tag = conn->sess->targ_xfer_tag++;
+ spin_unlock_bh(&conn->sess->ttt_lock);
+ } else if (hdr->flags & ISCSI_FLAG_CMD_WRITE)
+ cmd->targ_xfer_tag = 0xFFFFFFFF;
+ cmd->cmd_sn = hdr->cmdsn;
+ cmd->exp_stat_sn = hdr->exp_statsn;
+ cmd->first_burst_len = payload_length;
+
+ if (cmd->data_direction == DMA_FROM_DEVICE) {
+ struct iscsi_datain_req *dr;
+
+ dr = iscsit_allocate_datain_req();
+ if (!dr)
+ return iscsit_add_reject_from_cmd(
+ ISCSI_REASON_BOOKMARK_NO_RESOURCES,
+ 1, 1, buf, cmd);
+
+ iscsit_attach_datain_req(cmd, dr);
+ }
+
+ /*
+ * The CDB is going to an se_device_t.
+ */
+ ret = iscsit_get_lun_for_cmd(cmd, hdr->cdb,
+ get_unaligned_le64(&hdr->lun));
+ if (ret < 0) {
+ if (cmd->se_cmd.scsi_sense_reason == TCM_NON_EXISTENT_LUN) {
+ pr_debug("Responding to non-acl'ed,"
+ " non-existent or non-exported iSCSI LUN:"
+ " 0x%016Lx\n", get_unaligned_le64(&hdr->lun));
+ }
+ if (ret == PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES)
+ return iscsit_add_reject_from_cmd(
+ ISCSI_REASON_BOOKMARK_NO_RESOURCES,
+ 1, 1, buf, cmd);
+
+ send_check_condition = 1;
+ goto attach_cmd;
+ }
+ /*
+ * The Initiator Node has access to the LUN (the addressing method
+ * is handled inside of iscsit_get_lun_for_cmd()). Now it's time to
+ * allocate 1->N transport tasks (depending on sector count and
+ * maximum request size the physical HBA(s) can handle).
+ */
+ transport_ret = transport_generic_allocate_tasks(&cmd->se_cmd, hdr->cdb);
+ if (transport_ret == -ENOMEM) {
+ return iscsit_add_reject_from_cmd(
+ ISCSI_REASON_BOOKMARK_NO_RESOURCES,
+ 1, 1, buf, cmd);
+ } else if (transport_ret == -EINVAL) {
+ /*
+ * Unsupported SAM Opcode. CHECK_CONDITION will be sent
+ * in iscsit_execute_cmd() during the CmdSN OOO Execution
+ * Mechanism.
+ */
+ send_check_condition = 1;
+ } else {
+ if (iscsit_decide_list_to_build(cmd, payload_length) < 0)
+ return iscsit_add_reject_from_cmd(
+ ISCSI_REASON_BOOKMARK_NO_RESOURCES,
+ 1, 1, buf, cmd);
+ }
+
+attach_cmd:
+ spin_lock_bh(&conn->cmd_lock);
+ list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
+ spin_unlock_bh(&conn->cmd_lock);
+ /*
+ * Check if we need to delay processing because of ALUA
+ * Active/NonOptimized primary access state.
+ */
+ core_alua_check_nonop_delay(&cmd->se_cmd);
+ /*
+ * Allocate and set up the SGL used with transport_generic_map_mem_to_cmd();
+ * this also calls iscsit_allocate_iovecs().
+ */
+ ret = iscsit_alloc_buffs(cmd);
+ if (ret < 0)
+ return iscsit_add_reject_from_cmd(
+ ISCSI_REASON_BOOKMARK_NO_RESOURCES,
+ 1, 1, buf, cmd);
+ /*
+ * Check the CmdSN against ExpCmdSN/MaxCmdSN here if
+ * the Immediate Bit is not set, and no Immediate
+ * Data is attached.
+ *
+ * A PDU/CmdSN carrying Immediate Data can only
+ * be processed after the DataCRC has passed.
+ * If the DataCRC fails, the CmdSN MUST NOT
+ * be acknowledged. (See below)
+ */
+ if (!cmd->immediate_data) {
+ cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn);
+ if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
+ return iscsit_add_reject_from_cmd(
+ ISCSI_REASON_PROTOCOL_ERROR,
+ 1, 0, buf, cmd);
+ }
+
+ iscsit_ack_from_expstatsn(conn, hdr->exp_statsn);
+
+ /*
+ * If no Immediate Data is attached, it's OK to return now.
+ */
+ if (!cmd->immediate_data) {
+ if (send_check_condition)
+ return 0;
+
+ if (cmd->unsolicited_data) {
+ iscsit_set_dataout_sequence_values(cmd);
+
+ spin_lock_bh(&cmd->dataout_timeout_lock);
+ iscsit_start_dataout_timer(cmd, cmd->conn);
+ spin_unlock_bh(&cmd->dataout_timeout_lock);
+ }
+
+ return 0;
+ }
+
+ /*
+ * Early CHECK_CONDITIONs never make it to the transport processing
+ * thread. They are processed in CmdSN order by
+ * iscsit_check_received_cmdsn() below.
+ */
+ if (send_check_condition) {
+ immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION;
+ dump_immediate_data = 1;
+ goto after_immediate_data;
+ }
+ /*
+ * Call directly into transport_generic_new_cmd() to perform
+ * the backend memory allocation.
+ */
+ ret = transport_generic_new_cmd(&cmd->se_cmd);
+ if ((ret < 0) || (cmd->se_cmd.se_cmd_flags & SCF_SE_CMD_FAILED)) {
+ immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION;
+ dump_immediate_data = 1;
+ goto after_immediate_data;
+ }
+
+ immed_ret = iscsit_handle_immediate_data(cmd, buf, payload_length);
+after_immediate_data:
+ if (immed_ret == IMMEDIATE_DATA_NORMAL_OPERATION) {
+ /*
+ * A PDU/CmdSN carrying Immediate Data passed
+ * DataCRC, check against ExpCmdSN/MaxCmdSN if
+ * Immediate Bit is not set.
+ */
+ cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn);
+ /*
+ * Special case for Unsupported SAM WRITE Opcodes
+ * and ImmediateData=Yes.
+ */
+ if (dump_immediate_data) {
+ if (iscsit_dump_data_payload(conn, payload_length, 1) < 0)
+ return -1;
+ } else if (cmd->unsolicited_data) {
+ iscsit_set_dataout_sequence_values(cmd);
+
+ spin_lock_bh(&cmd->dataout_timeout_lock);
+ iscsit_start_dataout_timer(cmd, cmd->conn);
+ spin_unlock_bh(&cmd->dataout_timeout_lock);
+ }
+
+ if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
+ return iscsit_add_reject_from_cmd(
+ ISCSI_REASON_PROTOCOL_ERROR,
+ 1, 0, buf, cmd);
+
+ } else if (immed_ret == IMMEDIATE_DATA_ERL1_CRC_FAILURE) {
+ /*
+ * Immediate Data failed DataCRC and ERL>=1,
+ * silently drop this PDU and let the initiator
+ * plug the CmdSN gap.
+ *
+ * FIXME: Send Unsolicited NOPIN with reserved
+ * TTT here to help the initiator figure out
+ * the missing CmdSN, although they should be
+ * intelligent enough to determine the missing
+ * CmdSN and issue a retry to plug the sequence.
+ */
+ cmd->i_state = ISTATE_REMOVE;
+ iscsit_add_cmd_to_immediate_queue(cmd, conn, cmd->i_state);
+ } else /* immed_ret == IMMEDIATE_DATA_CANNOT_RECOVER */
+ return -1;
+
+ return 0;
+}
+
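+/*
+ * Compute the CRC32C DataDigest over the scatterlist window previously set
+ * up by iscsit_map_iovec() (cmd->first_data_sg and first_data_sg_off),
+ * folding in any pad bytes at the end.
+ */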
+static u32 iscsit_do_crypto_hash_sg(
+ struct hash_desc *hash,
+ struct iscsi_cmd *cmd,
+ u32 data_offset,
+ u32 data_length,
+ u32 padding,
+ u8 *pad_bytes)
+{
+ u32 data_crc;
+ u32 i;
+ struct scatterlist *sg;
+ unsigned int page_off;
+
+ crypto_hash_init(hash);
+
+ sg = cmd->first_data_sg;
+ page_off = cmd->first_data_sg_off;
+
+ i = 0;
+ while (data_length) {
+ u32 cur_len = min_t(u32, data_length, (sg[i].length - page_off));
+
+ crypto_hash_update(hash, &sg[i], cur_len);
+
+ data_length -= cur_len;
+ page_off = 0;
+ i++;
+ }
+
+ if (padding) {
+ struct scatterlist pad_sg;
+
+ sg_init_one(&pad_sg, pad_bytes, padding);
+ crypto_hash_update(hash, &pad_sg, padding);
+ }
+ crypto_hash_final(hash, (u8 *) &data_crc);
+
+ return data_crc;
+}
+
+static void iscsit_do_crypto_hash_buf(
+ struct hash_desc *hash,
+ unsigned char *buf,
+ u32 payload_length,
+ u32 padding,
+ u8 *pad_bytes,
+ u8 *data_crc)
+{
+ struct scatterlist sg;
+
+ crypto_hash_init(hash);
+
+ sg_init_one(&sg, (u8 *)buf, payload_length);
+ crypto_hash_update(hash, &sg, payload_length);
+
+ if (padding) {
+ sg_init_one(&sg, pad_bytes, padding);
+ crypto_hash_update(hash, &sg, padding);
+ }
+ crypto_hash_final(hash, data_crc);
+}
+
+static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
+{
+ int iov_ret, ooo_cmdsn = 0, ret;
+ u8 data_crc_failed = 0;
+ u32 checksum, iov_count = 0, padding = 0, rx_got = 0;
+ u32 rx_size = 0, payload_length;
+ struct iscsi_cmd *cmd = NULL;
+ struct se_cmd *se_cmd;
+ struct iscsi_data *hdr;
+ struct kvec *iov;
+ unsigned long flags;
+
+ hdr = (struct iscsi_data *) buf;
+ payload_length = ntoh24(hdr->dlength);
+ hdr->itt = be32_to_cpu(hdr->itt);
+ hdr->ttt = be32_to_cpu(hdr->ttt);
+ hdr->exp_statsn = be32_to_cpu(hdr->exp_statsn);
+ hdr->datasn = be32_to_cpu(hdr->datasn);
+ hdr->offset = be32_to_cpu(hdr->offset);
+
+ if (!payload_length) {
+ pr_err("DataOUT payload is ZERO, protocol error.\n");
+ return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
+ buf, conn);
+ }
+
+ /* iSCSI write */
+ spin_lock_bh(&conn->sess->session_stats_lock);
+ conn->sess->rx_data_octets += payload_length;
+ if (conn->sess->se_sess->se_node_acl) {
+ spin_lock(&conn->sess->se_sess->se_node_acl->stats_lock);
+ conn->sess->se_sess->se_node_acl->write_bytes += payload_length;
+ spin_unlock(&conn->sess->se_sess->se_node_acl->stats_lock);
+ }
+ spin_unlock_bh(&conn->sess->session_stats_lock);
+
+ if (payload_length > conn->conn_ops->MaxRecvDataSegmentLength) {
+ pr_err("DataSegmentLength: %u is greater than"
+ " MaxRecvDataSegmentLength: %u\n", payload_length,
+ conn->conn_ops->MaxRecvDataSegmentLength);
+ return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
+ buf, conn);
+ }
+
+ cmd = iscsit_find_cmd_from_itt_or_dump(conn, hdr->itt,
+ payload_length);
+ if (!cmd)
+ return 0;
+
+ pr_debug("Got DataOut ITT: 0x%08x, TTT: 0x%08x,"
+ " DataSN: 0x%08x, Offset: %u, Length: %u, CID: %hu\n",
+ hdr->itt, hdr->ttt, hdr->datasn, hdr->offset,
+ payload_length, conn->cid);
+
+ if (cmd->cmd_flags & ICF_GOT_LAST_DATAOUT) {
+ pr_err("Command ITT: 0x%08x received DataOUT after"
+ " last DataOUT received, dumping payload\n",
+ cmd->init_task_tag);
+ return iscsit_dump_data_payload(conn, payload_length, 1);
+ }
+
+ if (cmd->data_direction != DMA_TO_DEVICE) {
+ pr_err("Command ITT: 0x%08x received DataOUT for a"
+ " NON-WRITE command.\n", cmd->init_task_tag);
+ return iscsit_add_reject_from_cmd(ISCSI_REASON_PROTOCOL_ERROR,
+ 1, 0, buf, cmd);
+ }
+ se_cmd = &cmd->se_cmd;
+ iscsit_mod_dataout_timer(cmd);
+
+ if ((hdr->offset + payload_length) > cmd->data_length) {
+ pr_err("DataOut Offset: %u, Length %u greater than"
+ " iSCSI Command EDTL %u, protocol error.\n",
+ hdr->offset, payload_length, cmd->data_length);
+ return iscsit_add_reject_from_cmd(ISCSI_REASON_BOOKMARK_INVALID,
+ 1, 0, buf, cmd);
+ }
+
+ if (cmd->unsolicited_data) {
+ int dump_unsolicited_data = 0;
+
+ if (conn->sess->sess_ops->InitialR2T) {
+ pr_err("Received unexpected unsolicited data"
+ " while InitialR2T=Yes, protocol error.\n");
+ transport_send_check_condition_and_sense(&cmd->se_cmd,
+ TCM_UNEXPECTED_UNSOLICITED_DATA, 0);
+ return -1;
+ }
+ /*
+ * Special case for dealing with Unsolicited DataOUT
+ * and Unsupported SAM WRITE Opcodes and SE resource allocation
+ * failures;
+ */
+
+ /* Something's amiss if we're not in WRITE_PENDING state... */
+ spin_lock_irqsave(&se_cmd->t_state_lock, flags);
+ WARN_ON(se_cmd->t_state != TRANSPORT_WRITE_PENDING);
+ spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
+
+ spin_lock_irqsave(&se_cmd->t_state_lock, flags);
+ if (!(se_cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) ||
+ (se_cmd->se_cmd_flags & SCF_SE_CMD_FAILED))
+ dump_unsolicited_data = 1;
+ spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
+
+ if (dump_unsolicited_data) {
+ /*
+ * Check if a delayed TASK_ABORTED status needs to
+ * be sent now if the ISCSI_FLAG_CMD_FINAL has been
+ * received with the unsolicited data out.
+ */
+ if (hdr->flags & ISCSI_FLAG_CMD_FINAL)
+ iscsit_stop_dataout_timer(cmd);
+
+ transport_check_aborted_status(se_cmd,
+ (hdr->flags & ISCSI_FLAG_CMD_FINAL));
+ return iscsit_dump_data_payload(conn, payload_length, 1);
+ }
+ } else {
+ /*
+ * For the normal solicited data path:
+ *
+ * Check for a delayed TASK_ABORTED status and dump any
+ * incoming data out payload if one exists. Also, when the
+ * ISCSI_FLAG_CMD_FINAL is set to denote the end of the current
+ * data out sequence, we decrement outstanding_r2ts. Once
+ * outstanding_r2ts reaches zero, go ahead and send the delayed
+ * TASK_ABORTED status.
+ */
+ if (atomic_read(&se_cmd->t_transport_aborted) != 0) {
+ if (hdr->flags & ISCSI_FLAG_CMD_FINAL)
+ if (--cmd->outstanding_r2ts < 1) {
+ iscsit_stop_dataout_timer(cmd);
+ transport_check_aborted_status(
+ se_cmd, 1);
+ }
+
+ return iscsit_dump_data_payload(conn, payload_length, 1);
+ }
+ }
+ /*
+ * Perform DataSN, DataSequenceInOrder, DataPDUInOrder, and
+ * within-command recovery checks before receiving the payload.
+ */
+ ret = iscsit_check_pre_dataout(cmd, buf);
+ if (ret == DATAOUT_WITHIN_COMMAND_RECOVERY)
+ return 0;
+ else if (ret == DATAOUT_CANNOT_RECOVER)
+ return -1;
+
+ rx_size += payload_length;
+ iov = &cmd->iov_data[0];
+
+ iov_ret = iscsit_map_iovec(cmd, iov, hdr->offset, payload_length);
+ if (iov_ret < 0)
+ return -1;
+
+ iov_count += iov_ret;
+
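+ /* iSCSI PDUs are padded to a 4-byte boundary; ((-len) & 3) yields the pad size. */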
+ padding = ((-payload_length) & 3);
+ if (padding != 0) {
+ iov[iov_count].iov_base = cmd->pad_bytes;
+ iov[iov_count++].iov_len = padding;
+ rx_size += padding;
+ pr_debug("Receiving %u padding bytes.\n", padding);
+ }
+
+ if (conn->conn_ops->DataDigest) {
+ iov[iov_count].iov_base = &checksum;
+ iov[iov_count++].iov_len = ISCSI_CRC_LEN;
+ rx_size += ISCSI_CRC_LEN;
+ }
+
+ rx_got = rx_data(conn, &cmd->iov_data[0], iov_count, rx_size);
+
+ iscsit_unmap_iovec(cmd);
+
+ if (rx_got != rx_size)
+ return -1;
+
+ if (conn->conn_ops->DataDigest) {
+ u32 data_crc;
+
+ data_crc = iscsit_do_crypto_hash_sg(&conn->conn_rx_hash, cmd,
+ hdr->offset, payload_length, padding,
+ cmd->pad_bytes);
+
+ if (checksum != data_crc) {
+ pr_err("ITT: 0x%08x, Offset: %u, Length: %u,"
+ " DataSN: 0x%08x, CRC32C DataDigest 0x%08x"
+ " does not match computed 0x%08x\n",
+ hdr->itt, hdr->offset, payload_length,
+ hdr->datasn, checksum, data_crc);
+ data_crc_failed = 1;
+ } else {
+ pr_debug("Got CRC32C DataDigest 0x%08x for"
+ " %u bytes of Data Out\n", checksum,
+ payload_length);
+ }
+ }
+ /*
+ * Increment post receive data and CRC values or perform
+ * within-command recovery.
+ */
+ ret = iscsit_check_post_dataout(cmd, buf, data_crc_failed);
+ if ((ret == DATAOUT_NORMAL) || (ret == DATAOUT_WITHIN_COMMAND_RECOVERY))
+ return 0;
+ else if (ret == DATAOUT_SEND_R2T) {
+ iscsit_set_dataout_sequence_values(cmd);
+ iscsit_build_r2ts_for_cmd(cmd, conn, 0);
+ } else if (ret == DATAOUT_SEND_TO_TRANSPORT) {
+ /*
+ * Handle extra special case for out of order
+ * Unsolicited Data Out.
+ */
+ spin_lock_bh(&cmd->istate_lock);
+ ooo_cmdsn = (cmd->cmd_flags & ICF_OOO_CMDSN);
+ cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
+ cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
+ spin_unlock_bh(&cmd->istate_lock);
+
+ iscsit_stop_dataout_timer(cmd);
+ return (!ooo_cmdsn) ? transport_generic_handle_data(
+ &cmd->se_cmd) : 0;
+ } else /* DATAOUT_CANNOT_RECOVER */
+ return -1;
+
+ return 0;
+}
+
+static int iscsit_handle_nop_out(
+ struct iscsi_conn *conn,
+ unsigned char *buf)
+{
+ unsigned char *ping_data = NULL;
+ int cmdsn_ret, niov = 0, ret = 0, rx_got, rx_size;
+ u32 checksum, data_crc, padding = 0, payload_length;
+ u64 lun;
+ struct iscsi_cmd *cmd = NULL;
+ struct kvec *iov = NULL;
+ struct iscsi_nopout *hdr;
+
+ hdr = (struct iscsi_nopout *) buf;
+ payload_length = ntoh24(hdr->dlength);
+ lun = get_unaligned_le64(&hdr->lun);
+ hdr->itt = be32_to_cpu(hdr->itt);
+ hdr->ttt = be32_to_cpu(hdr->ttt);
+ hdr->cmdsn = be32_to_cpu(hdr->cmdsn);
+ hdr->exp_statsn = be32_to_cpu(hdr->exp_statsn);
+
+ if ((hdr->itt == 0xFFFFFFFF) && !(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
+ pr_err("NOPOUT ITT is reserved, but Immediate Bit is"
+ " not set, protocol error.\n");
+ return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
+ buf, conn);
+ }
+
+ if (payload_length > conn->conn_ops->MaxRecvDataSegmentLength) {
+ pr_err("NOPOUT Ping Data DataSegmentLength: %u is"
+ " greater than MaxRecvDataSegmentLength: %u, protocol"
+ " error.\n", payload_length,
+ conn->conn_ops->MaxRecvDataSegmentLength);
+ return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
+ buf, conn);
+ }
+
+ pr_debug("Got NOPOUT Ping %s ITT: 0x%08x, TTT: 0x%09x,"
+ " CmdSN: 0x%08x, ExpStatSN: 0x%08x, Length: %u\n",
+ (hdr->itt == 0xFFFFFFFF) ? "Response" : "Request",
+ hdr->itt, hdr->ttt, hdr->cmdsn, hdr->exp_statsn,
+ payload_length);
+ /*
+ * This is not a response to an Unsolicited NopIN, which means
+ * it can either be a NOPOUT ping request (with a valid ITT),
+ * or a NOPOUT not requesting a NOPIN (with a reserved ITT).
+ * Either way, make sure we allocate a struct iscsi_cmd, as both
+ * can contain ping data.
+ */
+ if (hdr->ttt == 0xFFFFFFFF) {
+ cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
+ if (!cmd)
+ return iscsit_add_reject(
+ ISCSI_REASON_BOOKMARK_NO_RESOURCES,
+ 1, buf, conn);
+
+ cmd->iscsi_opcode = ISCSI_OP_NOOP_OUT;
+ cmd->i_state = ISTATE_SEND_NOPIN;
+ cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ?
+ 1 : 0);
+ conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt;
+ cmd->targ_xfer_tag = 0xFFFFFFFF;
+ cmd->cmd_sn = hdr->cmdsn;
+ cmd->exp_stat_sn = hdr->exp_statsn;
+ cmd->data_direction = DMA_NONE;
+ }
+
+ if (payload_length && (hdr->ttt == 0xFFFFFFFF)) {
+ rx_size = payload_length;
+ ping_data = kzalloc(payload_length + 1, GFP_KERNEL);
+ if (!ping_data) {
+ pr_err("Unable to allocate memory for"
+ " NOPOUT ping data.\n");
+ ret = -1;
+ goto out;
+ }
+
+ iov = &cmd->iov_misc[0];
+ iov[niov].iov_base = ping_data;
+ iov[niov++].iov_len = payload_length;
+
+ padding = ((-payload_length) & 3);
+ if (padding != 0) {
+ pr_debug("Receiving %u additional bytes"
+ " for padding.\n", padding);
+ iov[niov].iov_base = &cmd->pad_bytes;
+ iov[niov++].iov_len = padding;
+ rx_size += padding;
+ }
+ if (conn->conn_ops->DataDigest) {
+ iov[niov].iov_base = &checksum;
+ iov[niov++].iov_len = ISCSI_CRC_LEN;
+ rx_size += ISCSI_CRC_LEN;
+ }
+
+ rx_got = rx_data(conn, &cmd->iov_misc[0], niov, rx_size);
+ if (rx_got != rx_size) {
+ ret = -1;
+ goto out;
+ }
+
+ if (conn->conn_ops->DataDigest) {
+ iscsit_do_crypto_hash_buf(&conn->conn_rx_hash,
+ ping_data, payload_length,
+ padding, cmd->pad_bytes,
+ (u8 *)&data_crc);
+
+ if (checksum != data_crc) {
+ pr_err("Ping data CRC32C DataDigest"
+ " 0x%08x does not match computed 0x%08x\n",
+ checksum, data_crc);
+ if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
+ pr_err("Unable to recover from"
+ " NOPOUT Ping DataCRC failure while in"
+ " ERL=0.\n");
+ ret = -1;
+ goto out;
+ } else {
+ /*
+ * Silently drop this PDU and let the
+ * initiator plug the CmdSN gap.
+ */
+ pr_debug("Dropping NOPOUT"
+ " Command CmdSN: 0x%08x due to"
+ " DataCRC error.\n", hdr->cmdsn);
+ ret = 0;
+ goto out;
+ }
+ } else {
+ pr_debug("Got CRC32C DataDigest"
+ " 0x%08x for %u bytes of ping data.\n",
+ checksum, payload_length);
+ }
+ }
+
+ ping_data[payload_length] = '\0';
+ /*
+ * Attach ping data to struct iscsi_cmd->buf_ptr.
+ */
+ cmd->buf_ptr = (void *)ping_data;
+ cmd->buf_ptr_size = payload_length;
+
+ pr_debug("Got %u bytes of NOPOUT ping"
+ " data.\n", payload_length);
+ pr_debug("Ping Data: \"%s\"\n", ping_data);
+ }
+
+ if (hdr->itt != 0xFFFFFFFF) {
+ if (!cmd) {
+ pr_err("Checking CmdSN for NOPOUT,"
+ " but cmd is NULL!\n");
+ return -1;
+ }
+ /*
+ * Initiator is expecting a NopIN ping reply.
+ */
+ spin_lock_bh(&conn->cmd_lock);
+ list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
+ spin_unlock_bh(&conn->cmd_lock);
+
+ iscsit_ack_from_expstatsn(conn, hdr->exp_statsn);
+
+ if (hdr->opcode & ISCSI_OP_IMMEDIATE) {
+ iscsit_add_cmd_to_response_queue(cmd, conn,
+ cmd->i_state);
+ return 0;
+ }
+
+ cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn);
+ if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
+ ret = 0;
+ goto ping_out;
+ }
+ if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
+ return iscsit_add_reject_from_cmd(
+ ISCSI_REASON_PROTOCOL_ERROR,
+ 1, 0, buf, cmd);
+
+ return 0;
+ }
+
+ if (hdr->ttt != 0xFFFFFFFF) {
+ /*
+ * This was a response to an unsolicited NOPIN ping.
+ */
+ cmd = iscsit_find_cmd_from_ttt(conn, hdr->ttt);
+ if (!cmd)
+ return -1;
+
+ iscsit_stop_nopin_response_timer(conn);
+
+ cmd->i_state = ISTATE_REMOVE;
+ iscsit_add_cmd_to_immediate_queue(cmd, conn, cmd->i_state);
+ iscsit_start_nopin_timer(conn);
+ } else {
+ /*
+ * Initiator is not expecting a NOPIN in response.
+ * Just ignore for now.
+ *
+ * iSCSI v19-91 10.18
+ * "A NOP-OUT may also be used to confirm a changed
+ * ExpStatSN if another PDU will not be available
+ * for a long time."
+ */
+ ret = 0;
+ goto out;
+ }
+
+ return 0;
+out:
+ if (cmd)
+ iscsit_release_cmd(cmd);
+ping_out:
+ kfree(ping_data);
+ return ret;
+}
+
+static int iscsit_handle_task_mgt_cmd(
+ struct iscsi_conn *conn,
+ unsigned char *buf)
+{
+ struct iscsi_cmd *cmd;
+ struct se_tmr_req *se_tmr;
+ struct iscsi_tmr_req *tmr_req;
+ struct iscsi_tm *hdr;
+ u32 payload_length;
+ int out_of_order_cmdsn = 0;
+ int ret;
+ u8 function;
+
+ hdr = (struct iscsi_tm *) buf;
+ payload_length = ntoh24(hdr->dlength);
+ hdr->itt = be32_to_cpu(hdr->itt);
+ hdr->rtt = be32_to_cpu(hdr->rtt);
+ hdr->cmdsn = be32_to_cpu(hdr->cmdsn);
+ hdr->exp_statsn = be32_to_cpu(hdr->exp_statsn);
+ hdr->refcmdsn = be32_to_cpu(hdr->refcmdsn);
+ hdr->exp_datasn = be32_to_cpu(hdr->exp_datasn);
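+ /*
+ * The TMR function code is carried in the low bits of the flags byte,
+ * once the ISCSI_FLAG_CMD_FINAL bit has been masked off below.
+ */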
+ hdr->flags &= ~ISCSI_FLAG_CMD_FINAL;
+ function = hdr->flags;
+
+ pr_debug("Got Task Management Request ITT: 0x%08x, CmdSN:"
+ " 0x%08x, Function: 0x%02x, RefTaskTag: 0x%08x, RefCmdSN:"
+ " 0x%08x, CID: %hu\n", hdr->itt, hdr->cmdsn, function,
+ hdr->rtt, hdr->refcmdsn, conn->cid);
+
+ if ((function != ISCSI_TM_FUNC_ABORT_TASK) &&
+ ((function != ISCSI_TM_FUNC_TASK_REASSIGN) &&
+ (hdr->rtt != ISCSI_RESERVED_TAG))) {
+ pr_err("RefTaskTag should be set to 0xFFFFFFFF.\n");
+ hdr->rtt = ISCSI_RESERVED_TAG;
+ }
+
+ if ((function == ISCSI_TM_FUNC_TASK_REASSIGN) &&
+ !(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
+ pr_err("Task Management Request TASK_REASSIGN not"
+ " issued as immediate command, bad iSCSI Initiator"
+ "implementation\n");
+ return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
+ buf, conn);
+ }
+ if ((function != ISCSI_TM_FUNC_ABORT_TASK) &&
+ (hdr->refcmdsn != ISCSI_RESERVED_TAG))
+ hdr->refcmdsn = ISCSI_RESERVED_TAG;
+
+ cmd = iscsit_allocate_se_cmd_for_tmr(conn, function);
+ if (!cmd)
+ return iscsit_add_reject(ISCSI_REASON_BOOKMARK_NO_RESOURCES,
+ 1, buf, conn);
+
+ cmd->iscsi_opcode = ISCSI_OP_SCSI_TMFUNC;
+ cmd->i_state = ISTATE_SEND_TASKMGTRSP;
+ cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0);
+ cmd->init_task_tag = hdr->itt;
+ cmd->targ_xfer_tag = 0xFFFFFFFF;
+ cmd->cmd_sn = hdr->cmdsn;
+ cmd->exp_stat_sn = hdr->exp_statsn;
+ se_tmr = cmd->se_cmd.se_tmr_req;
+ tmr_req = cmd->tmr_req;
+ /*
+ * Locate the struct se_lun for all TMRs not related to ERL=2 TASK_REASSIGN
+ */
+ if (function != ISCSI_TM_FUNC_TASK_REASSIGN) {
+ ret = iscsit_get_lun_for_tmr(cmd,
+ get_unaligned_le64(&hdr->lun));
+ if (ret < 0) {
+ cmd->se_cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ se_tmr->response = ISCSI_TMF_RSP_NO_LUN;
+ goto attach;
+ }
+ }
+
+ switch (function) {
+ case ISCSI_TM_FUNC_ABORT_TASK:
+ se_tmr->response = iscsit_tmr_abort_task(cmd, buf);
+ if (se_tmr->response != ISCSI_TMF_RSP_COMPLETE) {
+ cmd->se_cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ goto attach;
+ }
+ break;
+ case ISCSI_TM_FUNC_ABORT_TASK_SET:
+ case ISCSI_TM_FUNC_CLEAR_ACA:
+ case ISCSI_TM_FUNC_CLEAR_TASK_SET:
+ case ISCSI_TM_FUNC_LOGICAL_UNIT_RESET:
+ break;
+ case ISCSI_TM_FUNC_TARGET_WARM_RESET:
+ if (iscsit_tmr_task_warm_reset(conn, tmr_req, buf) < 0) {
+ cmd->se_cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ se_tmr->response = ISCSI_TMF_RSP_AUTH_FAILED;
+ goto attach;
+ }
+ break;
+ case ISCSI_TM_FUNC_TARGET_COLD_RESET:
+ if (iscsit_tmr_task_cold_reset(conn, tmr_req, buf) < 0) {
+ cmd->se_cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ se_tmr->response = ISCSI_TMF_RSP_AUTH_FAILED;
+ goto attach;
+ }
+ break;
+ case ISCSI_TM_FUNC_TASK_REASSIGN:
+ se_tmr->response = iscsit_tmr_task_reassign(cmd, buf);
+ /*
+ * Perform sanity checks on the ExpDataSN only if the
+ * TASK_REASSIGN was successful.
+ */
+ if (se_tmr->response != ISCSI_TMF_RSP_COMPLETE)
+ break;
+
+ if (iscsit_check_task_reassign_expdatasn(tmr_req, conn) < 0)
+ return iscsit_add_reject_from_cmd(
+ ISCSI_REASON_BOOKMARK_INVALID, 1, 1,
+ buf, cmd);
+ break;
+ default:
+ pr_err("Unknown TMR function: 0x%02x, protocol"
+ " error.\n", function);
+ cmd->se_cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ se_tmr->response = ISCSI_TMF_RSP_NOT_SUPPORTED;
+ goto attach;
+ }
+
+ if ((function != ISCSI_TM_FUNC_TASK_REASSIGN) &&
+ (se_tmr->response == ISCSI_TMF_RSP_COMPLETE))
+ se_tmr->call_transport = 1;
+attach:
+ spin_lock_bh(&conn->cmd_lock);
+ list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
+ spin_unlock_bh(&conn->cmd_lock);
+
+ if (!(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
+ int cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn);
+ if (cmdsn_ret == CMDSN_HIGHER_THAN_EXP)
+ out_of_order_cmdsn = 1;
+ else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
+ return 0;
+ } else { /* (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) */
+ return iscsit_add_reject_from_cmd(
+ ISCSI_REASON_PROTOCOL_ERROR,
+ 1, 0, buf, cmd);
+ }
+ }
+ iscsit_ack_from_expstatsn(conn, hdr->exp_statsn);
+
+ if (out_of_order_cmdsn)
+ return 0;
+ /*
+ * Found the referenced task, send to transport for processing.
+ */
+ if (se_tmr->call_transport)
+ return transport_generic_handle_tmr(&cmd->se_cmd);
+
+ /*
+ * Could not find the referenced LUN, task, or Task Management
+ * command not authorized or supported. Change state and
+ * let the tx_thread send the response.
+ *
+ * For connection recovery, this is also the default action for
+ * TMR TASK_REASSIGN.
+ */
+ iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
+ return 0;
+}
+
+/* #warning FIXME: Support Text Command parameters besides SendTargets */
+static int iscsit_handle_text_cmd(
+ struct iscsi_conn *conn,
+ unsigned char *buf)
+{
+ char *text_ptr, *text_in;
+ int cmdsn_ret, niov = 0, rx_got, rx_size;
+ u32 checksum = 0, data_crc = 0, payload_length;
+ u32 padding = 0, pad_bytes = 0, text_length = 0;
+ struct iscsi_cmd *cmd;
+ struct kvec iov[3];
+ struct iscsi_text *hdr;
+
+ hdr = (struct iscsi_text *) buf;
+ payload_length = ntoh24(hdr->dlength);
+ hdr->itt = be32_to_cpu(hdr->itt);
+ hdr->ttt = be32_to_cpu(hdr->ttt);
+ hdr->cmdsn = be32_to_cpu(hdr->cmdsn);
+ hdr->exp_statsn = be32_to_cpu(hdr->exp_statsn);
+
+ if (payload_length > conn->conn_ops->MaxRecvDataSegmentLength) {
+ pr_err("Unable to accept text parameter length: %u"
+ "greater than MaxRecvDataSegmentLength %u.\n",
+ payload_length, conn->conn_ops->MaxRecvDataSegmentLength);
+ return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
+ buf, conn);
+ }
+
+ pr_debug("Got Text Request: ITT: 0x%08x, CmdSN: 0x%08x,"
+ " ExpStatSN: 0x%08x, Length: %u\n", hdr->itt, hdr->cmdsn,
+ hdr->exp_statsn, payload_length);
+
+ rx_size = text_length = payload_length;
+ if (text_length) {
+ text_in = kzalloc(text_length, GFP_KERNEL);
+ if (!text_in) {
+ pr_err("Unable to allocate memory for"
+ " incoming text parameters\n");
+ return -1;
+ }
+
+ memset(iov, 0, 3 * sizeof(struct kvec));
+ iov[niov].iov_base = text_in;
+ iov[niov++].iov_len = text_length;
+
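+ /*
+ * Data segments are padded to a 4-byte boundary per RFC 3720;
+ * ((-len) & 3) yields the pad size, e.g. len = 5 -> 3 pad bytes.
+ */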
+ padding = ((-payload_length) & 3);
+ if (padding != 0) {
+ iov[niov].iov_base = &pad_bytes;
+ iov[niov++].iov_len = padding;
+ rx_size += padding;
+ pr_debug("Receiving %u additional bytes"
+ " for padding.\n", padding);
+ }
+ if (conn->conn_ops->DataDigest) {
+ iov[niov].iov_base = &checksum;
+ iov[niov++].iov_len = ISCSI_CRC_LEN;
+ rx_size += ISCSI_CRC_LEN;
+ }
+
+ rx_got = rx_data(conn, &iov[0], niov, rx_size);
+ if (rx_got != rx_size) {
+ kfree(text_in);
+ return -1;
+ }
+
+ if (conn->conn_ops->DataDigest) {
+ iscsit_do_crypto_hash_buf(&conn->conn_rx_hash,
+ text_in, text_length,
+ padding, (u8 *)&pad_bytes,
+ (u8 *)&data_crc);
+
+ if (checksum != data_crc) {
+ pr_err("Text data CRC32C DataDigest"
+ " 0x%08x does not match computed"
+ " 0x%08x\n", checksum, data_crc);
+ if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
+ pr_err("Unable to recover from"
+ " Text Data digest failure while in"
+ " ERL=0.\n");
+ kfree(text_in);
+ return -1;
+ } else {
+ /*
+ * Silently drop this PDU and let the
+ * initiator plug the CmdSN gap.
+ */
+ pr_debug("Dropping Text"
+ " Command CmdSN: 0x%08x due to"
+ " DataCRC error.\n", hdr->cmdsn);
+ kfree(text_in);
+ return 0;
+ }
+ } else {
+ pr_debug("Got CRC32C DataDigest"
+ " 0x%08x for %u bytes of text data.\n",
+ checksum, text_length);
+ }
+ }
+ text_in[text_length - 1] = '\0';
+ pr_debug("Successfully read %d bytes of text"
+ " data.\n", text_length);
+
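+ /*
+ * Only the SendTargets=All form is handled at this point; see
+ * the #warning below for single target name values.
+ */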
+ if (strncmp("SendTargets", text_in, 11) != 0) {
+ pr_err("Received Text Data that is not"
+ " SendTargets, cannot continue.\n");
+ kfree(text_in);
+ return -1;
+ }
+ text_ptr = strchr(text_in, '=');
+ if (!text_ptr) {
+ pr_err("No \"=\" separator found in Text Data,"
+ " cannot continue.\n");
+ kfree(text_in);
+ return -1;
+ }
+ if (strncmp("=All", text_ptr, 4) != 0) {
+ pr_err("Unable to locate All value for"
+ " SendTargets key, cannot continue.\n");
+ kfree(text_in);
+ return -1;
+ }
+/* #warning Support SendTargets=(iSCSI Target Name/Nothing) values. */
+ kfree(text_in);
+ }
+
+ cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
+ if (!cmd)
+ return iscsit_add_reject(ISCSI_REASON_BOOKMARK_NO_RESOURCES,
+ 1, buf, conn);
+
+ cmd->iscsi_opcode = ISCSI_OP_TEXT;
+ cmd->i_state = ISTATE_SEND_TEXTRSP;
+ cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0);
+ conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt;
+ cmd->targ_xfer_tag = 0xFFFFFFFF;
+ cmd->cmd_sn = hdr->cmdsn;
+ cmd->exp_stat_sn = hdr->exp_statsn;
+ cmd->data_direction = DMA_NONE;
+
+ spin_lock_bh(&conn->cmd_lock);
+ list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
+ spin_unlock_bh(&conn->cmd_lock);
+
+ iscsit_ack_from_expstatsn(conn, hdr->exp_statsn);
+
+ if (!(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
+ cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn);
+ if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
+ return iscsit_add_reject_from_cmd(
+ ISCSI_REASON_PROTOCOL_ERROR,
+ 1, 0, buf, cmd);
+
+ return 0;
+ }
+
+ return iscsit_execute_cmd(cmd, 0);
+}
+
+int iscsit_logout_closesession(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
+{
+ struct iscsi_conn *conn_p;
+ struct iscsi_session *sess = conn->sess;
+
+ pr_debug("Received logout request CLOSESESSION on CID: %hu"
+ " for SID: %u.\n", conn->cid, conn->sess->sid);
+
+ atomic_set(&sess->session_logout, 1);
+ atomic_set(&conn->conn_logout_remove, 1);
+ conn->conn_logout_reason = ISCSI_LOGOUT_REASON_CLOSE_SESSION;
+
+ iscsit_inc_conn_usage_count(conn);
+ iscsit_inc_session_usage_count(sess);
+
+ spin_lock_bh(&sess->conn_lock);
+ list_for_each_entry(conn_p, &sess->sess_conn_list, conn_list) {
+ if (conn_p->conn_state != TARG_CONN_STATE_LOGGED_IN)
+ continue;
+
+ pr_debug("Moving to TARG_CONN_STATE_IN_LOGOUT.\n");
+ conn_p->conn_state = TARG_CONN_STATE_IN_LOGOUT;
+ }
+ spin_unlock_bh(&sess->conn_lock);
+
+ iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
+
+ return 0;
+}
+
+int iscsit_logout_closeconnection(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
+{
+ struct iscsi_conn *l_conn;
+ struct iscsi_session *sess = conn->sess;
+
+ pr_debug("Received logout request CLOSECONNECTION for CID:"
+ " %hu on CID: %hu.\n", cmd->logout_cid, conn->cid);
+
+ /*
+ * A Logout Request with a CLOSECONNECTION reason code for a CID
+ * can arrive on a connection with a differing CID.
+ */
+ if (conn->cid == cmd->logout_cid) {
+ spin_lock_bh(&conn->state_lock);
+ pr_debug("Moving to TARG_CONN_STATE_IN_LOGOUT.\n");
+ conn->conn_state = TARG_CONN_STATE_IN_LOGOUT;
+
+ atomic_set(&conn->conn_logout_remove, 1);
+ conn->conn_logout_reason = ISCSI_LOGOUT_REASON_CLOSE_CONNECTION;
+ iscsit_inc_conn_usage_count(conn);
+
+ spin_unlock_bh(&conn->state_lock);
+ } else {
+ /*
+ * Handle all different cid CLOSECONNECTION requests in
+ * iscsit_logout_post_handler_diffcid() so as to give enough
+ * time for any non-immediate command's CmdSN to be
+ * acknowledged on the connection in question.
+ *
+ * Here we simply make sure the CID is still around.
+ */
+ l_conn = iscsit_get_conn_from_cid(sess,
+ cmd->logout_cid);
+ if (!l_conn) {
+ cmd->logout_response = ISCSI_LOGOUT_CID_NOT_FOUND;
+ iscsit_add_cmd_to_response_queue(cmd, conn,
+ cmd->i_state);
+ return 0;
+ }
+
+ iscsit_dec_conn_usage_count(l_conn);
+ }
+
+ iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
+
+ return 0;
+}
+
+int iscsit_logout_removeconnforrecovery(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
+{
+ struct iscsi_session *sess = conn->sess;
+
+ pr_debug("Received explicit REMOVECONNFORRECOVERY logout for"
+ " CID: %hu on CID: %hu.\n", cmd->logout_cid, conn->cid);
+
+ if (sess->sess_ops->ErrorRecoveryLevel != 2) {
+ pr_err("Received Logout Request REMOVECONNFORRECOVERY"
+ " while ERL!=2.\n");
+ cmd->logout_response = ISCSI_LOGOUT_RECOVERY_UNSUPPORTED;
+ iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
+ return 0;
+ }
+
+ if (conn->cid == cmd->logout_cid) {
+ pr_err("Received Logout Request REMOVECONNFORRECOVERY"
+ " with CID: %hu on CID: %hu, implementation error.\n",
+ cmd->logout_cid, conn->cid);
+ cmd->logout_response = ISCSI_LOGOUT_CLEANUP_FAILED;
+ iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
+ return 0;
+ }
+
+ iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
+
+ return 0;
+}
+
+static int iscsit_handle_logout_cmd(
+ struct iscsi_conn *conn,
+ unsigned char *buf)
+{
+ int cmdsn_ret, logout_remove = 0;
+ u8 reason_code = 0;
+ struct iscsi_cmd *cmd;
+ struct iscsi_logout *hdr;
+ struct iscsi_tiqn *tiqn = iscsit_snmp_get_tiqn(conn);
+
+ hdr = (struct iscsi_logout *) buf;
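+ /* The low 7 bits of the Logout Request flags carry the reason code. */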
+ reason_code = (hdr->flags & 0x7f);
+ hdr->itt = be32_to_cpu(hdr->itt);
+ hdr->cid = be16_to_cpu(hdr->cid);
+ hdr->cmdsn = be32_to_cpu(hdr->cmdsn);
+ hdr->exp_statsn = be32_to_cpu(hdr->exp_statsn);
+
+ if (tiqn) {
+ spin_lock(&tiqn->logout_stats.lock);
+ if (reason_code == ISCSI_LOGOUT_REASON_CLOSE_SESSION)
+ tiqn->logout_stats.normal_logouts++;
+ else
+ tiqn->logout_stats.abnormal_logouts++;
+ spin_unlock(&tiqn->logout_stats.lock);
+ }
+
+ pr_debug("Got Logout Request ITT: 0x%08x CmdSN: 0x%08x"
+ " ExpStatSN: 0x%08x Reason: 0x%02x CID: %hu on CID: %hu\n",
+ hdr->itt, hdr->cmdsn, hdr->exp_statsn, reason_code,
+ hdr->cid, conn->cid);
+
+ if (conn->conn_state != TARG_CONN_STATE_LOGGED_IN) {
+ pr_err("Received logout request on connection that"
+ " is not in logged in state, ignoring request.\n");
+ return 0;
+ }
+
+ cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
+ if (!cmd)
+ return iscsit_add_reject(ISCSI_REASON_BOOKMARK_NO_RESOURCES, 1,
+ buf, conn);
+
+ cmd->iscsi_opcode = ISCSI_OP_LOGOUT;
+ cmd->i_state = ISTATE_SEND_LOGOUTRSP;
+ cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0);
+ conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt;
+ cmd->targ_xfer_tag = 0xFFFFFFFF;
+ cmd->cmd_sn = hdr->cmdsn;
+ cmd->exp_stat_sn = hdr->exp_statsn;
+ cmd->logout_cid = hdr->cid;
+ cmd->logout_reason = reason_code;
+ cmd->data_direction = DMA_NONE;
+
+ /*
+ * We need to sleep in these cases (by returning 1) until the Logout
+ * Response gets sent in the tx thread.
+ */
+ if ((reason_code == ISCSI_LOGOUT_REASON_CLOSE_SESSION) ||
+ ((reason_code == ISCSI_LOGOUT_REASON_CLOSE_CONNECTION) &&
+ (hdr->cid == conn->cid)))
+ logout_remove = 1;
+
+ spin_lock_bh(&conn->cmd_lock);
+ list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
+ spin_unlock_bh(&conn->cmd_lock);
+
+ if (reason_code != ISCSI_LOGOUT_REASON_RECOVERY)
+ iscsit_ack_from_expstatsn(conn, hdr->exp_statsn);
+
+ /*
+ * Immediate commands are executed, well, immediately.
+ * Non-Immediate Logout Commands are executed in CmdSN order.
+ */
+ if (hdr->opcode & ISCSI_OP_IMMEDIATE) {
+ int ret = iscsit_execute_cmd(cmd, 0);
+
+ if (ret < 0)
+ return ret;
+ } else {
+ cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn);
+ if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
+ logout_remove = 0;
+ } else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) {
+ return iscsit_add_reject_from_cmd(
+ ISCSI_REASON_PROTOCOL_ERROR,
+ 1, 0, buf, cmd);
+ }
+ }
+
+ return logout_remove;
+}
+
+static int iscsit_handle_snack(
+ struct iscsi_conn *conn,
+ unsigned char *buf)
+{
+ u32 unpacked_lun;
+ u64 lun;
+ struct iscsi_snack *hdr;
+
+ hdr = (struct iscsi_snack *) buf;
+ hdr->flags &= ~ISCSI_FLAG_CMD_FINAL;
+ lun = get_unaligned_le64(&hdr->lun);
+ unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
+ hdr->itt = be32_to_cpu(hdr->itt);
+ hdr->ttt = be32_to_cpu(hdr->ttt);
+ hdr->exp_statsn = be32_to_cpu(hdr->exp_statsn);
+ hdr->begrun = be32_to_cpu(hdr->begrun);
+ hdr->runlength = be32_to_cpu(hdr->runlength);
+
+ pr_debug("Got ISCSI_INIT_SNACK, ITT: 0x%08x, ExpStatSN:"
+ " 0x%08x, Type: 0x%02x, BegRun: 0x%08x, RunLength: 0x%08x,"
+ " CID: %hu\n", hdr->itt, hdr->exp_statsn, hdr->flags,
+ hdr->begrun, hdr->runlength, conn->cid);
+
+ if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
+ pr_err("Initiator sent SNACK request while in"
+ " ErrorRecoveryLevel=0.\n");
+ return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
+ buf, conn);
+ }
+ /*
+ * SNACK_DATA and SNACK_R2T are both 0, so check which function to
+ * call from inside iscsi_send_recovery_datain_or_r2t().
+ */
+ switch (hdr->flags & ISCSI_FLAG_SNACK_TYPE_MASK) {
+ case 0:
+ return iscsit_handle_recovery_datain_or_r2t(conn, buf,
+ hdr->itt, hdr->ttt, hdr->begrun, hdr->runlength);
+ case ISCSI_FLAG_SNACK_TYPE_STATUS:
+ return iscsit_handle_status_snack(conn, hdr->itt, hdr->ttt,
+ hdr->begrun, hdr->runlength);
+ case ISCSI_FLAG_SNACK_TYPE_DATA_ACK:
+ return iscsit_handle_data_ack(conn, hdr->ttt, hdr->begrun,
+ hdr->runlength);
+ case ISCSI_FLAG_SNACK_TYPE_RDATA:
+ /* FIXME: Support R-Data SNACK */
+ pr_err("R-Data SNACK Not Supported.\n");
+ return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
+ buf, conn);
+ default:
+ pr_err("Unknown SNACK type 0x%02x, protocol"
+ " error.\n", hdr->flags & 0x0f);
+ return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
+ buf, conn);
+ }
+
+ return 0;
+}
+
+static void iscsit_rx_thread_wait_for_tcp(struct iscsi_conn *conn)
+{
+ if ((conn->sock->sk->sk_shutdown & SEND_SHUTDOWN) ||
+ (conn->sock->sk->sk_shutdown & RCV_SHUTDOWN)) {
+ wait_for_completion_interruptible_timeout(
+ &conn->rx_half_close_comp,
+ ISCSI_RX_THREAD_TCP_TIMEOUT * HZ);
+ }
+}
+
+static int iscsit_handle_immediate_data(
+ struct iscsi_cmd *cmd,
+ unsigned char *buf,
+ u32 length)
+{
+ int iov_ret, rx_got = 0, rx_size = 0;
+ u32 checksum, iov_count = 0, padding = 0;
+ struct iscsi_conn *conn = cmd->conn;
+ struct kvec *iov;
+
+ iov_ret = iscsit_map_iovec(cmd, cmd->iov_data, cmd->write_data_done, length);
+ if (iov_ret < 0)
+ return IMMEDIATE_DATA_CANNOT_RECOVER;
+
+ rx_size = length;
+ iov_count = iov_ret;
+ iov = &cmd->iov_data[0];
+
+ padding = ((-length) & 3);
+ if (padding != 0) {
+ iov[iov_count].iov_base = cmd->pad_bytes;
+ iov[iov_count++].iov_len = padding;
+ rx_size += padding;
+ }
+
+ if (conn->conn_ops->DataDigest) {
+ iov[iov_count].iov_base = &checksum;
+ iov[iov_count++].iov_len = ISCSI_CRC_LEN;
+ rx_size += ISCSI_CRC_LEN;
+ }
+
+ rx_got = rx_data(conn, &cmd->iov_data[0], iov_count, rx_size);
+
+ iscsit_unmap_iovec(cmd);
+
+ if (rx_got != rx_size) {
+ iscsit_rx_thread_wait_for_tcp(conn);
+ return IMMEDIATE_DATA_CANNOT_RECOVER;
+ }
+
+ if (conn->conn_ops->DataDigest) {
+ u32 data_crc;
+
+ data_crc = iscsit_do_crypto_hash_sg(&conn->conn_rx_hash, cmd,
+ cmd->write_data_done, length, padding,
+ cmd->pad_bytes);
+
+ if (checksum != data_crc) {
+ pr_err("ImmediateData CRC32C DataDigest 0x%08x"
+ " does not match computed 0x%08x\n", checksum,
+ data_crc);
+
+ if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
+ pr_err("Unable to recover from"
+ " Immediate Data digest failure while"
+ " in ERL=0.\n");
+ iscsit_add_reject_from_cmd(
+ ISCSI_REASON_DATA_DIGEST_ERROR,
+ 1, 0, buf, cmd);
+ return IMMEDIATE_DATA_CANNOT_RECOVER;
+ } else {
+ iscsit_add_reject_from_cmd(
+ ISCSI_REASON_DATA_DIGEST_ERROR,
+ 0, 0, buf, cmd);
+ return IMMEDIATE_DATA_ERL1_CRC_FAILURE;
+ }
+ } else {
+ pr_debug("Got CRC32C DataDigest 0x%08x for"
+ " %u bytes of Immediate Data\n", checksum,
+ length);
+ }
+ }
+
+ cmd->write_data_done += length;
+
+ if (cmd->write_data_done == cmd->data_length) {
+ spin_lock_bh(&cmd->istate_lock);
+ cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
+ cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
+ spin_unlock_bh(&cmd->istate_lock);
+ }
+
+ return IMMEDIATE_DATA_NORMAL_OPERATION;
+}
+
+/*
+ * Called with sess->conn_lock held.
+ */
+/* #warning iscsi_build_conn_drop_async_message() only sends out on connections
+ with active network interface */
+static void iscsit_build_conn_drop_async_message(struct iscsi_conn *conn)
+{
+ struct iscsi_cmd *cmd;
+ struct iscsi_conn *conn_p;
+ bool found = false;
+
+ /*
+ * Only send an Asynchronous Message on connections whose network
+ * interface is still functional.
+ */
+ list_for_each_entry(conn_p, &conn->sess->sess_conn_list, conn_list) {
+ if (conn_p->conn_state == TARG_CONN_STATE_LOGGED_IN) {
+ iscsit_inc_conn_usage_count(conn_p);
+ found = true;
+ break;
+ }
+ }
+
+ if (!found)
+ return;
+
+ cmd = iscsit_allocate_cmd(conn_p, GFP_KERNEL);
+ if (!cmd) {
+ iscsit_dec_conn_usage_count(conn_p);
+ return;
+ }
+
+ cmd->logout_cid = conn->cid;
+ cmd->iscsi_opcode = ISCSI_OP_ASYNC_EVENT;
+ cmd->i_state = ISTATE_SEND_ASYNCMSG;
+
+ spin_lock_bh(&conn_p->cmd_lock);
+ list_add_tail(&cmd->i_list, &conn_p->conn_cmd_list);
+ spin_unlock_bh(&conn_p->cmd_lock);
+
+ iscsit_add_cmd_to_response_queue(cmd, conn_p, cmd->i_state);
+ iscsit_dec_conn_usage_count(conn_p);
+}
+
+static int iscsit_send_conn_drop_async_message(
+ struct iscsi_cmd *cmd,
+ struct iscsi_conn *conn)
+{
+ struct iscsi_async *hdr;
+
+ cmd->tx_size = ISCSI_HDR_LEN;
+ cmd->iscsi_opcode = ISCSI_OP_ASYNC_EVENT;
+
+ hdr = (struct iscsi_async *) cmd->pdu;
+ hdr->opcode = ISCSI_OP_ASYNC_EVENT;
+ hdr->flags = ISCSI_FLAG_CMD_FINAL;
+ cmd->init_task_tag = 0xFFFFFFFF;
+ cmd->targ_xfer_tag = 0xFFFFFFFF;
+ put_unaligned_be64(0xFFFFFFFFFFFFFFFFULL, &hdr->rsvd4[0]);
+ cmd->stat_sn = conn->stat_sn++;
+ hdr->statsn = cpu_to_be32(cmd->stat_sn);
+ hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
+ hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
+ hdr->async_event = ISCSI_ASYNC_MSG_DROPPING_CONNECTION;
+ hdr->param1 = cpu_to_be16(cmd->logout_cid);
+ hdr->param2 = cpu_to_be16(conn->sess->sess_ops->DefaultTime2Wait);
+ hdr->param3 = cpu_to_be16(conn->sess->sess_ops->DefaultTime2Retain);
+
+ if (conn->conn_ops->HeaderDigest) {
+ u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
+
+ iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
+ (unsigned char *)hdr, ISCSI_HDR_LEN,
+ 0, NULL, (u8 *)header_digest);
+
+ cmd->tx_size += ISCSI_CRC_LEN;
+ pr_debug("Attaching CRC32C HeaderDigest to"
+ " Async Message 0x%08x\n", *header_digest);
+ }
+
+ cmd->iov_misc[0].iov_base = cmd->pdu;
+ cmd->iov_misc[0].iov_len = cmd->tx_size;
+ cmd->iov_misc_count = 1;
+
+ pr_debug("Sending Connection Dropped Async Message StatSN:"
+ " 0x%08x, for CID: %hu on CID: %hu\n", cmd->stat_sn,
+ cmd->logout_cid, conn->cid);
+ return 0;
+}
+
+static int iscsit_send_data_in(
+ struct iscsi_cmd *cmd,
+ struct iscsi_conn *conn,
+ int *eodr)
+{
+ int iov_ret = 0, set_statsn = 0;
+ u32 iov_count = 0, tx_size = 0;
+ struct iscsi_datain datain;
+ struct iscsi_datain_req *dr;
+ struct iscsi_data_rsp *hdr;
+ struct kvec *iov;
+
+ memset(&datain, 0, sizeof(struct iscsi_datain));
+ dr = iscsit_get_datain_values(cmd, &datain);
+ if (!dr) {
+ pr_err("iscsit_get_datain_values failed for ITT: 0x%08x\n",
+ cmd->init_task_tag);
+ return -1;
+ }
+
+ /*
+ * Be paranoid and double check the logic for now.
+ */
+ if ((datain.offset + datain.length) > cmd->data_length) {
+ pr_err("Command ITT: 0x%08x, datain.offset: %u and"
+ " datain.length: %u exceeds cmd->data_length: %u\n",
+ cmd->init_task_tag, datain.offset, datain.length,
+ cmd->data_length);
+ return -1;
+ }
+
+ spin_lock_bh(&conn->sess->session_stats_lock);
+ conn->sess->tx_data_octets += datain.length;
+ if (conn->sess->se_sess->se_node_acl) {
+ spin_lock(&conn->sess->se_sess->se_node_acl->stats_lock);
+ conn->sess->se_sess->se_node_acl->read_bytes += datain.length;
+ spin_unlock(&conn->sess->se_sess->se_node_acl->stats_lock);
+ }
+ spin_unlock_bh(&conn->sess->session_stats_lock);
+ /*
+ * Special case for successful execution w/ both DATAIN
+ * and Sense Data.
+ */
+ if ((datain.flags & ISCSI_FLAG_DATA_STATUS) &&
+ (cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE)) {
+ datain.flags &= ~ISCSI_FLAG_DATA_STATUS;
+ } else {
+ if ((dr->dr_complete == DATAIN_COMPLETE_NORMAL) ||
+ (dr->dr_complete == DATAIN_COMPLETE_CONNECTION_RECOVERY)) {
+ iscsit_increment_maxcmdsn(cmd, conn->sess);
+ cmd->stat_sn = conn->stat_sn++;
+ set_statsn = 1;
+ } else if (dr->dr_complete ==
+ DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY)
+ set_statsn = 1;
+ }
+
+ hdr = (struct iscsi_data_rsp *) cmd->pdu;
+ memset(hdr, 0, ISCSI_HDR_LEN);
+ hdr->opcode = ISCSI_OP_SCSI_DATA_IN;
+ hdr->flags = datain.flags;
+ if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
+ if (cmd->se_cmd.se_cmd_flags & SCF_OVERFLOW_BIT) {
+ hdr->flags |= ISCSI_FLAG_DATA_OVERFLOW;
+ hdr->residual_count = cpu_to_be32(cmd->residual_count);
+ } else if (cmd->se_cmd.se_cmd_flags & SCF_UNDERFLOW_BIT) {
+ hdr->flags |= ISCSI_FLAG_DATA_UNDERFLOW;
+ hdr->residual_count = cpu_to_be32(cmd->residual_count);
+ }
+ }
+ hton24(hdr->dlength, datain.length);
+ if (hdr->flags & ISCSI_FLAG_DATA_ACK)
+ int_to_scsilun(cmd->se_cmd.orig_fe_lun,
+ (struct scsi_lun *)&hdr->lun);
+ else
+ put_unaligned_le64(0xFFFFFFFFFFFFFFFFULL, &hdr->lun);
+
+ hdr->itt = cpu_to_be32(cmd->init_task_tag);
+ hdr->ttt = (hdr->flags & ISCSI_FLAG_DATA_ACK) ?
+ cpu_to_be32(cmd->targ_xfer_tag) :
+ 0xFFFFFFFF;
+ hdr->statsn = (set_statsn) ? cpu_to_be32(cmd->stat_sn) :
+ 0xFFFFFFFF;
+ hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
+ hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
+ hdr->datasn = cpu_to_be32(datain.data_sn);
+ hdr->offset = cpu_to_be32(datain.offset);
+
+ iov = &cmd->iov_data[0];
+ iov[iov_count].iov_base = cmd->pdu;
+ iov[iov_count++].iov_len = ISCSI_HDR_LEN;
+ tx_size += ISCSI_HDR_LEN;
+
+ if (conn->conn_ops->HeaderDigest) {
+ u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
+
+ iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
+ (unsigned char *)hdr, ISCSI_HDR_LEN,
+ 0, NULL, (u8 *)header_digest);
+
+ iov[0].iov_len += ISCSI_CRC_LEN;
+ tx_size += ISCSI_CRC_LEN;
+
+ pr_debug("Attaching CRC32 HeaderDigest"
+ " for DataIN PDU 0x%08x\n", *header_digest);
+ }
+
+ iov_ret = iscsit_map_iovec(cmd, &cmd->iov_data[1], datain.offset, datain.length);
+ if (iov_ret < 0)
+ return -1;
+
+ iov_count += iov_ret;
+ tx_size += datain.length;
+
+ cmd->padding = ((-datain.length) & 3);
+ if (cmd->padding) {
+ iov[iov_count].iov_base = cmd->pad_bytes;
+ iov[iov_count++].iov_len = cmd->padding;
+ tx_size += cmd->padding;
+
+ pr_debug("Attaching %u padding bytes\n",
+ cmd->padding);
+ }
+ if (conn->conn_ops->DataDigest) {
+ cmd->data_crc = iscsit_do_crypto_hash_sg(&conn->conn_tx_hash, cmd,
+ datain.offset, datain.length, cmd->padding, cmd->pad_bytes);
+
+ iov[iov_count].iov_base = &cmd->data_crc;
+ iov[iov_count++].iov_len = ISCSI_CRC_LEN;
+ tx_size += ISCSI_CRC_LEN;
+
+ pr_debug("Attached CRC32C DataDigest %d bytes, crc"
+ " 0x%08x\n", datain.length+cmd->padding, cmd->data_crc);
+ }
+
+ cmd->iov_data_count = iov_count;
+ cmd->tx_size = tx_size;
+
+ pr_debug("Built DataIN ITT: 0x%08x, StatSN: 0x%08x,"
+ " DataSN: 0x%08x, Offset: %u, Length: %u, CID: %hu\n",
+ cmd->init_task_tag, ntohl(hdr->statsn), ntohl(hdr->datasn),
+ ntohl(hdr->offset), datain.length, conn->cid);
+
+ if (dr->dr_complete) {
+ *eodr = (cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ?
+ 2 : 1;
+ iscsit_free_datain_req(cmd, dr);
+ }
+
+ return 0;
+}
+
+static int iscsit_send_logout_response(
+ struct iscsi_cmd *cmd,
+ struct iscsi_conn *conn)
+{
+ int niov = 0, tx_size;
+ struct iscsi_conn *logout_conn = NULL;
+ struct iscsi_conn_recovery *cr = NULL;
+ struct iscsi_session *sess = conn->sess;
+ struct kvec *iov;
+ struct iscsi_logout_rsp *hdr;
+ /*
+ * The actual shutting down of Sessions and/or Connections
+ * for CLOSESESSION and CLOSECONNECTION Logout Requests
+ * is done in iscsit_logout_post_handler().
+ */
+ switch (cmd->logout_reason) {
+ case ISCSI_LOGOUT_REASON_CLOSE_SESSION:
+ pr_debug("iSCSI session logout successful, setting"
+ " logout response to ISCSI_LOGOUT_SUCCESS.\n");
+ cmd->logout_response = ISCSI_LOGOUT_SUCCESS;
+ break;
+ case ISCSI_LOGOUT_REASON_CLOSE_CONNECTION:
+ if (cmd->logout_response == ISCSI_LOGOUT_CID_NOT_FOUND)
+ break;
+ /*
+ * For CLOSECONNECTION logout requests carrying
+ * a matching logout CID -> local CID, the reference
+ * for the local CID will have been incremented in
+ * iscsit_logout_closeconnection().
+ *
+ * For CLOSECONNECTION logout requests carrying
+ * a different CID than the connection it arrived
+ * on, the connection responding to cmd->logout_cid
+ * is stopped in iscsit_logout_post_handler_diffcid().
+ */
+
+ pr_debug("iSCSI CID: %hu logout on CID: %hu"
+ " successful.\n", cmd->logout_cid, conn->cid);
+ cmd->logout_response = ISCSI_LOGOUT_SUCCESS;
+ break;
+ case ISCSI_LOGOUT_REASON_RECOVERY:
+ if ((cmd->logout_response == ISCSI_LOGOUT_RECOVERY_UNSUPPORTED) ||
+ (cmd->logout_response == ISCSI_LOGOUT_CLEANUP_FAILED))
+ break;
+ /*
+ * If the connection is still active from our point of view
+ * force connection recovery to occur.
+ */
+ logout_conn = iscsit_get_conn_from_cid_rcfr(sess,
+ cmd->logout_cid);
+ if (logout_conn) {
+ iscsit_connection_reinstatement_rcfr(logout_conn);
+ iscsit_dec_conn_usage_count(logout_conn);
+ }
+
+ cr = iscsit_get_inactive_connection_recovery_entry(
+ conn->sess, cmd->logout_cid);
+ if (!cr) {
+ pr_err("Unable to locate CID: %hu for"
+ " REMOVECONNFORRECOVERY Logout Request.\n",
+ cmd->logout_cid);
+ cmd->logout_response = ISCSI_LOGOUT_CID_NOT_FOUND;
+ break;
+ }
+
+ iscsit_discard_cr_cmds_by_expstatsn(cr, cmd->exp_stat_sn);
+
+ pr_debug("iSCSI REMOVECONNFORRECOVERY logout"
+ " for recovery for CID: %hu on CID: %hu successful.\n",
+ cmd->logout_cid, conn->cid);
+ cmd->logout_response = ISCSI_LOGOUT_SUCCESS;
+ break;
+ default:
+ pr_err("Unknown cmd->logout_reason: 0x%02x\n",
+ cmd->logout_reason);
+ return -1;
+ }
+
+ tx_size = ISCSI_HDR_LEN;
+ hdr = (struct iscsi_logout_rsp *)cmd->pdu;
+ memset(hdr, 0, ISCSI_HDR_LEN);
+ hdr->opcode = ISCSI_OP_LOGOUT_RSP;
+ hdr->flags |= ISCSI_FLAG_CMD_FINAL;
+ hdr->response = cmd->logout_response;
+ hdr->itt = cpu_to_be32(cmd->init_task_tag);
+ cmd->stat_sn = conn->stat_sn++;
+ hdr->statsn = cpu_to_be32(cmd->stat_sn);
+
+ iscsit_increment_maxcmdsn(cmd, conn->sess);
+ hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
+ hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
+
+ iov = &cmd->iov_misc[0];
+ iov[niov].iov_base = cmd->pdu;
+ iov[niov++].iov_len = ISCSI_HDR_LEN;
+
+ if (conn->conn_ops->HeaderDigest) {
+ u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
+
+ iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
+ (unsigned char *)hdr, ISCSI_HDR_LEN,
+ 0, NULL, (u8 *)header_digest);
+
+ iov[0].iov_len += ISCSI_CRC_LEN;
+ tx_size += ISCSI_CRC_LEN;
+ pr_debug("Attaching CRC32C HeaderDigest to"
+ " Logout Response 0x%08x\n", *header_digest);
+ }
+ cmd->iov_misc_count = niov;
+ cmd->tx_size = tx_size;
+
+ pr_debug("Sending Logout Response ITT: 0x%08x StatSN:"
+ " 0x%08x Response: 0x%02x CID: %hu on CID: %hu\n",
+ cmd->init_task_tag, cmd->stat_sn, hdr->response,
+ cmd->logout_cid, conn->cid);
+
+ return 0;
+}
+
+/*
+ * Unsolicited NOPIN, either requesting a response or not.
+ */
+static int iscsit_send_unsolicited_nopin(
+ struct iscsi_cmd *cmd,
+ struct iscsi_conn *conn,
+ int want_response)
+{
+ int tx_size = ISCSI_HDR_LEN;
+ struct iscsi_nopin *hdr;
+
+ hdr = (struct iscsi_nopin *) cmd->pdu;
+ memset(hdr, 0, ISCSI_HDR_LEN);
+ hdr->opcode = ISCSI_OP_NOOP_IN;
+ hdr->flags |= ISCSI_FLAG_CMD_FINAL;
+ hdr->itt = cpu_to_be32(cmd->init_task_tag);
+ hdr->ttt = cpu_to_be32(cmd->targ_xfer_tag);
+ cmd->stat_sn = conn->stat_sn;
+ hdr->statsn = cpu_to_be32(cmd->stat_sn);
+ hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
+ hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
+
+ if (conn->conn_ops->HeaderDigest) {
+ u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
+
+ iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
+ (unsigned char *)hdr, ISCSI_HDR_LEN,
+ 0, NULL, (u8 *)header_digest);
+
+ tx_size += ISCSI_CRC_LEN;
+ pr_debug("Attaching CRC32C HeaderDigest to"
+ " NopIN 0x%08x\n", *header_digest);
+ }
+
+ cmd->iov_misc[0].iov_base = cmd->pdu;
+ cmd->iov_misc[0].iov_len = tx_size;
+ cmd->iov_misc_count = 1;
+ cmd->tx_size = tx_size;
+
+ pr_debug("Sending Unsolicited NOPIN TTT: 0x%08x StatSN:"
+ " 0x%08x CID: %hu\n", hdr->ttt, cmd->stat_sn, conn->cid);
+
+ return 0;
+}
+
+static int iscsit_send_nopin_response(
+ struct iscsi_cmd *cmd,
+ struct iscsi_conn *conn)
+{
+ int niov = 0, tx_size;
+ u32 padding = 0;
+ struct kvec *iov;
+ struct iscsi_nopin *hdr;
+
+ tx_size = ISCSI_HDR_LEN;
+ hdr = (struct iscsi_nopin *) cmd->pdu;
+ memset(hdr, 0, ISCSI_HDR_LEN);
+ hdr->opcode = ISCSI_OP_NOOP_IN;
+ hdr->flags |= ISCSI_FLAG_CMD_FINAL;
+ hton24(hdr->dlength, cmd->buf_ptr_size);
+ put_unaligned_le64(0xFFFFFFFFFFFFFFFFULL, &hdr->lun);
+ hdr->itt = cpu_to_be32(cmd->init_task_tag);
+ hdr->ttt = cpu_to_be32(cmd->targ_xfer_tag);
+ cmd->stat_sn = conn->stat_sn++;
+ hdr->statsn = cpu_to_be32(cmd->stat_sn);
+
+ iscsit_increment_maxcmdsn(cmd, conn->sess);
+ hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
+ hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
+
+ iov = &cmd->iov_misc[0];
+ iov[niov].iov_base = cmd->pdu;
+ iov[niov++].iov_len = ISCSI_HDR_LEN;
+
+ if (conn->conn_ops->HeaderDigest) {
+ u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
+
+ iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
+ (unsigned char *)hdr, ISCSI_HDR_LEN,
+ 0, NULL, (u8 *)header_digest);
+
+ iov[0].iov_len += ISCSI_CRC_LEN;
+ tx_size += ISCSI_CRC_LEN;
+ pr_debug("Attaching CRC32C HeaderDigest"
+ " to NopIn 0x%08x\n", *header_digest);
+ }
+
+ /*
+ * NOPOUT Ping Data is attached to struct iscsi_cmd->buf_ptr.
+ * NOPOUT DataSegmentLength is at struct iscsi_cmd->buf_ptr_size.
+ */
+ if (cmd->buf_ptr_size) {
+ iov[niov].iov_base = cmd->buf_ptr;
+ iov[niov++].iov_len = cmd->buf_ptr_size;
+ tx_size += cmd->buf_ptr_size;
+
+ pr_debug("Echoing back %u bytes of ping"
+ " data.\n", cmd->buf_ptr_size);
+
+ padding = ((-cmd->buf_ptr_size) & 3);
+ if (padding != 0) {
+ iov[niov].iov_base = &cmd->pad_bytes;
+ iov[niov++].iov_len = padding;
+ tx_size += padding;
+ pr_debug("Attaching %u additional"
+ " padding bytes.\n", padding);
+ }
+ if (conn->conn_ops->DataDigest) {
+ iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
+ cmd->buf_ptr, cmd->buf_ptr_size,
+ padding, (u8 *)&cmd->pad_bytes,
+ (u8 *)&cmd->data_crc);
+
+ iov[niov].iov_base = &cmd->data_crc;
+ iov[niov++].iov_len = ISCSI_CRC_LEN;
+ tx_size += ISCSI_CRC_LEN;
+ pr_debug("Attached DataDigest for %u"
+ " bytes of ping data, CRC 0x%08x\n",
+ cmd->buf_ptr_size, cmd->data_crc);
+ }
+ }
+
+ cmd->iov_misc_count = niov;
+ cmd->tx_size = tx_size;
+
+ pr_debug("Sending NOPIN Response ITT: 0x%08x, TTT:"
+ " 0x%08x, StatSN: 0x%08x, Length %u\n", cmd->init_task_tag,
+ cmd->targ_xfer_tag, cmd->stat_sn, cmd->buf_ptr_size);
+
+ return 0;
+}
+
+int iscsit_send_r2t(
+ struct iscsi_cmd *cmd,
+ struct iscsi_conn *conn)
+{
+ int tx_size = 0;
+ struct iscsi_r2t *r2t;
+ struct iscsi_r2t_rsp *hdr;
+
+ r2t = iscsit_get_r2t_from_list(cmd);
+ if (!r2t)
+ return -1;
+
+ hdr = (struct iscsi_r2t_rsp *) cmd->pdu;
+ memset(hdr, 0, ISCSI_HDR_LEN);
+ hdr->opcode = ISCSI_OP_R2T;
+ hdr->flags |= ISCSI_FLAG_CMD_FINAL;
+ int_to_scsilun(cmd->se_cmd.orig_fe_lun,
+ (struct scsi_lun *)&hdr->lun);
+ hdr->itt = cpu_to_be32(cmd->init_task_tag);
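+ /*
+ * Allocate the next TTT from the per-session counter, skipping
+ * 0xFFFFFFFF, which RFC 3720 reserves.
+ */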
+ spin_lock_bh(&conn->sess->ttt_lock);
+ r2t->targ_xfer_tag = conn->sess->targ_xfer_tag++;
+ if (r2t->targ_xfer_tag == 0xFFFFFFFF)
+ r2t->targ_xfer_tag = conn->sess->targ_xfer_tag++;
+ spin_unlock_bh(&conn->sess->ttt_lock);
+ hdr->ttt = cpu_to_be32(r2t->targ_xfer_tag);
+ hdr->statsn = cpu_to_be32(conn->stat_sn);
+ hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
+ hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
+ hdr->r2tsn = cpu_to_be32(r2t->r2t_sn);
+ hdr->data_offset = cpu_to_be32(r2t->offset);
+ hdr->data_length = cpu_to_be32(r2t->xfer_len);
+
+ cmd->iov_misc[0].iov_base = cmd->pdu;
+ cmd->iov_misc[0].iov_len = ISCSI_HDR_LEN;
+ tx_size += ISCSI_HDR_LEN;
+
+ if (conn->conn_ops->HeaderDigest) {
+ u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
+
+ iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
+ (unsigned char *)hdr, ISCSI_HDR_LEN,
+ 0, NULL, (u8 *)header_digest);
+
+ cmd->iov_misc[0].iov_len += ISCSI_CRC_LEN;
+ tx_size += ISCSI_CRC_LEN;
+ pr_debug("Attaching CRC32 HeaderDigest for R2T"
+ " PDU 0x%08x\n", *header_digest);
+ }
+
+ pr_debug("Built %sR2T, ITT: 0x%08x, TTT: 0x%08x, StatSN:"
+ " 0x%08x, R2TSN: 0x%08x, Offset: %u, DDTL: %u, CID: %hu\n",
+ (!r2t->recovery_r2t) ? "" : "Recovery ", cmd->init_task_tag,
+ r2t->targ_xfer_tag, ntohl(hdr->statsn), r2t->r2t_sn,
+ r2t->offset, r2t->xfer_len, conn->cid);
+
+ cmd->iov_misc_count = 1;
+ cmd->tx_size = tx_size;
+
+ spin_lock_bh(&cmd->r2t_lock);
+ r2t->sent_r2t = 1;
+ spin_unlock_bh(&cmd->r2t_lock);
+
+ return 0;
+}
+
+/*
+ * type 0: Normal Operation.
+ * type 1: Called from Storage Transport.
+ * type 2: Called from iscsi_task_reassign_complete_write() for
+ * connection recovery.
+ */
+int iscsit_build_r2ts_for_cmd(
+ struct iscsi_cmd *cmd,
+ struct iscsi_conn *conn,
+ int type)
+{
+ int first_r2t = 1;
+ u32 offset = 0, xfer_len = 0;
+
+ spin_lock_bh(&cmd->r2t_lock);
+ if (cmd->cmd_flags & ICF_SENT_LAST_R2T) {
+ spin_unlock_bh(&cmd->r2t_lock);
+ return 0;
+ }
+
+ if (conn->sess->sess_ops->DataSequenceInOrder && (type != 2))
+ if (cmd->r2t_offset < cmd->write_data_done)
+ cmd->r2t_offset = cmd->write_data_done;
+
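+ /*
+ * Each R2T solicits at most MaxBurstLength bytes; for type 2
+ * (connection recovery) the first R2T is shortened by the burst
+ * already in flight (next_burst_len).
+ */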
+ while (cmd->outstanding_r2ts < conn->sess->sess_ops->MaxOutstandingR2T) {
+ if (conn->sess->sess_ops->DataSequenceInOrder) {
+ offset = cmd->r2t_offset;
+
+ if (first_r2t && (type == 2)) {
+ xfer_len = ((offset +
+ (conn->sess->sess_ops->MaxBurstLength -
+ cmd->next_burst_len) >
+ cmd->data_length) ?
+ (cmd->data_length - offset) :
+ (conn->sess->sess_ops->MaxBurstLength -
+ cmd->next_burst_len));
+ } else {
+ xfer_len = ((offset +
+ conn->sess->sess_ops->MaxBurstLength) >
+ cmd->data_length) ?
+ (cmd->data_length - offset) :
+ conn->sess->sess_ops->MaxBurstLength;
+ }
+ cmd->r2t_offset += xfer_len;
+
+ if (cmd->r2t_offset == cmd->data_length)
+ cmd->cmd_flags |= ICF_SENT_LAST_R2T;
+ } else {
+ struct iscsi_seq *seq;
+
+ seq = iscsit_get_seq_holder_for_r2t(cmd);
+ if (!seq) {
+ spin_unlock_bh(&cmd->r2t_lock);
+ return -1;
+ }
+
+ offset = seq->offset;
+ xfer_len = seq->xfer_len;
+
+ if (cmd->seq_send_order == cmd->seq_count)
+ cmd->cmd_flags |= ICF_SENT_LAST_R2T;
+ }
+ cmd->outstanding_r2ts++;
+ first_r2t = 0;
+
+ if (iscsit_add_r2t_to_list(cmd, offset, xfer_len, 0, 0) < 0) {
+ spin_unlock_bh(&cmd->r2t_lock);
+ return -1;
+ }
+
+ if (cmd->cmd_flags & ICF_SENT_LAST_R2T)
+ break;
+ }
+ spin_unlock_bh(&cmd->r2t_lock);
+
+ return 0;
+}
+
+static int iscsit_send_status(
+ struct iscsi_cmd *cmd,
+ struct iscsi_conn *conn)
+{
+ u8 iov_count = 0, recovery;
+ u32 padding = 0, tx_size = 0;
+ struct iscsi_scsi_rsp *hdr;
+ struct kvec *iov;
+
+ recovery = (cmd->i_state != ISTATE_SEND_STATUS);
+ if (!recovery)
+ cmd->stat_sn = conn->stat_sn++;
+
+ spin_lock_bh(&conn->sess->session_stats_lock);
+ conn->sess->rsp_pdus++;
+ spin_unlock_bh(&conn->sess->session_stats_lock);
+
+ hdr = (struct iscsi_scsi_rsp *) cmd->pdu;
+ memset(hdr, 0, ISCSI_HDR_LEN);
+ hdr->opcode = ISCSI_OP_SCSI_CMD_RSP;
+ hdr->flags |= ISCSI_FLAG_CMD_FINAL;
+ if (cmd->se_cmd.se_cmd_flags & SCF_OVERFLOW_BIT) {
+ hdr->flags |= ISCSI_FLAG_CMD_OVERFLOW;
+ hdr->residual_count = cpu_to_be32(cmd->residual_count);
+ } else if (cmd->se_cmd.se_cmd_flags & SCF_UNDERFLOW_BIT) {
+ hdr->flags |= ISCSI_FLAG_CMD_UNDERFLOW;
+ hdr->residual_count = cpu_to_be32(cmd->residual_count);
+ }
+ hdr->response = cmd->iscsi_response;
+ hdr->cmd_status = cmd->se_cmd.scsi_status;
+ hdr->itt = cpu_to_be32(cmd->init_task_tag);
+ hdr->statsn = cpu_to_be32(cmd->stat_sn);
+
+ iscsit_increment_maxcmdsn(cmd, conn->sess);
+ hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
+ hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
+
+ iov = &cmd->iov_misc[0];
+ iov[iov_count].iov_base = cmd->pdu;
+ iov[iov_count++].iov_len = ISCSI_HDR_LEN;
+ tx_size += ISCSI_HDR_LEN;
+
+ /*
+ * Attach SENSE DATA payload to iSCSI Response PDU
+ */
+ if (cmd->se_cmd.sense_buffer &&
+ ((cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
+ (cmd->se_cmd.se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
+ padding = -(cmd->se_cmd.scsi_sense_length) & 3;
+ hton24(hdr->dlength, cmd->se_cmd.scsi_sense_length);
+ iov[iov_count].iov_base = cmd->se_cmd.sense_buffer;
+ iov[iov_count++].iov_len =
+ (cmd->se_cmd.scsi_sense_length + padding);
+ tx_size += cmd->se_cmd.scsi_sense_length;
+
+ if (padding) {
+ memset(cmd->se_cmd.sense_buffer +
+ cmd->se_cmd.scsi_sense_length, 0, padding);
+ tx_size += padding;
+ pr_debug("Adding %u bytes of padding to"
+ " SENSE.\n", padding);
+ }
+
+ if (conn->conn_ops->DataDigest) {
+ iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
+ cmd->se_cmd.sense_buffer,
+ (cmd->se_cmd.scsi_sense_length + padding),
+ 0, NULL, (u8 *)&cmd->data_crc);
+
+ iov[iov_count].iov_base = &cmd->data_crc;
+ iov[iov_count++].iov_len = ISCSI_CRC_LEN;
+ tx_size += ISCSI_CRC_LEN;
+
+ pr_debug("Attaching CRC32 DataDigest for"
+ " SENSE, %u bytes CRC 0x%08x\n",
+ (cmd->se_cmd.scsi_sense_length + padding),
+ cmd->data_crc);
+ }
+
+ pr_debug("Attaching SENSE DATA: %u bytes to iSCSI"
+ " Response PDU\n",
+ cmd->se_cmd.scsi_sense_length);
+ }
+
+ if (conn->conn_ops->HeaderDigest) {
+ u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
+
+ iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
+ (unsigned char *)hdr, ISCSI_HDR_LEN,
+ 0, NULL, (u8 *)header_digest);
+
+ iov[0].iov_len += ISCSI_CRC_LEN;
+ tx_size += ISCSI_CRC_LEN;
+ pr_debug("Attaching CRC32 HeaderDigest for Response"
+ " PDU 0x%08x\n", *header_digest);
+ }
+
+ cmd->iov_misc_count = iov_count;
+ cmd->tx_size = tx_size;
+
+ pr_debug("Built %sSCSI Response, ITT: 0x%08x, StatSN: 0x%08x,"
+ " Response: 0x%02x, SAM Status: 0x%02x, CID: %hu\n",
+ (!recovery) ? "" : "Recovery ", cmd->init_task_tag,
+ cmd->stat_sn, hdr->response, cmd->se_cmd.scsi_status, conn->cid);
+
+ return 0;
+}
+
+static u8 iscsit_convert_tcm_tmr_rsp(struct se_tmr_req *se_tmr)
+{
+ switch (se_tmr->response) {
+ case TMR_FUNCTION_COMPLETE:
+ return ISCSI_TMF_RSP_COMPLETE;
+ case TMR_TASK_DOES_NOT_EXIST:
+ return ISCSI_TMF_RSP_NO_TASK;
+ case TMR_LUN_DOES_NOT_EXIST:
+ return ISCSI_TMF_RSP_NO_LUN;
+ case TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED:
+ return ISCSI_TMF_RSP_NOT_SUPPORTED;
+ case TMR_FUNCTION_AUTHORIZATION_FAILED:
+ return ISCSI_TMF_RSP_AUTH_FAILED;
+ case TMR_FUNCTION_REJECTED:
+ default:
+ return ISCSI_TMF_RSP_REJECTED;
+ }
+}
+
+static int iscsit_send_task_mgt_rsp(
+ struct iscsi_cmd *cmd,
+ struct iscsi_conn *conn)
+{
+ struct se_tmr_req *se_tmr = cmd->se_cmd.se_tmr_req;
+ struct iscsi_tm_rsp *hdr;
+ u32 tx_size = 0;
+
+ hdr = (struct iscsi_tm_rsp *) cmd->pdu;
+ memset(hdr, 0, ISCSI_HDR_LEN);
+ hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP;
+ hdr->response = iscsit_convert_tcm_tmr_rsp(se_tmr);
+ hdr->itt = cpu_to_be32(cmd->init_task_tag);
+ cmd->stat_sn = conn->stat_sn++;
+ hdr->statsn = cpu_to_be32(cmd->stat_sn);
+
+ iscsit_increment_maxcmdsn(cmd, conn->sess);
+ hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
+ hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
+
+ cmd->iov_misc[0].iov_base = cmd->pdu;
+ cmd->iov_misc[0].iov_len = ISCSI_HDR_LEN;
+ tx_size += ISCSI_HDR_LEN;
+
+ if (conn->conn_ops->HeaderDigest) {
+ u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
+
+ iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
+ (unsigned char *)hdr, ISCSI_HDR_LEN,
+ 0, NULL, (u8 *)header_digest);
+
+ cmd->iov_misc[0].iov_len += ISCSI_CRC_LEN;
+ tx_size += ISCSI_CRC_LEN;
+ pr_debug("Attaching CRC32 HeaderDigest for Task"
+ " Mgmt Response PDU 0x%08x\n", *header_digest);
+ }
+
+ cmd->iov_misc_count = 1;
+ cmd->tx_size = tx_size;
+
+ pr_debug("Built Task Management Response ITT: 0x%08x,"
+ " StatSN: 0x%08x, Response: 0x%02x, CID: %hu\n",
+ cmd->init_task_tag, cmd->stat_sn, hdr->response, conn->cid);
+
+ return 0;
+}
+
+static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd)
+{
+ char *payload = NULL;
+ struct iscsi_conn *conn = cmd->conn;
+ struct iscsi_portal_group *tpg;
+ struct iscsi_tiqn *tiqn;
+ struct iscsi_tpg_np *tpg_np;
+ int buffer_len, end_of_buf = 0, len = 0, payload_len = 0;
+ unsigned char buf[256];
+
+ buffer_len = (conn->conn_ops->MaxRecvDataSegmentLength > 32768) ?
+ 32768 : conn->conn_ops->MaxRecvDataSegmentLength;
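+ /*
+ * Cap the response payload at 32K, even if the initiator
+ * advertises a larger MaxRecvDataSegmentLength.
+ */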
+
+ memset(buf, 0, 256);
+
+ payload = kzalloc(buffer_len, GFP_KERNEL);
+ if (!payload) {
+ pr_err("Unable to allocate memory for sendtargets"
+ " response.\n");
+ return -ENOMEM;
+ }
+
+ spin_lock(&tiqn_lock);
+ list_for_each_entry(tiqn, &g_tiqn_list, tiqn_list) {
+ len = sprintf(buf, "TargetName=%s", tiqn->tiqn);
+ len += 1;
+
+ if ((len + payload_len) > buffer_len) {
+ end_of_buf = 1;
+ goto eob;
+ }
+ memcpy((void *)payload + payload_len, buf, len);
+ payload_len += len;
+
+ spin_lock(&tiqn->tiqn_tpg_lock);
+ list_for_each_entry(tpg, &tiqn->tiqn_tpg_list, tpg_list) {
+
+ spin_lock(&tpg->tpg_state_lock);
+ if ((tpg->tpg_state == TPG_STATE_FREE) ||
+ (tpg->tpg_state == TPG_STATE_INACTIVE)) {
+ spin_unlock(&tpg->tpg_state_lock);
+ continue;
+ }
+ spin_unlock(&tpg->tpg_state_lock);
+
+ spin_lock(&tpg->tpg_np_lock);
+ list_for_each_entry(tpg_np, &tpg->tpg_gnp_list,
+ tpg_np_list) {
+ len = sprintf(buf, "TargetAddress="
+ "%s%s%s:%hu,%hu",
+ (tpg_np->tpg_np->np_sockaddr.ss_family == AF_INET6) ?
+ "[" : "", tpg_np->tpg_np->np_ip,
+ (tpg_np->tpg_np->np_sockaddr.ss_family == AF_INET6) ?
+ "]" : "", tpg_np->tpg_np->np_port,
+ tpg->tpgt);
+ len += 1;
+
+ if ((len + payload_len) > buffer_len) {
+ spin_unlock(&tpg->tpg_np_lock);
+ spin_unlock(&tiqn->tiqn_tpg_lock);
+ end_of_buf = 1;
+ goto eob;
+ }
+ memcpy((void *)payload + payload_len, buf, len);
+ payload_len += len;
+ }
+ spin_unlock(&tpg->tpg_np_lock);
+ }
+ spin_unlock(&tiqn->tiqn_tpg_lock);
+eob:
+ if (end_of_buf)
+ break;
+ }
+ spin_unlock(&tiqn_lock);
+
+ cmd->buf_ptr = payload;
+
+ return payload_len;
+}
+
+/*
+ * FIXME: Add support for F_BIT and C_BIT when the length is longer than
+ * MaxRecvDataSegmentLength.
+ */
+static int iscsit_send_text_rsp(
+ struct iscsi_cmd *cmd,
+ struct iscsi_conn *conn)
+{
+ struct iscsi_text_rsp *hdr;
+ struct kvec *iov;
+ u32 padding = 0, tx_size = 0;
+ int text_length, iov_count = 0;
+
+ text_length = iscsit_build_sendtargets_response(cmd);
+ if (text_length < 0)
+ return text_length;
+
+ padding = ((-text_length) & 3);
+ if (padding != 0) {
+ memset(cmd->buf_ptr + text_length, 0, padding);
+ pr_debug("Attaching %u additional bytes for"
+ " padding.\n", padding);
+ }
+
+ hdr = (struct iscsi_text_rsp *) cmd->pdu;
+ memset(hdr, 0, ISCSI_HDR_LEN);
+ hdr->opcode = ISCSI_OP_TEXT_RSP;
+ hdr->flags |= ISCSI_FLAG_CMD_FINAL;
+ hton24(hdr->dlength, text_length);
+ hdr->itt = cpu_to_be32(cmd->init_task_tag);
+ hdr->ttt = cpu_to_be32(cmd->targ_xfer_tag);
+ cmd->stat_sn = conn->stat_sn++;
+ hdr->statsn = cpu_to_be32(cmd->stat_sn);
+
+ iscsit_increment_maxcmdsn(cmd, conn->sess);
+ hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
+ hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
+
+ iov = &cmd->iov_misc[0];
+
+ iov[iov_count].iov_base = cmd->pdu;
+ iov[iov_count++].iov_len = ISCSI_HDR_LEN;
+ iov[iov_count].iov_base = cmd->buf_ptr;
+ iov[iov_count++].iov_len = text_length + padding;
+
+ tx_size += (ISCSI_HDR_LEN + text_length + padding);
+
+ if (conn->conn_ops->HeaderDigest) {
+ u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
+
+ iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
+ (unsigned char *)hdr, ISCSI_HDR_LEN,
+ 0, NULL, (u8 *)header_digest);
+
+ iov[0].iov_len += ISCSI_CRC_LEN;
+ tx_size += ISCSI_CRC_LEN;
+ pr_debug("Attaching CRC32 HeaderDigest for"
+ " Text Response PDU 0x%08x\n", *header_digest);
+ }
+
+ if (conn->conn_ops->DataDigest) {
+ iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
+ cmd->buf_ptr, (text_length + padding),
+ 0, NULL, (u8 *)&cmd->data_crc);
+
+ iov[iov_count].iov_base = &cmd->data_crc;
+ iov[iov_count++].iov_len = ISCSI_CRC_LEN;
+ tx_size += ISCSI_CRC_LEN;
+
+ pr_debug("Attaching DataDigest for %u bytes of text"
+ " data, CRC 0x%08x\n", (text_length + padding),
+ cmd->data_crc);
+ }
+
+ cmd->iov_misc_count = iov_count;
+ cmd->tx_size = tx_size;
+
+ pr_debug("Built Text Response: ITT: 0x%08x, StatSN: 0x%08x,"
+ " Length: %u, CID: %hu\n", cmd->init_task_tag, cmd->stat_sn,
+ text_length, conn->cid);
+ return 0;
+}
+
+static int iscsit_send_reject(
+ struct iscsi_cmd *cmd,
+ struct iscsi_conn *conn)
+{
+ u32 iov_count = 0, tx_size = 0;
+ struct iscsi_reject *hdr;
+ struct kvec *iov;
+
+ hdr = (struct iscsi_reject *) cmd->pdu;
+ hdr->opcode = ISCSI_OP_REJECT;
+ hdr->flags |= ISCSI_FLAG_CMD_FINAL;
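+ /* The Reject data segment echoes the offending 48-byte PDU header. */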
+ hton24(hdr->dlength, ISCSI_HDR_LEN);
+ cmd->stat_sn = conn->stat_sn++;
+ hdr->statsn = cpu_to_be32(cmd->stat_sn);
+ hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
+ hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
+
+ iov = &cmd->iov_misc[0];
+
+ iov[iov_count].iov_base = cmd->pdu;
+ iov[iov_count++].iov_len = ISCSI_HDR_LEN;
+ iov[iov_count].iov_base = cmd->buf_ptr;
+ iov[iov_count++].iov_len = ISCSI_HDR_LEN;
+
+ tx_size = (ISCSI_HDR_LEN + ISCSI_HDR_LEN);
+
+ if (conn->conn_ops->HeaderDigest) {
+ u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
+
+ iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
+ (unsigned char *)hdr, ISCSI_HDR_LEN,
+ 0, NULL, (u8 *)header_digest);
+
+ iov[0].iov_len += ISCSI_CRC_LEN;
+ tx_size += ISCSI_CRC_LEN;
+ pr_debug("Attaching CRC32 HeaderDigest for"
+ " REJECT PDU 0x%08x\n", *header_digest);
+ }
+
+ if (conn->conn_ops->DataDigest) {
+ iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
+ (unsigned char *)cmd->buf_ptr, ISCSI_HDR_LEN,
+ 0, NULL, (u8 *)&cmd->data_crc);
+
+ iov[iov_count].iov_base = &cmd->data_crc;
+ iov[iov_count++].iov_len = ISCSI_CRC_LEN;
+ tx_size += ISCSI_CRC_LEN;
+ pr_debug("Attaching CRC32 DataDigest for REJECT"
+ " PDU 0x%08x\n", cmd->data_crc);
+ }
+
+ cmd->iov_misc_count = iov_count;
+ cmd->tx_size = tx_size;
+
+ pr_debug("Built Reject PDU StatSN: 0x%08x, Reason: 0x%02x,"
+ " CID: %hu\n", ntohl(hdr->statsn), hdr->reason, conn->cid);
+
+ return 0;
+}
+
+static void iscsit_tx_thread_wait_for_tcp(struct iscsi_conn *conn)
+{
+ if ((conn->sock->sk->sk_shutdown & SEND_SHUTDOWN) ||
+ (conn->sock->sk->sk_shutdown & RCV_SHUTDOWN)) {
+ wait_for_completion_interruptible_timeout(
+ &conn->tx_half_close_comp,
+ ISCSI_TX_THREAD_TCP_TIMEOUT * HZ);
+ }
+}
+
+#ifdef CONFIG_SMP
+
+void iscsit_thread_get_cpumask(struct iscsi_conn *conn)
+{
+ struct iscsi_thread_set *ts = conn->thread_set;
+ int ord, cpu;
+ /*
+ * thread_id is assigned from iscsit_global->ts_bitmap from
+ * within iscsi_thread_set.c:iscsi_allocate_thread_sets()
+ *
+ * Here we use thread_id to determine which CPU that this
+ * iSCSI connection's iscsi_thread_set will be scheduled to
+ * execute upon.
+ */
+ ord = ts->thread_id % cpumask_weight(cpu_online_mask);
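+ /*
+ * e.g. thread_id 5 on a 4 CPU system yields ord 1, so the walk
+ * below pins the connection to the second online CPU.
+ */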
+#if 0
+ pr_debug(">>>>>>>>>>>>>>>>>>>> Generated ord: %d from"
+ " thread_id: %d\n", ord, ts->thread_id);
+#endif
+ for_each_online_cpu(cpu) {
+ if (ord-- == 0) {
+ cpumask_set_cpu(cpu, conn->conn_cpumask);
+ return;
+ }
+ }
+ /*
+ * This should never be reached.
+ */
+ dump_stack();
+ cpumask_setall(conn->conn_cpumask);
+}
+
+static inline void iscsit_thread_check_cpumask(
+ struct iscsi_conn *conn,
+ struct task_struct *p,
+ int mode)
+{
+ char buf[128];
+ /*
+ * mode == 1 signals iscsi_target_tx_thread() usage.
+ * mode == 0 signals iscsi_target_rx_thread() usage.
+ */
+ if (mode == 1) {
+ if (!conn->conn_tx_reset_cpumask)
+ return;
+ conn->conn_tx_reset_cpumask = 0;
+ } else {
+ if (!conn->conn_rx_reset_cpumask)
+ return;
+ conn->conn_rx_reset_cpumask = 0;
+ }
+ /*
+ * Update the CPU mask for this single kthread so that
+ * both TX and RX kthreads are scheduled to run on the
+ * same CPU.
+ */
+ memset(buf, 0, 128);
+ cpumask_scnprintf(buf, 128, conn->conn_cpumask);
+#if 0
+ pr_debug(">>>>>>>>>>>>>> Calling set_cpus_allowed_ptr():"
+ " %s for %s\n", buf, p->comm);
+#endif
+ set_cpus_allowed_ptr(p, conn->conn_cpumask);
+}
+
+#else
+
+void iscsit_thread_get_cpumask(struct iscsi_conn *conn)
+{
+}
+
+#define iscsit_thread_check_cpumask(X, Y, Z) ({})
+#endif /* CONFIG_SMP */
+
+int iscsi_target_tx_thread(void *arg)
+{
+ u8 state;
+ int eodr = 0;
+ int ret = 0;
+ int sent_status = 0;
+ int use_misc = 0;
+ int map_sg = 0;
+ struct iscsi_cmd *cmd = NULL;
+ struct iscsi_conn *conn;
+ struct iscsi_queue_req *qr = NULL;
+ struct se_cmd *se_cmd;
+ struct iscsi_thread_set *ts = (struct iscsi_thread_set *)arg;
+ /*
+ * Allow ourselves to be interrupted by SIGINT so that a
+ * connection recovery / failure event can be triggered externally.
+ */
+ allow_signal(SIGINT);
+
+restart:
+ conn = iscsi_tx_thread_pre_handler(ts);
+ if (!conn)
+ goto out;
+
+ eodr = map_sg = ret = sent_status = use_misc = 0;
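+ /*
+ * Drain the immediate queue (R2Ts, NopINs, command removals)
+ * before the response queue; check_immediate_queue forces a
+ * re-check after each response PDU.
+ */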
+
+ while (!kthread_should_stop()) {
+ /*
+ * Ensure that both TX and RX per connection kthreads
+ * are scheduled to run on the same CPU.
+ */
+ iscsit_thread_check_cpumask(conn, current, 1);
+
+ schedule_timeout_interruptible(MAX_SCHEDULE_TIMEOUT);
+
+ if ((ts->status == ISCSI_THREAD_SET_RESET) ||
+ signal_pending(current))
+ goto transport_err;
+
+get_immediate:
+ qr = iscsit_get_cmd_from_immediate_queue(conn);
+ if (qr) {
+ atomic_set(&conn->check_immediate_queue, 0);
+ cmd = qr->cmd;
+ state = qr->state;
+ kmem_cache_free(lio_qr_cache, qr);
+
+ spin_lock_bh(&cmd->istate_lock);
+ switch (state) {
+ case ISTATE_SEND_R2T:
+ spin_unlock_bh(&cmd->istate_lock);
+ ret = iscsit_send_r2t(cmd, conn);
+ break;
+ case ISTATE_REMOVE:
+ spin_unlock_bh(&cmd->istate_lock);
+
+ if (cmd->data_direction == DMA_TO_DEVICE)
+ iscsit_stop_dataout_timer(cmd);
+
+ spin_lock_bh(&conn->cmd_lock);
+ list_del(&cmd->i_list);
+ spin_unlock_bh(&conn->cmd_lock);
+ /*
+ * Determine if a struct se_cmd is associated with
+ * this struct iscsi_cmd.
+ */
+ if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) &&
+ !(cmd->tmr_req))
+ iscsit_release_cmd(cmd);
+ else
+ transport_generic_free_cmd(&cmd->se_cmd,
+ 1, 0);
+ goto get_immediate;
+ case ISTATE_SEND_NOPIN_WANT_RESPONSE:
+ spin_unlock_bh(&cmd->istate_lock);
+ iscsit_mod_nopin_response_timer(conn);
+ ret = iscsit_send_unsolicited_nopin(cmd,
+ conn, 1);
+ break;
+ case ISTATE_SEND_NOPIN_NO_RESPONSE:
+ spin_unlock_bh(&cmd->istate_lock);
+ ret = iscsit_send_unsolicited_nopin(cmd,
+ conn, 0);
+ break;
+ default:
+ pr_err("Unknown Opcode: 0x%02x ITT:"
+ " 0x%08x, i_state: %d on CID: %hu\n",
+ cmd->iscsi_opcode, cmd->init_task_tag, state,
+ conn->cid);
+ spin_unlock_bh(&cmd->istate_lock);
+ goto transport_err;
+ }
+ if (ret < 0) {
+ conn->tx_immediate_queue = 0;
+ goto transport_err;
+ }
+
+ if (iscsit_send_tx_data(cmd, conn, 1) < 0) {
+ conn->tx_immediate_queue = 0;
+ iscsit_tx_thread_wait_for_tcp(conn);
+ goto transport_err;
+ }
+
+ spin_lock_bh(&cmd->istate_lock);
+ switch (state) {
+ case ISTATE_SEND_R2T:
+ spin_unlock_bh(&cmd->istate_lock);
+ spin_lock_bh(&cmd->dataout_timeout_lock);
+ iscsit_start_dataout_timer(cmd, conn);
+ spin_unlock_bh(&cmd->dataout_timeout_lock);
+ break;
+ case ISTATE_SEND_NOPIN_WANT_RESPONSE:
+ cmd->i_state = ISTATE_SENT_NOPIN_WANT_RESPONSE;
+ spin_unlock_bh(&cmd->istate_lock);
+ break;
+ case ISTATE_SEND_NOPIN_NO_RESPONSE:
+ cmd->i_state = ISTATE_SENT_STATUS;
+ spin_unlock_bh(&cmd->istate_lock);
+ break;
+ default:
+ pr_err("Unknown Opcode: 0x%02x ITT:"
+ " 0x%08x, i_state: %d on CID: %hu\n",
+ cmd->iscsi_opcode, cmd->init_task_tag,
+ state, conn->cid);
+ spin_unlock_bh(&cmd->istate_lock);
+ goto transport_err;
+ }
+ goto get_immediate;
+ } else {
+ conn->tx_immediate_queue = 0;
+ }
+
+get_response:
+ qr = iscsit_get_cmd_from_response_queue(conn);
+ if (qr) {
+ cmd = qr->cmd;
+ state = qr->state;
+ kmem_cache_free(lio_qr_cache, qr);
+
+ spin_lock_bh(&cmd->istate_lock);
+check_rsp_state:
+ switch (state) {
+ case ISTATE_SEND_DATAIN:
+ spin_unlock_bh(&cmd->istate_lock);
+ ret = iscsit_send_data_in(cmd, conn,
+ &eodr);
+ map_sg = 1;
+ break;
+ case ISTATE_SEND_STATUS:
+ case ISTATE_SEND_STATUS_RECOVERY:
+ spin_unlock_bh(&cmd->istate_lock);
+ use_misc = 1;
+ ret = iscsit_send_status(cmd, conn);
+ break;
+ case ISTATE_SEND_LOGOUTRSP:
+ spin_unlock_bh(&cmd->istate_lock);
+ use_misc = 1;
+ ret = iscsit_send_logout_response(cmd, conn);
+ break;
+ case ISTATE_SEND_ASYNCMSG:
+ spin_unlock_bh(&cmd->istate_lock);
+ use_misc = 1;
+ ret = iscsit_send_conn_drop_async_message(
+ cmd, conn);
+ break;
+ case ISTATE_SEND_NOPIN:
+ spin_unlock_bh(&cmd->istate_lock);
+ use_misc = 1;
+ ret = iscsit_send_nopin_response(cmd, conn);
+ break;
+ case ISTATE_SEND_REJECT:
+ spin_unlock_bh(&cmd->istate_lock);
+ use_misc = 1;
+ ret = iscsit_send_reject(cmd, conn);
+ break;
+ case ISTATE_SEND_TASKMGTRSP:
+ spin_unlock_bh(&cmd->istate_lock);
+ use_misc = 1;
+ ret = iscsit_send_task_mgt_rsp(cmd, conn);
+ if (ret != 0)
+ break;
+ ret = iscsit_tmr_post_handler(cmd, conn);
+ if (ret != 0)
+ iscsit_fall_back_to_erl0(conn->sess);
+ break;
+ case ISTATE_SEND_TEXTRSP:
+ spin_unlock_bh(&cmd->istate_lock);
+ use_misc = 1;
+ ret = iscsit_send_text_rsp(cmd, conn);
+ break;
+ default:
+ pr_err("Unknown Opcode: 0x%02x ITT:"
+ " 0x%08x, i_state: %d on CID: %hu\n",
+ cmd->iscsi_opcode, cmd->init_task_tag,
+ state, conn->cid);
+ spin_unlock_bh(&cmd->istate_lock);
+ goto transport_err;
+ }
+ if (ret < 0) {
+ conn->tx_response_queue = 0;
+ goto transport_err;
+ }
+
+ se_cmd = &cmd->se_cmd;
+
+ if (map_sg && !conn->conn_ops->IFMarker) {
+ if (iscsit_fe_sendpage_sg(cmd, conn) < 0) {
+ conn->tx_response_queue = 0;
+ iscsit_tx_thread_wait_for_tcp(conn);
+ iscsit_unmap_iovec(cmd);
+ goto transport_err;
+ }
+ } else {
+ if (iscsit_send_tx_data(cmd, conn, use_misc) < 0) {
+ conn->tx_response_queue = 0;
+ iscsit_tx_thread_wait_for_tcp(conn);
+ iscsit_unmap_iovec(cmd);
+ goto transport_err;
+ }
+ }
+ map_sg = 0;
+ iscsit_unmap_iovec(cmd);
+
+ spin_lock_bh(&cmd->istate_lock);
+ switch (state) {
+ case ISTATE_SEND_DATAIN:
+ if (!eodr)
+ goto check_rsp_state;
+
+ if (eodr == 1) {
+ cmd->i_state = ISTATE_SENT_LAST_DATAIN;
+ sent_status = 1;
+ eodr = use_misc = 0;
+ } else if (eodr == 2) {
+ cmd->i_state = state =
+ ISTATE_SEND_STATUS;
+ sent_status = 0;
+ eodr = use_misc = 0;
+ goto check_rsp_state;
+ }
+ break;
+ case ISTATE_SEND_STATUS:
+ use_misc = 0;
+ sent_status = 1;
+ break;
+ case ISTATE_SEND_ASYNCMSG:
+ case ISTATE_SEND_NOPIN:
+ case ISTATE_SEND_STATUS_RECOVERY:
+ case ISTATE_SEND_TEXTRSP:
+ use_misc = 0;
+ sent_status = 1;
+ break;
+ case ISTATE_SEND_REJECT:
+ use_misc = 0;
+ if (cmd->cmd_flags & ICF_REJECT_FAIL_CONN) {
+ cmd->cmd_flags &= ~ICF_REJECT_FAIL_CONN;
+ spin_unlock_bh(&cmd->istate_lock);
+ complete(&cmd->reject_comp);
+ goto transport_err;
+ }
+ complete(&cmd->reject_comp);
+ break;
+ case ISTATE_SEND_TASKMGTRSP:
+ use_misc = 0;
+ sent_status = 1;
+ break;
+ case ISTATE_SEND_LOGOUTRSP:
+ spin_unlock_bh(&cmd->istate_lock);
+ if (!iscsit_logout_post_handler(cmd, conn))
+ goto restart;
+ spin_lock_bh(&cmd->istate_lock);
+ use_misc = 0;
+ sent_status = 1;
+ break;
+ default:
+ pr_err("Unknown Opcode: 0x%02x ITT:"
+ " 0x%08x, i_state: %d on CID: %hu\n",
+ cmd->iscsi_opcode, cmd->init_task_tag,
+ cmd->i_state, conn->cid);
+ spin_unlock_bh(&cmd->istate_lock);
+ goto transport_err;
+ }
+
+ if (sent_status) {
+ cmd->i_state = ISTATE_SENT_STATUS;
+ sent_status = 0;
+ }
+ spin_unlock_bh(&cmd->istate_lock);
+
+ if (atomic_read(&conn->check_immediate_queue))
+ goto get_immediate;
+
+ goto get_response;
+ } else {
+ conn->tx_response_queue = 0;
+ }
+ }
+
+transport_err:
+ iscsit_take_action_for_connection_exit(conn);
+ goto restart;
+out:
+ return 0;
+}
+
+int iscsi_target_rx_thread(void *arg)
+{
+ int ret;
+ u8 buffer[ISCSI_HDR_LEN], opcode;
+ u32 checksum = 0, digest = 0;
+ struct iscsi_conn *conn = NULL;
+ struct iscsi_thread_set *ts = (struct iscsi_thread_set *)arg;
+ struct kvec iov;
+ /*
+ * Allow ourselves to be interrupted by SIGINT so that a
+ * connection recovery / failure event can be triggered externally.
+ */
+ allow_signal(SIGINT);
+
+restart:
+ conn = iscsi_rx_thread_pre_handler(ts);
+ if (!conn)
+ goto out;
+
+ while (!kthread_should_stop()) {
+ /*
+ * Ensure that both TX and RX per connection kthreads
+ * are scheduled to run on the same CPU.
+ */
+ iscsit_thread_check_cpumask(conn, current, 0);
+
+ memset(buffer, 0, ISCSI_HDR_LEN);
+ memset(&iov, 0, sizeof(struct kvec));
+
+ iov.iov_base = buffer;
+ iov.iov_len = ISCSI_HDR_LEN;
+
+ ret = rx_data(conn, &iov, 1, ISCSI_HDR_LEN);
+ if (ret != ISCSI_HDR_LEN) {
+ iscsit_rx_thread_wait_for_tcp(conn);
+ goto transport_err;
+ }
+
+ /*
+ * Set conn->bad_hdr for use with REJECT PDUs.
+ */
+ memcpy(&conn->bad_hdr, &buffer, ISCSI_HDR_LEN);
+
+ if (conn->conn_ops->HeaderDigest) {
+ iov.iov_base = &digest;
+ iov.iov_len = ISCSI_CRC_LEN;
+
+ ret = rx_data(conn, &iov, 1, ISCSI_CRC_LEN);
+ if (ret != ISCSI_CRC_LEN) {
+ iscsit_rx_thread_wait_for_tcp(conn);
+ goto transport_err;
+ }
+
+ iscsit_do_crypto_hash_buf(&conn->conn_rx_hash,
+ buffer, ISCSI_HDR_LEN,
+ 0, NULL, (u8 *)&checksum);
+
+ if (digest != checksum) {
+ pr_err("HeaderDigest CRC32C failed,"
+ " received 0x%08x, computed 0x%08x\n",
+ digest, checksum);
+ /*
+ * Set the PDU to 0xff so it will intentionally
+ * hit default in the switch below.
+ */
+ memset(buffer, 0xff, ISCSI_HDR_LEN);
+ spin_lock_bh(&conn->sess->session_stats_lock);
+ conn->sess->conn_digest_errors++;
+ spin_unlock_bh(&conn->sess->session_stats_lock);
+ } else {
+ pr_debug("Got HeaderDigest CRC32C"
+ " 0x%08x\n", checksum);
+ }
+ }
+
+ if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT)
+ goto transport_err;
+
+ opcode = buffer[0] & ISCSI_OPCODE_MASK;
+
+ if (conn->sess->sess_ops->SessionType &&
+ (opcode != ISCSI_OP_TEXT) &&
+ (opcode != ISCSI_OP_LOGOUT)) {
+ pr_err("Received illegal iSCSI Opcode: 0x%02x"
+ " while in Discovery Session, rejecting.\n", opcode);
+ iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
+ buffer, conn);
+ goto transport_err;
+ }
+
+ switch (opcode) {
+ case ISCSI_OP_SCSI_CMD:
+ if (iscsit_handle_scsi_cmd(conn, buffer) < 0)
+ goto transport_err;
+ break;
+ case ISCSI_OP_SCSI_DATA_OUT:
+ if (iscsit_handle_data_out(conn, buffer) < 0)
+ goto transport_err;
+ break;
+ case ISCSI_OP_NOOP_OUT:
+ if (iscsit_handle_nop_out(conn, buffer) < 0)
+ goto transport_err;
+ break;
+ case ISCSI_OP_SCSI_TMFUNC:
+ if (iscsit_handle_task_mgt_cmd(conn, buffer) < 0)
+ goto transport_err;
+ break;
+ case ISCSI_OP_TEXT:
+ if (iscsit_handle_text_cmd(conn, buffer) < 0)
+ goto transport_err;
+ break;
+ case ISCSI_OP_LOGOUT:
+ ret = iscsit_handle_logout_cmd(conn, buffer);
+ if (ret > 0) {
+ wait_for_completion_timeout(&conn->conn_logout_comp,
+ SECONDS_FOR_LOGOUT_COMP * HZ);
+ goto transport_err;
+ } else if (ret < 0)
+ goto transport_err;
+ break;
+ case ISCSI_OP_SNACK:
+ if (iscsit_handle_snack(conn, buffer) < 0)
+ goto transport_err;
+ break;
+ default:
+ pr_err("Got unknown iSCSI OpCode: 0x%02x\n",
+ opcode);
+ if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
+ pr_err("Cannot recover from unknown"
+ " opcode while ERL=0, closing iSCSI connection"
+ ".\n");
+ goto transport_err;
+ }
+ if (!conn->conn_ops->OFMarker) {
+ pr_err("Unable to recover from unknown"
+ " opcode while OFMarker=No, closing iSCSI"
+ " connection.\n");
+ goto transport_err;
+ }
+ if (iscsit_recover_from_unknown_opcode(conn) < 0) {
+ pr_err("Unable to recover from unknown"
+ " opcode, closing iSCSI connection.\n");
+ goto transport_err;
+ }
+ break;
+ }
+ }
+
+transport_err:
+ if (!signal_pending(current))
+ atomic_set(&conn->transport_failed, 1);
+ iscsit_take_action_for_connection_exit(conn);
+ goto restart;
+out:
+ return 0;
+}
+
+static void iscsit_release_commands_from_conn(struct iscsi_conn *conn)
+{
+ struct iscsi_cmd *cmd = NULL, *cmd_tmp = NULL;
+ struct iscsi_session *sess = conn->sess;
+ struct se_cmd *se_cmd;
+ /*
+ * We expect this function to only ever be called from either RX or TX
+ * thread context via iscsit_close_connection() once the other context
+ * has been reset and has returned to its sleeping pre-handler state.
+ */
+ spin_lock_bh(&conn->cmd_lock);
+ list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_list) {
+ if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD)) {
+
+ list_del(&cmd->i_list);
+ spin_unlock_bh(&conn->cmd_lock);
+ iscsit_increment_maxcmdsn(cmd, sess);
+ se_cmd = &cmd->se_cmd;
+ /*
+ * Special cases for active iSCSI TMR, and
+ * transport_lookup_cmd_lun() failing from
+ * iscsit_get_lun_for_cmd() in iscsit_handle_scsi_cmd().
+ */
+ if (cmd->tmr_req && se_cmd->transport_wait_for_tasks)
+ se_cmd->transport_wait_for_tasks(se_cmd, 1, 1);
+ else if (cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD)
+ transport_release_cmd(se_cmd);
+ else
+ iscsit_release_cmd(cmd);
+
+ spin_lock_bh(&conn->cmd_lock);
+ continue;
+ }
+ list_del(&cmd->i_list);
+ spin_unlock_bh(&conn->cmd_lock);
+
+ iscsit_increment_maxcmdsn(cmd, sess);
+ se_cmd = &cmd->se_cmd;
+
+ if (se_cmd->transport_wait_for_tasks)
+ se_cmd->transport_wait_for_tasks(se_cmd, 1, 1);
+
+ spin_lock_bh(&conn->cmd_lock);
+ }
+ spin_unlock_bh(&conn->cmd_lock);
+}
+
+static void iscsit_stop_timers_for_cmds(
+ struct iscsi_conn *conn)
+{
+ struct iscsi_cmd *cmd;
+
+ spin_lock_bh(&conn->cmd_lock);
+ list_for_each_entry(cmd, &conn->conn_cmd_list, i_list) {
+ if (cmd->data_direction == DMA_TO_DEVICE)
+ iscsit_stop_dataout_timer(cmd);
+ }
+ spin_unlock_bh(&conn->cmd_lock);
+}
+
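+/*
+ * Tear down a single iSCSI connection: stop per-command and NopIN
+ * timers, release or prepare commands depending on whether connection
+ * recovery is active, drop the connection from the session list, and
+ * then decide whether the session itself must be failed, reinstated,
+ * or closed.
+ */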
+int iscsit_close_connection(
+ struct iscsi_conn *conn)
+{
+ int conn_logout = (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT);
+ struct iscsi_session *sess = conn->sess;
+
+ pr_debug("Closing iSCSI connection CID %hu on SID:"
+ " %u\n", conn->cid, sess->sid);
+ /*
+ * Always complete conn_logout_comp just in case the RX Thread is sleeping
+ * and the logout response never got sent because the connection
+ * failed.
+ */
+ complete(&conn->conn_logout_comp);
+
+ iscsi_release_thread_set(conn);
+
+ iscsit_stop_timers_for_cmds(conn);
+ iscsit_stop_nopin_response_timer(conn);
+ iscsit_stop_nopin_timer(conn);
+ iscsit_free_queue_reqs_for_conn(conn);
+
+ /*
+ * During connection recovery, drop unacknowledged out of order
+ * commands for this connection, and prepare the remaining commands
+ * for reallegiance.
+ *
+ * During normal operation clear the out of order commands (but
+ * do not free the struct iscsi_ooo_cmdsn's) and release all
+ * struct iscsi_cmds.
+ */
+ if (atomic_read(&conn->connection_recovery)) {
+ iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(conn);
+ iscsit_prepare_cmds_for_realligance(conn);
+ } else {
+ iscsit_clear_ooo_cmdsns_for_conn(conn);
+ iscsit_release_commands_from_conn(conn);
+ }
+
+ /*
+ * Handle decrementing session or connection usage count if
+ * a logout response was not able to be sent because the
+ * connection failed. Fall back to Session Recovery here.
+ */
+ if (atomic_read(&conn->conn_logout_remove)) {
+ if (conn->conn_logout_reason == ISCSI_LOGOUT_REASON_CLOSE_SESSION) {
+ iscsit_dec_conn_usage_count(conn);
+ iscsit_dec_session_usage_count(sess);
+ }
+ if (conn->conn_logout_reason == ISCSI_LOGOUT_REASON_CLOSE_CONNECTION)
+ iscsit_dec_conn_usage_count(conn);
+
+ atomic_set(&conn->conn_logout_remove, 0);
+ atomic_set(&sess->session_reinstatement, 0);
+ atomic_set(&sess->session_fall_back_to_erl0, 1);
+ }
+
+ spin_lock_bh(&sess->conn_lock);
+ list_del(&conn->conn_list);
+
+ /*
+ * Attempt to let the Initiator know this connection failed by
+ * sending a Connection Dropped Async Message on another
+ * active connection.
+ */
+ if (atomic_read(&conn->connection_recovery))
+ iscsit_build_conn_drop_async_message(conn);
+
+ spin_unlock_bh(&sess->conn_lock);
+
+ /*
+ * If connection reinstatement is being performed on this connection,
+ * complete the connection reinstatement completion that is being waited on
+ * in iscsit_cause_connection_reinstatement().
+ */
+ spin_lock_bh(&conn->state_lock);
+ if (atomic_read(&conn->sleep_on_conn_wait_comp)) {
+ spin_unlock_bh(&conn->state_lock);
+ complete(&conn->conn_wait_comp);
+ wait_for_completion(&conn->conn_post_wait_comp);
+ spin_lock_bh(&conn->state_lock);
+ }
+
+ /*
+ * If connection reinstatement is being performed on this connection
+ * by receiving a REMOVECONNFORRECOVERY logout request, complete the
+ * connection wait rcfr completion that is being waited on
+ * in iscsit_connection_reinstatement_rcfr().
+ */
+ if (atomic_read(&conn->connection_wait_rcfr)) {
+ spin_unlock_bh(&conn->state_lock);
+ complete(&conn->conn_wait_rcfr_comp);
+ wait_for_completion(&conn->conn_post_wait_comp);
+ spin_lock_bh(&conn->state_lock);
+ }
+ atomic_set(&conn->connection_reinstatement, 1);
+ spin_unlock_bh(&conn->state_lock);
+
+ /*
+ * If any other processes are accessing this connection pointer we
+ * must wait until they have completed.
+ */
+ iscsit_check_conn_usage_count(conn);
+
+ if (conn->conn_rx_hash.tfm)
+ crypto_free_hash(conn->conn_rx_hash.tfm);
+ if (conn->conn_tx_hash.tfm)
+ crypto_free_hash(conn->conn_tx_hash.tfm);
+
+ if (conn->conn_cpumask)
+ free_cpumask_var(conn->conn_cpumask);
+
+ kfree(conn->conn_ops);
+ conn->conn_ops = NULL;
+
+ if (conn->sock) {
+ if (conn->conn_flags & CONNFLAG_SCTP_STRUCT_FILE) {
+ kfree(conn->sock->file);
+ conn->sock->file = NULL;
+ }
+ sock_release(conn->sock);
+ }
+ conn->thread_set = NULL;
+
+ pr_debug("Moving to TARG_CONN_STATE_FREE.\n");
+ conn->conn_state = TARG_CONN_STATE_FREE;
+ kfree(conn);
+
+ spin_lock_bh(&sess->conn_lock);
+ atomic_dec(&sess->nconn);
+ pr_debug("Decremented iSCSI connection count to %hu from node:"
+ " %s\n", atomic_read(&sess->nconn),
+ sess->sess_ops->InitiatorName);
+ /*
+ * Make sure that if one connection fails in a non ERL=2 iSCSI
+ * Session, they all fail.
+ */
+ if ((sess->sess_ops->ErrorRecoveryLevel != 2) && !conn_logout &&
+ !atomic_read(&sess->session_logout))
+ atomic_set(&sess->session_fall_back_to_erl0, 1);
+
+ /*
+ * If this was not the last connection in the session, and we are
+ * performing session reinstatement or falling back to ERL=0, call
+ * iscsit_stop_session() without sleeping to shutdown the other
+ * active connections.
+ */
+ if (atomic_read(&sess->nconn)) {
+ if (!atomic_read(&sess->session_reinstatement) &&
+ !atomic_read(&sess->session_fall_back_to_erl0)) {
+ spin_unlock_bh(&sess->conn_lock);
+ return 0;
+ }
+ if (!atomic_read(&sess->session_stop_active)) {
+ atomic_set(&sess->session_stop_active, 1);
+ spin_unlock_bh(&sess->conn_lock);
+ iscsit_stop_session(sess, 0, 0);
+ return 0;
+ }
+ spin_unlock_bh(&sess->conn_lock);
+ return 0;
+ }
+
+ /*
+ * If this was the last connection in the session and one of the
+ * following is occurring:
+ *
+ * Session Reinstatement is not being performed, and we are falling
+ * back to ERL=0, so call iscsit_close_session().
+ *
+ * Session Logout was requested. iscsit_close_session() will be called
+ * elsewhere.
+ *
+ * Session Continuation is not being performed, start the Time2Retain
+ * handler and check if sleep_on_sess_wait_sem is active.
+ */
+ if (!atomic_read(&sess->session_reinstatement) &&
+ atomic_read(&sess->session_fall_back_to_erl0)) {
+ spin_unlock_bh(&sess->conn_lock);
+ iscsit_close_session(sess);
+
+ return 0;
+ } else if (atomic_read(&sess->session_logout)) {
+ pr_debug("Moving to TARG_SESS_STATE_FREE.\n");
+ sess->session_state = TARG_SESS_STATE_FREE;
+ spin_unlock_bh(&sess->conn_lock);
+
+ if (atomic_read(&sess->sleep_on_sess_wait_comp))
+ complete(&sess->session_wait_comp);
+
+ return 0;
+ } else {
+ pr_debug("Moving to TARG_SESS_STATE_FAILED.\n");
+ sess->session_state = TARG_SESS_STATE_FAILED;
+
+ if (!atomic_read(&sess->session_continuation)) {
+ spin_unlock_bh(&sess->conn_lock);
+ iscsit_start_time2retain_handler(sess);
+ } else
+ spin_unlock_bh(&sess->conn_lock);
+
+ if (atomic_read(&sess->sleep_on_sess_wait_comp))
+ complete(&sess->session_wait_comp);
+
+ return 0;
+ }
+}
+
+int iscsit_close_session(struct iscsi_session *sess)
+{
+ struct iscsi_portal_group *tpg = ISCSI_TPG_S(sess);
+ struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
+
+ if (atomic_read(&sess->nconn)) {
+ pr_err("%d connection(s) still exist for iSCSI session"
+ " to %s\n", atomic_read(&sess->nconn),
+ sess->sess_ops->InitiatorName);
+ BUG();
+ }
+
+ spin_lock_bh(&se_tpg->session_lock);
+ atomic_set(&sess->session_logout, 1);
+ atomic_set(&sess->session_reinstatement, 1);
+ iscsit_stop_time2retain_timer(sess);
+ spin_unlock_bh(&se_tpg->session_lock);
+
+ /*
+ * transport_deregister_session_configfs() will clear the
+ * struct se_node_acl->nacl_sess pointer now, as an iscsi_np process
+ * context may set it again with __transport_register_session() in
+ * iscsi_post_login_handler() after iscsit_stop_session()
+ * completes in iscsi_np context.
+ */
+ transport_deregister_session_configfs(sess->se_sess);
+
+ /*
+ * If any other processes are accessing this session pointer we must
+ * wait until they have completed. If we are in an interrupt (the
+ * time2retain handler) and hold an active session usage count, we
+ * restart the timer and exit.
+ */
+ if (!in_interrupt()) {
+ if (iscsit_check_session_usage_count(sess) == 1)
+ iscsit_stop_session(sess, 1, 1);
+ } else {
+ if (iscsit_check_session_usage_count(sess) == 2) {
+ atomic_set(&sess->session_logout, 0);
+ iscsit_start_time2retain_handler(sess);
+ return 0;
+ }
+ }
+
+ transport_deregister_session(sess->se_sess);
+
+ if (sess->sess_ops->ErrorRecoveryLevel == 2)
+ iscsit_free_connection_recovery_entires(sess);
+
+ iscsit_free_all_ooo_cmdsns(sess);
+
+ spin_lock_bh(&se_tpg->session_lock);
+ pr_debug("Moving to TARG_SESS_STATE_FREE.\n");
+ sess->session_state = TARG_SESS_STATE_FREE;
+ pr_debug("Released iSCSI session from node: %s\n",
+ sess->sess_ops->InitiatorName);
+ tpg->nsessions--;
+ if (tpg->tpg_tiqn)
+ tpg->tpg_tiqn->tiqn_nsessions--;
+
+ pr_debug("Decremented number of active iSCSI Sessions on"
+ " iSCSI TPG: %hu to %u\n", tpg->tpgt, tpg->nsessions);
+
+ spin_lock(&sess_idr_lock);
+ idr_remove(&sess_idr, sess->session_index);
+ spin_unlock(&sess_idr_lock);
+
+ kfree(sess->sess_ops);
+ sess->sess_ops = NULL;
+ spin_unlock_bh(&se_tpg->session_lock);
+
+ kfree(sess);
+ return 0;
+}
+
+static void iscsit_logout_post_handler_closesession(
+ struct iscsi_conn *conn)
+{
+ struct iscsi_session *sess = conn->sess;
+
+ iscsi_set_thread_clear(conn, ISCSI_CLEAR_TX_THREAD);
+ iscsi_set_thread_set_signal(conn, ISCSI_SIGNAL_TX_THREAD);
+
+ atomic_set(&conn->conn_logout_remove, 0);
+ complete(&conn->conn_logout_comp);
+
+ iscsit_dec_conn_usage_count(conn);
+ iscsit_stop_session(sess, 1, 1);
+ iscsit_dec_session_usage_count(sess);
+ iscsit_close_session(sess);
+}
+
+static void iscsit_logout_post_handler_samecid(
+ struct iscsi_conn *conn)
+{
+ iscsi_set_thread_clear(conn, ISCSI_CLEAR_TX_THREAD);
+ iscsi_set_thread_set_signal(conn, ISCSI_SIGNAL_TX_THREAD);
+
+ atomic_set(&conn->conn_logout_remove, 0);
+ complete(&conn->conn_logout_comp);
+
+ iscsit_cause_connection_reinstatement(conn, 1);
+ iscsit_dec_conn_usage_count(conn);
+}
+
+static void iscsit_logout_post_handler_diffcid(
+ struct iscsi_conn *conn,
+ u16 cid)
+{
+ struct iscsi_conn *l_conn = NULL, *c;
+ struct iscsi_session *sess = conn->sess;
+
+ if (!sess)
+ return;
+
+ spin_lock_bh(&sess->conn_lock);
+ list_for_each_entry(c, &sess->sess_conn_list, conn_list) {
+ if (c->cid == cid) {
+ iscsit_inc_conn_usage_count(c);
+ l_conn = c;
+ break;
+ }
+ }
+ spin_unlock_bh(&sess->conn_lock);
+
+ if (!l_conn)
+ return;
+
+ if (l_conn->sock)
+ l_conn->sock->ops->shutdown(l_conn->sock, RCV_SHUTDOWN);
+
+ spin_lock_bh(&l_conn->state_lock);
+ pr_debug("Moving to TARG_CONN_STATE_IN_LOGOUT.\n");
+ l_conn->conn_state = TARG_CONN_STATE_IN_LOGOUT;
+ spin_unlock_bh(&l_conn->state_lock);
+
+ iscsit_cause_connection_reinstatement(l_conn, 1);
+ iscsit_dec_conn_usage_count(l_conn);
+}
+
+/*
+ * Return of 0 causes the TX thread to restart; a nonzero return
+ * leaves it processing the current connection.
+ */
+static int iscsit_logout_post_handler(
+ struct iscsi_cmd *cmd,
+ struct iscsi_conn *conn)
+{
+ int ret = 0;
+
+ switch (cmd->logout_reason) {
+ case ISCSI_LOGOUT_REASON_CLOSE_SESSION:
+ switch (cmd->logout_response) {
+ case ISCSI_LOGOUT_SUCCESS:
+ case ISCSI_LOGOUT_CLEANUP_FAILED:
+ default:
+ iscsit_logout_post_handler_closesession(conn);
+ break;
+ }
+ ret = 0;
+ break;
+ case ISCSI_LOGOUT_REASON_CLOSE_CONNECTION:
+ if (conn->cid == cmd->logout_cid) {
+ switch (cmd->logout_response) {
+ case ISCSI_LOGOUT_SUCCESS:
+ case ISCSI_LOGOUT_CLEANUP_FAILED:
+ default:
+ iscsit_logout_post_handler_samecid(conn);
+ break;
+ }
+ ret = 0;
+ } else {
+ switch (cmd->logout_response) {
+ case ISCSI_LOGOUT_SUCCESS:
+ iscsit_logout_post_handler_diffcid(conn,
+ cmd->logout_cid);
+ break;
+ case ISCSI_LOGOUT_CID_NOT_FOUND:
+ case ISCSI_LOGOUT_CLEANUP_FAILED:
+ default:
+ break;
+ }
+ ret = 1;
+ }
+ break;
+ case ISCSI_LOGOUT_REASON_RECOVERY:
+ switch (cmd->logout_response) {
+ case ISCSI_LOGOUT_SUCCESS:
+ case ISCSI_LOGOUT_CID_NOT_FOUND:
+ case ISCSI_LOGOUT_RECOVERY_UNSUPPORTED:
+ case ISCSI_LOGOUT_CLEANUP_FAILED:
+ default:
+ break;
+ }
+ ret = 1;
+ break;
+ default:
+ break;
+
+ }
+ return ret;
+}
+
+void iscsit_fail_session(struct iscsi_session *sess)
+{
+ struct iscsi_conn *conn;
+
+ spin_lock_bh(&sess->conn_lock);
+ list_for_each_entry(conn, &sess->sess_conn_list, conn_list) {
+ pr_debug("Moving to TARG_CONN_STATE_CLEANUP_WAIT.\n");
+ conn->conn_state = TARG_CONN_STATE_CLEANUP_WAIT;
+ }
+ spin_unlock_bh(&sess->conn_lock);
+
+ pr_debug("Moving to TARG_SESS_STATE_FAILED.\n");
+ sess->session_state = TARG_SESS_STATE_FAILED;
+}
+
+int iscsit_free_session(struct iscsi_session *sess)
+{
+ u16 conn_count = atomic_read(&sess->nconn);
+ struct iscsi_conn *conn, *conn_tmp = NULL;
+ int is_last;
+
+ spin_lock_bh(&sess->conn_lock);
+ atomic_set(&sess->sleep_on_sess_wait_comp, 1);
+
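+ /*
+ * Take an extra usage count on both the current and the next
+ * connection so that neither can be freed while conn_lock is
+ * dropped around iscsit_cause_connection_reinstatement() below.
+ */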
+ list_for_each_entry_safe(conn, conn_tmp, &sess->sess_conn_list,
+ conn_list) {
+ if (conn_count == 0)
+ break;
+
+ if (list_is_last(&conn->conn_list, &sess->sess_conn_list)) {
+ is_last = 1;
+ } else {
+ iscsit_inc_conn_usage_count(conn_tmp);
+ is_last = 0;
+ }
+ iscsit_inc_conn_usage_count(conn);
+
+ spin_unlock_bh(&sess->conn_lock);
+ iscsit_cause_connection_reinstatement(conn, 1);
+ spin_lock_bh(&sess->conn_lock);
+
+ iscsit_dec_conn_usage_count(conn);
+ if (is_last == 0)
+ iscsit_dec_conn_usage_count(conn_tmp);
+
+ conn_count--;
+ }
+
+ if (atomic_read(&sess->nconn)) {
+ spin_unlock_bh(&sess->conn_lock);
+ wait_for_completion(&sess->session_wait_comp);
+ } else
+ spin_unlock_bh(&sess->conn_lock);
+
+ iscsit_close_session(sess);
+ return 0;
+}
+
+void iscsit_stop_session(
+ struct iscsi_session *sess,
+ int session_sleep,
+ int connection_sleep)
+{
+ u16 conn_count = atomic_read(&sess->nconn);
+ struct iscsi_conn *conn, *conn_tmp = NULL;
+ int is_last;
+
+ spin_lock_bh(&sess->conn_lock);
+ if (session_sleep)
+ atomic_set(&sess->sleep_on_sess_wait_comp, 1);
+
+ if (connection_sleep) {
+ list_for_each_entry_safe(conn, conn_tmp, &sess->sess_conn_list,
+ conn_list) {
+ if (conn_count == 0)
+ break;
+
+ if (list_is_last(&conn->conn_list, &sess->sess_conn_list)) {
+ is_last = 1;
+ } else {
+ iscsit_inc_conn_usage_count(conn_tmp);
+ is_last = 0;
+ }
+ iscsit_inc_conn_usage_count(conn);
+
+ spin_unlock_bh(&sess->conn_lock);
+ iscsit_cause_connection_reinstatement(conn, 1);
+ spin_lock_bh(&sess->conn_lock);
+
+ iscsit_dec_conn_usage_count(conn);
+ if (is_last == 0)
+ iscsit_dec_conn_usage_count(conn_tmp);
+ conn_count--;
+ }
+ } else {
+ list_for_each_entry(conn, &sess->sess_conn_list, conn_list)
+ iscsit_cause_connection_reinstatement(conn, 0);
+ }
+
+ if (session_sleep && atomic_read(&sess->nconn)) {
+ spin_unlock_bh(&sess->conn_lock);
+ wait_for_completion(&sess->session_wait_comp);
+ } else
+ spin_unlock_bh(&sess->conn_lock);
+}
+
+int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *tpg, int force)
+{
+ struct iscsi_session *sess;
+ struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
+ struct se_session *se_sess, *se_sess_tmp;
+ int session_count = 0;
+
+ spin_lock_bh(&se_tpg->session_lock);
+ if (tpg->nsessions && !force) {
+ spin_unlock_bh(&se_tpg->session_lock);
+ return -1;
+ }
+
+ list_for_each_entry_safe(se_sess, se_sess_tmp, &se_tpg->tpg_sess_list,
+ sess_list) {
+ sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
+
+ spin_lock(&sess->conn_lock);
+ if (atomic_read(&sess->session_fall_back_to_erl0) ||
+ atomic_read(&sess->session_logout) ||
+ (sess->time2retain_timer_flags & ISCSI_TF_EXPIRED)) {
+ spin_unlock(&sess->conn_lock);
+ continue;
+ }
+ atomic_set(&sess->session_reinstatement, 1);
+ spin_unlock(&sess->conn_lock);
+ spin_unlock_bh(&se_tpg->session_lock);
+
+ iscsit_free_session(sess);
+ spin_lock_bh(&se_tpg->session_lock);
+
+ session_count++;
+ }
+ spin_unlock_bh(&se_tpg->session_lock);
+
+ pr_debug("Released %d iSCSI Session(s) from Target Portal"
+ " Group: %hu\n", session_count, tpg->tpgt);
+ return 0;
+}
+
+MODULE_DESCRIPTION("iSCSI-Target Driver for mainline target infrastructure");
+MODULE_VERSION("4.1.x");
+MODULE_AUTHOR("nab@Linux-iSCSI.org");
+MODULE_LICENSE("GPL");
+
+module_init(iscsi_target_init_module);
+module_exit(iscsi_target_cleanup_module);
diff --git a/drivers/target/iscsi/iscsi_target.h b/drivers/target/iscsi/iscsi_target.h
new file mode 100644
index 00000000000..5db2ddeed5e
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target.h
@@ -0,0 +1,42 @@
+#ifndef ISCSI_TARGET_H
+#define ISCSI_TARGET_H
+
+extern struct iscsi_tiqn *iscsit_get_tiqn_for_login(unsigned char *);
+extern struct iscsi_tiqn *iscsit_get_tiqn(unsigned char *, int);
+extern void iscsit_put_tiqn_for_login(struct iscsi_tiqn *);
+extern struct iscsi_tiqn *iscsit_add_tiqn(unsigned char *);
+extern void iscsit_del_tiqn(struct iscsi_tiqn *);
+extern int iscsit_access_np(struct iscsi_np *, struct iscsi_portal_group *);
+extern int iscsit_deaccess_np(struct iscsi_np *, struct iscsi_portal_group *);
+extern struct iscsi_np *iscsit_add_np(struct __kernel_sockaddr_storage *,
+ char *, int);
+extern int iscsit_reset_np_thread(struct iscsi_np *, struct iscsi_tpg_np *,
+ struct iscsi_portal_group *);
+extern int iscsit_del_np(struct iscsi_np *);
+extern int iscsit_add_reject_from_cmd(u8, int, int, unsigned char *, struct iscsi_cmd *);
+extern int iscsit_logout_closesession(struct iscsi_cmd *, struct iscsi_conn *);
+extern int iscsit_logout_closeconnection(struct iscsi_cmd *, struct iscsi_conn *);
+extern int iscsit_logout_removeconnforrecovery(struct iscsi_cmd *, struct iscsi_conn *);
+extern int iscsit_send_async_msg(struct iscsi_conn *, u16, u8, u8);
+extern int iscsit_send_r2t(struct iscsi_cmd *, struct iscsi_conn *);
+extern int iscsit_build_r2ts_for_cmd(struct iscsi_cmd *, struct iscsi_conn *, int);
+extern void iscsit_thread_get_cpumask(struct iscsi_conn *);
+extern int iscsi_target_tx_thread(void *);
+extern int iscsi_target_rx_thread(void *);
+extern int iscsit_close_connection(struct iscsi_conn *);
+extern int iscsit_close_session(struct iscsi_session *);
+extern void iscsit_fail_session(struct iscsi_session *);
+extern int iscsit_free_session(struct iscsi_session *);
+extern void iscsit_stop_session(struct iscsi_session *, int, int);
+extern int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *, int);
+
+extern struct iscsit_global *iscsit_global;
+extern struct target_fabric_configfs *lio_target_fabric_configfs;
+
+extern struct kmem_cache *lio_dr_cache;
+extern struct kmem_cache *lio_ooo_cache;
+extern struct kmem_cache *lio_cmd_cache;
+extern struct kmem_cache *lio_qr_cache;
+extern struct kmem_cache *lio_r2t_cache;
+
+#endif /*** ISCSI_TARGET_H ***/
diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c
new file mode 100644
index 00000000000..11fd7430781
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_auth.c
@@ -0,0 +1,490 @@
+/*******************************************************************************
+ * This file houses the main functions for the iSCSI CHAP support
+ *
+ * © Copyright 2007-2011 RisingTide Systems LLC.
+ *
+ * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ *
+ * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ ******************************************************************************/
+
+#include <linux/string.h>
+#include <linux/crypto.h>
+#include <linux/err.h>
+#include <linux/scatterlist.h>
+
+#include "iscsi_target_core.h"
+#include "iscsi_target_nego.h"
+#include "iscsi_target_auth.h"
+
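+/*
+ * Convert a two character ASCII hex pair (e.g. "a5") into its single
+ * byte binary value.
+ */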
+static unsigned char chap_asciihex_to_binaryhex(unsigned char val[2])
+{
+ unsigned char result = 0;
+ /*
+ * MSB
+ */
+ if ((val[0] >= 'a') && (val[0] <= 'f'))
+ result = ((val[0] - 'a' + 10) & 0xf) << 4;
+ else if ((val[0] >= 'A') && (val[0] <= 'F'))
+ result = ((val[0] - 'A' + 10) & 0xf) << 4;
+ else /* digit */
+ result = ((val[0] - '0') & 0xf) << 4;
+ /*
+ * LSB
+ */
+ if ((val[1] >= 'a') && (val[1] <= 'f'))
+ result |= ((val[1] - 'a' + 10) & 0xf);
+ else if ((val[1] >= 'A') && (val[1] <= 'F'))
+ result |= ((val[1] - 'A' + 10) & 0xf);
+ else /* digit */
+ result |= ((val[1] - '0') & 0xf);
+
+ return result;
+}
+
+static int chap_string_to_hex(unsigned char *dst, unsigned char *src, int len)
+{
+ int i, j = 0;
+
+ for (i = 0; i < len; i += 2) {
+ dst[j++] = (unsigned char) chap_asciihex_to_binaryhex(&src[i]);
+ }
+
+ dst[j] = '\0';
+ return j;
+}
+
+static void chap_binaryhex_to_asciihex(char *dst, char *src, int src_len)
+{
+ int i;
+
+ for (i = 0; i < src_len; i++) {
+ sprintf(&dst[i*2], "%02x", (int) src[i] & 0xff);
+ }
+}
+
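+/*
+ * Fill the buffer with random bytes, assembling each output byte from
+ * three separate get_random_bytes() draws (3 + 3 + 2 bits).
+ */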
+static void chap_set_random(char *data, int length)
+{
+ long r;
+ unsigned n;
+
+ while (length > 0) {
+ get_random_bytes(&r, sizeof(long));
+ r = r ^ (r >> 8);
+ r = r ^ (r >> 4);
+ n = r & 0x7;
+
+ get_random_bytes(&r, sizeof(long));
+ r = r ^ (r >> 8);
+ r = r ^ (r >> 5);
+ n = (n << 3) | (r & 0x7);
+
+ get_random_bytes(&r, sizeof(long));
+ r = r ^ (r >> 8);
+ r = r ^ (r >> 5);
+ n = (n << 2) | (r & 0x3);
+
+ *data++ = n;
+ length--;
+ }
+}
+
+static void chap_gen_challenge(
+ struct iscsi_conn *conn,
+ int caller,
+ char *c_str,
+ unsigned int *c_len)
+{
+ unsigned char challenge_asciihex[CHAP_CHALLENGE_LENGTH * 2 + 1];
+ struct iscsi_chap *chap = (struct iscsi_chap *) conn->auth_protocol;
+
+ memset(challenge_asciihex, 0, CHAP_CHALLENGE_LENGTH * 2 + 1);
+
+ chap_set_random(chap->challenge, CHAP_CHALLENGE_LENGTH);
+ chap_binaryhex_to_asciihex(challenge_asciihex, chap->challenge,
+ CHAP_CHALLENGE_LENGTH);
+ /*
+ * Set CHAP_C, and copy the generated challenge into c_str.
+ */
+ *c_len += sprintf(c_str + *c_len, "CHAP_C=0x%s", challenge_asciihex);
+ *c_len += 1;
+
+ pr_debug("[%s] Sending CHAP_C=0x%s\n\n", (caller) ? "server" : "client",
+ challenge_asciihex);
+}
+
+
+static struct iscsi_chap *chap_server_open(
+ struct iscsi_conn *conn,
+ struct iscsi_node_auth *auth,
+ const char *a_str,
+ char *aic_str,
+ unsigned int *aic_len)
+{
+ struct iscsi_chap *chap;
+
+ if (!(auth->naf_flags & NAF_USERID_SET) ||
+ !(auth->naf_flags & NAF_PASSWORD_SET)) {
+ pr_err("CHAP user or password not set for"
+ " Initiator ACL\n");
+ return NULL;
+ }
+
+ conn->auth_protocol = kzalloc(sizeof(struct iscsi_chap), GFP_KERNEL);
+ if (!conn->auth_protocol)
+ return NULL;
+
+ chap = (struct iscsi_chap *) conn->auth_protocol;
+ /*
+ * We only support the MD5 message digest algorithm presently.
+ */
+ if (strncmp(a_str, "CHAP_A=5", 8)) {
+ pr_err("CHAP_A is not MD5.\n");
+ return NULL;
+ }
+ pr_debug("[server] Got CHAP_A=5\n");
+ /*
+ * Send back CHAP_A set to MD5.
+ */
+ *aic_len = sprintf(aic_str, "CHAP_A=5");
+ *aic_len += 1;
+ chap->digest_type = CHAP_DIGEST_MD5;
+ pr_debug("[server] Sending CHAP_A=%d\n", chap->digest_type);
+ /*
+ * Set Identifier.
+ */
+ chap->id = ISCSI_TPG_C(conn)->tpg_chap_id++;
+ *aic_len += sprintf(aic_str + *aic_len, "CHAP_I=%d", chap->id);
+ *aic_len += 1;
+ pr_debug("[server] Sending CHAP_I=%d\n", chap->id);
+ /*
+ * Generate Challenge.
+ */
+ chap_gen_challenge(conn, 1, aic_str, aic_len);
+
+ return chap;
+}
+
+static void chap_close(struct iscsi_conn *conn)
+{
+ kfree(conn->auth_protocol);
+ conn->auth_protocol = NULL;
+}
+
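+/*
+ * Verify the initiator's CHAP_R against a locally computed RFC 1994
+ * digest, MD5(Identifier || secret || Challenge). When mutual
+ * authentication is enabled, also generate the target's CHAP_N and
+ * CHAP_R response for the initiator's CHAP_I/CHAP_C challenge.
+ */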
+static int chap_server_compute_md5(
+ struct iscsi_conn *conn,
+ struct iscsi_node_auth *auth,
+ char *nr_in_ptr,
+ char *nr_out_ptr,
+ unsigned int *nr_out_len)
+{
+ char *endptr;
+ unsigned char id, digest[MD5_SIGNATURE_SIZE];
+ unsigned char type, response[MD5_SIGNATURE_SIZE * 2 + 2];
+ unsigned char identifier[10], *challenge = NULL;
+ unsigned char *challenge_binhex = NULL;
+ unsigned char client_digest[MD5_SIGNATURE_SIZE];
+ unsigned char server_digest[MD5_SIGNATURE_SIZE];
+ unsigned char chap_n[MAX_CHAP_N_SIZE], chap_r[MAX_RESPONSE_LENGTH];
+ struct iscsi_chap *chap = (struct iscsi_chap *) conn->auth_protocol;
+ struct crypto_hash *tfm;
+ struct hash_desc desc;
+ struct scatterlist sg;
+ int auth_ret = -1, ret, challenge_len;
+
+ memset(identifier, 0, 10);
+ memset(chap_n, 0, MAX_CHAP_N_SIZE);
+ memset(chap_r, 0, MAX_RESPONSE_LENGTH);
+ memset(digest, 0, MD5_SIGNATURE_SIZE);
+ memset(response, 0, MD5_SIGNATURE_SIZE * 2 + 2);
+ memset(client_digest, 0, MD5_SIGNATURE_SIZE);
+ memset(server_digest, 0, MD5_SIGNATURE_SIZE);
+
+ challenge = kzalloc(CHAP_CHALLENGE_STR_LEN, GFP_KERNEL);
+ if (!challenge) {
+ pr_err("Unable to allocate challenge buffer\n");
+ goto out;
+ }
+
+ challenge_binhex = kzalloc(CHAP_CHALLENGE_STR_LEN, GFP_KERNEL);
+ if (!challenge_binhex) {
+ pr_err("Unable to allocate challenge_binhex buffer\n");
+ goto out;
+ }
+ /*
+ * Extract CHAP_N.
+ */
+ if (extract_param(nr_in_ptr, "CHAP_N", MAX_CHAP_N_SIZE, chap_n,
+ &type) < 0) {
+ pr_err("Could not find CHAP_N.\n");
+ goto out;
+ }
+ if (type == HEX) {
+ pr_err("Could not find CHAP_N.\n");
+ goto out;
+ }
+
+ if (memcmp(chap_n, auth->userid, strlen(auth->userid)) != 0) {
+ pr_err("CHAP_N values do not match!\n");
+ goto out;
+ }
+ pr_debug("[server] Got CHAP_N=%s\n", chap_n);
+ /*
+ * Extract CHAP_R.
+ */
+ if (extract_param(nr_in_ptr, "CHAP_R", MAX_RESPONSE_LENGTH, chap_r,
+ &type) < 0) {
+ pr_err("Could not find CHAP_R.\n");
+ goto out;
+ }
+ if (type != HEX) {
+ pr_err("Could not find CHAP_R.\n");
+ goto out;
+ }
+
+ pr_debug("[server] Got CHAP_R=%s\n", chap_r);
+ chap_string_to_hex(client_digest, chap_r, strlen(chap_r));
+
+ tfm = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
+ if (IS_ERR(tfm)) {
+ pr_err("Unable to allocate struct crypto_hash\n");
+ goto out;
+ }
+ desc.tfm = tfm;
+ desc.flags = 0;
+
+ ret = crypto_hash_init(&desc);
+ if (ret < 0) {
+ pr_err("crypto_hash_init() failed\n");
+ crypto_free_hash(tfm);
+ goto out;
+ }
+
+ sg_init_one(&sg, (void *)&chap->id, 1);
+ ret = crypto_hash_update(&desc, &sg, 1);
+ if (ret < 0) {
+ pr_err("crypto_hash_update() failed for id\n");
+ crypto_free_hash(tfm);
+ goto out;
+ }
+
+ sg_init_one(&sg, (void *)&auth->password, strlen(auth->password));
+ ret = crypto_hash_update(&desc, &sg, strlen(auth->password));
+ if (ret < 0) {
+ pr_err("crypto_hash_update() failed for password\n");
+ crypto_free_hash(tfm);
+ goto out;
+ }
+
+ sg_init_one(&sg, (void *)chap->challenge, CHAP_CHALLENGE_LENGTH);
+ ret = crypto_hash_update(&desc, &sg, CHAP_CHALLENGE_LENGTH);
+ if (ret < 0) {
+ pr_err("crypto_hash_update() failed for challenge\n");
+ crypto_free_hash(tfm);
+ goto out;
+ }
+
+ ret = crypto_hash_final(&desc, server_digest);
+ if (ret < 0) {
+ pr_err("crypto_hash_final() failed for server digest\n");
+ crypto_free_hash(tfm);
+ goto out;
+ }
+ crypto_free_hash(tfm);
+
+ chap_binaryhex_to_asciihex(response, server_digest, MD5_SIGNATURE_SIZE);
+ pr_debug("[server] MD5 Server Digest: %s\n", response);
+
+ if (memcmp(server_digest, client_digest, MD5_SIGNATURE_SIZE) != 0) {
+ pr_debug("[server] MD5 Digests do not match!\n\n");
+ goto out;
+ } else
+ pr_debug("[server] MD5 Digests match, CHAP connetication"
+ " successful.\n\n");
+ /*
+ * One way authentication has succeeded, return now if mutual
+ * authentication is not enabled.
+ */
+ if (!auth->authenticate_target) {
+ kfree(challenge);
+ kfree(challenge_binhex);
+ return 0;
+ }
+ /*
+ * Get CHAP_I.
+ */
+ if (extract_param(nr_in_ptr, "CHAP_I", 10, identifier, &type) < 0) {
+ pr_err("Could not find CHAP_I.\n");
+ goto out;
+ }
+
+ if (type == HEX)
+ id = (unsigned char)simple_strtoul((char *)&identifier[2],
+ &endptr, 0);
+ else
+ id = (unsigned char)simple_strtoul(identifier, &endptr, 0);
+ /*
+ * RFC 1994 says the Identifier is one octet (8 bits).
+ */
+ pr_debug("[server] Got CHAP_I=%d\n", id);
+ /*
+ * Get CHAP_C.
+ */
+ if (extract_param(nr_in_ptr, "CHAP_C", CHAP_CHALLENGE_STR_LEN,
+ challenge, &type) < 0) {
+ pr_err("Could not find CHAP_C.\n");
+ goto out;
+ }
+
+ if (type != HEX) {
+ pr_err("Could not find CHAP_C.\n");
+ goto out;
+ }
+ pr_debug("[server] Got CHAP_C=%s\n", challenge);
+ challenge_len = chap_string_to_hex(challenge_binhex, challenge,
+ strlen(challenge));
+ if (!challenge_len) {
+ pr_err("Unable to convert incoming challenge\n");
+ goto out;
+ }
+ /*
+ * Generate CHAP_N and CHAP_R for mutual authentication.
+ */
+ tfm = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
+ if (IS_ERR(tfm)) {
+ pr_err("Unable to allocate struct crypto_hash\n");
+ goto out;
+ }
+ desc.tfm = tfm;
+ desc.flags = 0;
+
+ ret = crypto_hash_init(&desc);
+ if (ret < 0) {
+ pr_err("crypto_hash_init() failed\n");
+ crypto_free_hash(tfm);
+ goto out;
+ }
+
+ sg_init_one(&sg, (void *)&id, 1);
+ ret = crypto_hash_update(&desc, &sg, 1);
+ if (ret < 0) {
+ pr_err("crypto_hash_update() failed for id\n");
+ crypto_free_hash(tfm);
+ goto out;
+ }
+
+ sg_init_one(&sg, (void *)auth->password_mutual,
+ strlen(auth->password_mutual));
+ ret = crypto_hash_update(&desc, &sg, strlen(auth->password_mutual));
+ if (ret < 0) {
+ pr_err("crypto_hash_update() failed for"
+ " password_mutual\n");
+ crypto_free_hash(tfm);
+ goto out;
+ }
+ /*
+ * Include the received challenge (already converted to binary
+ * above) in the mutual authentication hash.
+ */
+ sg_init_one(&sg, (void *)challenge_binhex, challenge_len);
+ ret = crypto_hash_update(&desc, &sg, challenge_len);
+ if (ret < 0) {
+ pr_err("crypto_hash_update() failed for ma challenge\n");
+ crypto_free_hash(tfm);
+ goto out;
+ }
+
+ ret = crypto_hash_final(&desc, digest);
+ if (ret < 0) {
+ pr_err("crypto_hash_final() failed for ma digest\n");
+ crypto_free_hash(tfm);
+ goto out;
+ }
+ crypto_free_hash(tfm);
+ /*
+ * Generate CHAP_N and CHAP_R.
+ */
+ *nr_out_len = sprintf(nr_out_ptr, "CHAP_N=%s", auth->userid_mutual);
+ *nr_out_len += 1;
+ pr_debug("[server] Sending CHAP_N=%s\n", auth->userid_mutual);
+ /*
+ * Convert the response from binary to ASCII hex.
+ */
+ chap_binaryhex_to_asciihex(response, digest, MD5_SIGNATURE_SIZE);
+ *nr_out_len += sprintf(nr_out_ptr + *nr_out_len, "CHAP_R=0x%s",
+ response);
+ *nr_out_len += 1;
+ pr_debug("[server] Sending CHAP_R=0x%s\n", response);
+ auth_ret = 0;
+out:
+ kfree(challenge);
+ kfree(challenge_binhex);
+ return auth_ret;
+}
+
+static int chap_got_response(
+ struct iscsi_conn *conn,
+ struct iscsi_node_auth *auth,
+ char *nr_in_ptr,
+ char *nr_out_ptr,
+ unsigned int *nr_out_len)
+{
+ struct iscsi_chap *chap = (struct iscsi_chap *) conn->auth_protocol;
+
+ switch (chap->digest_type) {
+ case CHAP_DIGEST_MD5:
+ if (chap_server_compute_md5(conn, auth, nr_in_ptr,
+ nr_out_ptr, nr_out_len) < 0)
+ return -1;
+ return 0;
+ default:
+ pr_err("Unknown CHAP digest type %d!\n",
+ chap->digest_type);
+ return -1;
+ }
+}
+
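+/*
+ * Entry point from the login negotiation code. Return values:
+ * 0 = more CHAP exchanges are required, 1 = authentication has
+ * completed successfully, 2 = authentication failed.
+ */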
+u32 chap_main_loop(
+ struct iscsi_conn *conn,
+ struct iscsi_node_auth *auth,
+ char *in_text,
+ char *out_text,
+ int *in_len,
+ int *out_len)
+{
+ struct iscsi_chap *chap = (struct iscsi_chap *) conn->auth_protocol;
+
+ if (!chap) {
+ chap = chap_server_open(conn, auth, in_text, out_text, out_len);
+ if (!chap)
+ return 2;
+ chap->chap_state = CHAP_STAGE_SERVER_AIC;
+ return 0;
+ } else if (chap->chap_state == CHAP_STAGE_SERVER_AIC) {
+ convert_null_to_semi(in_text, *in_len);
+ if (chap_got_response(conn, auth, in_text, out_text,
+ out_len) < 0) {
+ chap_close(conn);
+ return 2;
+ }
+ if (auth->authenticate_target)
+ chap->chap_state = CHAP_STAGE_SERVER_NR;
+ else
+ *out_len = 0;
+ chap_close(conn);
+ return 1;
+ }
+
+ return 2;
+}
diff --git a/drivers/target/iscsi/iscsi_target_auth.h b/drivers/target/iscsi/iscsi_target_auth.h
new file mode 100644
index 00000000000..2f463c09626
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_auth.h
@@ -0,0 +1,31 @@
+#ifndef _ISCSI_CHAP_H_
+#define _ISCSI_CHAP_H_
+
+#define CHAP_DIGEST_MD5 5
+#define CHAP_DIGEST_SHA 6
+
+#define CHAP_CHALLENGE_LENGTH 16
+#define CHAP_CHALLENGE_STR_LEN 4096
+#define MAX_RESPONSE_LENGTH 64 /* sufficient for MD5 */
+#define MAX_CHAP_N_SIZE 512
+
+#define MD5_SIGNATURE_SIZE 16 /* 16 bytes in a MD5 message digest */
+
+#define CHAP_STAGE_CLIENT_A 1
+#define CHAP_STAGE_SERVER_AIC 2
+#define CHAP_STAGE_CLIENT_NR 3
+#define CHAP_STAGE_CLIENT_NRIC 4
+#define CHAP_STAGE_SERVER_NR 5
+
+extern u32 chap_main_loop(struct iscsi_conn *, struct iscsi_node_auth *, char *, char *,
+ int *, int *);
+
+struct iscsi_chap {
+ unsigned char digest_type;
+ unsigned char id;
+ unsigned char challenge[CHAP_CHALLENGE_LENGTH];
+ unsigned int authenticate_target;
+ unsigned int chap_state;
+} ____cacheline_aligned;
+
+#endif /*** _ISCSI_CHAP_H_ ***/
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
new file mode 100644
index 00000000000..f095e65b1cc
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -0,0 +1,1882 @@
+/*******************************************************************************
+ * This file contains the configfs implementation for iSCSI Target mode
+ * from the LIO-Target Project.
+ *
+ * © Copyright 2007-2011 RisingTide Systems LLC.
+ *
+ * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ *
+ * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ ****************************************************************************/
+
+#include <linux/configfs.h>
+#include <target/target_core_base.h>
+#include <target/target_core_transport.h>
+#include <target/target_core_fabric_ops.h>
+#include <target/target_core_fabric_configfs.h>
+#include <target/target_core_fabric_lib.h>
+#include <target/target_core_device.h>
+#include <target/target_core_tpg.h>
+#include <target/target_core_configfs.h>
+#include <target/configfs_macros.h>
+
+#include "iscsi_target_core.h"
+#include "iscsi_target_parameters.h"
+#include "iscsi_target_device.h"
+#include "iscsi_target_erl0.h"
+#include "iscsi_target_nodeattrib.h"
+#include "iscsi_target_tpg.h"
+#include "iscsi_target_util.h"
+#include "iscsi_target.h"
+#include "iscsi_target_stat.h"
+#include "iscsi_target_configfs.h"
+
+struct target_fabric_configfs *lio_target_fabric_configfs;
+
+struct lio_target_configfs_attribute {
+ struct configfs_attribute attr;
+ ssize_t (*show)(void *, char *);
+ ssize_t (*store)(void *, const char *, size_t);
+};
+
+struct iscsi_portal_group *lio_get_tpg_from_tpg_item(
+ struct config_item *item,
+ struct iscsi_tiqn **tiqn_out)
+{
+ struct se_portal_group *se_tpg = container_of(to_config_group(item),
+ struct se_portal_group, tpg_group);
+ struct iscsi_portal_group *tpg =
+ (struct iscsi_portal_group *)se_tpg->se_tpg_fabric_ptr;
+ int ret;
+
+ if (!tpg) {
+ pr_err("Unable to locate struct iscsi_portal_group "
+ "pointer\n");
+ return NULL;
+ }
+ ret = iscsit_get_tpg(tpg);
+ if (ret < 0)
+ return NULL;
+
+ *tiqn_out = tpg->tpg_tiqn;
+ return tpg;
+}
+
+/* Start items for lio_target_portal_cit */
+
+static ssize_t lio_target_np_show_sctp(
+ struct se_tpg_np *se_tpg_np,
+ char *page)
+{
+ struct iscsi_tpg_np *tpg_np = container_of(se_tpg_np,
+ struct iscsi_tpg_np, se_tpg_np);
+ struct iscsi_tpg_np *tpg_np_sctp;
+ ssize_t rb;
+
+ tpg_np_sctp = iscsit_tpg_locate_child_np(tpg_np, ISCSI_SCTP_TCP);
+ if (tpg_np_sctp)
+ rb = sprintf(page, "1\n");
+ else
+ rb = sprintf(page, "0\n");
+
+ return rb;
+}
+
+static ssize_t lio_target_np_store_sctp(
+ struct se_tpg_np *se_tpg_np,
+ const char *page,
+ size_t count)
+{
+ struct iscsi_np *np;
+ struct iscsi_portal_group *tpg;
+ struct iscsi_tpg_np *tpg_np = container_of(se_tpg_np,
+ struct iscsi_tpg_np, se_tpg_np);
+ struct iscsi_tpg_np *tpg_np_sctp = NULL;
+ char *endptr;
+ u32 op;
+ int ret;
+
+ op = simple_strtoul(page, &endptr, 0);
+ if ((op != 1) && (op != 0)) {
+ pr_err("Illegal value for tpg_enable: %u\n", op);
+ return -EINVAL;
+ }
+ np = tpg_np->tpg_np;
+ if (!np) {
+ pr_err("Unable to locate struct iscsi_np from"
+ " struct iscsi_tpg_np\n");
+ return -EINVAL;
+ }
+
+ tpg = tpg_np->tpg;
+ if (iscsit_get_tpg(tpg) < 0)
+ return -EINVAL;
+
+ if (op) {
+ /*
+ * Use existing np->np_sockaddr for SCTP network portal reference
+ */
+ tpg_np_sctp = iscsit_tpg_add_network_portal(tpg, &np->np_sockaddr,
+ np->np_ip, tpg_np, ISCSI_SCTP_TCP);
+ if (!tpg_np_sctp || IS_ERR(tpg_np_sctp))
+ goto out;
+ } else {
+ tpg_np_sctp = iscsit_tpg_locate_child_np(tpg_np, ISCSI_SCTP_TCP);
+ if (!tpg_np_sctp)
+ goto out;
+
+ ret = iscsit_tpg_del_network_portal(tpg, tpg_np_sctp);
+ if (ret < 0)
+ goto out;
+ }
+
+ iscsit_put_tpg(tpg);
+ return count;
+out:
+ iscsit_put_tpg(tpg);
+ return -EINVAL;
+}
+
+TF_NP_BASE_ATTR(lio_target, sctp, S_IRUGO | S_IWUSR);
+
+static struct configfs_attribute *lio_target_portal_attrs[] = {
+ &lio_target_np_sctp.attr,
+ NULL,
+};
+
+/* Stop items for lio_target_portal_cit */
+
+/* Start items for lio_target_np_cit */
+
+#define MAX_PORTAL_LEN 256
+
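+/*
+ * Called on mkdir of a network portal group under
+ * /sys/kernel/config/iscsi/$IQN/$TPG/np/, where name is either an
+ * "ipv4:port" (e.g. "10.0.0.1:3260") or a bracketed "[ipv6]:port"
+ * (e.g. "[fe80::1]:3260") portal address.
+ */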
+struct se_tpg_np *lio_target_call_addnptotpg(
+ struct se_portal_group *se_tpg,
+ struct config_group *group,
+ const char *name)
+{
+ struct iscsi_portal_group *tpg;
+ struct iscsi_tpg_np *tpg_np;
+ char *str, *str2, *ip_str, *port_str;
+ struct __kernel_sockaddr_storage sockaddr;
+ struct sockaddr_in *sock_in;
+ struct sockaddr_in6 *sock_in6;
+ unsigned long port;
+ int ret;
+ char buf[MAX_PORTAL_LEN + 1];
+
+ if (strlen(name) > MAX_PORTAL_LEN) {
+ pr_err("strlen(name): %d exceeds MAX_PORTAL_LEN: %d\n",
+ (int)strlen(name), MAX_PORTAL_LEN);
+ return ERR_PTR(-EOVERFLOW);
+ }
+ memset(buf, 0, MAX_PORTAL_LEN + 1);
+ snprintf(buf, MAX_PORTAL_LEN + 1, "%s", name);
+
+ memset(&sockaddr, 0, sizeof(struct __kernel_sockaddr_storage));
+
+ str = strstr(buf, "[");
+ if (str) {
+ const char *end;
+
+ str2 = strstr(str, "]");
+ if (!str2) {
+ pr_err("Unable to locate trailing \"]\""
+ " in IPv6 iSCSI network portal address\n");
+ return ERR_PTR(-EINVAL);
+ }
+ str++; /* Skip over leading "[" */
+ *str2 = '\0'; /* Terminate the IPv6 address */
+ str2++; /* Skip over the "]" */
+ port_str = strstr(str2, ":");
+ if (!port_str) {
+ pr_err("Unable to locate \":port\""
+ " in IPv6 iSCSI network portal address\n");
+ return ERR_PTR(-EINVAL);
+ }
+ *port_str = '\0'; /* Terminate string for IP */
+ port_str++; /* Skip over ":" */
+
+ ret = strict_strtoul(port_str, 0, &port);
+ if (ret < 0) {
+ pr_err("strict_strtoul() failed for port_str: %d\n", ret);
+ return ERR_PTR(ret);
+ }
+ sock_in6 = (struct sockaddr_in6 *)&sockaddr;
+ sock_in6->sin6_family = AF_INET6;
+ sock_in6->sin6_port = htons((unsigned short)port);
+ ret = in6_pton(str, IPV6_ADDRESS_SPACE,
+ (void *)&sock_in6->sin6_addr.in6_u, -1, &end);
+ if (ret <= 0) {
+ pr_err("in6_pton returned: %d\n", ret);
+ return ERR_PTR(-EINVAL);
+ }
+ } else {
+ str = ip_str = &buf[0];
+ port_str = strstr(ip_str, ":");
+ if (!port_str) {
+ pr_err("Unable to locate \":port\""
+ " in IPv4 iSCSI network portal address\n");
+ return ERR_PTR(-EINVAL);
+ }
+ *port_str = '\0'; /* Terminate string for IP */
+ port_str++; /* Skip over ":" */
+
+ ret = strict_strtoul(port_str, 0, &port);
+ if (ret < 0) {
+ pr_err("strict_strtoul() failed for port_str: %d\n", ret);
+ return ERR_PTR(ret);
+ }
+ sock_in = (struct sockaddr_in *)&sockaddr;
+ sock_in->sin_family = AF_INET;
+ sock_in->sin_port = htons((unsigned short)port);
+ sock_in->sin_addr.s_addr = in_aton(ip_str);
+ }
+ tpg = container_of(se_tpg, struct iscsi_portal_group, tpg_se_tpg);
+ ret = iscsit_get_tpg(tpg);
+ if (ret < 0)
+ return ERR_PTR(-EINVAL);
+
+ pr_debug("LIO_Target_ConfigFS: REGISTER -> %s TPGT: %hu"
+ " PORTAL: %s\n",
+ config_item_name(&se_tpg->se_tpg_wwn->wwn_group.cg_item),
+ tpg->tpgt, name);
+ /*
+ * Assume ISCSI_TCP by default. Other network portals for other
+ * iSCSI fabrics:
+ *
+ * Traditional iSCSI over SCTP (initial support)
+ * iSER/TCP (TODO, hardware available)
+ * iSER/SCTP (TODO, software emulation with osc-iwarp)
+ * iSER/IB (TODO, hardware available)
+ *
+ * can be enabled with attributes under
+ * /sys/kernel/config/iscsi/$IQN/$TPG/np/$IP:$PORT/
+ *
+ */
+ tpg_np = iscsit_tpg_add_network_portal(tpg, &sockaddr, str, NULL,
+ ISCSI_TCP);
+ if (IS_ERR(tpg_np)) {
+ iscsit_put_tpg(tpg);
+ return ERR_PTR(PTR_ERR(tpg_np));
+ }
+ pr_debug("LIO_Target_ConfigFS: addnptotpg done!\n");
+
+ iscsit_put_tpg(tpg);
+ return &tpg_np->se_tpg_np;
+}
+
+static void lio_target_call_delnpfromtpg(
+ struct se_tpg_np *se_tpg_np)
+{
+ struct iscsi_portal_group *tpg;
+ struct iscsi_tpg_np *tpg_np;
+ struct se_portal_group *se_tpg;
+ int ret;
+
+ tpg_np = container_of(se_tpg_np, struct iscsi_tpg_np, se_tpg_np);
+ tpg = tpg_np->tpg;
+ ret = iscsit_get_tpg(tpg);
+ if (ret < 0)
+ return;
+
+ se_tpg = &tpg->tpg_se_tpg;
+ pr_debug("LIO_Target_ConfigFS: DEREGISTER -> %s TPGT: %hu"
+ " PORTAL: %s:%hu\n", config_item_name(&se_tpg->se_tpg_wwn->wwn_group.cg_item),
+ tpg->tpgt, tpg_np->tpg_np->np_ip, tpg_np->tpg_np->np_port);
+
+ ret = iscsit_tpg_del_network_portal(tpg, tpg_np);
+ if (ret < 0)
+ goto out;
+
+ pr_debug("LIO_Target_ConfigFS: delnpfromtpg done!\n");
+out:
+ iscsit_put_tpg(tpg);
+}
+
+/* End items for lio_target_np_cit */
+
+/* Start items for lio_target_nacl_attrib_cit */
+
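+/*
+ * DEF_NACL_ATTRIB(name) generates the configfs show/store handler
+ * pair for a per-NodeACL attribute, with the store handler calling
+ * the matching iscsit_na_##name() setter.
+ */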
+#define DEF_NACL_ATTRIB(name) \
+static ssize_t iscsi_nacl_attrib_show_##name( \
+ struct se_node_acl *se_nacl, \
+ char *page) \
+{ \
+ struct iscsi_node_acl *nacl = container_of(se_nacl, struct iscsi_node_acl, \
+ se_node_acl); \
+ \
+ return sprintf(page, "%u\n", ISCSI_NODE_ATTRIB(nacl)->name); \
+} \
+ \
+static ssize_t iscsi_nacl_attrib_store_##name( \
+ struct se_node_acl *se_nacl, \
+ const char *page, \
+ size_t count) \
+{ \
+ struct iscsi_node_acl *nacl = container_of(se_nacl, struct iscsi_node_acl, \
+ se_node_acl); \
+ char *endptr; \
+ u32 val; \
+ int ret; \
+ \
+ val = simple_strtoul(page, &endptr, 0); \
+ ret = iscsit_na_##name(nacl, val); \
+ if (ret < 0) \
+ return ret; \
+ \
+ return count; \
+}
+
+#define NACL_ATTR(_name, _mode) TF_NACL_ATTRIB_ATTR(iscsi, _name, _mode);
+/*
+ * Define iscsi_node_attrib_s_dataout_timeout
+ */
+DEF_NACL_ATTRIB(dataout_timeout);
+NACL_ATTR(dataout_timeout, S_IRUGO | S_IWUSR);
+/*
+ * Define iscsi_node_attrib_s_dataout_timeout_retries
+ */
+DEF_NACL_ATTRIB(dataout_timeout_retries);
+NACL_ATTR(dataout_timeout_retries, S_IRUGO | S_IWUSR);
+/*
+ * Define iscsi_node_attrib_s_default_erl
+ */
+DEF_NACL_ATTRIB(default_erl);
+NACL_ATTR(default_erl, S_IRUGO | S_IWUSR);
+/*
+ * Define iscsi_node_attrib_s_nopin_timeout
+ */
+DEF_NACL_ATTRIB(nopin_timeout);
+NACL_ATTR(nopin_timeout, S_IRUGO | S_IWUSR);
+/*
+ * Define iscsi_node_attrib_s_nopin_response_timeout
+ */
+DEF_NACL_ATTRIB(nopin_response_timeout);
+NACL_ATTR(nopin_response_timeout, S_IRUGO | S_IWUSR);
+/*
+ * Define iscsi_node_attrib_s_random_datain_pdu_offsets
+ */
+DEF_NACL_ATTRIB(random_datain_pdu_offsets);
+NACL_ATTR(random_datain_pdu_offsets, S_IRUGO | S_IWUSR);
+/*
+ * Define iscsi_node_attrib_s_random_datain_seq_offsets
+ */
+DEF_NACL_ATTRIB(random_datain_seq_offsets);
+NACL_ATTR(random_datain_seq_offsets, S_IRUGO | S_IWUSR);
+/*
+ * Define iscsi_node_attrib_s_random_r2t_offsets
+ */
+DEF_NACL_ATTRIB(random_r2t_offsets);
+NACL_ATTR(random_r2t_offsets, S_IRUGO | S_IWUSR);
+
+static struct configfs_attribute *lio_target_nacl_attrib_attrs[] = {
+ &iscsi_nacl_attrib_dataout_timeout.attr,
+ &iscsi_nacl_attrib_dataout_timeout_retries.attr,
+ &iscsi_nacl_attrib_default_erl.attr,
+ &iscsi_nacl_attrib_nopin_timeout.attr,
+ &iscsi_nacl_attrib_nopin_response_timeout.attr,
+ &iscsi_nacl_attrib_random_datain_pdu_offsets.attr,
+ &iscsi_nacl_attrib_random_datain_seq_offsets.attr,
+ &iscsi_nacl_attrib_random_r2t_offsets.attr,
+ NULL,
+};
+
+/* End items for lio_target_nacl_attrib_cit */
+
+/* Start items for lio_target_nacl_auth_cit */
+
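+/*
+ * These macros generate CAP_SYS_ADMIN guarded show/store handlers for
+ * the per-NodeACL CHAP credentials. Writing "NULL" clears the
+ * corresponding naf_flags bit, and mutual authentication is enabled
+ * once both userid_mutual and password_mutual have been set.
+ */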
+#define __DEF_NACL_AUTH_STR(prefix, name, flags) \
+static ssize_t __iscsi_##prefix##_show_##name( \
+ struct iscsi_node_acl *nacl, \
+ char *page) \
+{ \
+ struct iscsi_node_auth *auth = &nacl->node_auth; \
+ \
+ if (!capable(CAP_SYS_ADMIN)) \
+ return -EPERM; \
+ return snprintf(page, PAGE_SIZE, "%s\n", auth->name); \
+} \
+ \
+static ssize_t __iscsi_##prefix##_store_##name( \
+ struct iscsi_node_acl *nacl, \
+ const char *page, \
+ size_t count) \
+{ \
+ struct iscsi_node_auth *auth = &nacl->node_auth; \
+ \
+ if (!capable(CAP_SYS_ADMIN)) \
+ return -EPERM; \
+ \
+ snprintf(auth->name, PAGE_SIZE, "%s", page); \
+ if (!strncmp("NULL", auth->name, 4)) \
+ auth->naf_flags &= ~flags; \
+ else \
+ auth->naf_flags |= flags; \
+ \
+ if ((auth->naf_flags & NAF_USERID_IN_SET) && \
+ (auth->naf_flags & NAF_PASSWORD_IN_SET)) \
+ auth->authenticate_target = 1; \
+ else \
+ auth->authenticate_target = 0; \
+ \
+ return count; \
+}
+
+#define __DEF_NACL_AUTH_INT(prefix, name) \
+static ssize_t __iscsi_##prefix##_show_##name( \
+ struct iscsi_node_acl *nacl, \
+ char *page) \
+{ \
+ struct iscsi_node_auth *auth = &nacl->node_auth; \
+ \
+ if (!capable(CAP_SYS_ADMIN)) \
+ return -EPERM; \
+ \
+ return snprintf(page, PAGE_SIZE, "%d\n", auth->name); \
+}
+
+#define DEF_NACL_AUTH_STR(name, flags) \
+ __DEF_NACL_AUTH_STR(nacl_auth, name, flags) \
+static ssize_t iscsi_nacl_auth_show_##name( \
+ struct se_node_acl *nacl, \
+ char *page) \
+{ \
+ return __iscsi_nacl_auth_show_##name(container_of(nacl, \
+ struct iscsi_node_acl, se_node_acl), page); \
+} \
+static ssize_t iscsi_nacl_auth_store_##name( \
+ struct se_node_acl *nacl, \
+ const char *page, \
+ size_t count) \
+{ \
+ return __iscsi_nacl_auth_store_##name(container_of(nacl, \
+ struct iscsi_node_acl, se_node_acl), page, count); \
+}
+
+#define DEF_NACL_AUTH_INT(name) \
+ __DEF_NACL_AUTH_INT(nacl_auth, name) \
+static ssize_t iscsi_nacl_auth_show_##name( \
+ struct se_node_acl *nacl, \
+ char *page) \
+{ \
+ return __iscsi_nacl_auth_show_##name(container_of(nacl, \
+ struct iscsi_node_acl, se_node_acl), page); \
+}
+
+#define AUTH_ATTR(_name, _mode) TF_NACL_AUTH_ATTR(iscsi, _name, _mode);
+#define AUTH_ATTR_RO(_name) TF_NACL_AUTH_ATTR_RO(iscsi, _name);
+
+/*
+ * One-way authentication userid
+ */
+DEF_NACL_AUTH_STR(userid, NAF_USERID_SET);
+AUTH_ATTR(userid, S_IRUGO | S_IWUSR);
+/*
+ * One-way authentication password
+ */
+DEF_NACL_AUTH_STR(password, NAF_PASSWORD_SET);
+AUTH_ATTR(password, S_IRUGO | S_IWUSR);
+/*
+ * Enforce mutual authentication
+ */
+DEF_NACL_AUTH_INT(authenticate_target);
+AUTH_ATTR_RO(authenticate_target);
+/*
+ * Mutual authentication userid
+ */
+DEF_NACL_AUTH_STR(userid_mutual, NAF_USERID_IN_SET);
+AUTH_ATTR(userid_mutual, S_IRUGO | S_IWUSR);
+/*
+ * Mutual authentication password
+ */
+DEF_NACL_AUTH_STR(password_mutual, NAF_PASSWORD_IN_SET);
+AUTH_ATTR(password_mutual, S_IRUGO | S_IWUSR);
+
+static struct configfs_attribute *lio_target_nacl_auth_attrs[] = {
+ &iscsi_nacl_auth_userid.attr,
+ &iscsi_nacl_auth_password.attr,
+ &iscsi_nacl_auth_authenticate_target.attr,
+ &iscsi_nacl_auth_userid_mutual.attr,
+ &iscsi_nacl_auth_password_mutual.attr,
+ NULL,
+};
+
+/* End items for lio_target_nacl_auth_cit */
+
+/* Start items for lio_target_nacl_param_cit */
+
+#define DEF_NACL_PARAM(name) \
+static ssize_t iscsi_nacl_param_show_##name( \
+ struct se_node_acl *se_nacl, \
+ char *page) \
+{ \
+ struct iscsi_session *sess; \
+ struct se_session *se_sess; \
+ ssize_t rb; \
+ \
+ spin_lock_bh(&se_nacl->nacl_sess_lock); \
+ se_sess = se_nacl->nacl_sess; \
+ if (!se_sess) { \
+ rb = snprintf(page, PAGE_SIZE, \
+ "No Active iSCSI Session\n"); \
+ } else { \
+ sess = se_sess->fabric_sess_ptr; \
+ rb = snprintf(page, PAGE_SIZE, "%u\n", \
+ (u32)sess->sess_ops->name); \
+ } \
+ spin_unlock_bh(&se_nacl->nacl_sess_lock); \
+ \
+ return rb; \
+}
+
+#define NACL_PARAM_ATTR(_name) TF_NACL_PARAM_ATTR_RO(iscsi, _name);
+
+DEF_NACL_PARAM(MaxConnections);
+NACL_PARAM_ATTR(MaxConnections);
+
+DEF_NACL_PARAM(InitialR2T);
+NACL_PARAM_ATTR(InitialR2T);
+
+DEF_NACL_PARAM(ImmediateData);
+NACL_PARAM_ATTR(ImmediateData);
+
+DEF_NACL_PARAM(MaxBurstLength);
+NACL_PARAM_ATTR(MaxBurstLength);
+
+DEF_NACL_PARAM(FirstBurstLength);
+NACL_PARAM_ATTR(FirstBurstLength);
+
+DEF_NACL_PARAM(DefaultTime2Wait);
+NACL_PARAM_ATTR(DefaultTime2Wait);
+
+DEF_NACL_PARAM(DefaultTime2Retain);
+NACL_PARAM_ATTR(DefaultTime2Retain);
+
+DEF_NACL_PARAM(MaxOutstandingR2T);
+NACL_PARAM_ATTR(MaxOutstandingR2T);
+
+DEF_NACL_PARAM(DataPDUInOrder);
+NACL_PARAM_ATTR(DataPDUInOrder);
+
+DEF_NACL_PARAM(DataSequenceInOrder);
+NACL_PARAM_ATTR(DataSequenceInOrder);
+
+DEF_NACL_PARAM(ErrorRecoveryLevel);
+NACL_PARAM_ATTR(ErrorRecoveryLevel);
+
+static struct configfs_attribute *lio_target_nacl_param_attrs[] = {
+ &iscsi_nacl_param_MaxConnections.attr,
+ &iscsi_nacl_param_InitialR2T.attr,
+ &iscsi_nacl_param_ImmediateData.attr,
+ &iscsi_nacl_param_MaxBurstLength.attr,
+ &iscsi_nacl_param_FirstBurstLength.attr,
+ &iscsi_nacl_param_DefaultTime2Wait.attr,
+ &iscsi_nacl_param_DefaultTime2Retain.attr,
+ &iscsi_nacl_param_MaxOutstandingR2T.attr,
+ &iscsi_nacl_param_DataPDUInOrder.attr,
+ &iscsi_nacl_param_DataSequenceInOrder.attr,
+ &iscsi_nacl_param_ErrorRecoveryLevel.attr,
+ NULL,
+};
+
+/* End items for lio_target_nacl_param_cit */
+
+/* Start items for lio_target_acl_cit */
+
+static ssize_t lio_target_nacl_show_info(
+ struct se_node_acl *se_nacl,
+ char *page)
+{
+ struct iscsi_session *sess;
+ struct iscsi_conn *conn;
+ struct se_session *se_sess;
+ ssize_t rb = 0;
+
+ spin_lock_bh(&se_nacl->nacl_sess_lock);
+ se_sess = se_nacl->nacl_sess;
+ if (!se_sess) {
+ rb += sprintf(page+rb, "No active iSCSI Session for Initiator"
+ " Endpoint: %s\n", se_nacl->initiatorname);
+ } else {
+ sess = se_sess->fabric_sess_ptr;
+
+ if (sess->sess_ops->InitiatorName)
+ rb += sprintf(page+rb, "InitiatorName: %s\n",
+ sess->sess_ops->InitiatorName);
+ if (sess->sess_ops->InitiatorAlias)
+ rb += sprintf(page+rb, "InitiatorAlias: %s\n",
+ sess->sess_ops->InitiatorAlias);
+
+ rb += sprintf(page+rb, "LIO Session ID: %u "
+ "ISID: 0x%02x %02x %02x %02x %02x %02x "
+ "TSIH: %hu ", sess->sid,
+ sess->isid[0], sess->isid[1], sess->isid[2],
+ sess->isid[3], sess->isid[4], sess->isid[5],
+ sess->tsih);
+ rb += sprintf(page+rb, "SessionType: %s\n",
+ (sess->sess_ops->SessionType) ?
+ "Discovery" : "Normal");
+ rb += sprintf(page+rb, "Session State: ");
+ switch (sess->session_state) {
+ case TARG_SESS_STATE_FREE:
+ rb += sprintf(page+rb, "TARG_SESS_FREE\n");
+ break;
+ case TARG_SESS_STATE_ACTIVE:
+ rb += sprintf(page+rb, "TARG_SESS_STATE_ACTIVE\n");
+ break;
+ case TARG_SESS_STATE_LOGGED_IN:
+ rb += sprintf(page+rb, "TARG_SESS_STATE_LOGGED_IN\n");
+ break;
+ case TARG_SESS_STATE_FAILED:
+ rb += sprintf(page+rb, "TARG_SESS_STATE_FAILED\n");
+ break;
+ case TARG_SESS_STATE_IN_CONTINUE:
+ rb += sprintf(page+rb, "TARG_SESS_STATE_IN_CONTINUE\n");
+ break;
+ default:
+ rb += sprintf(page+rb, "ERROR: Unknown Session"
+ " State!\n");
+ break;
+ }
+
+ rb += sprintf(page+rb, "---------------------[iSCSI Session"
+ " Values]-----------------------\n");
+ rb += sprintf(page+rb, " CmdSN/WR : CmdSN/WC : ExpCmdSN"
+ " : MaxCmdSN : ITT : TTT\n");
+ rb += sprintf(page+rb, " 0x%08x 0x%08x 0x%08x 0x%08x"
+ " 0x%08x 0x%08x\n",
+ sess->cmdsn_window,
+ (sess->max_cmd_sn - sess->exp_cmd_sn) + 1,
+ sess->exp_cmd_sn, sess->max_cmd_sn,
+ sess->init_task_tag, sess->targ_xfer_tag);
+ rb += sprintf(page+rb, "----------------------[iSCSI"
+ " Connections]-------------------------\n");
+
+ spin_lock(&sess->conn_lock);
+ list_for_each_entry(conn, &sess->sess_conn_list, conn_list) {
+ rb += sprintf(page+rb, "CID: %hu Connection"
+ " State: ", conn->cid);
+ switch (conn->conn_state) {
+ case TARG_CONN_STATE_FREE:
+ rb += sprintf(page+rb,
+ "TARG_CONN_STATE_FREE\n");
+ break;
+ case TARG_CONN_STATE_XPT_UP:
+ rb += sprintf(page+rb,
+ "TARG_CONN_STATE_XPT_UP\n");
+ break;
+ case TARG_CONN_STATE_IN_LOGIN:
+ rb += sprintf(page+rb,
+ "TARG_CONN_STATE_IN_LOGIN\n");
+ break;
+ case TARG_CONN_STATE_LOGGED_IN:
+ rb += sprintf(page+rb,
+ "TARG_CONN_STATE_LOGGED_IN\n");
+ break;
+ case TARG_CONN_STATE_IN_LOGOUT:
+ rb += sprintf(page+rb,
+ "TARG_CONN_STATE_IN_LOGOUT\n");
+ break;
+ case TARG_CONN_STATE_LOGOUT_REQUESTED:
+ rb += sprintf(page+rb,
+ "TARG_CONN_STATE_LOGOUT_REQUESTED\n");
+ break;
+ case TARG_CONN_STATE_CLEANUP_WAIT:
+ rb += sprintf(page+rb,
+ "TARG_CONN_STATE_CLEANUP_WAIT\n");
+ break;
+ default:
+ rb += sprintf(page+rb,
+ "ERROR: Unknown Connection State!\n");
+ break;
+ }
+
+ rb += sprintf(page+rb, " Address %s %s", conn->login_ip,
+ (conn->network_transport == ISCSI_TCP) ?
+ "TCP" : "SCTP");
+ rb += sprintf(page+rb, " StatSN: 0x%08x\n",
+ conn->stat_sn);
+ }
+ spin_unlock(&sess->conn_lock);
+ }
+ spin_unlock_bh(&se_nacl->nacl_sess_lock);
+
+ return rb;
+}
+
+TF_NACL_BASE_ATTR_RO(lio_target, info);
+
+static ssize_t lio_target_nacl_show_cmdsn_depth(
+ struct se_node_acl *se_nacl,
+ char *page)
+{
+ return sprintf(page, "%u\n", se_nacl->queue_depth);
+}
+
+static ssize_t lio_target_nacl_store_cmdsn_depth(
+ struct se_node_acl *se_nacl,
+ const char *page,
+ size_t count)
+{
+ struct se_portal_group *se_tpg = se_nacl->se_tpg;
+ struct iscsi_portal_group *tpg = container_of(se_tpg,
+ struct iscsi_portal_group, tpg_se_tpg);
+ struct config_item *acl_ci, *tpg_ci, *wwn_ci;
+ char *endptr;
+ u32 cmdsn_depth = 0;
+ int ret;
+
+ cmdsn_depth = simple_strtoul(page, &endptr, 0);
+ if (cmdsn_depth > TA_DEFAULT_CMDSN_DEPTH_MAX) {
+ pr_err("Passed cmdsn_depth: %u exceeds"
+ " TA_DEFAULT_CMDSN_DEPTH_MAX: %u\n", cmdsn_depth,
+ TA_DEFAULT_CMDSN_DEPTH_MAX);
+ return -EINVAL;
+ }
+ acl_ci = &se_nacl->acl_group.cg_item;
+ if (!acl_ci) {
+ pr_err("Unable to locatel acl_ci\n");
+ return -EINVAL;
+ }
+ tpg_ci = &acl_ci->ci_parent->ci_group->cg_item;
+ if (!tpg_ci) {
+ pr_err("Unable to locate tpg_ci\n");
+ return -EINVAL;
+ }
+ wwn_ci = &tpg_ci->ci_group->cg_item;
+ if (!wwn_ci) {
+ pr_err("Unable to locate config_item wwn_ci\n");
+ return -EINVAL;
+ }
+
+ if (iscsit_get_tpg(tpg) < 0)
+ return -EINVAL;
+ /*
+ * iscsit_tpg_set_initiator_node_queue_depth() assumes force=1
+ */
+ ret = iscsit_tpg_set_initiator_node_queue_depth(tpg,
+ config_item_name(acl_ci), cmdsn_depth, 1);
+
+ pr_debug("LIO_Target_ConfigFS: %s/%s Set CmdSN Window: %u for"
+ "InitiatorName: %s\n", config_item_name(wwn_ci),
+ config_item_name(tpg_ci), cmdsn_depth,
+ config_item_name(acl_ci));
+
+ iscsit_put_tpg(tpg);
+ return (!ret) ? count : (ssize_t)ret;
+}
+
+TF_NACL_BASE_ATTR(lio_target, cmdsn_depth, S_IRUGO | S_IWUSR);
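+
+/*
+ * A sketch of tuning the per-initiator queue depth from userspace
+ * (paths are illustrative):
+ *
+ *   echo 64 > /sys/kernel/config/target/iscsi/<target-iqn>/tpgt_1/acls/<initiator-iqn>/cmdsn_depth
+ */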
+
+static struct configfs_attribute *lio_target_initiator_attrs[] = {
+ &lio_target_nacl_info.attr,
+ &lio_target_nacl_cmdsn_depth.attr,
+ NULL,
+};
+
+static struct se_node_acl *lio_tpg_alloc_fabric_acl(
+ struct se_portal_group *se_tpg)
+{
+ struct iscsi_node_acl *acl;
+
+ acl = kzalloc(sizeof(struct iscsi_node_acl), GFP_KERNEL);
+ if (!acl) {
+ pr_err("Unable to allocate memory for struct iscsi_node_acl\n");
+ return NULL;
+ }
+
+ return &acl->se_node_acl;
+}
+
+static struct se_node_acl *lio_target_make_nodeacl(
+ struct se_portal_group *se_tpg,
+ struct config_group *group,
+ const char *name)
+{
+ struct config_group *stats_cg;
+ struct iscsi_node_acl *acl;
+ struct se_node_acl *se_nacl_new, *se_nacl;
+ struct iscsi_portal_group *tpg = container_of(se_tpg,
+ struct iscsi_portal_group, tpg_se_tpg);
+ u32 cmdsn_depth;
+
+ se_nacl_new = lio_tpg_alloc_fabric_acl(se_tpg);
+ if (!se_nacl_new)
+ return ERR_PTR(-ENOMEM);
+
+ acl = container_of(se_nacl_new, struct iscsi_node_acl,
+ se_node_acl);
+
+ cmdsn_depth = ISCSI_TPG_ATTRIB(tpg)->default_cmdsn_depth;
+ /*
+ * se_nacl_new may be released by core_tpg_add_initiator_node_acl()
+ * when converting a NodeACL from demo mode -> explicit
+ */
+ se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,
+ name, cmdsn_depth);
+ if (IS_ERR(se_nacl))
+ return se_nacl;
+
+ stats_cg = &acl->se_node_acl.acl_fabric_stat_group;
+
+ stats_cg->default_groups = kzalloc(sizeof(struct config_group *) * 2,
+ GFP_KERNEL);
+ if (!stats_cg->default_groups) {
+ pr_err("Unable to allocate memory for"
+ " stats_cg->default_groups\n");
+ core_tpg_del_initiator_node_acl(se_tpg, se_nacl, 1);
+ kfree(acl);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ stats_cg->default_groups[0] = &NODE_STAT_GRPS(acl)->iscsi_sess_stats_group;
+ stats_cg->default_groups[1] = NULL;
+ config_group_init_type_name(&NODE_STAT_GRPS(acl)->iscsi_sess_stats_group,
+ "iscsi_sess_stats", &iscsi_stat_sess_cit);
+
+ return se_nacl;
+}
+
+static void lio_target_drop_nodeacl(
+ struct se_node_acl *se_nacl)
+{
+ struct se_portal_group *se_tpg = se_nacl->se_tpg;
+ struct iscsi_node_acl *acl = container_of(se_nacl,
+ struct iscsi_node_acl, se_node_acl);
+ struct config_item *df_item;
+ struct config_group *stats_cg;
+ int i;
+
+ stats_cg = &acl->se_node_acl.acl_fabric_stat_group;
+ for (i = 0; stats_cg->default_groups[i]; i++) {
+ df_item = &stats_cg->default_groups[i]->cg_item;
+ stats_cg->default_groups[i] = NULL;
+ config_item_put(df_item);
+ }
+ kfree(stats_cg->default_groups);
+
+ core_tpg_del_initiator_node_acl(se_tpg, se_nacl, 1);
+ kfree(acl);
+}
+
+/* End items for lio_target_acl_cit */
+
+/* Start items for lio_target_tpg_attrib_cit */
+
+#define DEF_TPG_ATTRIB(name) \
+ \
+static ssize_t iscsi_tpg_attrib_show_##name( \
+ struct se_portal_group *se_tpg, \
+ char *page) \
+{ \
+ struct iscsi_portal_group *tpg = container_of(se_tpg, \
+ struct iscsi_portal_group, tpg_se_tpg); \
+ ssize_t rb; \
+ \
+ if (iscsit_get_tpg(tpg) < 0) \
+ return -EINVAL; \
+ \
+ rb = sprintf(page, "%u\n", ISCSI_TPG_ATTRIB(tpg)->name); \
+ iscsit_put_tpg(tpg); \
+ return rb; \
+} \
+ \
+static ssize_t iscsi_tpg_attrib_store_##name( \
+ struct se_portal_group *se_tpg, \
+ const char *page, \
+ size_t count) \
+{ \
+ struct iscsi_portal_group *tpg = container_of(se_tpg, \
+ struct iscsi_portal_group, tpg_se_tpg); \
+ char *endptr; \
+ u32 val; \
+ int ret; \
+ \
+ if (iscsit_get_tpg(tpg) < 0) \
+ return -EINVAL; \
+ \
+ val = simple_strtoul(page, &endptr, 0); \
+ ret = iscsit_ta_##name(tpg, val); \
+ if (ret < 0) \
+ goto out; \
+ \
+ iscsit_put_tpg(tpg); \
+ return count; \
+out: \
+ iscsit_put_tpg(tpg); \
+ return ret; \
+}
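+
+/*
+ * For reference, DEF_TPG_ATTRIB(authentication) below expands to
+ * iscsi_tpg_attrib_show_authentication() and
+ * iscsi_tpg_attrib_store_authentication(), which read
+ * ISCSI_TPG_ATTRIB(tpg)->authentication and write it via
+ * iscsit_ta_authentication(), respectively.
+ */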
+
+#define TPG_ATTR(_name, _mode) TF_TPG_ATTRIB_ATTR(iscsi, _name, _mode);
+
+/*
+ * Define iscsi_tpg_attrib_s_authentication
+ */
+DEF_TPG_ATTRIB(authentication);
+TPG_ATTR(authentication, S_IRUGO | S_IWUSR);
+/*
+ * Define iscsi_tpg_attrib_s_login_timeout
+ */
+DEF_TPG_ATTRIB(login_timeout);
+TPG_ATTR(login_timeout, S_IRUGO | S_IWUSR);
+/*
+ * Define iscsi_tpg_attrib_s_netif_timeout
+ */
+DEF_TPG_ATTRIB(netif_timeout);
+TPG_ATTR(netif_timeout, S_IRUGO | S_IWUSR);
+/*
+ * Define iscsi_tpg_attrib_s_generate_node_acls
+ */
+DEF_TPG_ATTRIB(generate_node_acls);
+TPG_ATTR(generate_node_acls, S_IRUGO | S_IWUSR);
+/*
+ * Define iscsi_tpg_attrib_s_default_cmdsn_depth
+ */
+DEF_TPG_ATTRIB(default_cmdsn_depth);
+TPG_ATTR(default_cmdsn_depth, S_IRUGO | S_IWUSR);
+/*
+ * Define iscsi_tpg_attrib_s_cache_dynamic_acls
+ */
+DEF_TPG_ATTRIB(cache_dynamic_acls);
+TPG_ATTR(cache_dynamic_acls, S_IRUGO | S_IWUSR);
+/*
+ * Define iscsi_tpg_attrib_s_demo_mode_write_protect
+ */
+DEF_TPG_ATTRIB(demo_mode_write_protect);
+TPG_ATTR(demo_mode_write_protect, S_IRUGO | S_IWUSR);
+/*
+ * Define iscsi_tpg_attrib_s_prod_mode_write_protect
+ */
+DEF_TPG_ATTRIB(prod_mode_write_protect);
+TPG_ATTR(prod_mode_write_protect, S_IRUGO | S_IWUSR);
+
+static struct configfs_attribute *lio_target_tpg_attrib_attrs[] = {
+ &iscsi_tpg_attrib_authentication.attr,
+ &iscsi_tpg_attrib_login_timeout.attr,
+ &iscsi_tpg_attrib_netif_timeout.attr,
+ &iscsi_tpg_attrib_generate_node_acls.attr,
+ &iscsi_tpg_attrib_default_cmdsn_depth.attr,
+ &iscsi_tpg_attrib_cache_dynamic_acls.attr,
+ &iscsi_tpg_attrib_demo_mode_write_protect.attr,
+ &iscsi_tpg_attrib_prod_mode_write_protect.attr,
+ NULL,
+};
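+
+/*
+ * These entries appear as files in the TPG "attrib" group, e.g.
+ * (illustrative path):
+ *
+ *   echo 0 > /sys/kernel/config/target/iscsi/<target-iqn>/tpgt_1/attrib/authentication
+ */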
+
+/* End items for lio_target_tpg_attrib_cit */
+
+/* Start items for lio_target_tpg_param_cit */
+
+#define DEF_TPG_PARAM(name) \
+static ssize_t iscsi_tpg_param_show_##name( \
+ struct se_portal_group *se_tpg, \
+ char *page) \
+{ \
+ struct iscsi_portal_group *tpg = container_of(se_tpg, \
+ struct iscsi_portal_group, tpg_se_tpg); \
+ struct iscsi_param *param; \
+ ssize_t rb; \
+ \
+ if (iscsit_get_tpg(tpg) < 0) \
+ return -EINVAL; \
+ \
+ param = iscsi_find_param_from_key(__stringify(name), \
+ tpg->param_list); \
+ if (!param) { \
+ iscsit_put_tpg(tpg); \
+ return -EINVAL; \
+ } \
+ rb = snprintf(page, PAGE_SIZE, "%s\n", param->value); \
+ \
+ iscsit_put_tpg(tpg); \
+ return rb; \
+} \
+static ssize_t iscsi_tpg_param_store_##name( \
+ struct se_portal_group *se_tpg, \
+ const char *page, \
+ size_t count) \
+{ \
+ struct iscsi_portal_group *tpg = container_of(se_tpg, \
+ struct iscsi_portal_group, tpg_se_tpg); \
+ char *buf; \
+ int ret; \
+ \
+ buf = kzalloc(PAGE_SIZE, GFP_KERNEL); \
+ if (!buf) \
+ return -ENOMEM; \
+ snprintf(buf, PAGE_SIZE, "%s=%s", __stringify(name), page); \
+ buf[strlen(buf)-1] = '\0'; /* Kill newline */ \
+ \
+ if (iscsit_get_tpg(tpg) < 0) { \
+ kfree(buf); \
+ return -EINVAL; \
+ } \
+ \
+ ret = iscsi_change_param_value(buf, tpg->param_list, 1); \
+ if (ret < 0) \
+ goto out; \
+ \
+ kfree(buf); \
+ iscsit_put_tpg(tpg); \
+ return count; \
+out: \
+ kfree(buf); \
+ iscsit_put_tpg(tpg); \
+ return -EINVAL; \
+}
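+
+/*
+ * Note the store side rebuilds a full "<Key>=<value>" pair (e.g.
+ * "MaxBurstLength=262144") before calling iscsi_change_param_value(),
+ * since the parameter code parses key=value strings rather than bare
+ * values; the trailing newline from userspace is stripped first.
+ */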
+
+#define TPG_PARAM_ATTR(_name, _mode) TF_TPG_PARAM_ATTR(iscsi, _name, _mode);
+
+DEF_TPG_PARAM(AuthMethod);
+TPG_PARAM_ATTR(AuthMethod, S_IRUGO | S_IWUSR);
+
+DEF_TPG_PARAM(HeaderDigest);
+TPG_PARAM_ATTR(HeaderDigest, S_IRUGO | S_IWUSR);
+
+DEF_TPG_PARAM(DataDigest);
+TPG_PARAM_ATTR(DataDigest, S_IRUGO | S_IWUSR);
+
+DEF_TPG_PARAM(MaxConnections);
+TPG_PARAM_ATTR(MaxConnections, S_IRUGO | S_IWUSR);
+
+DEF_TPG_PARAM(TargetAlias);
+TPG_PARAM_ATTR(TargetAlias, S_IRUGO | S_IWUSR);
+
+DEF_TPG_PARAM(InitialR2T);
+TPG_PARAM_ATTR(InitialR2T, S_IRUGO | S_IWUSR);
+
+DEF_TPG_PARAM(ImmediateData);
+TPG_PARAM_ATTR(ImmediateData, S_IRUGO | S_IWUSR);
+
+DEF_TPG_PARAM(MaxRecvDataSegmentLength);
+TPG_PARAM_ATTR(MaxRecvDataSegmentLength, S_IRUGO | S_IWUSR);
+
+DEF_TPG_PARAM(MaxBurstLength);
+TPG_PARAM_ATTR(MaxBurstLength, S_IRUGO | S_IWUSR);
+
+DEF_TPG_PARAM(FirstBurstLength);
+TPG_PARAM_ATTR(FirstBurstLength, S_IRUGO | S_IWUSR);
+
+DEF_TPG_PARAM(DefaultTime2Wait);
+TPG_PARAM_ATTR(DefaultTime2Wait, S_IRUGO | S_IWUSR);
+
+DEF_TPG_PARAM(DefaultTime2Retain);
+TPG_PARAM_ATTR(DefaultTime2Retain, S_IRUGO | S_IWUSR);
+
+DEF_TPG_PARAM(MaxOutstandingR2T);
+TPG_PARAM_ATTR(MaxOutstandingR2T, S_IRUGO | S_IWUSR);
+
+DEF_TPG_PARAM(DataPDUInOrder);
+TPG_PARAM_ATTR(DataPDUInOrder, S_IRUGO | S_IWUSR);
+
+DEF_TPG_PARAM(DataSequenceInOrder);
+TPG_PARAM_ATTR(DataSequenceInOrder, S_IRUGO | S_IWUSR);
+
+DEF_TPG_PARAM(ErrorRecoveryLevel);
+TPG_PARAM_ATTR(ErrorRecoveryLevel, S_IRUGO | S_IWUSR);
+
+DEF_TPG_PARAM(IFMarker);
+TPG_PARAM_ATTR(IFMarker, S_IRUGO | S_IWUSR);
+
+DEF_TPG_PARAM(OFMarker);
+TPG_PARAM_ATTR(OFMarker, S_IRUGO | S_IWUSR);
+
+DEF_TPG_PARAM(IFMarkInt);
+TPG_PARAM_ATTR(IFMarkInt, S_IRUGO | S_IWUSR);
+
+DEF_TPG_PARAM(OFMarkInt);
+TPG_PARAM_ATTR(OFMarkInt, S_IRUGO | S_IWUSR);
+
+static struct configfs_attribute *lio_target_tpg_param_attrs[] = {
+ &iscsi_tpg_param_AuthMethod.attr,
+ &iscsi_tpg_param_HeaderDigest.attr,
+ &iscsi_tpg_param_DataDigest.attr,
+ &iscsi_tpg_param_MaxConnections.attr,
+ &iscsi_tpg_param_TargetAlias.attr,
+ &iscsi_tpg_param_InitialR2T.attr,
+ &iscsi_tpg_param_ImmediateData.attr,
+ &iscsi_tpg_param_MaxRecvDataSegmentLength.attr,
+ &iscsi_tpg_param_MaxBurstLength.attr,
+ &iscsi_tpg_param_FirstBurstLength.attr,
+ &iscsi_tpg_param_DefaultTime2Wait.attr,
+ &iscsi_tpg_param_DefaultTime2Retain.attr,
+ &iscsi_tpg_param_MaxOutstandingR2T.attr,
+ &iscsi_tpg_param_DataPDUInOrder.attr,
+ &iscsi_tpg_param_DataSequenceInOrder.attr,
+ &iscsi_tpg_param_ErrorRecoveryLevel.attr,
+ &iscsi_tpg_param_IFMarker.attr,
+ &iscsi_tpg_param_OFMarker.attr,
+ &iscsi_tpg_param_IFMarkInt.attr,
+ &iscsi_tpg_param_OFMarkInt.attr,
+ NULL,
+};
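+
+/*
+ * Each entry maps to a file in the TPG "param" group, e.g. (illustrative
+ * path):
+ *
+ *   cat /sys/kernel/config/target/iscsi/<target-iqn>/tpgt_1/param/MaxRecvDataSegmentLength
+ */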
+
+/* End items for lio_target_tpg_param_cit */
+
+/* Start items for lio_target_tpg_cit */
+
+static ssize_t lio_target_tpg_show_enable(
+ struct se_portal_group *se_tpg,
+ char *page)
+{
+ struct iscsi_portal_group *tpg = container_of(se_tpg,
+ struct iscsi_portal_group, tpg_se_tpg);
+ ssize_t len;
+
+ spin_lock(&tpg->tpg_state_lock);
+ len = sprintf(page, "%d\n",
+ (tpg->tpg_state == TPG_STATE_ACTIVE) ? 1 : 0);
+ spin_unlock(&tpg->tpg_state_lock);
+
+ return len;
+}
+
+static ssize_t lio_target_tpg_store_enable(
+ struct se_portal_group *se_tpg,
+ const char *page,
+ size_t count)
+{
+ struct iscsi_portal_group *tpg = container_of(se_tpg,
+ struct iscsi_portal_group, tpg_se_tpg);
+ char *endptr;
+ u32 op;
+ int ret = 0;
+
+ op = simple_strtoul(page, &endptr, 0);
+ if ((op != 1) && (op != 0)) {
+ pr_err("Illegal value for tpg_enable: %u\n", op);
+ return -EINVAL;
+ }
+
+ ret = iscsit_get_tpg(tpg);
+ if (ret < 0)
+ return -EINVAL;
+
+ if (op) {
+ ret = iscsit_tpg_enable_portal_group(tpg);
+ if (ret < 0)
+ goto out;
+ } else {
+ /*
+ * iscsit_tpg_disable_portal_group() assumes force=1
+ */
+ ret = iscsit_tpg_disable_portal_group(tpg, 1);
+ if (ret < 0)
+ goto out;
+ }
+
+ iscsit_put_tpg(tpg);
+ return count;
+out:
+ iscsit_put_tpg(tpg);
+ return -EINVAL;
+}
+
+TF_TPG_BASE_ATTR(lio_target, enable, S_IRUGO | S_IWUSR);
+
+static struct configfs_attribute *lio_target_tpg_attrs[] = {
+ &lio_target_tpg_enable.attr,
+ NULL,
+};
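+
+/*
+ * A TPG is brought online from userspace by writing to the "enable"
+ * attribute, e.g. (illustrative path):
+ *
+ *   echo 1 > /sys/kernel/config/target/iscsi/<target-iqn>/tpgt_1/enable
+ */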
+
+/* End items for lio_target_tpg_cit */
+
+/* Start items for lio_target_tiqn_cit */
+
+struct se_portal_group *lio_target_tiqn_addtpg(
+ struct se_wwn *wwn,
+ struct config_group *group,
+ const char *name)
+{
+ struct iscsi_portal_group *tpg;
+ struct iscsi_tiqn *tiqn;
+ char *tpgt_str, *end_ptr;
+ int ret = 0;
+ unsigned short int tpgt;
+
+ tiqn = container_of(wwn, struct iscsi_tiqn, tiqn_wwn);
+ /*
+ * Only tpgt_# directory groups can be created below
+ * target/iscsi/iqn.superturbodiskarray/
+ */
+ tpgt_str = strstr(name, "tpgt_");
+ if (!tpgt_str) {
+ pr_err("Unable to locate \"tpgt_#\" directory"
+ " group\n");
+ return NULL;
+ }
+ tpgt_str += 5; /* Skip ahead of "tpgt_" */
+ tpgt = (unsigned short int) simple_strtoul(tpgt_str, &end_ptr, 0);
+
+ tpg = iscsit_alloc_portal_group(tiqn, tpgt);
+ if (!tpg)
+ return NULL;
+
+ ret = core_tpg_register(
+ &lio_target_fabric_configfs->tf_ops,
+ wwn, &tpg->tpg_se_tpg, (void *)tpg,
+ TRANSPORT_TPG_TYPE_NORMAL);
+ if (ret < 0) {
+ kfree(tpg);
+ return NULL;
+ }
+
+ ret = iscsit_tpg_add_portal_group(tiqn, tpg);
+ if (ret != 0)
+ goto out;
+
+ pr_debug("LIO_Target_ConfigFS: REGISTER -> %s\n", tiqn->tiqn);
+ pr_debug("LIO_Target_ConfigFS: REGISTER -> Allocated TPG: %s\n",
+ name);
+ return &tpg->tpg_se_tpg;
+out:
+ core_tpg_deregister(&tpg->tpg_se_tpg);
+ kfree(tpg);
+ return NULL;
+}
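+
+/*
+ * The function above runs when userspace creates a "tpgt_#" group under an
+ * existing IQN directory, e.g. (illustrative):
+ *
+ *   mkdir /sys/kernel/config/target/iscsi/<target-iqn>/tpgt_1
+ */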
+
+void lio_target_tiqn_deltpg(struct se_portal_group *se_tpg)
+{
+ struct iscsi_portal_group *tpg;
+ struct iscsi_tiqn *tiqn;
+
+ tpg = container_of(se_tpg, struct iscsi_portal_group, tpg_se_tpg);
+ tiqn = tpg->tpg_tiqn;
+ /*
+ * iscsit_tpg_del_portal_group() assumes force=1
+ */
+ pr_debug("LIO_Target_ConfigFS: DEREGISTER -> Releasing TPG\n");
+ iscsit_tpg_del_portal_group(tiqn, tpg, 1);
+}
+
+/* End items for lio_target_tiqn_cit */
+
+/* Start LIO-Target TIQN struct config_item lio_target_cit */
+
+static ssize_t lio_target_wwn_show_attr_lio_version(
+ struct target_fabric_configfs *tf,
+ char *page)
+{
+ return sprintf(page, "RisingTide Systems Linux-iSCSI Target "ISCSIT_VERSION"\n");
+}
+
+TF_WWN_ATTR_RO(lio_target, lio_version);
+
+static struct configfs_attribute *lio_target_wwn_attrs[] = {
+ &lio_target_wwn_lio_version.attr,
+ NULL,
+};
+
+struct se_wwn *lio_target_call_coreaddtiqn(
+ struct target_fabric_configfs *tf,
+ struct config_group *group,
+ const char *name)
+{
+ struct config_group *stats_cg;
+ struct iscsi_tiqn *tiqn;
+
+ tiqn = iscsit_add_tiqn((unsigned char *)name);
+ if (IS_ERR(tiqn))
+ return ERR_PTR(PTR_ERR(tiqn));
+ /*
+ * Setup struct iscsi_wwn_stat_grps for se_wwn->fabric_stat_group.
+ */
+ stats_cg = &tiqn->tiqn_wwn.fabric_stat_group;
+
+ stats_cg->default_groups = kzalloc(sizeof(struct config_group *) * 6,
+ GFP_KERNEL);
+ if (!stats_cg->default_groups) {
+ pr_err("Unable to allocate memory for"
+ " stats_cg->default_groups\n");
+ iscsit_del_tiqn(tiqn);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ stats_cg->default_groups[0] = &WWN_STAT_GRPS(tiqn)->iscsi_instance_group;
+ stats_cg->default_groups[1] = &WWN_STAT_GRPS(tiqn)->iscsi_sess_err_group;
+ stats_cg->default_groups[2] = &WWN_STAT_GRPS(tiqn)->iscsi_tgt_attr_group;
+ stats_cg->default_groups[3] = &WWN_STAT_GRPS(tiqn)->iscsi_login_stats_group;
+ stats_cg->default_groups[4] = &WWN_STAT_GRPS(tiqn)->iscsi_logout_stats_group;
+ stats_cg->default_groups[5] = NULL;
+ config_group_init_type_name(&WWN_STAT_GRPS(tiqn)->iscsi_instance_group,
+ "iscsi_instance", &iscsi_stat_instance_cit);
+ config_group_init_type_name(&WWN_STAT_GRPS(tiqn)->iscsi_sess_err_group,
+ "iscsi_sess_err", &iscsi_stat_sess_err_cit);
+ config_group_init_type_name(&WWN_STAT_GRPS(tiqn)->iscsi_tgt_attr_group,
+ "iscsi_tgt_attr", &iscsi_stat_tgt_attr_cit);
+ config_group_init_type_name(&WWN_STAT_GRPS(tiqn)->iscsi_login_stats_group,
+ "iscsi_login_stats", &iscsi_stat_login_cit);
+ config_group_init_type_name(&WWN_STAT_GRPS(tiqn)->iscsi_logout_stats_group,
+ "iscsi_logout_stats", &iscsi_stat_logout_cit);
+
+ pr_debug("LIO_Target_ConfigFS: REGISTER -> %s\n", tiqn->tiqn);
+ pr_debug("LIO_Target_ConfigFS: REGISTER -> Allocated Node:"
+ " %s\n", name);
+ return &tiqn->tiqn_wwn;
+}
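+
+/*
+ * The function above runs when userspace creates the IQN directory itself,
+ * e.g. (illustrative):
+ *
+ *   mkdir /sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.example:target0
+ *
+ * The five iscsi_* stat groups registered above then appear as
+ * subdirectories of the new WWN's fabric statistics group.
+ */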
+
+void lio_target_call_coredeltiqn(
+ struct se_wwn *wwn)
+{
+ struct iscsi_tiqn *tiqn = container_of(wwn, struct iscsi_tiqn, tiqn_wwn);
+ struct config_item *df_item;
+ struct config_group *stats_cg;
+ int i;
+
+ stats_cg = &tiqn->tiqn_wwn.fabric_stat_group;
+ for (i = 0; stats_cg->default_groups[i]; i++) {
+ df_item = &stats_cg->default_groups[i]->cg_item;
+ stats_cg->default_groups[i] = NULL;
+ config_item_put(df_item);
+ }
+ kfree(stats_cg->default_groups);
+
+ pr_debug("LIO_Target_ConfigFS: DEREGISTER -> %s\n",
+ tiqn->tiqn);
+ iscsit_del_tiqn(tiqn);
+}
+
+/* End LIO-Target TIQN struct config_item lio_target_cit */
+
+/* Start lio_target_discovery_auth_cit */
+
+#define DEF_DISC_AUTH_STR(name, flags) \
+ __DEF_NACL_AUTH_STR(disc, name, flags) \
+static ssize_t iscsi_disc_show_##name( \
+ struct target_fabric_configfs *tf, \
+ char *page) \
+{ \
+ return __iscsi_disc_show_##name(&iscsit_global->discovery_acl, \
+ page); \
+} \
+static ssize_t iscsi_disc_store_##name( \
+ struct target_fabric_configfs *tf, \
+ const char *page, \
+ size_t count) \
+{ \
+ return __iscsi_disc_store_##name(&iscsit_global->discovery_acl, \
+ page, count); \
+}
+
+#define DEF_DISC_AUTH_INT(name) \
+ __DEF_NACL_AUTH_INT(disc, name) \
+static ssize_t iscsi_disc_show_##name( \
+ struct target_fabric_configfs *tf, \
+ char *page) \
+{ \
+ return __iscsi_disc_show_##name(&iscsit_global->discovery_acl, \
+ page); \
+}
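+
+/*
+ * Unlike the per-NodeACL auth attributes, these wrappers operate on the
+ * single global discovery ACL (iscsit_global->discovery_acl), so discovery
+ * authentication is configured once, target-wide.
+ */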
+
+#define DISC_AUTH_ATTR(_name, _mode) TF_DISC_ATTR(iscsi, _name, _mode)
+#define DISC_AUTH_ATTR_RO(_name) TF_DISC_ATTR_RO(iscsi, _name)
+
+/*
+ * One-way authentication userid
+ */
+DEF_DISC_AUTH_STR(userid, NAF_USERID_SET);
+DISC_AUTH_ATTR(userid, S_IRUGO | S_IWUSR);
+/*
+ * One-way authentication password
+ */
+DEF_DISC_AUTH_STR(password, NAF_PASSWORD_SET);
+DISC_AUTH_ATTR(password, S_IRUGO | S_IWUSR);
+/*
+ * Enforce mutual authentication
+ */
+DEF_DISC_AUTH_INT(authenticate_target);
+DISC_AUTH_ATTR_RO(authenticate_target);
+/*
+ * Mutual authentication userid
+ */
+DEF_DISC_AUTH_STR(userid_mutual, NAF_USERID_IN_SET);
+DISC_AUTH_ATTR(userid_mutual, S_IRUGO | S_IWUSR);
+/*
+ * Mutual authentication password
+ */
+DEF_DISC_AUTH_STR(password_mutual, NAF_PASSWORD_IN_SET);
+DISC_AUTH_ATTR(password_mutual, S_IRUGO | S_IWUSR);
+
+/*
+ * enforce_discovery_auth
+ */
+static ssize_t iscsi_disc_show_enforce_discovery_auth(
+ struct target_fabric_configfs *tf,
+ char *page)
+{
+ struct iscsi_node_auth *discovery_auth = &iscsit_global->discovery_acl.node_auth;
+
+ return sprintf(page, "%d\n", discovery_auth->enforce_discovery_auth);
+}
+
+static ssize_t iscsi_disc_store_enforce_discovery_auth(
+ struct target_fabric_configfs *tf,
+ const char *page,
+ size_t count)
+{
+ struct iscsi_param *param;
+ struct iscsi_portal_group *discovery_tpg = iscsit_global->discovery_tpg;
+ char *endptr;
+ u32 op;
+
+ op = simple_strtoul(page, &endptr, 0);
+ if ((op != 1) && (op != 0)) {
+ pr_err("Illegal value for enforce_discovery_auth:"
+ " %u\n", op);
+ return -EINVAL;
+ }
+
+ if (!discovery_tpg) {
+ pr_err("iscsit_global->discovery_tpg is NULL\n");
+ return -EINVAL;
+ }
+
+ param = iscsi_find_param_from_key(AUTHMETHOD,
+ discovery_tpg->param_list);
+ if (!param)
+ return -EINVAL;
+
+ if (op) {
+ /*
+ * Reset the AuthMethod key to CHAP.
+ */
+ if (iscsi_update_param_value(param, CHAP) < 0)
+ return -EINVAL;
+
+ discovery_tpg->tpg_attrib.authentication = 1;
+ iscsit_global->discovery_acl.node_auth.enforce_discovery_auth = 1;
+ pr_debug("LIO-CORE[0] Successfully enabled"
+ " authentication enforcement for iSCSI"
+ " Discovery TPG\n");
+ } else {
+ /*
+ * Reset the AuthMethod key to CHAP,None
+ */
+ if (iscsi_update_param_value(param, "CHAP,None") < 0)
+ return -EINVAL;
+
+ discovery_tpg->tpg_attrib.authentication = 0;
+ iscsit_global->discovery_acl.node_auth.enforce_discovery_auth = 0;
+ pr_debug("LIO-CORE[0] Successfully disabled"
+ " authentication enforcement for iSCSI"
+ " Discovery TPG\n");
+ }
+
+ return count;
+}
+
+DISC_AUTH_ATTR(enforce_discovery_auth, S_IRUGO | S_IWUSR);
+
+static struct configfs_attribute *lio_target_discovery_auth_attrs[] = {
+ &iscsi_disc_userid.attr,
+ &iscsi_disc_password.attr,
+ &iscsi_disc_authenticate_target.attr,
+ &iscsi_disc_userid_mutual.attr,
+ &iscsi_disc_password_mutual.attr,
+ &iscsi_disc_enforce_discovery_auth.attr,
+ NULL,
+};
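+
+/*
+ * A sketch of configuring discovery CHAP from userspace (paths and
+ * credentials are illustrative):
+ *
+ *   echo chapuser > /sys/kernel/config/target/iscsi/discovery_auth/userid
+ *   echo chappass > /sys/kernel/config/target/iscsi/discovery_auth/password
+ *   echo 1 > /sys/kernel/config/target/iscsi/discovery_auth/enforce_discovery_auth
+ */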
+
+/* End lio_target_discovery_auth_cit */
+
+/* Start functions for target_core_fabric_ops */
+
+static char *iscsi_get_fabric_name(void)
+{
+ return "iSCSI";
+}
+
+static u32 iscsi_get_task_tag(struct se_cmd *se_cmd)
+{
+ struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+
+ return cmd->init_task_tag;
+}
+
+static int iscsi_get_cmd_state(struct se_cmd *se_cmd)
+{
+ struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+
+ return cmd->i_state;
+}
+
+static int iscsi_is_state_remove(struct se_cmd *se_cmd)
+{
+ struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+
+ return (cmd->i_state == ISTATE_REMOVE);
+}
+
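+/*
+ * Note the inverted sense: the return value below is nonzero when the
+ * session is *not* in TARG_SESS_STATE_LOGGED_IN (see the comparison taken
+ * under sess->conn_lock).
+ */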
+static int lio_sess_logged_in(struct se_session *se_sess)
+{
+ struct iscsi_session *sess = se_sess->fabric_sess_ptr;
+ int ret;
+ /*
+ * Called with spin_lock_bh(&tpg_lock); and
+ * spin_lock(&se_tpg->session_lock); held.
+ */
+ spin_lock(&sess->conn_lock);
+ ret = (sess->session_state != TARG_SESS_STATE_LOGGED_IN);
+ spin_unlock(&sess->conn_lock);
+
+ return ret;
+}
+
+static u32 lio_sess_get_index(struct se_session *se_sess)
+{
+ struct iscsi_session *sess = se_sess->fabric_sess_ptr;
+
+ return sess->session_index;
+}
+
+static u32 lio_sess_get_initiator_sid(
+ struct se_session *se_sess,
+ unsigned char *buf,
+ u32 size)
+{
+ struct iscsi_session *sess = se_sess->fabric_sess_ptr;
+ /*
+ * iSCSI Initiator Session Identifier from RFC-3720.
+ */
+ return snprintf(buf, size, "%02x%02x%02x%02x%02x%02x",
+ sess->isid[0], sess->isid[1], sess->isid[2],
+ sess->isid[3], sess->isid[4], sess->isid[5]);
+}
+
+static int lio_queue_data_in(struct se_cmd *se_cmd)
+{
+ struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+
+ cmd->i_state = ISTATE_SEND_DATAIN;
+ iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
+ return 0;
+}
+
+static int lio_write_pending(struct se_cmd *se_cmd)
+{
+ struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+
+ if (!cmd->immediate_data && !cmd->unsolicited_data)
+ return iscsit_build_r2ts_for_cmd(cmd, cmd->conn, 1);
+
+ return 0;
+}
+
+static int lio_write_pending_status(struct se_cmd *se_cmd)
+{
+ struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+ int ret;
+
+ spin_lock_bh(&cmd->istate_lock);
+ ret = !(cmd->cmd_flags & ICF_GOT_LAST_DATAOUT);
+ spin_unlock_bh(&cmd->istate_lock);
+
+ return ret;
+}
+
+static int lio_queue_status(struct se_cmd *se_cmd)
+{
+ struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+
+ cmd->i_state = ISTATE_SEND_STATUS;
+ iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
+ return 0;
+}
+
+static u16 lio_set_fabric_sense_len(struct se_cmd *se_cmd, u32 sense_length)
+{
+ unsigned char *buffer = se_cmd->sense_buffer;
+ /*
+ * From RFC-3720 10.4.7. Data Segment - Sense and Response Data Segment
+ * 16-bit SenseLength.
+ */
+ buffer[0] = ((sense_length >> 8) & 0xff);
+ buffer[1] = (sense_length & 0xff);
+ /*
+ * Return two byte offset into allocated sense_buffer.
+ */
+ return 2;
+}
+
+static u16 lio_get_fabric_sense_len(void)
+{
+ /*
+ * Return two byte offset into allocated sense_buffer.
+ */
+ return 2;
+}
+
+static int lio_queue_tm_rsp(struct se_cmd *se_cmd)
+{
+ struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+
+ cmd->i_state = ISTATE_SEND_TASKMGTRSP;
+ iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
+ return 0;
+}
+
+static char *lio_tpg_get_endpoint_wwn(struct se_portal_group *se_tpg)
+{
+ struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
+
+ return &tpg->tpg_tiqn->tiqn[0];
+}
+
+static u16 lio_tpg_get_tag(struct se_portal_group *se_tpg)
+{
+ struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
+
+ return tpg->tpgt;
+}
+
+static u32 lio_tpg_get_default_depth(struct se_portal_group *se_tpg)
+{
+ struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
+
+ return ISCSI_TPG_ATTRIB(tpg)->default_cmdsn_depth;
+}
+
+static int lio_tpg_check_demo_mode(struct se_portal_group *se_tpg)
+{
+ struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
+
+ return ISCSI_TPG_ATTRIB(tpg)->generate_node_acls;
+}
+
+static int lio_tpg_check_demo_mode_cache(struct se_portal_group *se_tpg)
+{
+ struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
+
+ return ISCSI_TPG_ATTRIB(tpg)->cache_dynamic_acls;
+}
+
+static int lio_tpg_check_demo_mode_write_protect(
+ struct se_portal_group *se_tpg)
+{
+ struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
+
+ return ISCSI_TPG_ATTRIB(tpg)->demo_mode_write_protect;
+}
+
+static int lio_tpg_check_prod_mode_write_protect(
+ struct se_portal_group *se_tpg)
+{
+ struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
+
+ return ISCSI_TPG_ATTRIB(tpg)->prod_mode_write_protect;
+}
+
+static void lio_tpg_release_fabric_acl(
+ struct se_portal_group *se_tpg,
+ struct se_node_acl *se_acl)
+{
+ struct iscsi_node_acl *acl = container_of(se_acl,
+ struct iscsi_node_acl, se_node_acl);
+ kfree(acl);
+}
+
+/*
+ * Called with spin_lock_bh(struct se_portal_group->session_lock) held.
+ *
+ * Also, this function calls iscsit_inc_session_usage_count() on the
+ * struct iscsi_session in question.
+ */
+static int lio_tpg_shutdown_session(struct se_session *se_sess)
+{
+ struct iscsi_session *sess = se_sess->fabric_sess_ptr;
+
+ spin_lock(&sess->conn_lock);
+ if (atomic_read(&sess->session_fall_back_to_erl0) ||
+ atomic_read(&sess->session_logout) ||
+ (sess->time2retain_timer_flags & ISCSI_TF_EXPIRED)) {
+ spin_unlock(&sess->conn_lock);
+ return 0;
+ }
+ atomic_set(&sess->session_reinstatement, 1);
+ spin_unlock(&sess->conn_lock);
+
+ iscsit_inc_session_usage_count(sess);
+ iscsit_stop_time2retain_timer(sess);
+
+ return 1;
+}
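+
+/*
+ * A return of 1 above signals that session reinstatement was armed and
+ * that the usage count taken via iscsit_inc_session_usage_count() is still
+ * owed a matching decrement, which lio_tpg_close_session() below provides.
+ */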
+
+/*
+ * Calls iscsit_dec_session_usage_count() as inverse of
+ * lio_tpg_shutdown_session()
+ */
+static void lio_tpg_close_session(struct se_session *se_sess)
+{
+ struct iscsi_session *sess = se_sess->fabric_sess_ptr;
+ /*
+ * If the iSCSI Session for the iSCSI Initiator Node exists,
+ * forcefully shutdown the iSCSI NEXUS.
+ */
+ iscsit_stop_session(sess, 1, 1);
+ iscsit_dec_session_usage_count(sess);
+ iscsit_close_session(sess);
+}
+
+static void lio_tpg_stop_session(
+ struct se_session *se_sess,
+ int sess_sleep,
+ int conn_sleep)
+{
+ struct iscsi_session *sess = se_sess->fabric_sess_ptr;
+
+ iscsit_stop_session(sess, sess_sleep, conn_sleep);
+}
+
+static void lio_tpg_fall_back_to_erl0(struct se_session *se_sess)
+{
+ struct iscsi_session *sess = se_sess->fabric_sess_ptr;
+
+ iscsit_fall_back_to_erl0(sess);
+}
+
+static u32 lio_tpg_get_inst_index(struct se_portal_group *se_tpg)
+{
+ struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
+
+ return tpg->tpg_tiqn->tiqn_index;
+}
+
+static void lio_set_default_node_attributes(struct se_node_acl *se_acl)
+{
+ struct iscsi_node_acl *acl = container_of(se_acl, struct iscsi_node_acl,
+ se_node_acl);
+
+ ISCSI_NODE_ATTRIB(acl)->nacl = acl;
+ iscsit_set_default_node_attribues(acl);
+}
+
+static void lio_release_cmd(struct se_cmd *se_cmd)
+{
+ struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+
+ iscsit_release_cmd(cmd);
+}
+
+/* End functions for target_core_fabric_ops */
+
+int iscsi_target_register_configfs(void)
+{
+ struct target_fabric_configfs *fabric;
+ int ret;
+
+ lio_target_fabric_configfs = NULL;
+ fabric = target_fabric_configfs_init(THIS_MODULE, "iscsi");
+ if (IS_ERR(fabric)) {
+ pr_err("target_fabric_configfs_init() for"
+ " LIO-Target failed!\n");
+ return PTR_ERR(fabric);
+ }
+ /*
+ * Setup the fabric API of function pointers used by target_core_mod.
+ */
+ fabric->tf_ops.get_fabric_name = &iscsi_get_fabric_name;
+ fabric->tf_ops.get_fabric_proto_ident = &iscsi_get_fabric_proto_ident;
+ fabric->tf_ops.tpg_get_wwn = &lio_tpg_get_endpoint_wwn;
+ fabric->tf_ops.tpg_get_tag = &lio_tpg_get_tag;
+ fabric->tf_ops.tpg_get_default_depth = &lio_tpg_get_default_depth;
+ fabric->tf_ops.tpg_get_pr_transport_id = &iscsi_get_pr_transport_id;
+ fabric->tf_ops.tpg_get_pr_transport_id_len =
+ &iscsi_get_pr_transport_id_len;
+ fabric->tf_ops.tpg_parse_pr_out_transport_id =
+ &iscsi_parse_pr_out_transport_id;
+ fabric->tf_ops.tpg_check_demo_mode = &lio_tpg_check_demo_mode;
+ fabric->tf_ops.tpg_check_demo_mode_cache =
+ &lio_tpg_check_demo_mode_cache;
+ fabric->tf_ops.tpg_check_demo_mode_write_protect =
+ &lio_tpg_check_demo_mode_write_protect;
+ fabric->tf_ops.tpg_check_prod_mode_write_protect =
+ &lio_tpg_check_prod_mode_write_protect;
+ fabric->tf_ops.tpg_alloc_fabric_acl = &lio_tpg_alloc_fabric_acl;
+ fabric->tf_ops.tpg_release_fabric_acl = &lio_tpg_release_fabric_acl;
+ fabric->tf_ops.tpg_get_inst_index = &lio_tpg_get_inst_index;
+ fabric->tf_ops.release_cmd = &lio_release_cmd;
+ fabric->tf_ops.shutdown_session = &lio_tpg_shutdown_session;
+ fabric->tf_ops.close_session = &lio_tpg_close_session;
+ fabric->tf_ops.stop_session = &lio_tpg_stop_session;
+ fabric->tf_ops.fall_back_to_erl0 = &lio_tpg_fall_back_to_erl0;
+ fabric->tf_ops.sess_logged_in = &lio_sess_logged_in;
+ fabric->tf_ops.sess_get_index = &lio_sess_get_index;
+ fabric->tf_ops.sess_get_initiator_sid = &lio_sess_get_initiator_sid;
+ fabric->tf_ops.write_pending = &lio_write_pending;
+ fabric->tf_ops.write_pending_status = &lio_write_pending_status;
+ fabric->tf_ops.set_default_node_attributes =
+ &lio_set_default_node_attributes;
+ fabric->tf_ops.get_task_tag = &iscsi_get_task_tag;
+ fabric->tf_ops.get_cmd_state = &iscsi_get_cmd_state;
+ fabric->tf_ops.queue_data_in = &lio_queue_data_in;
+ fabric->tf_ops.queue_status = &lio_queue_status;
+ fabric->tf_ops.queue_tm_rsp = &lio_queue_tm_rsp;
+ fabric->tf_ops.set_fabric_sense_len = &lio_set_fabric_sense_len;
+ fabric->tf_ops.get_fabric_sense_len = &lio_get_fabric_sense_len;
+ fabric->tf_ops.is_state_remove = &iscsi_is_state_remove;
+ /*
+ * Setup function pointers for generic logic in target_core_fabric_configfs.c
+ */
+ fabric->tf_ops.fabric_make_wwn = &lio_target_call_coreaddtiqn;
+ fabric->tf_ops.fabric_drop_wwn = &lio_target_call_coredeltiqn;
+ fabric->tf_ops.fabric_make_tpg = &lio_target_tiqn_addtpg;
+ fabric->tf_ops.fabric_drop_tpg = &lio_target_tiqn_deltpg;
+ fabric->tf_ops.fabric_post_link = NULL;
+ fabric->tf_ops.fabric_pre_unlink = NULL;
+ fabric->tf_ops.fabric_make_np = &lio_target_call_addnptotpg;
+ fabric->tf_ops.fabric_drop_np = &lio_target_call_delnpfromtpg;
+ fabric->tf_ops.fabric_make_nodeacl = &lio_target_make_nodeacl;
+ fabric->tf_ops.fabric_drop_nodeacl = &lio_target_drop_nodeacl;
+ /*
+ * Setup default attribute lists for various fabric->tf_cit_tmpl
+ * struct config_item_type's
+ */
+ TF_CIT_TMPL(fabric)->tfc_discovery_cit.ct_attrs = lio_target_discovery_auth_attrs;
+ TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = lio_target_wwn_attrs;
+ TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = lio_target_tpg_attrs;
+ TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = lio_target_tpg_attrib_attrs;
+ TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = lio_target_tpg_param_attrs;
+ TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = lio_target_portal_attrs;
+ TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = lio_target_initiator_attrs;
+ TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = lio_target_nacl_attrib_attrs;
+ TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = lio_target_nacl_auth_attrs;
+ TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = lio_target_nacl_param_attrs;
+
+ ret = target_fabric_configfs_register(fabric);
+ if (ret < 0) {
+ pr_err("target_fabric_configfs_register() for"
+ " LIO-Target failed!\n");
+ target_fabric_configfs_free(fabric);
+ return ret;
+ }
+
+ lio_target_fabric_configfs = fabric;
+ pr_debug("LIO_TARGET[0] - Set fabric ->"
+ " lio_target_fabric_configfs\n");
+ return 0;
+}
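+
+/*
+ * Expected to run once from the iscsi_target module init path; the
+ * matching iscsi_target_deregister_configfs() below undoes the
+ * registration at module exit.
+ */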
+
+
+void iscsi_target_deregister_configfs(void)
+{
+ if (!lio_target_fabric_configfs)
+ return;
+ /*
+ * Shutdown discovery sessions and disable discovery TPG
+ */
+ if (iscsit_global->discovery_tpg)
+ iscsit_tpg_disable_portal_group(iscsit_global->discovery_tpg, 1);
+
+ target_fabric_configfs_deregister(lio_target_fabric_configfs);
+ lio_target_fabric_configfs = NULL;
+ pr_debug("LIO_TARGET[0] - Cleared"
+ " lio_target_fabric_configfs\n");
+}
diff --git a/drivers/target/iscsi/iscsi_target_configfs.h b/drivers/target/iscsi/iscsi_target_configfs.h
new file mode 100644
index 00000000000..8cd5a63c4ed
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_configfs.h
@@ -0,0 +1,7 @@
+#ifndef ISCSI_TARGET_CONFIGFS_H
+#define ISCSI_TARGET_CONFIGFS_H
+
+extern int iscsi_target_register_configfs(void);
+extern void iscsi_target_deregister_configfs(void);
+
+#endif /* ISCSI_TARGET_CONFIGFS_H */
diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h
new file mode 100644
index 00000000000..470ed551eeb
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_core.h
@@ -0,0 +1,859 @@
+#ifndef ISCSI_TARGET_CORE_H
+#define ISCSI_TARGET_CORE_H
+
+#include <linux/in.h>
+#include <linux/configfs.h>
+#include <net/sock.h>
+#include <net/tcp.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/iscsi_proto.h>
+#include <target/target_core_base.h>
+
+#define ISCSIT_VERSION "v4.1.0-rc1"
+#define ISCSI_MAX_DATASN_MISSING_COUNT 16
+#define ISCSI_TX_THREAD_TCP_TIMEOUT 2
+#define ISCSI_RX_THREAD_TCP_TIMEOUT 2
+#define SECONDS_FOR_ASYNC_LOGOUT 10
+#define SECONDS_FOR_ASYNC_TEXT 10
+#define SECONDS_FOR_LOGOUT_COMP 15
+#define WHITE_SPACE " \t\v\f\n\r"
+
+/* struct iscsi_node_attrib sanity values */
+#define NA_DATAOUT_TIMEOUT 3
+#define NA_DATAOUT_TIMEOUT_MAX 60
+#define NA_DATAOUT_TIMEOUT_MIX 2
+#define NA_DATAOUT_TIMEOUT_RETRIES 5
+#define NA_DATAOUT_TIMEOUT_RETRIES_MAX 15
+#define NA_DATAOUT_TIMEOUT_RETRIES_MIN 1
+#define NA_NOPIN_TIMEOUT 5
+#define NA_NOPIN_TIMEOUT_MAX 60
+#define NA_NOPIN_TIMEOUT_MIN 3
+#define NA_NOPIN_RESPONSE_TIMEOUT 5
+#define NA_NOPIN_RESPONSE_TIMEOUT_MAX 60
+#define NA_NOPIN_RESPONSE_TIMEOUT_MIN 3
+#define NA_RANDOM_DATAIN_PDU_OFFSETS 0
+#define NA_RANDOM_DATAIN_SEQ_OFFSETS 0
+#define NA_RANDOM_R2T_OFFSETS 0
+#define NA_DEFAULT_ERL 0
+#define NA_DEFAULT_ERL_MAX 2
+#define NA_DEFAULT_ERL_MIN 0
+
+/* struct iscsi_tpg_attrib sanity values */
+#define TA_AUTHENTICATION 1
+#define TA_LOGIN_TIMEOUT 15
+#define TA_LOGIN_TIMEOUT_MAX 30
+#define TA_LOGIN_TIMEOUT_MIN 5
+#define TA_NETIF_TIMEOUT 2
+#define TA_NETIF_TIMEOUT_MAX 15
+#define TA_NETIF_TIMEOUT_MIN 2
+#define TA_GENERATE_NODE_ACLS 0
+#define TA_DEFAULT_CMDSN_DEPTH 16
+#define TA_DEFAULT_CMDSN_DEPTH_MAX 512
+#define TA_DEFAULT_CMDSN_DEPTH_MIN 1
+#define TA_CACHE_DYNAMIC_ACLS 0
+/* Enabled by default in demo mode (generic_node_acls=1) */
+#define TA_DEMO_MODE_WRITE_PROTECT 1
+/* Disabled by default in production mode w/ explicit ACLs */
+#define TA_PROD_MODE_WRITE_PROTECT 0
+#define TA_CACHE_CORE_NPS 0
+
+enum tpg_np_network_transport_table {
+ ISCSI_TCP = 0,
+ ISCSI_SCTP_TCP = 1,
+ ISCSI_SCTP_UDP = 2,
+ ISCSI_IWARP_TCP = 3,
+ ISCSI_IWARP_SCTP = 4,
+ ISCSI_INFINIBAND = 5,
+};
+
+/* RFC-3720 7.1.4 Standard Connection State Diagram for a Target */
+enum target_conn_state_table {
+ TARG_CONN_STATE_FREE = 0x1,
+ TARG_CONN_STATE_XPT_UP = 0x3,
+ TARG_CONN_STATE_IN_LOGIN = 0x4,
+ TARG_CONN_STATE_LOGGED_IN = 0x5,
+ TARG_CONN_STATE_IN_LOGOUT = 0x6,
+ TARG_CONN_STATE_LOGOUT_REQUESTED = 0x7,
+ TARG_CONN_STATE_CLEANUP_WAIT = 0x8,
+};
+
+/* RFC-3720 7.3.2 Session State Diagram for a Target */
+enum target_sess_state_table {
+ TARG_SESS_STATE_FREE = 0x1,
+ TARG_SESS_STATE_ACTIVE = 0x2,
+ TARG_SESS_STATE_LOGGED_IN = 0x3,
+ TARG_SESS_STATE_FAILED = 0x4,
+ TARG_SESS_STATE_IN_CONTINUE = 0x5,
+};
+
+/* struct iscsi_data_count->type */
+enum data_count_type {
+ ISCSI_RX_DATA = 1,
+ ISCSI_TX_DATA = 2,
+};
+
+/* struct iscsi_datain_req->dr_complete */
+enum datain_req_comp_table {
+ DATAIN_COMPLETE_NORMAL = 1,
+ DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY = 2,
+ DATAIN_COMPLETE_CONNECTION_RECOVERY = 3,
+};
+
+/* struct iscsi_datain_req->recovery */
+enum datain_req_rec_table {
+ DATAIN_WITHIN_COMMAND_RECOVERY = 1,
+ DATAIN_CONNECTION_RECOVERY = 2,
+};
+
+/* struct iscsi_portal_group->state */
+enum tpg_state_table {
+ TPG_STATE_FREE = 0,
+ TPG_STATE_ACTIVE = 1,
+ TPG_STATE_INACTIVE = 2,
+ TPG_STATE_COLD_RESET = 3,
+};
+
+/* struct iscsi_tiqn->tiqn_state */
+enum tiqn_state_table {
+ TIQN_STATE_ACTIVE = 1,
+ TIQN_STATE_SHUTDOWN = 2,
+};
+
+/* struct iscsi_cmd->cmd_flags */
+enum cmd_flags_table {
+ ICF_GOT_LAST_DATAOUT = 0x00000001,
+ ICF_GOT_DATACK_SNACK = 0x00000002,
+ ICF_NON_IMMEDIATE_UNSOLICITED_DATA = 0x00000004,
+ ICF_SENT_LAST_R2T = 0x00000008,
+ ICF_WITHIN_COMMAND_RECOVERY = 0x00000010,
+ ICF_CONTIG_MEMORY = 0x00000020,
+ ICF_ATTACHED_TO_RQUEUE = 0x00000040,
+ ICF_OOO_CMDSN = 0x00000080,
+ ICF_REJECT_FAIL_CONN = 0x00000100,
+};
+
+/* struct iscsi_cmd->i_state */
+enum cmd_i_state_table {
+ ISTATE_NO_STATE = 0,
+ ISTATE_NEW_CMD = 1,
+ ISTATE_DEFERRED_CMD = 2,
+ ISTATE_UNSOLICITED_DATA = 3,
+ ISTATE_RECEIVE_DATAOUT = 4,
+ ISTATE_RECEIVE_DATAOUT_RECOVERY = 5,
+ ISTATE_RECEIVED_LAST_DATAOUT = 6,
+ ISTATE_WITHIN_DATAOUT_RECOVERY = 7,
+ ISTATE_IN_CONNECTION_RECOVERY = 8,
+ ISTATE_RECEIVED_TASKMGT = 9,
+ ISTATE_SEND_ASYNCMSG = 10,
+ ISTATE_SENT_ASYNCMSG = 11,
+ ISTATE_SEND_DATAIN = 12,
+ ISTATE_SEND_LAST_DATAIN = 13,
+ ISTATE_SENT_LAST_DATAIN = 14,
+ ISTATE_SEND_LOGOUTRSP = 15,
+ ISTATE_SENT_LOGOUTRSP = 16,
+ ISTATE_SEND_NOPIN = 17,
+ ISTATE_SENT_NOPIN = 18,
+ ISTATE_SEND_REJECT = 19,
+ ISTATE_SENT_REJECT = 20,
+ ISTATE_SEND_R2T = 21,
+ ISTATE_SENT_R2T = 22,
+ ISTATE_SEND_R2T_RECOVERY = 23,
+ ISTATE_SENT_R2T_RECOVERY = 24,
+ ISTATE_SEND_LAST_R2T = 25,
+ ISTATE_SENT_LAST_R2T = 26,
+ ISTATE_SEND_LAST_R2T_RECOVERY = 27,
+ ISTATE_SENT_LAST_R2T_RECOVERY = 28,
+ ISTATE_SEND_STATUS = 29,
+ ISTATE_SEND_STATUS_BROKEN_PC = 30,
+ ISTATE_SENT_STATUS = 31,
+ ISTATE_SEND_STATUS_RECOVERY = 32,
+ ISTATE_SENT_STATUS_RECOVERY = 33,
+ ISTATE_SEND_TASKMGTRSP = 34,
+ ISTATE_SENT_TASKMGTRSP = 35,
+ ISTATE_SEND_TEXTRSP = 36,
+ ISTATE_SENT_TEXTRSP = 37,
+ ISTATE_SEND_NOPIN_WANT_RESPONSE = 38,
+ ISTATE_SENT_NOPIN_WANT_RESPONSE = 39,
+ ISTATE_SEND_NOPIN_NO_RESPONSE = 40,
+ ISTATE_REMOVE = 41,
+ ISTATE_FREE = 42,
+};
+
+/* Used for iscsi_recover_cmdsn() return values */
+enum recover_cmdsn_ret_table {
+ CMDSN_ERROR_CANNOT_RECOVER = -1,
+ CMDSN_NORMAL_OPERATION = 0,
+ CMDSN_LOWER_THAN_EXP = 1,
+ CMDSN_HIGHER_THAN_EXP = 2,
+};
+
+/* Used for iscsi_handle_immediate_data() return values */
+enum immedate_data_ret_table {
+ IMMEDIATE_DATA_CANNOT_RECOVER = -1,
+ IMMEDIATE_DATA_NORMAL_OPERATION = 0,
+ IMMEDIATE_DATA_ERL1_CRC_FAILURE = 1,
+};
+
+/* Used for iscsi_decide_dataout_action() return values */
+enum dataout_action_ret_table {
+ DATAOUT_CANNOT_RECOVER = -1,
+ DATAOUT_NORMAL = 0,
+ DATAOUT_SEND_R2T = 1,
+ DATAOUT_SEND_TO_TRANSPORT = 2,
+ DATAOUT_WITHIN_COMMAND_RECOVERY = 3,
+};
+
+/* Used for struct iscsi_node_auth->naf_flags */
+enum naf_flags_table {
+ NAF_USERID_SET = 0x01,
+ NAF_PASSWORD_SET = 0x02,
+ NAF_USERID_IN_SET = 0x04,
+ NAF_PASSWORD_IN_SET = 0x08,
+};
+
+/* Used by various struct timer_list to manage iSCSI specific state */
+enum iscsi_timer_flags_table {
+ ISCSI_TF_RUNNING = 0x01,
+ ISCSI_TF_STOP = 0x02,
+ ISCSI_TF_EXPIRED = 0x04,
+};
+
+/* Used for struct iscsi_np->np_flags */
+enum np_flags_table {
+ NPF_IP_NETWORK = 0x00,
+ NPF_SCTP_STRUCT_FILE = 0x01 /* Bugfix */
+};
+
+/* Used for struct iscsi_np->np_thread_state */
+enum np_thread_state_table {
+ ISCSI_NP_THREAD_ACTIVE = 1,
+ ISCSI_NP_THREAD_INACTIVE = 2,
+ ISCSI_NP_THREAD_RESET = 3,
+ ISCSI_NP_THREAD_SHUTDOWN = 4,
+ ISCSI_NP_THREAD_EXIT = 5,
+};
+
+struct iscsi_conn_ops {
+ u8 HeaderDigest; /* [0,1] == [None,CRC32C] */
+ u8 DataDigest; /* [0,1] == [None,CRC32C] */
+ u32 MaxRecvDataSegmentLength; /* [512..2**24-1] */
+ u8 OFMarker; /* [0,1] == [No,Yes] */
+ u8 IFMarker; /* [0,1] == [No,Yes] */
+ u32 OFMarkInt; /* [1..65535] */
+ u32 IFMarkInt; /* [1..65535] */
+};
+
+struct iscsi_sess_ops {
+ char InitiatorName[224];
+ char InitiatorAlias[256];
+ char TargetName[224];
+ char TargetAlias[256];
+ char TargetAddress[256];
+ u16 TargetPortalGroupTag; /* [0..65535] */
+ u16 MaxConnections; /* [1..65535] */
+ u8 InitialR2T; /* [0,1] == [No,Yes] */
+ u8 ImmediateData; /* [0,1] == [No,Yes] */
+ u32 MaxBurstLength; /* [512..2**24-1] */
+ u32 FirstBurstLength; /* [512..2**24-1] */
+ u16 DefaultTime2Wait; /* [0..3600] */
+ u16 DefaultTime2Retain; /* [0..3600] */
+ u16 MaxOutstandingR2T; /* [1..65535] */
+ u8 DataPDUInOrder; /* [0,1] == [No,Yes] */
+ u8 DataSequenceInOrder; /* [0,1] == [No,Yes] */
+ u8 ErrorRecoveryLevel; /* [0..2] */
+ u8 SessionType; /* [0,1] == [Normal,Discovery]*/
+};
+
+struct iscsi_queue_req {
+ int state;
+ struct iscsi_cmd *cmd;
+ struct list_head qr_list;
+};
+
+struct iscsi_data_count {
+ int data_length;
+ int sync_and_steering;
+ enum data_count_type type;
+ u32 iov_count;
+ u32 ss_iov_count;
+ u32 ss_marker_count;
+ struct kvec *iov;
+};
+
+struct iscsi_param_list {
+ struct list_head param_list;
+ struct list_head extra_response_list;
+};
+
+struct iscsi_datain_req {
+ enum datain_req_comp_table dr_complete;
+ int generate_recovery_values;
+ enum datain_req_rec_table recovery;
+ u32 begrun;
+ u32 runlength;
+ u32 data_length;
+ u32 data_offset;
+ u32 data_offset_end;
+ u32 data_sn;
+ u32 next_burst_len;
+ u32 read_data_done;
+ u32 seq_send_order;
+ struct list_head dr_list;
+} ____cacheline_aligned;
+
+struct iscsi_ooo_cmdsn {
+ u16 cid;
+ u32 batch_count;
+ u32 cmdsn;
+ u32 exp_cmdsn;
+ struct iscsi_cmd *cmd;
+ struct list_head ooo_list;
+} ____cacheline_aligned;
+
+struct iscsi_datain {
+ u8 flags;
+ u32 data_sn;
+ u32 length;
+ u32 offset;
+} ____cacheline_aligned;
+
+struct iscsi_r2t {
+ int seq_complete;
+ int recovery_r2t;
+ int sent_r2t;
+ u32 r2t_sn;
+ u32 offset;
+ u32 targ_xfer_tag;
+ u32 xfer_len;
+ struct list_head r2t_list;
+} ____cacheline_aligned;
+
+struct iscsi_cmd {
+ enum iscsi_timer_flags_table dataout_timer_flags;
+ /* DataOUT timeout retries */
+ u8 dataout_timeout_retries;
+ /* Within command recovery count */
+ u8 error_recovery_count;
+ /* iSCSI dependent state for out of order CmdSNs */
+ enum cmd_i_state_table deferred_i_state;
+ /* iSCSI dependent state */
+ enum cmd_i_state_table i_state;
+ /* Command is an immediate command (ISCSI_OP_IMMEDIATE set) */
+ u8 immediate_cmd;
+ /* Immediate data present */
+ u8 immediate_data;
+ /* iSCSI Opcode */
+ u8 iscsi_opcode;
+ /* iSCSI Response Code */
+ u8 iscsi_response;
+ /* Logout reason when iscsi_opcode == ISCSI_INIT_LOGOUT_CMND */
+ u8 logout_reason;
+ /* Logout response code when iscsi_opcode == ISCSI_INIT_LOGOUT_CMND */
+ u8 logout_response;
+ /* MaxCmdSN has been incremented */
+ u8 maxcmdsn_inc;
+ /* Immediate Unsolicited Dataout */
+ u8 unsolicited_data;
+ /* CID contained in logout PDU when opcode == ISCSI_INIT_LOGOUT_CMND */
+ u16 logout_cid;
+ /* Command flags */
+ enum cmd_flags_table cmd_flags;
+ /* Initiator Task Tag assigned from Initiator */
+ u32 init_task_tag;
+ /* Target Transfer Tag assigned from Target */
+ u32 targ_xfer_tag;
+ /* CmdSN assigned from Initiator */
+ u32 cmd_sn;
+ /* ExpStatSN assigned from Initiator */
+ u32 exp_stat_sn;
+ /* StatSN assigned to this ITT */
+ u32 stat_sn;
+ /* DataSN Counter */
+ u32 data_sn;
+ /* R2TSN Counter */
+ u32 r2t_sn;
+ /* Last DataSN acknowledged via DataAck SNACK */
+ u32 acked_data_sn;
+ /* Used for echoing NOPOUT ping data */
+ u32 buf_ptr_size;
+ /* Used to store DataDigest */
+ u32 data_crc;
+ /* Total size in bytes associated with command */
+ u32 data_length;
+ /* Counter for MaxOutstandingR2T */
+ u32 outstanding_r2ts;
+ /* Next R2T Offset when DataSequenceInOrder=Yes */
+ u32 r2t_offset;
+ /* Iovec current and orig count for iscsi_cmd->iov_data */
+ u32 iov_data_count;
+ u32 orig_iov_data_count;
+ /* Number of miscellaneous iovecs used for IP stack calls */
+ u32 iov_misc_count;
+ /* Number of struct iscsi_pdu in struct iscsi_cmd->pdu_list */
+ u32 pdu_count;
+ /* Next struct iscsi_pdu to send in struct iscsi_cmd->pdu_list */
+ u32 pdu_send_order;
+ /* Current struct iscsi_pdu in struct iscsi_cmd->pdu_list */
+ u32 pdu_start;
+ u32 residual_count;
+ /* Next struct iscsi_seq to send in struct iscsi_cmd->seq_list */
+ u32 seq_send_order;
+ /* Number of struct iscsi_seq in struct iscsi_cmd->seq_list */
+ u32 seq_count;
+ /* Current struct iscsi_seq in struct iscsi_cmd->seq_list */
+ u32 seq_no;
+ /* Lowest offset in current DataOUT sequence */
+ u32 seq_start_offset;
+ /* Highest offset in current DataOUT sequence */
+ u32 seq_end_offset;
+ /* Total size in bytes received so far of READ data */
+ u32 read_data_done;
+ /* Total size in bytes received so far of WRITE data */
+ u32 write_data_done;
+ /* Counter for FirstBurstLength key */
+ u32 first_burst_len;
+ /* Counter for MaxBurstLength key */
+ u32 next_burst_len;
+ /* Transfer size used for IP stack calls */
+ u32 tx_size;
+ /* Buffer used for various purposes */
+ void *buf_ptr;
+ /* See include/linux/dma-mapping.h */
+ enum dma_data_direction data_direction;
+ /* iSCSI PDU Header + CRC */
+ unsigned char pdu[ISCSI_HDR_LEN + ISCSI_CRC_LEN];
+ /* Number of times struct iscsi_cmd is present in immediate queue */
+ atomic_t immed_queue_count;
+ atomic_t response_queue_count;
+ atomic_t transport_sent;
+ spinlock_t datain_lock;
+ spinlock_t dataout_timeout_lock;
+ /* spinlock for protecting struct iscsi_cmd->i_state */
+ spinlock_t istate_lock;
+ /* spinlock for adding within command recovery entries */
+ spinlock_t error_lock;
+ /* spinlock for adding R2Ts */
+ spinlock_t r2t_lock;
+ /* DataIN List */
+ struct list_head datain_list;
+ /* R2T List */
+ struct list_head cmd_r2t_list;
+ struct completion reject_comp;
+ /* Timer for DataOUT */
+ struct timer_list dataout_timer;
+ /* Iovecs for SCSI data payload RX/TX w/ kernel level sockets */
+ struct kvec *iov_data;
+ /* Iovecs for miscellaneous purposes */
+#define ISCSI_MISC_IOVECS 5
+ struct kvec iov_misc[ISCSI_MISC_IOVECS];
+ /* Array of struct iscsi_pdu used for DataPDUInOrder=No */
+ struct iscsi_pdu *pdu_list;
+ /* Current struct iscsi_pdu used for DataPDUInOrder=No */
+ struct iscsi_pdu *pdu_ptr;
+ /* Array of struct iscsi_seq used for DataSequenceInOrder=No */
+ struct iscsi_seq *seq_list;
+ /* Current struct iscsi_seq used for DataSequenceInOrder=No */
+ struct iscsi_seq *seq_ptr;
+ /* TMR Request when iscsi_opcode == ISCSI_OP_SCSI_TMFUNC */
+ struct iscsi_tmr_req *tmr_req;
+ /* Connection this command is allegiant to */
+ struct iscsi_conn *conn;
+ /* Pointer to connection recovery entry */
+ struct iscsi_conn_recovery *cr;
+ /* Session the command is part of, used for connection recovery */
+ struct iscsi_session *sess;
+ /* list_head for connection list */
+ struct list_head i_list;
+ /* The TCM I/O descriptor that is accessed via container_of() */
+ struct se_cmd se_cmd;
+ /* Sense buffer that will be mapped into outgoing status */
+#define ISCSI_SENSE_BUFFER_LEN (TRANSPORT_SENSE_BUFFER + 2)
+ unsigned char sense_buffer[ISCSI_SENSE_BUFFER_LEN];
+
+ struct scatterlist *t_mem_sg;
+ u32 t_mem_sg_nents;
+
+ u32 padding;
+ u8 pad_bytes[4];
+
+ struct scatterlist *first_data_sg;
+ u32 first_data_sg_off;
+ u32 kmapped_nents;
+
+} ____cacheline_aligned;
+
+struct iscsi_tmr_req {
+ bool task_reassign:1;
+ u32 ref_cmd_sn;
+ u32 exp_data_sn;
+ struct iscsi_conn_recovery *conn_recovery;
+ struct se_tmr_req *se_tmr_req;
+};
+
+struct iscsi_conn {
+ /* Authentication Successful for this connection */
+ u8 auth_complete;
+ /* State connection is currently in */
+ u8 conn_state;
+ u8 conn_logout_reason;
+ u8 network_transport;
+ enum iscsi_timer_flags_table nopin_timer_flags;
+ enum iscsi_timer_flags_table nopin_response_timer_flags;
+ u8 tx_immediate_queue;
+ u8 tx_response_queue;
+ /* Used to know what thread encountered a transport failure */
+ u8 which_thread;
+ /* connection id assigned by the Initiator */
+ u16 cid;
+ /* Remote TCP Port */
+ u16 login_port;
+ int net_size;
+ u32 auth_id;
+#define CONNFLAG_SCTP_STRUCT_FILE 0x01
+ u32 conn_flags;
+ /* Used for iscsi_tx_login_rsp() */
+ u32 login_itt;
+ u32 exp_statsn;
+ /* Per connection status sequence number */
+ u32 stat_sn;
+ /* IFMarkInt's Current Value */
+ u32 if_marker;
+ /* OFMarkInt's Current Value */
+ u32 of_marker;
+ /* Used for calculating OFMarker offset to next PDU */
+ u32 of_marker_offset;
+ /* Complete Bad PDU for sending reject */
+ unsigned char bad_hdr[ISCSI_HDR_LEN];
+#define IPV6_ADDRESS_SPACE 48
+ unsigned char login_ip[IPV6_ADDRESS_SPACE];
+ int conn_usage_count;
+ int conn_waiting_on_uc;
+ atomic_t check_immediate_queue;
+ atomic_t conn_logout_remove;
+ atomic_t connection_exit;
+ atomic_t connection_recovery;
+ atomic_t connection_reinstatement;
+ atomic_t connection_wait;
+ atomic_t connection_wait_rcfr;
+ atomic_t sleep_on_conn_wait_comp;
+ atomic_t transport_failed;
+ struct completion conn_post_wait_comp;
+ struct completion conn_wait_comp;
+ struct completion conn_wait_rcfr_comp;
+ struct completion conn_waiting_on_uc_comp;
+ struct completion conn_logout_comp;
+ struct completion tx_half_close_comp;
+ struct completion rx_half_close_comp;
+ /* socket used by this connection */
+ struct socket *sock;
+ struct timer_list nopin_timer;
+ struct timer_list nopin_response_timer;
+ struct timer_list transport_timer;
+ /* Spinlock used for add/deleting cmd's from conn_cmd_list */
+ spinlock_t cmd_lock;
+ spinlock_t conn_usage_lock;
+ spinlock_t immed_queue_lock;
+ spinlock_t nopin_timer_lock;
+ spinlock_t response_queue_lock;
+ spinlock_t state_lock;
+ /* libcrypto RX and TX contexts for crc32c */
+ struct hash_desc conn_rx_hash;
+ struct hash_desc conn_tx_hash;
+ /* Used for scheduling TX and RX connection kthreads */
+ cpumask_var_t conn_cpumask;
+ int conn_rx_reset_cpumask:1;
+ int conn_tx_reset_cpumask:1;
+ /* list_head of struct iscsi_cmd for this connection */
+ struct list_head conn_cmd_list;
+ struct list_head immed_queue_list;
+ struct list_head response_queue_list;
+ struct iscsi_conn_ops *conn_ops;
+ struct iscsi_param_list *param_list;
+ /* Used for per connection auth state machine */
+ void *auth_protocol;
+ struct iscsi_login_thread_s *login_thread;
+ struct iscsi_portal_group *tpg;
+ /* Pointer to parent session */
+ struct iscsi_session *sess;
+ /* Pointer to thread_set in use for this conn's threads */
+ struct iscsi_thread_set *thread_set;
+ /* list_head for session connection list */
+ struct list_head conn_list;
+} ____cacheline_aligned;
+
+struct iscsi_conn_recovery {
+ u16 cid;
+ u32 cmd_count;
+ u32 maxrecvdatasegmentlength;
+ int ready_for_reallegiance;
+ struct list_head conn_recovery_cmd_list;
+ spinlock_t conn_recovery_cmd_lock;
+ struct timer_list time2retain_timer;
+ struct iscsi_session *sess;
+ struct list_head cr_list;
+} ____cacheline_aligned;
+
+struct iscsi_session {
+ u8 initiator_vendor;
+ u8 isid[6];
+ enum iscsi_timer_flags_table time2retain_timer_flags;
+ u8 version_active;
+ u16 cid_called;
+ u16 conn_recovery_count;
+ u16 tsih;
+ /* state session is currently in */
+ u32 session_state;
+ /* session wide counter: initiator assigned task tag */
+ u32 init_task_tag;
+ /* session wide counter: target assigned task tag */
+ u32 targ_xfer_tag;
+ u32 cmdsn_window;
+
+ /* protects cmdsn values */
+ struct mutex cmdsn_mutex;
+ /* session wide counter: expected command sequence number */
+ u32 exp_cmd_sn;
+ /* session wide counter: maximum allowed command sequence number */
+ u32 max_cmd_sn;
+ struct list_head sess_ooo_cmdsn_list;
+
+ /* LIO specific session ID */
+ u32 sid;
+ char auth_type[8];
+ /* unique within the target */
+ int session_index;
+ /* Used for session reference counting */
+ int session_usage_count;
+ int session_waiting_on_uc;
+ u32 cmd_pdus;
+ u32 rsp_pdus;
+ u64 tx_data_octets;
+ u64 rx_data_octets;
+ u32 conn_digest_errors;
+ u32 conn_timeout_errors;
+ u64 creation_time;
+ spinlock_t session_stats_lock;
+ /* Number of active connections */
+ atomic_t nconn;
+ atomic_t session_continuation;
+ atomic_t session_fall_back_to_erl0;
+ atomic_t session_logout;
+ atomic_t session_reinstatement;
+ atomic_t session_stop_active;
+ atomic_t sleep_on_sess_wait_comp;
+ atomic_t transport_wait_cmds;
+ /* connection list */
+ struct list_head sess_conn_list;
+ struct list_head cr_active_list;
+ struct list_head cr_inactive_list;
+ spinlock_t conn_lock;
+ spinlock_t cr_a_lock;
+ spinlock_t cr_i_lock;
+ spinlock_t session_usage_lock;
+ spinlock_t ttt_lock;
+ struct completion async_msg_comp;
+ struct completion reinstatement_comp;
+ struct completion session_wait_comp;
+ struct completion session_waiting_on_uc_comp;
+ struct timer_list time2retain_timer;
+ struct iscsi_sess_ops *sess_ops;
+ struct se_session *se_sess;
+ struct iscsi_portal_group *tpg;
+} ____cacheline_aligned;
+
+struct iscsi_login {
+ u8 auth_complete;
+ u8 checked_for_existing;
+ u8 current_stage;
+ u8 leading_connection;
+ u8 first_request;
+ u8 version_min;
+ u8 version_max;
+ char isid[6];
+ u32 cmd_sn;
+ u32 init_task_tag;
+ u32 initial_exp_statsn;
+ u32 rsp_length;
+ u16 cid;
+ u16 tsih;
+ char *req;
+ char *rsp;
+ char *req_buf;
+ char *rsp_buf;
+} ____cacheline_aligned;
+
+struct iscsi_node_attrib {
+ u32 dataout_timeout;
+ u32 dataout_timeout_retries;
+ u32 default_erl;
+ u32 nopin_timeout;
+ u32 nopin_response_timeout;
+ u32 random_datain_pdu_offsets;
+ u32 random_datain_seq_offsets;
+ u32 random_r2t_offsets;
+ u32 tmr_cold_reset;
+ u32 tmr_warm_reset;
+ struct iscsi_node_acl *nacl;
+};
+
+struct se_dev_entry_s;
+
+struct iscsi_node_auth {
+ enum naf_flags_table naf_flags;
+ int authenticate_target;
+ /* Used for iscsit_global->discovery_acl.node_auth,
+ * set to zero (auth disabled) by default */
+ int enforce_discovery_auth;
+#define MAX_USER_LEN 256
+#define MAX_PASS_LEN 256
+ char userid[MAX_USER_LEN];
+ char password[MAX_PASS_LEN];
+ char userid_mutual[MAX_USER_LEN];
+ char password_mutual[MAX_PASS_LEN];
+};
+
+#include "iscsi_target_stat.h"
+
+struct iscsi_node_stat_grps {
+ struct config_group iscsi_sess_stats_group;
+ struct config_group iscsi_conn_stats_group;
+};
+
+struct iscsi_node_acl {
+ struct iscsi_node_attrib node_attrib;
+ struct iscsi_node_auth node_auth;
+ struct iscsi_node_stat_grps node_stat_grps;
+ struct se_node_acl se_node_acl;
+};
+
+#define NODE_STAT_GRPS(nacl) (&(nacl)->node_stat_grps)
+
+#define ISCSI_NODE_ATTRIB(t) (&(t)->node_attrib)
+#define ISCSI_NODE_AUTH(t) (&(t)->node_auth)
+
+struct iscsi_tpg_attrib {
+ u32 authentication;
+ u32 login_timeout;
+ u32 netif_timeout;
+ u32 generate_node_acls;
+ u32 cache_dynamic_acls;
+ u32 default_cmdsn_depth;
+ u32 demo_mode_write_protect;
+ u32 prod_mode_write_protect;
+ struct iscsi_portal_group *tpg;
+};
+
+struct iscsi_np {
+ int np_network_transport;
+ int np_ip_proto;
+ int np_sock_type;
+ enum np_thread_state_table np_thread_state;
+ enum iscsi_timer_flags_table np_login_timer_flags;
+ u32 np_exports;
+ enum np_flags_table np_flags;
+ unsigned char np_ip[IPV6_ADDRESS_SPACE];
+ u16 np_port;
+ spinlock_t np_thread_lock;
+ struct completion np_restart_comp;
+ struct socket *np_socket;
+ struct __kernel_sockaddr_storage np_sockaddr;
+ struct task_struct *np_thread;
+ struct timer_list np_login_timer;
+ struct iscsi_portal_group *np_login_tpg;
+ struct list_head np_list;
+} ____cacheline_aligned;
+
+struct iscsi_tpg_np {
+ struct iscsi_np *tpg_np;
+ struct iscsi_portal_group *tpg;
+ struct iscsi_tpg_np *tpg_np_parent;
+ struct list_head tpg_np_list;
+ struct list_head tpg_np_child_list;
+ struct list_head tpg_np_parent_list;
+ struct se_tpg_np se_tpg_np;
+ spinlock_t tpg_np_parent_lock;
+};
+
+struct iscsi_portal_group {
+ unsigned char tpg_chap_id;
+ /* TPG State */
+ enum tpg_state_table tpg_state;
+ /* Target Portal Group Tag */
+ u16 tpgt;
+ /* Id assigned to target sessions */
+ u16 ntsih;
+ /* Number of active sessions */
+ u32 nsessions;
+ /* Number of Network Portals available for this TPG */
+ u32 num_tpg_nps;
+ /* Per TPG LIO specific session ID. */
+ u32 sid;
+ /* Spinlock for adding/removing Network Portals */
+ spinlock_t tpg_np_lock;
+ spinlock_t tpg_state_lock;
+ struct se_portal_group tpg_se_tpg;
+ struct mutex tpg_access_lock;
+ struct mutex np_login_lock;
+ struct iscsi_tpg_attrib tpg_attrib;
+ /* Pointer to default list of iSCSI parameters for TPG */
+ struct iscsi_param_list *param_list;
+ struct iscsi_tiqn *tpg_tiqn;
+ struct list_head tpg_gnp_list;
+ struct list_head tpg_list;
+} ____cacheline_aligned;
+
+#define ISCSI_TPG_C(c) ((struct iscsi_portal_group *)(c)->tpg)
+#define ISCSI_TPG_LUN(c, l) ((iscsi_tpg_list_t *)(c)->tpg->tpg_lun_list_t[l])
+#define ISCSI_TPG_S(s) ((struct iscsi_portal_group *)(s)->tpg)
+#define ISCSI_TPG_ATTRIB(t) (&(t)->tpg_attrib)
+#define SE_TPG(tpg) (&(tpg)->tpg_se_tpg)
+
+struct iscsi_wwn_stat_grps {
+ struct config_group iscsi_stat_group;
+ struct config_group iscsi_instance_group;
+ struct config_group iscsi_sess_err_group;
+ struct config_group iscsi_tgt_attr_group;
+ struct config_group iscsi_login_stats_group;
+ struct config_group iscsi_logout_stats_group;
+};
+
+struct iscsi_tiqn {
+#define ISCSI_IQN_LEN 224
+ unsigned char tiqn[ISCSI_IQN_LEN];
+ enum tiqn_state_table tiqn_state;
+ int tiqn_access_count;
+ u32 tiqn_active_tpgs;
+ u32 tiqn_ntpgs;
+ u32 tiqn_num_tpg_nps;
+ u32 tiqn_nsessions;
+ struct list_head tiqn_list;
+ struct list_head tiqn_tpg_list;
+ spinlock_t tiqn_state_lock;
+ spinlock_t tiqn_tpg_lock;
+ struct se_wwn tiqn_wwn;
+ struct iscsi_wwn_stat_grps tiqn_stat_grps;
+ int tiqn_index;
+ struct iscsi_sess_err_stats sess_err_stats;
+ struct iscsi_login_stats login_stats;
+ struct iscsi_logout_stats logout_stats;
+} ____cacheline_aligned;
+
+#define WWN_STAT_GRPS(tiqn) (&(tiqn)->tiqn_stat_grps)
+
+struct iscsit_global {
+ /* In core shutdown */
+ u32 in_shutdown;
+ u32 active_ts;
+ /* Unique identifier used for the authentication daemon */
+ u32 auth_id;
+ u32 inactive_ts;
+ /* Thread Set bitmap count */
+ int ts_bitmap_count;
+ /* Thread Set bitmap pointer */
+ unsigned long *ts_bitmap;
+ /* Used for iSCSI discovery session authentication */
+ struct iscsi_node_acl discovery_acl;
+ struct iscsi_portal_group *discovery_tpg;
+};
+
+#endif /* ISCSI_TARGET_CORE_H */
diff --git a/drivers/target/iscsi/iscsi_target_datain_values.c b/drivers/target/iscsi/iscsi_target_datain_values.c
new file mode 100644
index 00000000000..8c049512951
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_datain_values.c
@@ -0,0 +1,531 @@
+/*******************************************************************************
+ * This file contains the iSCSI Target DataIN value generation functions.
+ *
+ * © Copyright 2007-2011 RisingTide Systems LLC.
+ *
+ * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ *
+ * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ ******************************************************************************/
+
+#include <scsi/iscsi_proto.h>
+
+#include "iscsi_target_core.h"
+#include "iscsi_target_seq_pdu_list.h"
+#include "iscsi_target_erl1.h"
+#include "iscsi_target_util.h"
+#include "iscsi_target.h"
+#include "iscsi_target_datain_values.h"
+
+struct iscsi_datain_req *iscsit_allocate_datain_req(void)
+{
+ struct iscsi_datain_req *dr;
+
+ dr = kmem_cache_zalloc(lio_dr_cache, GFP_ATOMIC);
+ if (!dr) {
+ pr_err("Unable to allocate memory for"
+ " struct iscsi_datain_req\n");
+ return NULL;
+ }
+ INIT_LIST_HEAD(&dr->dr_list);
+
+ return dr;
+}
+
+void iscsit_attach_datain_req(struct iscsi_cmd *cmd, struct iscsi_datain_req *dr)
+{
+ spin_lock(&cmd->datain_lock);
+ list_add_tail(&dr->dr_list, &cmd->datain_list);
+ spin_unlock(&cmd->datain_lock);
+}
+
+void iscsit_free_datain_req(struct iscsi_cmd *cmd, struct iscsi_datain_req *dr)
+{
+ spin_lock(&cmd->datain_lock);
+ list_del(&dr->dr_list);
+ spin_unlock(&cmd->datain_lock);
+
+ kmem_cache_free(lio_dr_cache, dr);
+}
+
+void iscsit_free_all_datain_reqs(struct iscsi_cmd *cmd)
+{
+ struct iscsi_datain_req *dr, *dr_tmp;
+
+ spin_lock(&cmd->datain_lock);
+ list_for_each_entry_safe(dr, dr_tmp, &cmd->datain_list, dr_list) {
+ list_del(&dr->dr_list);
+ kmem_cache_free(lio_dr_cache, dr);
+ }
+ spin_unlock(&cmd->datain_lock);
+}
+
+struct iscsi_datain_req *iscsit_get_datain_req(struct iscsi_cmd *cmd)
+{
+ struct iscsi_datain_req *dr;
+
+ if (list_empty(&cmd->datain_list)) {
+ pr_err("cmd->datain_list is empty for ITT:"
+ " 0x%08x\n", cmd->init_task_tag);
+ return NULL;
+ }
+ list_for_each_entry(dr, &cmd->datain_list, dr_list)
+ break;
+
+ return dr;
+}
+
+/*
+ * For Normal and Recovery DataSequenceInOrder=Yes and DataPDUInOrder=Yes.
+ */
+static struct iscsi_datain_req *iscsit_set_datain_values_yes_and_yes(
+ struct iscsi_cmd *cmd,
+ struct iscsi_datain *datain)
+{
+ u32 next_burst_len, read_data_done, read_data_left;
+ struct iscsi_conn *conn = cmd->conn;
+ struct iscsi_datain_req *dr;
+
+ dr = iscsit_get_datain_req(cmd);
+ if (!dr)
+ return NULL;
+
+ if (dr->recovery && dr->generate_recovery_values) {
+ if (iscsit_create_recovery_datain_values_datasequenceinorder_yes(
+ cmd, dr) < 0)
+ return NULL;
+
+ dr->generate_recovery_values = 0;
+ }
+
+ next_burst_len = (!dr->recovery) ?
+ cmd->next_burst_len : dr->next_burst_len;
+ read_data_done = (!dr->recovery) ?
+ cmd->read_data_done : dr->read_data_done;
+
+ read_data_left = (cmd->data_length - read_data_done);
+ if (!read_data_left) {
+ pr_err("ITT: 0x%08x read_data_left is zero!\n",
+ cmd->init_task_tag);
+ return NULL;
+ }
+
+ if ((read_data_left <= conn->conn_ops->MaxRecvDataSegmentLength) &&
+ (read_data_left <= (conn->sess->sess_ops->MaxBurstLength -
+ next_burst_len))) {
+ datain->length = read_data_left;
+
+ datain->flags |= (ISCSI_FLAG_CMD_FINAL | ISCSI_FLAG_DATA_STATUS);
+ if (conn->sess->sess_ops->ErrorRecoveryLevel > 0)
+ datain->flags |= ISCSI_FLAG_DATA_ACK;
+ } else {
+ if ((next_burst_len +
+ conn->conn_ops->MaxRecvDataSegmentLength) <
+ conn->sess->sess_ops->MaxBurstLength) {
+ datain->length =
+ conn->conn_ops->MaxRecvDataSegmentLength;
+ next_burst_len += datain->length;
+ } else {
+ datain->length = (conn->sess->sess_ops->MaxBurstLength -
+ next_burst_len);
+ next_burst_len = 0;
+
+ datain->flags |= ISCSI_FLAG_CMD_FINAL;
+ if (conn->sess->sess_ops->ErrorRecoveryLevel > 0)
+ datain->flags |= ISCSI_FLAG_DATA_ACK;
+ }
+ }
+
+ datain->data_sn = (!dr->recovery) ? cmd->data_sn++ : dr->data_sn++;
+ datain->offset = read_data_done;
+
+ if (!dr->recovery) {
+ cmd->next_burst_len = next_burst_len;
+ cmd->read_data_done += datain->length;
+ } else {
+ dr->next_burst_len = next_burst_len;
+ dr->read_data_done += datain->length;
+ }
+
+ if (!dr->recovery) {
+ if (datain->flags & ISCSI_FLAG_DATA_STATUS)
+ dr->dr_complete = DATAIN_COMPLETE_NORMAL;
+
+ return dr;
+ }
+
+ if (!dr->runlength) {
+ if (datain->flags & ISCSI_FLAG_DATA_STATUS) {
+ dr->dr_complete =
+ (dr->recovery == DATAIN_WITHIN_COMMAND_RECOVERY) ?
+ DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY :
+ DATAIN_COMPLETE_CONNECTION_RECOVERY;
+ }
+ } else {
+ if ((dr->begrun + dr->runlength) == dr->data_sn) {
+ dr->dr_complete =
+ (dr->recovery == DATAIN_WITHIN_COMMAND_RECOVERY) ?
+ DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY :
+ DATAIN_COMPLETE_CONNECTION_RECOVERY;
+ }
+ }
+
+ return dr;
+}
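+
+/*
+ * Worked example (illustrative only; values assumed): with
+ * MaxRecvDataSegmentLength=8k, MaxBurstLength=16k and a 20k read,
+ * successive calls to iscsit_set_datain_values_yes_and_yes() yield
+ * DataIN PDUs of 8k (DataSN 0), 8k with F=1 closing the first burst
+ * (DataSN 1), and a final 4k with F=1 and S=1 (DataSN 2).
+ */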
+
+/*
+ * For Normal and Recovery DataSequenceInOrder=No and DataPDUInOrder=Yes.
+ */
+static struct iscsi_datain_req *iscsit_set_datain_values_no_and_yes(
+ struct iscsi_cmd *cmd,
+ struct iscsi_datain *datain)
+{
+ u32 offset, read_data_done, read_data_left, seq_send_order;
+ struct iscsi_conn *conn = cmd->conn;
+ struct iscsi_datain_req *dr;
+ struct iscsi_seq *seq;
+
+ dr = iscsit_get_datain_req(cmd);
+ if (!dr)
+ return NULL;
+
+ if (dr->recovery && dr->generate_recovery_values) {
+ if (iscsit_create_recovery_datain_values_datasequenceinorder_no(
+ cmd, dr) < 0)
+ return NULL;
+
+ dr->generate_recovery_values = 0;
+ }
+
+ read_data_done = (!dr->recovery) ?
+ cmd->read_data_done : dr->read_data_done;
+ seq_send_order = (!dr->recovery) ?
+ cmd->seq_send_order : dr->seq_send_order;
+
+ read_data_left = (cmd->data_length - read_data_done);
+ if (!read_data_left) {
+ pr_err("ITT: 0x%08x read_data_left is zero!\n",
+ cmd->init_task_tag);
+ return NULL;
+ }
+
+ seq = iscsit_get_seq_holder_for_datain(cmd, seq_send_order);
+ if (!seq)
+ return NULL;
+
+ seq->sent = 1;
+
+ if (!dr->recovery && !seq->next_burst_len)
+ seq->first_datasn = cmd->data_sn;
+
+ offset = (seq->offset + seq->next_burst_len);
+
+ if ((offset + conn->conn_ops->MaxRecvDataSegmentLength) >=
+ cmd->data_length) {
+ datain->length = (cmd->data_length - offset);
+ datain->offset = offset;
+
+ datain->flags |= ISCSI_FLAG_CMD_FINAL;
+ if (conn->sess->sess_ops->ErrorRecoveryLevel > 0)
+ datain->flags |= ISCSI_FLAG_DATA_ACK;
+
+ seq->next_burst_len = 0;
+ seq_send_order++;
+ } else {
+ if ((seq->next_burst_len +
+ conn->conn_ops->MaxRecvDataSegmentLength) <
+ conn->sess->sess_ops->MaxBurstLength) {
+ datain->length =
+ conn->conn_ops->MaxRecvDataSegmentLength;
+ datain->offset = (seq->offset + seq->next_burst_len);
+
+ seq->next_burst_len += datain->length;
+ } else {
+ datain->length = (conn->sess->sess_ops->MaxBurstLength -
+ seq->next_burst_len);
+ datain->offset = (seq->offset + seq->next_burst_len);
+
+ datain->flags |= ISCSI_FLAG_CMD_FINAL;
+ if (conn->sess->sess_ops->ErrorRecoveryLevel > 0)
+ datain->flags |= ISCSI_FLAG_DATA_ACK;
+
+ seq->next_burst_len = 0;
+ seq_send_order++;
+ }
+ }
+
+ if ((read_data_done + datain->length) == cmd->data_length)
+ datain->flags |= ISCSI_FLAG_DATA_STATUS;
+
+ datain->data_sn = (!dr->recovery) ? cmd->data_sn++ : dr->data_sn++;
+ if (!dr->recovery) {
+ cmd->seq_send_order = seq_send_order;
+ cmd->read_data_done += datain->length;
+ } else {
+ dr->seq_send_order = seq_send_order;
+ dr->read_data_done += datain->length;
+ }
+
+ if (!dr->recovery) {
+ if (datain->flags & ISCSI_FLAG_CMD_FINAL)
+ seq->last_datasn = datain->data_sn;
+ if (datain->flags & ISCSI_FLAG_DATA_STATUS)
+ dr->dr_complete = DATAIN_COMPLETE_NORMAL;
+
+ return dr;
+ }
+
+ if (!dr->runlength) {
+ if (datain->flags & ISCSI_FLAG_DATA_STATUS) {
+ dr->dr_complete =
+ (dr->recovery == DATAIN_WITHIN_COMMAND_RECOVERY) ?
+ DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY :
+ DATAIN_COMPLETE_CONNECTION_RECOVERY;
+ }
+ } else {
+ if ((dr->begrun + dr->runlength) == dr->data_sn) {
+ dr->dr_complete =
+ (dr->recovery == DATAIN_WITHIN_COMMAND_RECOVERY) ?
+ DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY :
+ DATAIN_COMPLETE_CONNECTION_RECOVERY;
+ }
+ }
+
+ return dr;
+}
+
+/*
+ * For Normal and Recovery DataSequenceInOrder=Yes and DataPDUInOrder=No.
+ */
+static struct iscsi_datain_req *iscsit_set_datain_values_yes_and_no(
+ struct iscsi_cmd *cmd,
+ struct iscsi_datain *datain)
+{
+ u32 next_burst_len, read_data_done, read_data_left;
+ struct iscsi_conn *conn = cmd->conn;
+ struct iscsi_datain_req *dr;
+ struct iscsi_pdu *pdu;
+
+ dr = iscsit_get_datain_req(cmd);
+ if (!dr)
+ return NULL;
+
+ if (dr->recovery && dr->generate_recovery_values) {
+ if (iscsit_create_recovery_datain_values_datasequenceinorder_yes(
+ cmd, dr) < 0)
+ return NULL;
+
+ dr->generate_recovery_values = 0;
+ }
+
+ next_burst_len = (!dr->recovery) ?
+ cmd->next_burst_len : dr->next_burst_len;
+ read_data_done = (!dr->recovery) ?
+ cmd->read_data_done : dr->read_data_done;
+
+ read_data_left = (cmd->data_length - read_data_done);
+ if (!read_data_left) {
+ pr_err("ITT: 0x%08x read_data_left is zero!\n",
+ cmd->init_task_tag);
+ return dr;
+ }
+
+ pdu = iscsit_get_pdu_holder_for_seq(cmd, NULL);
+ if (!pdu)
+ return dr;
+
+ if ((read_data_done + pdu->length) == cmd->data_length) {
+ pdu->flags |= (ISCSI_FLAG_CMD_FINAL | ISCSI_FLAG_DATA_STATUS);
+ if (conn->sess->sess_ops->ErrorRecoveryLevel > 0)
+ pdu->flags |= ISCSI_FLAG_DATA_ACK;
+
+ next_burst_len = 0;
+ } else {
+ if ((next_burst_len + conn->conn_ops->MaxRecvDataSegmentLength) <
+ conn->sess->sess_ops->MaxBurstLength)
+ next_burst_len += pdu->length;
+ else {
+ pdu->flags |= ISCSI_FLAG_CMD_FINAL;
+ if (conn->sess->sess_ops->ErrorRecoveryLevel > 0)
+ pdu->flags |= ISCSI_FLAG_DATA_ACK;
+
+ next_burst_len = 0;
+ }
+ }
+
+ pdu->data_sn = (!dr->recovery) ? cmd->data_sn++ : dr->data_sn++;
+ if (!dr->recovery) {
+ cmd->next_burst_len = next_burst_len;
+ cmd->read_data_done += pdu->length;
+ } else {
+ dr->next_burst_len = next_burst_len;
+ dr->read_data_done += pdu->length;
+ }
+
+ datain->flags = pdu->flags;
+ datain->length = pdu->length;
+ datain->offset = pdu->offset;
+ datain->data_sn = pdu->data_sn;
+
+ if (!dr->recovery) {
+ if (datain->flags & ISCSI_FLAG_DATA_STATUS)
+ dr->dr_complete = DATAIN_COMPLETE_NORMAL;
+
+ return dr;
+ }
+
+ if (!dr->runlength) {
+ if (datain->flags & ISCSI_FLAG_DATA_STATUS) {
+ dr->dr_complete =
+ (dr->recovery == DATAIN_WITHIN_COMMAND_RECOVERY) ?
+ DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY :
+ DATAIN_COMPLETE_CONNECTION_RECOVERY;
+ }
+ } else {
+ if ((dr->begrun + dr->runlength) == dr->data_sn) {
+ dr->dr_complete =
+ (dr->recovery == DATAIN_WITHIN_COMMAND_RECOVERY) ?
+ DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY :
+ DATAIN_COMPLETE_CONNECTION_RECOVERY;
+ }
+ }
+
+ return dr;
+}
+
+/*
+ * For Normal and Recovery DataSequenceInOrder=No and DataPDUInOrder=No.
+ */
+static struct iscsi_datain_req *iscsit_set_datain_values_no_and_no(
+ struct iscsi_cmd *cmd,
+ struct iscsi_datain *datain)
+{
+ u32 read_data_done, read_data_left, seq_send_order;
+ struct iscsi_conn *conn = cmd->conn;
+ struct iscsi_datain_req *dr;
+ struct iscsi_pdu *pdu;
+ struct iscsi_seq *seq = NULL;
+
+ dr = iscsit_get_datain_req(cmd);
+ if (!dr)
+ return NULL;
+
+ if (dr->recovery && dr->generate_recovery_values) {
+ if (iscsit_create_recovery_datain_values_datasequenceinorder_no(
+ cmd, dr) < 0)
+ return NULL;
+
+ dr->generate_recovery_values = 0;
+ }
+
+ read_data_done = (!dr->recovery) ?
+ cmd->read_data_done : dr->read_data_done;
+ seq_send_order = (!dr->recovery) ?
+ cmd->seq_send_order : dr->seq_send_order;
+
+ read_data_left = (cmd->data_length - read_data_done);
+ if (!read_data_left) {
+ pr_err("ITT: 0x%08x read_data_left is zero!\n",
+ cmd->init_task_tag);
+ return NULL;
+ }
+
+ seq = iscsit_get_seq_holder_for_datain(cmd, seq_send_order);
+ if (!seq)
+ return NULL;
+
+ seq->sent = 1;
+
+ if (!dr->recovery && !seq->next_burst_len)
+ seq->first_datasn = cmd->data_sn;
+
+ pdu = iscsit_get_pdu_holder_for_seq(cmd, seq);
+ if (!pdu)
+ return NULL;
+
+ if (seq->pdu_send_order == seq->pdu_count) {
+ pdu->flags |= ISCSI_FLAG_CMD_FINAL;
+ if (conn->sess->sess_ops->ErrorRecoveryLevel > 0)
+ pdu->flags |= ISCSI_FLAG_DATA_ACK;
+
+ seq->next_burst_len = 0;
+ seq_send_order++;
+ } else
+ seq->next_burst_len += pdu->length;
+
+ if ((read_data_done + pdu->length) == cmd->data_length)
+ pdu->flags |= ISCSI_FLAG_DATA_STATUS;
+
+ pdu->data_sn = (!dr->recovery) ? cmd->data_sn++ : dr->data_sn++;
+ if (!dr->recovery) {
+ cmd->seq_send_order = seq_send_order;
+ cmd->read_data_done += pdu->length;
+ } else {
+ dr->seq_send_order = seq_send_order;
+ dr->read_data_done += pdu->length;
+ }
+
+ datain->flags = pdu->flags;
+ datain->length = pdu->length;
+ datain->offset = pdu->offset;
+ datain->data_sn = pdu->data_sn;
+
+ if (!dr->recovery) {
+ if (datain->flags & ISCSI_FLAG_CMD_FINAL)
+ seq->last_datasn = datain->data_sn;
+ if (datain->flags & ISCSI_FLAG_DATA_STATUS)
+ dr->dr_complete = DATAIN_COMPLETE_NORMAL;
+
+ return dr;
+ }
+
+ if (!dr->runlength) {
+ if (datain->flags & ISCSI_FLAG_DATA_STATUS) {
+ dr->dr_complete =
+ (dr->recovery == DATAIN_WITHIN_COMMAND_RECOVERY) ?
+ DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY :
+ DATAIN_COMPLETE_CONNECTION_RECOVERY;
+ }
+ } else {
+ if ((dr->begrun + dr->runlength) == dr->data_sn) {
+ dr->dr_complete =
+ (dr->recovery == DATAIN_WITHIN_COMMAND_RECOVERY) ?
+ DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY :
+ DATAIN_COMPLETE_CONNECTION_RECOVERY;
+ }
+ }
+
+ return dr;
+}
+
+struct iscsi_datain_req *iscsit_get_datain_values(
+ struct iscsi_cmd *cmd,
+ struct iscsi_datain *datain)
+{
+ struct iscsi_conn *conn = cmd->conn;
+
+ if (conn->sess->sess_ops->DataSequenceInOrder &&
+ conn->sess->sess_ops->DataPDUInOrder)
+ return iscsit_set_datain_values_yes_and_yes(cmd, datain);
+ else if (!conn->sess->sess_ops->DataSequenceInOrder &&
+ conn->sess->sess_ops->DataPDUInOrder)
+ return iscsit_set_datain_values_no_and_yes(cmd, datain);
+ else if (conn->sess->sess_ops->DataSequenceInOrder &&
+ !conn->sess->sess_ops->DataPDUInOrder)
+ return iscsit_set_datain_values_yes_and_no(cmd, datain);
+ else if (!conn->sess->sess_ops->DataSequenceInOrder &&
+ !conn->sess->sess_ops->DataPDUInOrder)
+ return iscsit_set_datain_values_no_and_no(cmd, datain);
+
+ return NULL;
+}
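+
+/*
+ * Sketch (illustrative only) of how a transmit path might consume
+ * iscsit_get_datain_values() to emit one DataIN PDU per call.
+ * iscsit_build_and_send_datain() is a hypothetical helper, not part
+ * of this driver:
+ *
+ *	struct iscsi_datain datain;
+ *	struct iscsi_datain_req *dr;
+ *
+ *	memset(&datain, 0, sizeof(datain));
+ *	dr = iscsit_get_datain_values(cmd, &datain);
+ *	if (!dr)
+ *		return -1;
+ *	iscsit_build_and_send_datain(cmd, &datain);
+ *	if (datain.flags & ISCSI_FLAG_DATA_STATUS)
+ *		iscsit_free_all_datain_reqs(cmd);
+ */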
diff --git a/drivers/target/iscsi/iscsi_target_datain_values.h b/drivers/target/iscsi/iscsi_target_datain_values.h
new file mode 100644
index 00000000000..646429ac5a0
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_datain_values.h
@@ -0,0 +1,12 @@
+#ifndef ISCSI_TARGET_DATAIN_VALUES_H
+#define ISCSI_TARGET_DATAIN_VALUES_H
+
+extern struct iscsi_datain_req *iscsit_allocate_datain_req(void);
+extern void iscsit_attach_datain_req(struct iscsi_cmd *, struct iscsi_datain_req *);
+extern void iscsit_free_datain_req(struct iscsi_cmd *, struct iscsi_datain_req *);
+extern void iscsit_free_all_datain_reqs(struct iscsi_cmd *);
+extern struct iscsi_datain_req *iscsit_get_datain_req(struct iscsi_cmd *);
+extern struct iscsi_datain_req *iscsit_get_datain_values(struct iscsi_cmd *,
+ struct iscsi_datain *);
+
+#endif /*** ISCSI_TARGET_DATAIN_VALUES_H ***/
diff --git a/drivers/target/iscsi/iscsi_target_device.c b/drivers/target/iscsi/iscsi_target_device.c
new file mode 100644
index 00000000000..a19fa5eea88
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_device.c
@@ -0,0 +1,87 @@
+/*******************************************************************************
+ * This file contains the iSCSI Virtual Device and Disk Transport
+ * agnostic related functions.
+ *
+ * © Copyright 2007-2011 RisingTide Systems LLC.
+ *
+ * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ *
+ * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ ******************************************************************************/
+
+#include <scsi/scsi_device.h>
+#include <target/target_core_base.h>
+#include <target/target_core_device.h>
+#include <target/target_core_transport.h>
+
+#include "iscsi_target_core.h"
+#include "iscsi_target_device.h"
+#include "iscsi_target_tpg.h"
+#include "iscsi_target_util.h"
+
+int iscsit_get_lun_for_tmr(
+ struct iscsi_cmd *cmd,
+ u64 lun)
+{
+ u32 unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
+
+ return transport_lookup_tmr_lun(&cmd->se_cmd, unpacked_lun);
+}
+
+int iscsit_get_lun_for_cmd(
+ struct iscsi_cmd *cmd,
+ unsigned char *cdb,
+ u64 lun)
+{
+ u32 unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
+
+ return transport_lookup_cmd_lun(&cmd->se_cmd, unpacked_lun);
+}
+
+void iscsit_determine_maxcmdsn(struct iscsi_session *sess)
+{
+ struct se_node_acl *se_nacl;
+
+ /*
+ * This is a discovery session, the single queue slot was already
+ * assigned in iscsi_login_zero_tsih(). Since only Logout and
+ * Text Opcodes are allowed during discovery we do not have to worry
+ * about the HBA's queue depth here.
+ */
+ if (sess->sess_ops->SessionType)
+ return;
+
+ se_nacl = sess->se_sess->se_node_acl;
+
+ /*
+ * This is a normal session; set the Session's CmdSN window to the
+ * struct se_node_acl->queue_depth. The value in struct se_node_acl->queue_depth
+ * has already been validated as a legal value in
+ * core_set_queue_depth_for_node().
+ */
+ sess->cmdsn_window = se_nacl->queue_depth;
+ sess->max_cmd_sn = (sess->max_cmd_sn + se_nacl->queue_depth) - 1;
+}
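+
+/*
+ * For example (illustrative; values assumed): with
+ * se_node_acl->queue_depth=32, and assuming MaxCmdSN equals ExpCmdSN
+ * at login, the window above becomes [ExpCmdSN, ExpCmdSN+31], i.e.
+ * the initiator may keep at most 32 non-immediate commands
+ * outstanding on the session.
+ */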
+
+void iscsit_increment_maxcmdsn(struct iscsi_cmd *cmd, struct iscsi_session *sess)
+{
+ if (cmd->immediate_cmd || cmd->maxcmdsn_inc)
+ return;
+
+ cmd->maxcmdsn_inc = 1;
+
+ mutex_lock(&sess->cmdsn_mutex);
+ sess->max_cmd_sn += 1;
+ pr_debug("Updated MaxCmdSN to 0x%08x\n", sess->max_cmd_sn);
+ mutex_unlock(&sess->cmdsn_mutex);
+}
diff --git a/drivers/target/iscsi/iscsi_target_device.h b/drivers/target/iscsi/iscsi_target_device.h
new file mode 100644
index 00000000000..bef1cada15f
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_device.h
@@ -0,0 +1,9 @@
+#ifndef ISCSI_TARGET_DEVICE_H
+#define ISCSI_TARGET_DEVICE_H
+
+extern int iscsit_get_lun_for_tmr(struct iscsi_cmd *, u64);
+extern int iscsit_get_lun_for_cmd(struct iscsi_cmd *, unsigned char *, u64);
+extern void iscsit_determine_maxcmdsn(struct iscsi_session *);
+extern void iscsit_increment_maxcmdsn(struct iscsi_cmd *, struct iscsi_session *);
+
+#endif /* ISCSI_TARGET_DEVICE_H */
diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c
new file mode 100644
index 00000000000..b7ffc3cd40c
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_erl0.c
@@ -0,0 +1,1004 @@
+/******************************************************************************
+ * This file contains error recovery level zero functions used by
+ * the iSCSI Target driver.
+ *
+ * © Copyright 2007-2011 RisingTide Systems LLC.
+ *
+ * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ *
+ * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ ******************************************************************************/
+
+#include <scsi/iscsi_proto.h>
+#include <target/target_core_base.h>
+#include <target/target_core_transport.h>
+
+#include "iscsi_target_core.h"
+#include "iscsi_target_seq_pdu_list.h"
+#include "iscsi_target_tq.h"
+#include "iscsi_target_erl0.h"
+#include "iscsi_target_erl1.h"
+#include "iscsi_target_erl2.h"
+#include "iscsi_target_util.h"
+#include "iscsi_target.h"
+
+/*
+ * Used to set values in struct iscsi_cmd that iscsit_dataout_check_sequence()
+ * checks against to determine a PDU's Offset+Length is within the current
+ * DataOUT Sequence. Used for DataSequenceInOrder=Yes only.
+ */
+void iscsit_set_dataout_sequence_values(
+ struct iscsi_cmd *cmd)
+{
+ struct iscsi_conn *conn = cmd->conn;
+ /*
+ * Still set seq_start_offset and seq_end_offset for Unsolicited
+ * DataOUT, even if DataSequenceInOrder=No.
+ */
+ if (cmd->unsolicited_data) {
+ cmd->seq_start_offset = cmd->write_data_done;
+ cmd->seq_end_offset = cmd->write_data_done +
+ ((cmd->data_length >
+ conn->sess->sess_ops->FirstBurstLength) ?
+ conn->sess->sess_ops->FirstBurstLength : cmd->data_length);
+ return;
+ }
+
+ if (!conn->sess->sess_ops->DataSequenceInOrder)
+ return;
+
+ if (!cmd->seq_start_offset && !cmd->seq_end_offset) {
+ cmd->seq_start_offset = cmd->write_data_done;
+ cmd->seq_end_offset = (cmd->data_length >
+ conn->sess->sess_ops->MaxBurstLength) ?
+ (cmd->write_data_done +
+ conn->sess->sess_ops->MaxBurstLength) : cmd->data_length;
+ } else {
+ cmd->seq_start_offset = cmd->seq_end_offset;
+ cmd->seq_end_offset = ((cmd->seq_end_offset +
+ conn->sess->sess_ops->MaxBurstLength) >=
+ cmd->data_length) ? cmd->data_length :
+ (cmd->seq_end_offset +
+ conn->sess->sess_ops->MaxBurstLength);
+ }
+}
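+
+/*
+ * Example (illustrative; values assumed): for a solicited 192k WRITE
+ * with DataSequenceInOrder=Yes and MaxBurstLength=64k, successive
+ * calls above produce the DataOUT sequence windows [0,64k),
+ * [64k,128k) and [128k,192k) that iscsit_dataout_check_sequence()
+ * validates offsets against.
+ */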
+
+static int iscsit_dataout_within_command_recovery_check(
+ struct iscsi_cmd *cmd,
+ unsigned char *buf)
+{
+ struct iscsi_conn *conn = cmd->conn;
+ struct iscsi_data *hdr = (struct iscsi_data *) buf;
+ u32 payload_length = ntoh24(hdr->dlength);
+
+ /*
+ * We do the within-command recovery checks here as it is
+ * the first function called in iscsit_check_pre_dataout().
+ * Basically, if we are in within-command recovery and
+ * the PDU does not contain the offset the sequence needs,
+ * dump the payload.
+ *
+ * This only applies to DataPDUInOrder=Yes, for
+ * DataPDUInOrder=No we only re-request the failed PDU
+ * and check that all PDUs in a sequence are received
+ * upon end of sequence.
+ */
+ if (conn->sess->sess_ops->DataSequenceInOrder) {
+ if ((cmd->cmd_flags & ICF_WITHIN_COMMAND_RECOVERY) &&
+ (cmd->write_data_done != hdr->offset))
+ goto dump;
+
+ cmd->cmd_flags &= ~ICF_WITHIN_COMMAND_RECOVERY;
+ } else {
+ struct iscsi_seq *seq;
+
+ seq = iscsit_get_seq_holder(cmd, hdr->offset, payload_length);
+ if (!seq)
+ return DATAOUT_CANNOT_RECOVER;
+ /*
+ * Set the struct iscsi_seq pointer to reuse later.
+ */
+ cmd->seq_ptr = seq;
+
+ if (conn->sess->sess_ops->DataPDUInOrder) {
+ if ((seq->status ==
+ DATAOUT_SEQUENCE_WITHIN_COMMAND_RECOVERY) &&
+ ((seq->offset != hdr->offset) ||
+ (seq->data_sn != hdr->datasn)))
+ goto dump;
+ } else {
+ if ((seq->status ==
+ DATAOUT_SEQUENCE_WITHIN_COMMAND_RECOVERY) &&
+ (seq->data_sn != hdr->datasn))
+ goto dump;
+ }
+
+ if (seq->status == DATAOUT_SEQUENCE_COMPLETE)
+ goto dump;
+
+ seq->status = 0;
+ }
+
+ return DATAOUT_NORMAL;
+
+dump:
+ pr_err("Dumping DataOUT PDU Offset: %u Length: %d DataSN:"
+ " 0x%08x\n", hdr->offset, payload_length, hdr->datasn);
+ return iscsit_dump_data_payload(conn, payload_length, 1);
+}
+
+static int iscsit_dataout_check_unsolicited_sequence(
+ struct iscsi_cmd *cmd,
+ unsigned char *buf)
+{
+ u32 first_burst_len;
+ struct iscsi_conn *conn = cmd->conn;
+ struct iscsi_data *hdr = (struct iscsi_data *) buf;
+ u32 payload_length = ntoh24(hdr->dlength);
+
+ if ((hdr->offset < cmd->seq_start_offset) ||
+ ((hdr->offset + payload_length) > cmd->seq_end_offset)) {
+ pr_err("Command ITT: 0x%08x with Offset: %u,"
+ " Length: %u outside of Unsolicited Sequence %u:%u while"
+ " DataSequenceInOrder=Yes.\n", cmd->init_task_tag,
+ hdr->offset, payload_length, cmd->seq_start_offset,
+ cmd->seq_end_offset);
+ return DATAOUT_CANNOT_RECOVER;
+ }
+
+ first_burst_len = (cmd->first_burst_len + payload_length);
+
+ if (first_burst_len > conn->sess->sess_ops->FirstBurstLength) {
+ pr_err("Total %u bytes exceeds FirstBurstLength: %u"
+ " for this Unsolicited DataOut Burst.\n",
+ first_burst_len, conn->sess->sess_ops->FirstBurstLength);
+ transport_send_check_condition_and_sense(&cmd->se_cmd,
+ TCM_INCORRECT_AMOUNT_OF_DATA, 0);
+ return DATAOUT_CANNOT_RECOVER;
+ }
+
+ /*
+ * Perform various MaxBurstLength and ISCSI_FLAG_CMD_FINAL sanity
+ * checks for the current Unsolicited DataOUT Sequence.
+ */
+ if (hdr->flags & ISCSI_FLAG_CMD_FINAL) {
+ /*
+ * Ignore ISCSI_FLAG_CMD_FINAL checks while DataPDUInOrder=No; end of
+ * sequence checks are handled in
+ * iscsit_dataout_datapduinorder_no_fbit().
+ */
+ if (!conn->sess->sess_ops->DataPDUInOrder)
+ goto out;
+
+ if ((first_burst_len != cmd->data_length) &&
+ (first_burst_len != conn->sess->sess_ops->FirstBurstLength)) {
+ pr_err("Unsolicited non-immediate data"
+ " received %u does not equal FirstBurstLength: %u, and"
+ " does not equal ExpXferLen %u.\n", first_burst_len,
+ conn->sess->sess_ops->FirstBurstLength,
+ cmd->data_length);
+ transport_send_check_condition_and_sense(&cmd->se_cmd,
+ TCM_INCORRECT_AMOUNT_OF_DATA, 0);
+ return DATAOUT_CANNOT_RECOVER;
+ }
+ } else {
+ if (first_burst_len == conn->sess->sess_ops->FirstBurstLength) {
+ pr_err("Command ITT: 0x%08x reached"
+ " FirstBurstLength: %u, but ISCSI_FLAG_CMD_FINAL is not set. protocol"
+ " error.\n", cmd->init_task_tag,
+ conn->sess->sess_ops->FirstBurstLength);
+ return DATAOUT_CANNOT_RECOVER;
+ }
+ if (first_burst_len == cmd->data_length) {
+ pr_err("Command ITT: 0x%08x reached"
+ " ExpXferLen: %u, but ISCSI_FLAG_CMD_FINAL is not set. protocol"
+ " error.\n", cmd->init_task_tag, cmd->data_length);
+ return DATAOUT_CANNOT_RECOVER;
+ }
+ }
+
+out:
+ return DATAOUT_NORMAL;
+}
+
+static int iscsit_dataout_check_sequence(
+ struct iscsi_cmd *cmd,
+ unsigned char *buf)
+{
+ u32 next_burst_len;
+ struct iscsi_conn *conn = cmd->conn;
+ struct iscsi_seq *seq = NULL;
+ struct iscsi_data *hdr = (struct iscsi_data *) buf;
+ u32 payload_length = ntoh24(hdr->dlength);
+
+ /*
+ * For DataSequenceInOrder=Yes: Check that the offset and offset+length
+ * is within range as defined by iscsit_set_dataout_sequence_values().
+ *
+ * For DataSequenceInOrder=No: Check that a struct iscsi_seq exists for the
+ * offset+length tuple.
+ */
+ if (conn->sess->sess_ops->DataSequenceInOrder) {
+ /*
+ * Due to the possibility of recovery DataOUT sent by the
+ * initiator fulfilling a Recovery R2T, it's best to just dump
+ * the payload here instead of erroring out.
+ */
+ if ((hdr->offset < cmd->seq_start_offset) ||
+ ((hdr->offset + payload_length) > cmd->seq_end_offset)) {
+ pr_err("Command ITT: 0x%08x with Offset: %u,"
+ " Length: %u outside of Sequence %u:%u while"
+ " DataSequenceInOrder=Yes.\n", cmd->init_task_tag,
+ hdr->offset, payload_length, cmd->seq_start_offset,
+ cmd->seq_end_offset);
+
+ if (iscsit_dump_data_payload(conn, payload_length, 1) < 0)
+ return DATAOUT_CANNOT_RECOVER;
+ return DATAOUT_WITHIN_COMMAND_RECOVERY;
+ }
+
+ next_burst_len = (cmd->next_burst_len + payload_length);
+ } else {
+ seq = iscsit_get_seq_holder(cmd, hdr->offset, payload_length);
+ if (!seq)
+ return DATAOUT_CANNOT_RECOVER;
+ /*
+ * Set the struct iscsi_seq pointer to reuse later.
+ */
+ cmd->seq_ptr = seq;
+
+ if (seq->status == DATAOUT_SEQUENCE_COMPLETE) {
+ if (iscsit_dump_data_payload(conn, payload_length, 1) < 0)
+ return DATAOUT_CANNOT_RECOVER;
+ return DATAOUT_WITHIN_COMMAND_RECOVERY;
+ }
+
+ next_burst_len = (seq->next_burst_len + payload_length);
+ }
+
+ if (next_burst_len > conn->sess->sess_ops->MaxBurstLength) {
+ pr_err("Command ITT: 0x%08x, NextBurstLength: %u and"
+ " Length: %u exceeds MaxBurstLength: %u. protocol"
+ " error.\n", cmd->init_task_tag,
+ (next_burst_len - payload_length),
+ payload_length, conn->sess->sess_ops->MaxBurstLength);
+ return DATAOUT_CANNOT_RECOVER;
+ }
+
+ /*
+ * Perform various MaxBurstLength and ISCSI_FLAG_CMD_FINAL sanity
+ * checks for the current DataOUT Sequence.
+ */
+ if (hdr->flags & ISCSI_FLAG_CMD_FINAL) {
+ /*
+ * Ignore ISCSI_FLAG_CMD_FINAL checks while DataPDUInOrder=No; end of
+ * sequence checks are handled in
+ * iscsit_dataout_datapduinorder_no_fbit().
+ */
+ if (!conn->sess->sess_ops->DataPDUInOrder)
+ goto out;
+
+ if (conn->sess->sess_ops->DataSequenceInOrder) {
+ if ((next_burst_len <
+ conn->sess->sess_ops->MaxBurstLength) &&
+ ((cmd->write_data_done + payload_length) <
+ cmd->data_length)) {
+ pr_err("Command ITT: 0x%08x set ISCSI_FLAG_CMD_FINAL"
+ " before end of DataOUT sequence, protocol"
+ " error.\n", cmd->init_task_tag);
+ return DATAOUT_CANNOT_RECOVER;
+ }
+ } else {
+ if (next_burst_len < seq->xfer_len) {
+ pr_err("Command ITT: 0x%08x set ISCSI_FLAG_CMD_FINAL"
+ " before end of DataOUT sequence, protocol"
+ " error.\n", cmd->init_task_tag);
+ return DATAOUT_CANNOT_RECOVER;
+ }
+ }
+ } else {
+ if (conn->sess->sess_ops->DataSequenceInOrder) {
+ if (next_burst_len ==
+ conn->sess->sess_ops->MaxBurstLength) {
+ pr_err("Command ITT: 0x%08x reached"
+ " MaxBurstLength: %u, but ISCSI_FLAG_CMD_FINAL is"
+ " not set, protocol error.", cmd->init_task_tag,
+ conn->sess->sess_ops->MaxBurstLength);
+ return DATAOUT_CANNOT_RECOVER;
+ }
+ if ((cmd->write_data_done + payload_length) ==
+ cmd->data_length) {
+ pr_err("Command ITT: 0x%08x reached"
+ " last DataOUT PDU in sequence but ISCSI_FLAG_"
+ "CMD_FINAL is not set, protocol error.\n",
+ cmd->init_task_tag);
+ return DATAOUT_CANNOT_RECOVER;
+ }
+ } else {
+ if (next_burst_len == seq->xfer_len) {
+ pr_err("Command ITT: 0x%08x reached"
+ " last DataOUT PDU in sequence but ISCSI_FLAG_"
+ "CMD_FINAL is not set, protocol error.\n",
+ cmd->init_task_tag);
+ return DATAOUT_CANNOT_RECOVER;
+ }
+ }
+ }
+
+out:
+ return DATAOUT_NORMAL;
+}
+
+static int iscsit_dataout_check_datasn(
+ struct iscsi_cmd *cmd,
+ unsigned char *buf)
+{
+ int dump = 0, recovery = 0;
+ u32 data_sn = 0;
+ struct iscsi_conn *conn = cmd->conn;
+ struct iscsi_data *hdr = (struct iscsi_data *) buf;
+ u32 payload_length = ntoh24(hdr->dlength);
+
+ /*
+ * Considering the target has no method of re-requesting DataOUT
+ * by DataSN, if we receive a greater DataSN than expected we
+ * assume the functions for DataPDUInOrder=[Yes,No] below will
+ * handle it.
+ *
+ * If the DataSN is less than expected, dump the payload.
+ */
+ if (conn->sess->sess_ops->DataSequenceInOrder)
+ data_sn = cmd->data_sn;
+ else {
+ struct iscsi_seq *seq = cmd->seq_ptr;
+ data_sn = seq->data_sn;
+ }
+
+ if (hdr->datasn > data_sn) {
+ pr_err("Command ITT: 0x%08x, received DataSN: 0x%08x"
+ " higher than expected 0x%08x.\n", cmd->init_task_tag,
+ hdr->datasn, data_sn);
+ recovery = 1;
+ goto recover;
+ } else if (hdr->datasn < data_sn) {
+ pr_err("Command ITT: 0x%08x, received DataSN: 0x%08x"
+ " lower than expected 0x%08x, discarding payload.\n",
+ cmd->init_task_tag, hdr->datasn, data_sn);
+ dump = 1;
+ goto dump;
+ }
+
+ return DATAOUT_NORMAL;
+
+recover:
+ if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
+ pr_err("Unable to perform within-command recovery"
+ " while ERL=0.\n");
+ return DATAOUT_CANNOT_RECOVER;
+ }
+dump:
+ if (iscsit_dump_data_payload(conn, payload_length, 1) < 0)
+ return DATAOUT_CANNOT_RECOVER;
+
+ return (recovery || dump) ? DATAOUT_WITHIN_COMMAND_RECOVERY :
+ DATAOUT_NORMAL;
+}
+
+static int iscsit_dataout_pre_datapduinorder_yes(
+ struct iscsi_cmd *cmd,
+ unsigned char *buf)
+{
+ int dump = 0, recovery = 0;
+ struct iscsi_conn *conn = cmd->conn;
+ struct iscsi_data *hdr = (struct iscsi_data *) buf;
+ u32 payload_length = ntoh24(hdr->dlength);
+
+ /*
+ * For DataSequenceInOrder=Yes: If the offset is greater than the global
+ * DataPDUInOrder=Yes offset counter in struct iscsi_cmd, a protocol
+ * error has occurred and we fail the connection.
+ *
+ * For DataSequenceInOrder=No: If the offset is greater than the per
+ * sequence DataPDUInOrder=Yes offset counter in struct iscsi_seq, a
+ * protocol error has occurred and we fail the connection.
+ */
+ if (conn->sess->sess_ops->DataSequenceInOrder) {
+ if (hdr->offset != cmd->write_data_done) {
+ pr_err("Command ITT: 0x%08x, received offset"
+ " %u different than expected %u.\n", cmd->init_task_tag,
+ hdr->offset, cmd->write_data_done);
+ recovery = 1;
+ goto recover;
+ }
+ } else {
+ struct iscsi_seq *seq = cmd->seq_ptr;
+
+ if (hdr->offset > seq->offset) {
+ pr_err("Command ITT: 0x%08x, received offset"
+ " %u greater than expected %u.\n", cmd->init_task_tag,
+ hdr->offset, seq->offset);
+ recovery = 1;
+ goto recover;
+ } else if (hdr->offset < seq->offset) {
+ pr_err("Command ITT: 0x%08x, received offset"
+ " %u less than expected %u, discarding payload.\n",
+ cmd->init_task_tag, hdr->offset, seq->offset);
+ dump = 1;
+ goto dump;
+ }
+ }
+
+ return DATAOUT_NORMAL;
+
+recover:
+ if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
+ pr_err("Unable to perform within-command recovery"
+ " while ERL=0.\n");
+ return DATAOUT_CANNOT_RECOVER;
+ }
+dump:
+ if (iscsit_dump_data_payload(conn, payload_length, 1) < 0)
+ return DATAOUT_CANNOT_RECOVER;
+
+ return (recovery) ? iscsit_recover_dataout_sequence(cmd,
+ hdr->offset, payload_length) :
+ (dump) ? DATAOUT_WITHIN_COMMAND_RECOVERY : DATAOUT_NORMAL;
+}
+
+static int iscsit_dataout_pre_datapduinorder_no(
+ struct iscsi_cmd *cmd,
+ unsigned char *buf)
+{
+ struct iscsi_pdu *pdu;
+ struct iscsi_data *hdr = (struct iscsi_data *) buf;
+ u32 payload_length = ntoh24(hdr->dlength);
+
+ pdu = iscsit_get_pdu_holder(cmd, hdr->offset, payload_length);
+ if (!pdu)
+ return DATAOUT_CANNOT_RECOVER;
+
+ cmd->pdu_ptr = pdu;
+
+ switch (pdu->status) {
+ case ISCSI_PDU_NOT_RECEIVED:
+ case ISCSI_PDU_CRC_FAILED:
+ case ISCSI_PDU_TIMED_OUT:
+ break;
+ case ISCSI_PDU_RECEIVED_OK:
+ pr_err("Command ITT: 0x%08x received already gotten"
+ " Offset: %u, Length: %u\n", cmd->init_task_tag,
+ hdr->offset, payload_length);
+ return iscsit_dump_data_payload(cmd->conn, payload_length, 1);
+ default:
+ return DATAOUT_CANNOT_RECOVER;
+ }
+
+ return DATAOUT_NORMAL;
+}
+
+static int iscsit_dataout_update_r2t(struct iscsi_cmd *cmd, u32 offset, u32 length)
+{
+ struct iscsi_r2t *r2t;
+
+ if (cmd->unsolicited_data)
+ return 0;
+
+ r2t = iscsit_get_r2t_for_eos(cmd, offset, length);
+ if (!r2t)
+ return -1;
+
+ spin_lock_bh(&cmd->r2t_lock);
+ r2t->seq_complete = 1;
+ cmd->outstanding_r2ts--;
+ spin_unlock_bh(&cmd->r2t_lock);
+
+ return 0;
+}
+
+static int iscsit_dataout_update_datapduinorder_no(
+ struct iscsi_cmd *cmd,
+ u32 data_sn,
+ int f_bit)
+{
+ int ret = 0;
+ struct iscsi_pdu *pdu = cmd->pdu_ptr;
+
+ pdu->data_sn = data_sn;
+
+ switch (pdu->status) {
+ case ISCSI_PDU_NOT_RECEIVED:
+ case ISCSI_PDU_CRC_FAILED:
+ case ISCSI_PDU_TIMED_OUT:
+ pdu->status = ISCSI_PDU_RECEIVED_OK;
+ break;
+ default:
+ return DATAOUT_CANNOT_RECOVER;
+ }
+
+ if (f_bit) {
+ ret = iscsit_dataout_datapduinorder_no_fbit(cmd, pdu);
+ if (ret == DATAOUT_CANNOT_RECOVER)
+ return ret;
+ }
+
+ return DATAOUT_NORMAL;
+}
+
+static int iscsit_dataout_post_crc_passed(
+ struct iscsi_cmd *cmd,
+ unsigned char *buf)
+{
+ int ret, send_r2t = 0;
+ struct iscsi_conn *conn = cmd->conn;
+ struct iscsi_seq *seq = NULL;
+ struct iscsi_data *hdr = (struct iscsi_data *) buf;
+ u32 payload_length = ntoh24(hdr->dlength);
+
+ if (cmd->unsolicited_data) {
+ if ((cmd->first_burst_len + payload_length) ==
+ conn->sess->sess_ops->FirstBurstLength) {
+ if (iscsit_dataout_update_r2t(cmd, hdr->offset,
+ payload_length) < 0)
+ return DATAOUT_CANNOT_RECOVER;
+ send_r2t = 1;
+ }
+
+ if (!conn->sess->sess_ops->DataPDUInOrder) {
+ ret = iscsit_dataout_update_datapduinorder_no(cmd,
+ hdr->datasn, (hdr->flags & ISCSI_FLAG_CMD_FINAL));
+ if (ret == DATAOUT_CANNOT_RECOVER)
+ return ret;
+ }
+
+ cmd->first_burst_len += payload_length;
+
+ if (conn->sess->sess_ops->DataSequenceInOrder)
+ cmd->data_sn++;
+ else {
+ seq = cmd->seq_ptr;
+ seq->data_sn++;
+ seq->offset += payload_length;
+ }
+
+ if (send_r2t) {
+ if (seq)
+ seq->status = DATAOUT_SEQUENCE_COMPLETE;
+ cmd->first_burst_len = 0;
+ cmd->unsolicited_data = 0;
+ }
+ } else {
+ if (conn->sess->sess_ops->DataSequenceInOrder) {
+ if ((cmd->next_burst_len + payload_length) ==
+ conn->sess->sess_ops->MaxBurstLength) {
+ if (iscsit_dataout_update_r2t(cmd, hdr->offset,
+ payload_length) < 0)
+ return DATAOUT_CANNOT_RECOVER;
+ send_r2t = 1;
+ }
+
+ if (!conn->sess->sess_ops->DataPDUInOrder) {
+ ret = iscsit_dataout_update_datapduinorder_no(
+ cmd, hdr->datasn,
+ (hdr->flags & ISCSI_FLAG_CMD_FINAL));
+ if (ret == DATAOUT_CANNOT_RECOVER)
+ return ret;
+ }
+
+ cmd->next_burst_len += payload_length;
+ cmd->data_sn++;
+
+ if (send_r2t)
+ cmd->next_burst_len = 0;
+ } else {
+ seq = cmd->seq_ptr;
+
+ if ((seq->next_burst_len + payload_length) ==
+ seq->xfer_len) {
+ if (iscsit_dataout_update_r2t(cmd, hdr->offset,
+ payload_length) < 0)
+ return DATAOUT_CANNOT_RECOVER;
+ send_r2t = 1;
+ }
+
+ if (!conn->sess->sess_ops->DataPDUInOrder) {
+ ret = iscsit_dataout_update_datapduinorder_no(
+ cmd, hdr->datasn,
+ (hdr->flags & ISCSI_FLAG_CMD_FINAL));
+ if (ret == DATAOUT_CANNOT_RECOVER)
+ return ret;
+ }
+
+ seq->data_sn++;
+ seq->offset += payload_length;
+ seq->next_burst_len += payload_length;
+
+ if (send_r2t) {
+ seq->next_burst_len = 0;
+ seq->status = DATAOUT_SEQUENCE_COMPLETE;
+ }
+ }
+ }
+
+ if (send_r2t && conn->sess->sess_ops->DataSequenceInOrder)
+ cmd->data_sn = 0;
+
+ cmd->write_data_done += payload_length;
+
+ return (cmd->write_data_done == cmd->data_length) ?
+ DATAOUT_SEND_TO_TRANSPORT : (send_r2t) ?
+ DATAOUT_SEND_R2T : DATAOUT_NORMAL;
+}
+
+static int iscsit_dataout_post_crc_failed(
+ struct iscsi_cmd *cmd,
+ unsigned char *buf)
+{
+ struct iscsi_conn *conn = cmd->conn;
+ struct iscsi_pdu *pdu;
+ struct iscsi_data *hdr = (struct iscsi_data *) buf;
+ u32 payload_length = ntoh24(hdr->dlength);
+
+ if (conn->sess->sess_ops->DataPDUInOrder)
+ goto recover;
+ /*
+ * The rest of this function is only called when DataPDUInOrder=No.
+ */
+ pdu = cmd->pdu_ptr;
+
+ switch (pdu->status) {
+ case ISCSI_PDU_NOT_RECEIVED:
+ pdu->status = ISCSI_PDU_CRC_FAILED;
+ break;
+ case ISCSI_PDU_CRC_FAILED:
+ break;
+ case ISCSI_PDU_TIMED_OUT:
+ pdu->status = ISCSI_PDU_CRC_FAILED;
+ break;
+ default:
+ return DATAOUT_CANNOT_RECOVER;
+ }
+
+recover:
+ return iscsit_recover_dataout_sequence(cmd, hdr->offset, payload_length);
+}
+
+/*
+ * Called from iscsit_handle_data_out() before DataOUT Payload is received
+ * and CRC computed.
+ */
+int iscsit_check_pre_dataout(
+ struct iscsi_cmd *cmd,
+ unsigned char *buf)
+{
+ int ret;
+ struct iscsi_conn *conn = cmd->conn;
+
+ ret = iscsit_dataout_within_command_recovery_check(cmd, buf);
+ if ((ret == DATAOUT_WITHIN_COMMAND_RECOVERY) ||
+ (ret == DATAOUT_CANNOT_RECOVER))
+ return ret;
+
+ ret = iscsit_dataout_check_datasn(cmd, buf);
+ if ((ret == DATAOUT_WITHIN_COMMAND_RECOVERY) ||
+ (ret == DATAOUT_CANNOT_RECOVER))
+ return ret;
+
+ if (cmd->unsolicited_data) {
+ ret = iscsit_dataout_check_unsolicited_sequence(cmd, buf);
+ if ((ret == DATAOUT_WITHIN_COMMAND_RECOVERY) ||
+ (ret == DATAOUT_CANNOT_RECOVER))
+ return ret;
+ } else {
+ ret = iscsit_dataout_check_sequence(cmd, buf);
+ if ((ret == DATAOUT_WITHIN_COMMAND_RECOVERY) ||
+ (ret == DATAOUT_CANNOT_RECOVER))
+ return ret;
+ }
+
+ return (conn->sess->sess_ops->DataPDUInOrder) ?
+ iscsit_dataout_pre_datapduinorder_yes(cmd, buf) :
+ iscsit_dataout_pre_datapduinorder_no(cmd, buf);
+}
+
+/*
+ * Called from iscsit_handle_data_out() after DataOUT Payload is received
+ * and CRC computed.
+ */
+int iscsit_check_post_dataout(
+ struct iscsi_cmd *cmd,
+ unsigned char *buf,
+ u8 data_crc_failed)
+{
+ struct iscsi_conn *conn = cmd->conn;
+
+ cmd->dataout_timeout_retries = 0;
+
+ if (!data_crc_failed)
+ return iscsit_dataout_post_crc_passed(cmd, buf);
+ else {
+ if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
+ pr_err("Unable to recover from DataOUT CRC"
+ " failure while ERL=0, closing session.\n");
+ iscsit_add_reject_from_cmd(ISCSI_REASON_DATA_DIGEST_ERROR,
+ 1, 0, buf, cmd);
+ return DATAOUT_CANNOT_RECOVER;
+ }
+
+ iscsit_add_reject_from_cmd(ISCSI_REASON_DATA_DIGEST_ERROR,
+ 0, 0, buf, cmd);
+ return iscsit_dataout_post_crc_failed(cmd, buf);
+ }
+}
+
+static void iscsit_handle_time2retain_timeout(unsigned long data)
+{
+ struct iscsi_session *sess = (struct iscsi_session *) data;
+ struct iscsi_portal_group *tpg = ISCSI_TPG_S(sess);
+ struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
+
+ spin_lock_bh(&se_tpg->session_lock);
+ if (sess->time2retain_timer_flags & ISCSI_TF_STOP) {
+ spin_unlock_bh(&se_tpg->session_lock);
+ return;
+ }
+ if (atomic_read(&sess->session_reinstatement)) {
+ pr_err("Exiting Time2Retain handler because"
+ " session_reinstatement=1\n");
+ spin_unlock_bh(&se_tpg->session_lock);
+ return;
+ }
+ sess->time2retain_timer_flags |= ISCSI_TF_EXPIRED;
+
+ pr_err("Time2Retain timer expired for SID: %u, cleaning up"
+ " iSCSI session.\n", sess->sid);
+ {
+ struct iscsi_tiqn *tiqn = tpg->tpg_tiqn;
+
+ if (tiqn) {
+ spin_lock(&tiqn->sess_err_stats.lock);
+ strcpy(tiqn->sess_err_stats.last_sess_fail_rem_name,
+ (void *)sess->sess_ops->InitiatorName);
+ tiqn->sess_err_stats.last_sess_failure_type =
+ ISCSI_SESS_ERR_CXN_TIMEOUT;
+ tiqn->sess_err_stats.cxn_timeout_errors++;
+ sess->conn_timeout_errors++;
+ spin_unlock(&tiqn->sess_err_stats.lock);
+ }
+ }
+
+ spin_unlock_bh(&se_tpg->session_lock);
+ iscsit_close_session(sess);
+}
+
+void iscsit_start_time2retain_handler(struct iscsi_session *sess)
+{
+ int tpg_active;
+ /*
+ * Only start the Time2Retain timer when the associated TPG is still
+ * in an ACTIVE (e.g. not disabled or shutdown) state.
+ */
+ spin_lock(&ISCSI_TPG_S(sess)->tpg_state_lock);
+ tpg_active = (ISCSI_TPG_S(sess)->tpg_state == TPG_STATE_ACTIVE);
+ spin_unlock(&ISCSI_TPG_S(sess)->tpg_state_lock);
+
+ if (!tpg_active)
+ return;
+
+ if (sess->time2retain_timer_flags & ISCSI_TF_RUNNING)
+ return;
+
+ pr_debug("Starting Time2Retain timer for %u seconds on"
+ " SID: %u\n", sess->sess_ops->DefaultTime2Retain, sess->sid);
+
+ init_timer(&sess->time2retain_timer);
+ sess->time2retain_timer.expires =
+ (get_jiffies_64() + sess->sess_ops->DefaultTime2Retain * HZ);
+ sess->time2retain_timer.data = (unsigned long)sess;
+ sess->time2retain_timer.function = iscsit_handle_time2retain_timeout;
+ sess->time2retain_timer_flags &= ~ISCSI_TF_STOP;
+ sess->time2retain_timer_flags |= ISCSI_TF_RUNNING;
+ add_timer(&sess->time2retain_timer);
+}
+
+/*
+ * Called with spin_lock_bh(&struct se_portal_group->session_lock) held
+ */
+int iscsit_stop_time2retain_timer(struct iscsi_session *sess)
+{
+ struct iscsi_portal_group *tpg = ISCSI_TPG_S(sess);
+ struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
+
+ if (sess->time2retain_timer_flags & ISCSI_TF_EXPIRED)
+ return -1;
+
+ if (!(sess->time2retain_timer_flags & ISCSI_TF_RUNNING))
+ return 0;
+
+ sess->time2retain_timer_flags |= ISCSI_TF_STOP;
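+ /*
+ * Drop se_tpg->session_lock across del_timer_sync(): the expired
+ * timer handler takes the same lock, so waiting for it while
+ * holding the lock could deadlock. ISCSI_TF_STOP makes a handler
+ * that has already fired bail out early.
+ */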
+ spin_unlock_bh(&se_tpg->session_lock);
+
+ del_timer_sync(&sess->time2retain_timer);
+
+ spin_lock_bh(&se_tpg->session_lock);
+ sess->time2retain_timer_flags &= ~ISCSI_TF_RUNNING;
+ pr_debug("Stopped Time2Retain Timer for SID: %u\n",
+ sess->sid);
+ return 0;
+}
+
+void iscsit_connection_reinstatement_rcfr(struct iscsi_conn *conn)
+{
+ spin_lock_bh(&conn->state_lock);
+ if (atomic_read(&conn->connection_exit)) {
+ spin_unlock_bh(&conn->state_lock);
+ goto sleep;
+ }
+
+ if (atomic_read(&conn->transport_failed)) {
+ spin_unlock_bh(&conn->state_lock);
+ goto sleep;
+ }
+ spin_unlock_bh(&conn->state_lock);
+
+ iscsi_thread_set_force_reinstatement(conn);
+
+sleep:
+ wait_for_completion(&conn->conn_wait_rcfr_comp);
+ complete(&conn->conn_post_wait_comp);
+}
+
+void iscsit_cause_connection_reinstatement(struct iscsi_conn *conn, int sleep)
+{
+ spin_lock_bh(&conn->state_lock);
+ if (atomic_read(&conn->connection_exit)) {
+ spin_unlock_bh(&conn->state_lock);
+ return;
+ }
+
+ if (atomic_read(&conn->transport_failed)) {
+ spin_unlock_bh(&conn->state_lock);
+ return;
+ }
+
+ if (atomic_read(&conn->connection_reinstatement)) {
+ spin_unlock_bh(&conn->state_lock);
+ return;
+ }
+
+ if (iscsi_thread_set_force_reinstatement(conn) < 0) {
+ spin_unlock_bh(&conn->state_lock);
+ return;
+ }
+
+ atomic_set(&conn->connection_reinstatement, 1);
+ if (!sleep) {
+ spin_unlock_bh(&conn->state_lock);
+ return;
+ }
+
+ atomic_set(&conn->sleep_on_conn_wait_comp, 1);
+ spin_unlock_bh(&conn->state_lock);
+
+ wait_for_completion(&conn->conn_wait_comp);
+ complete(&conn->conn_post_wait_comp);
+}
+
+void iscsit_fall_back_to_erl0(struct iscsi_session *sess)
+{
+ pr_debug("Falling back to ErrorRecoveryLevel=0 for SID:"
+ " %u\n", sess->sid);
+
+ atomic_set(&sess->session_fall_back_to_erl0, 1);
+}
+
+static void iscsit_handle_connection_cleanup(struct iscsi_conn *conn)
+{
+ struct iscsi_session *sess = conn->sess;
+
+ if ((sess->sess_ops->ErrorRecoveryLevel == 2) &&
+ !atomic_read(&sess->session_reinstatement) &&
+ !atomic_read(&sess->session_fall_back_to_erl0))
+ iscsit_connection_recovery_transport_reset(conn);
+ else {
+ pr_debug("Performing cleanup for failed iSCSI"
+ " Connection ID: %hu from %s\n", conn->cid,
+ sess->sess_ops->InitiatorName);
+ iscsit_close_connection(conn);
+ }
+}
+
+void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn)
+{
+ spin_lock_bh(&conn->state_lock);
+ if (atomic_read(&conn->connection_exit)) {
+ spin_unlock_bh(&conn->state_lock);
+ return;
+ }
+ atomic_set(&conn->connection_exit, 1);
+
+ if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) {
+ spin_unlock_bh(&conn->state_lock);
+ iscsit_close_connection(conn);
+ return;
+ }
+
+ if (conn->conn_state == TARG_CONN_STATE_CLEANUP_WAIT) {
+ spin_unlock_bh(&conn->state_lock);
+ return;
+ }
+
+ pr_debug("Moving to TARG_CONN_STATE_CLEANUP_WAIT.\n");
+ conn->conn_state = TARG_CONN_STATE_CLEANUP_WAIT;
+ spin_unlock_bh(&conn->state_lock);
+
+ iscsit_handle_connection_cleanup(conn);
+}
+
+/*
+ * This is the simple function that makes the magic of
+ * sync and steering happen in the follow paradoxical order:
+ *
+ * 0) Receive conn->of_marker (bytes left until next OFMarker)
+ * bytes into an offload buffer. When we pass the exact number
+ * of bytes in conn->of_marker, iscsit_dump_data_payload() and hence
+ * rx_data() will automatically receive the identical u32 marker
+ * values and store them in conn->of_marker_offset;
+ * 1) Now conn->of_marker_offset will contain the offset to the start
+ * of the next iSCSI PDU. Dump these remaining bytes into another
+ * offload buffer.
+ * 2) We are done!
+ * Next byte in the TCP stream will contain the next iSCSI PDU!
+ * Cool Huh?!
+ */
+int iscsit_recover_from_unknown_opcode(struct iscsi_conn *conn)
+{
+ /*
+ * Make sure the number of bytes remaining until the next marker is sane.
+ */
+ if (conn->of_marker > (conn->conn_ops->OFMarkInt * 4)) {
+ pr_err("Remaining bytes to OFMarker: %u exceeds"
+ " OFMarkInt bytes: %u.\n", conn->of_marker,
+ conn->conn_ops->OFMarkInt * 4);
+ return -1;
+ }
+
+ pr_debug("Advancing %u bytes in TCP stream to get to the"
+ " next OFMarker.\n", conn->of_marker);
+
+ if (iscsit_dump_data_payload(conn, conn->of_marker, 0) < 0)
+ return -1;
+
+ /*
+ * Make sure the offset marker we retrieved is a valid value.
+ */
+ if (conn->of_marker_offset > (ISCSI_HDR_LEN + (ISCSI_CRC_LEN * 2) +
+ conn->conn_ops->MaxRecvDataSegmentLength)) {
+ pr_err("OfMarker offset value: %u exceeds limit.\n",
+ conn->of_marker_offset);
+ return -1;
+ }
+
+ pr_debug("Discarding %u bytes of TCP stream to get to the"
+ " next iSCSI Opcode.\n", conn->of_marker_offset);
+
+ if (iscsit_dump_data_payload(conn, conn->of_marker_offset, 0) < 0)
+ return -1;
+
+ return 0;
+}
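+
+/*
+ * Example (illustrative; values assumed): with a negotiated OFMarkInt
+ * of 2048 (counted in 4-byte words, i.e. a marker every 8192 bytes),
+ * conn->of_marker may never exceed 8192; anything larger means the
+ * marker state is corrupt and the connection cannot recover.
+ */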
diff --git a/drivers/target/iscsi/iscsi_target_erl0.h b/drivers/target/iscsi/iscsi_target_erl0.h
new file mode 100644
index 00000000000..21acc9a0637
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_erl0.h
@@ -0,0 +1,15 @@
+#ifndef ISCSI_TARGET_ERL0_H
+#define ISCSI_TARGET_ERL0_H
+
+extern void iscsit_set_dataout_sequence_values(struct iscsi_cmd *);
+extern int iscsit_check_pre_dataout(struct iscsi_cmd *, unsigned char *);
+extern int iscsit_check_post_dataout(struct iscsi_cmd *, unsigned char *, u8);
+extern void iscsit_start_time2retain_handler(struct iscsi_session *);
+extern int iscsit_stop_time2retain_timer(struct iscsi_session *);
+extern void iscsit_connection_reinstatement_rcfr(struct iscsi_conn *);
+extern void iscsit_cause_connection_reinstatement(struct iscsi_conn *, int);
+extern void iscsit_fall_back_to_erl0(struct iscsi_session *);
+extern void iscsit_take_action_for_connection_exit(struct iscsi_conn *);
+extern int iscsit_recover_from_unknown_opcode(struct iscsi_conn *);
+
+#endif /*** ISCSI_TARGET_ERL0_H ***/
diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c
new file mode 100644
index 00000000000..980650792cf
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_erl1.c
@@ -0,0 +1,1299 @@
+/*******************************************************************************
+ * This file contains error recovery level one used by the iSCSI Target driver.
+ *
+ * © Copyright 2007-2011 RisingTide Systems LLC.
+ *
+ * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ *
+ * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ ******************************************************************************/
+
+#include <linux/list.h>
+#include <scsi/iscsi_proto.h>
+#include <target/target_core_base.h>
+#include <target/target_core_transport.h>
+
+#include "iscsi_target_core.h"
+#include "iscsi_target_seq_pdu_list.h"
+#include "iscsi_target_datain_values.h"
+#include "iscsi_target_device.h"
+#include "iscsi_target_tpg.h"
+#include "iscsi_target_util.h"
+#include "iscsi_target_erl0.h"
+#include "iscsi_target_erl1.h"
+#include "iscsi_target_erl2.h"
+#include "iscsi_target.h"
+
+#define OFFLOAD_BUF_SIZE 32768
+
+/*
+ * Used to dump excess datain payload for certain error recovery
+ * situations. Receives at most OFFLOAD_BUF_SIZE bytes of datain per
+ * rx_data() call.
+ *
+ * dump_padding_digest denotes whether padding and data digests need
+ * to be dumped.
+ */
+int iscsit_dump_data_payload(
+ struct iscsi_conn *conn,
+ u32 buf_len,
+ int dump_padding_digest)
+{
+ char *buf, pad_bytes[4];
+ int ret = DATAOUT_WITHIN_COMMAND_RECOVERY, rx_got;
+ u32 length, padding, offset = 0, size;
+ struct kvec iov;
+
+ length = (buf_len > OFFLOAD_BUF_SIZE) ? OFFLOAD_BUF_SIZE : buf_len;
+
+ buf = kzalloc(length, GFP_ATOMIC);
+ if (!buf) {
+ pr_err("Unable to allocate %u bytes for offload"
+ " buffer.\n", length);
+ return -1;
+ }
+ memset(&iov, 0, sizeof(struct kvec));
+
+ while (offset < buf_len) {
+ size = ((offset + length) > buf_len) ?
+ (buf_len - offset) : length;
+
+ iov.iov_len = size;
+ iov.iov_base = buf;
+
+ rx_got = rx_data(conn, &iov, 1, size);
+ if (rx_got != size) {
+ ret = DATAOUT_CANNOT_RECOVER;
+ goto out;
+ }
+
+ offset += size;
+ }
+
+ if (!dump_padding_digest)
+ goto out;
+
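+ /* Bytes needed to pad buf_len up to a 4-byte boundary, e.g. 13 -> 3. */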
+ padding = ((-buf_len) & 3);
+ if (padding != 0) {
+ iov.iov_len = padding;
+ iov.iov_base = pad_bytes;
+
+ rx_got = rx_data(conn, &iov, 1, padding);
+ if (rx_got != padding) {
+ ret = DATAOUT_CANNOT_RECOVER;
+ goto out;
+ }
+ }
+
+ if (conn->conn_ops->DataDigest) {
+ u32 data_crc;
+
+ iov.iov_len = ISCSI_CRC_LEN;
+ iov.iov_base = &data_crc;
+
+ rx_got = rx_data(conn, &iov, 1, ISCSI_CRC_LEN);
+ if (rx_got != ISCSI_CRC_LEN) {
+ ret = DATAOUT_CANNOT_RECOVER;
+ goto out;
+ }
+ }
+
+out:
+ kfree(buf);
+ return ret;
+}
+
+/*
+ * Used for retransmitting R2Ts from a R2T SNACK request.
+ */
+static int iscsit_send_recovery_r2t_for_snack(
+ struct iscsi_cmd *cmd,
+ struct iscsi_r2t *r2t)
+{
+ /*
+ * If the struct iscsi_r2t has not been sent yet, we can safely
+ * ignore retransmission of the R2TSN in question.
+ */
+ spin_lock_bh(&cmd->r2t_lock);
+ if (!r2t->sent_r2t) {
+ spin_unlock_bh(&cmd->r2t_lock);
+ return 0;
+ }
+ r2t->sent_r2t = 0;
+ spin_unlock_bh(&cmd->r2t_lock);
+
+ iscsit_add_cmd_to_immediate_queue(cmd, cmd->conn, ISTATE_SEND_R2T);
+
+ return 0;
+}
+
+static int iscsit_handle_r2t_snack(
+ struct iscsi_cmd *cmd,
+ unsigned char *buf,
+ u32 begrun,
+ u32 runlength)
+{
+ u32 last_r2tsn;
+ struct iscsi_r2t *r2t;
+
+ /*
+ * Make sure the initiator is not requesting retransmission
+ * of R2TSNs already acknowledged by a TMR TASK_REASSIGN.
+ */
+ if ((cmd->cmd_flags & ICF_GOT_DATACK_SNACK) &&
+ (begrun <= cmd->acked_data_sn)) {
+ pr_err("ITT: 0x%08x, R2T SNACK requesting"
+ " retransmission of R2TSN: 0x%08x to 0x%08x but already"
+ " acked to R2TSN: 0x%08x by TMR TASK_REASSIGN,"
+ " protocol error.\n", cmd->init_task_tag, begrun,
+ (begrun + runlength), cmd->acked_data_sn);
+
+ return iscsit_add_reject_from_cmd(
+ ISCSI_REASON_PROTOCOL_ERROR,
+ 1, 0, buf, cmd);
+ }
+
+ if (runlength) {
+ if ((begrun + runlength) > cmd->r2t_sn) {
+ pr_err("Command ITT: 0x%08x received R2T SNACK"
+ " with BegRun: 0x%08x, RunLength: 0x%08x, exceeds"
+ " current R2TSN: 0x%08x, protocol error.\n",
+ cmd->init_task_tag, begrun, runlength, cmd->r2t_sn);
+ return iscsit_add_reject_from_cmd(
+ ISCSI_REASON_BOOKMARK_INVALID, 1, 0, buf, cmd);
+ }
+ last_r2tsn = (begrun + runlength);
+ } else
+ last_r2tsn = cmd->r2t_sn;
+
+ while (begrun < last_r2tsn) {
+ r2t = iscsit_get_holder_for_r2tsn(cmd, begrun);
+ if (!r2t)
+ return -1;
+ if (iscsit_send_recovery_r2t_for_snack(cmd, r2t) < 0)
+ return -1;
+
+ begrun++;
+ }
+
+ return 0;
+}
+
+/*
+ * Generates Offsets and NextBurstLength based on Begrun and Runlength
+ * carried in a Data SNACK or ExpDataSN in TMR TASK_REASSIGN.
+ *
+ * For DataSequenceInOrder=Yes and DataPDUInOrder=[Yes,No] only.
+ *
+ * FIXME: How is this handled for a RData SNACK?
+ */
+int iscsit_create_recovery_datain_values_datasequenceinorder_yes(
+ struct iscsi_cmd *cmd,
+ struct iscsi_datain_req *dr)
+{
+ u32 data_sn = 0, data_sn_count = 0;
+ u32 pdu_start = 0, seq_no = 0;
+ u32 begrun = dr->begrun;
+ struct iscsi_conn *conn = cmd->conn;
+
+ while (begrun > data_sn++) {
+ data_sn_count++;
+ if ((dr->next_burst_len +
+ conn->conn_ops->MaxRecvDataSegmentLength) <
+ conn->sess->sess_ops->MaxBurstLength) {
+ dr->read_data_done +=
+ conn->conn_ops->MaxRecvDataSegmentLength;
+ dr->next_burst_len +=
+ conn->conn_ops->MaxRecvDataSegmentLength;
+ } else {
+ dr->read_data_done +=
+ (conn->sess->sess_ops->MaxBurstLength -
+ dr->next_burst_len);
+ dr->next_burst_len = 0;
+ pdu_start += data_sn_count;
+ data_sn_count = 0;
+ seq_no++;
+ }
+ }
+
+ if (!conn->sess->sess_ops->DataPDUInOrder) {
+ cmd->seq_no = seq_no;
+ cmd->pdu_start = pdu_start;
+ cmd->pdu_send_order = data_sn_count;
+ }
+
+ return 0;
+}
+
+/*
+ * Generates Offsets and NextBurstLength based on Begrun and Runlength
+ * carried in a Data SNACK or ExpDataSN in TMR TASK_REASSIGN.
+ *
+ * For DataSequenceInOrder=No and DataPDUInOrder=[Yes,No] only.
+ *
+ * FIXME: How is this handled for a RData SNACK?
+ */
+int iscsit_create_recovery_datain_values_datasequenceinorder_no(
+ struct iscsi_cmd *cmd,
+ struct iscsi_datain_req *dr)
+{
+ int found_seq = 0, i;
+ u32 data_sn, read_data_done = 0, seq_send_order = 0;
+ u32 begrun = dr->begrun;
+ u32 runlength = dr->runlength;
+ struct iscsi_conn *conn = cmd->conn;
+ struct iscsi_seq *first_seq = NULL, *seq = NULL;
+
+ if (!cmd->seq_list) {
+ pr_err("struct iscsi_cmd->seq_list is NULL!\n");
+ return -1;
+ }
+
+ /*
+ * Calculate read_data_done for all sequences containing a
+ * first_datasn and last_datasn less than the BegRun.
+ *
+ * Locate the struct iscsi_seq the BegRun lies within and calculate
+	 * NextBurstLength up to the DataSN based on MaxRecvDataSegmentLength.
+ *
+ * Also use struct iscsi_seq->seq_send_order to determine where to start.
+ */
+ for (i = 0; i < cmd->seq_count; i++) {
+ seq = &cmd->seq_list[i];
+
+ if (!seq->seq_send_order)
+ first_seq = seq;
+
+ /*
+ * No data has been transferred for this DataIN sequence, so the
+ * seq->first_datasn and seq->last_datasn have not been set.
+ */
+ if (!seq->sent) {
+#if 0
+ pr_err("Ignoring non-sent sequence 0x%08x ->"
+ " 0x%08x\n\n", seq->first_datasn,
+ seq->last_datasn);
+#endif
+ continue;
+ }
+
+ /*
+		 * This DataIN sequence precedes the received BegRun, add the
+ * total xfer_len of the sequence to read_data_done and reset
+ * seq->pdu_send_order.
+ */
+ if ((seq->first_datasn < begrun) &&
+ (seq->last_datasn < begrun)) {
+#if 0
+ pr_err("Pre BegRun sequence 0x%08x ->"
+ " 0x%08x\n", seq->first_datasn,
+ seq->last_datasn);
+#endif
+ read_data_done += cmd->seq_list[i].xfer_len;
+ seq->next_burst_len = seq->pdu_send_order = 0;
+ continue;
+ }
+
+ /*
+ * The BegRun lies within this DataIN sequence.
+ */
+ if ((seq->first_datasn <= begrun) &&
+ (seq->last_datasn >= begrun)) {
+#if 0
+ pr_err("Found sequence begrun: 0x%08x in"
+ " 0x%08x -> 0x%08x\n", begrun,
+ seq->first_datasn, seq->last_datasn);
+#endif
+ seq_send_order = seq->seq_send_order;
+ data_sn = seq->first_datasn;
+ seq->next_burst_len = seq->pdu_send_order = 0;
+ found_seq = 1;
+
+ /*
+ * For DataPDUInOrder=Yes, while the first DataSN of
+ * the sequence is less than the received BegRun, add
+ * the MaxRecvDataSegmentLength to read_data_done and
+ * to the sequence's next_burst_len;
+ *
+ * For DataPDUInOrder=No, while the first DataSN of the
+ * sequence is less than the received BegRun, find the
+ * struct iscsi_pdu of the DataSN in question and add the
+ * MaxRecvDataSegmentLength to read_data_done and to the
+ * sequence's next_burst_len;
+ */
+ if (conn->sess->sess_ops->DataPDUInOrder) {
+ while (data_sn < begrun) {
+ seq->pdu_send_order++;
+ read_data_done +=
+ conn->conn_ops->MaxRecvDataSegmentLength;
+ seq->next_burst_len +=
+ conn->conn_ops->MaxRecvDataSegmentLength;
+ data_sn++;
+ }
+ } else {
+ int j;
+ struct iscsi_pdu *pdu;
+
+ while (data_sn < begrun) {
+ seq->pdu_send_order++;
+
+ for (j = 0; j < seq->pdu_count; j++) {
+ pdu = &cmd->pdu_list[
+ seq->pdu_start + j];
+ if (pdu->data_sn == data_sn) {
+ read_data_done +=
+ pdu->length;
+ seq->next_burst_len +=
+ pdu->length;
+ }
+ }
+ data_sn++;
+ }
+ }
+ continue;
+ }
+
+ /*
+		 * This DataIN sequence starts beyond the received BegRun,
+ * reset seq->pdu_send_order and continue.
+ */
+ if ((seq->first_datasn > begrun) ||
+ (seq->last_datasn > begrun)) {
+#if 0
+ pr_err("Post BegRun sequence 0x%08x -> 0x%08x\n",
+ seq->first_datasn, seq->last_datasn);
+#endif
+ seq->next_burst_len = seq->pdu_send_order = 0;
+ continue;
+ }
+ }
+
+ if (!found_seq) {
+ if (!begrun) {
+ if (!first_seq) {
+ pr_err("ITT: 0x%08x, Begrun: 0x%08x"
+ " but first_seq is NULL\n",
+ cmd->init_task_tag, begrun);
+ return -1;
+ }
+ seq_send_order = first_seq->seq_send_order;
+			first_seq->next_burst_len = first_seq->pdu_send_order = 0;
+ goto done;
+ }
+
+ pr_err("Unable to locate struct iscsi_seq for ITT: 0x%08x,"
+ " BegRun: 0x%08x, RunLength: 0x%08x while"
+ " DataSequenceInOrder=No and DataPDUInOrder=%s.\n",
+ cmd->init_task_tag, begrun, runlength,
+ (conn->sess->sess_ops->DataPDUInOrder) ? "Yes" : "No");
+ return -1;
+ }
+
+done:
+ dr->read_data_done = read_data_done;
+ dr->seq_send_order = seq_send_order;
+
+ return 0;
+}
+
+static int iscsit_handle_recovery_datain(
+ struct iscsi_cmd *cmd,
+ unsigned char *buf,
+ u32 begrun,
+ u32 runlength)
+{
+ struct iscsi_conn *conn = cmd->conn;
+ struct iscsi_datain_req *dr;
+ struct se_cmd *se_cmd = &cmd->se_cmd;
+
+ if (!atomic_read(&se_cmd->t_transport_complete)) {
+ pr_err("Ignoring ITT: 0x%08x Data SNACK\n",
+ cmd->init_task_tag);
+ return 0;
+ }
+
+ /*
+ * Make sure the initiator is not requesting retransmission
+ * of DataSNs already acknowledged by a Data ACK SNACK.
+ */
+ if ((cmd->cmd_flags & ICF_GOT_DATACK_SNACK) &&
+ (begrun <= cmd->acked_data_sn)) {
+ pr_err("ITT: 0x%08x, Data SNACK requesting"
+ " retransmission of DataSN: 0x%08x to 0x%08x but"
+ " already acked to DataSN: 0x%08x by Data ACK SNACK,"
+ " protocol error.\n", cmd->init_task_tag, begrun,
+ (begrun + runlength), cmd->acked_data_sn);
+
+ return iscsit_add_reject_from_cmd(ISCSI_REASON_PROTOCOL_ERROR,
+ 1, 0, buf, cmd);
+ }
+
+ /*
+ * Make sure BegRun and RunLength in the Data SNACK are sane.
+ * Note: (cmd->data_sn - 1) will carry the maximum DataSN sent.
+ */
+ if ((begrun + runlength) > (cmd->data_sn - 1)) {
+ pr_err("Initiator requesting BegRun: 0x%08x, RunLength"
+ ": 0x%08x greater than maximum DataSN: 0x%08x.\n",
+ begrun, runlength, (cmd->data_sn - 1));
+ return iscsit_add_reject_from_cmd(ISCSI_REASON_BOOKMARK_INVALID,
+ 1, 0, buf, cmd);
+ }
+
+ dr = iscsit_allocate_datain_req();
+ if (!dr)
+ return iscsit_add_reject_from_cmd(ISCSI_REASON_BOOKMARK_NO_RESOURCES,
+ 1, 0, buf, cmd);
+
+ dr->data_sn = dr->begrun = begrun;
+ dr->runlength = runlength;
+ dr->generate_recovery_values = 1;
+ dr->recovery = DATAIN_WITHIN_COMMAND_RECOVERY;
+
+ iscsit_attach_datain_req(cmd, dr);
+
+ cmd->i_state = ISTATE_SEND_DATAIN;
+ iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
+
+ return 0;
+}
+
+int iscsit_handle_recovery_datain_or_r2t(
+ struct iscsi_conn *conn,
+ unsigned char *buf,
+ u32 init_task_tag,
+ u32 targ_xfer_tag,
+ u32 begrun,
+ u32 runlength)
+{
+ struct iscsi_cmd *cmd;
+
+ cmd = iscsit_find_cmd_from_itt(conn, init_task_tag);
+ if (!cmd)
+ return 0;
+
+ /*
+ * FIXME: This will not work for bidi commands.
+ */
+ switch (cmd->data_direction) {
+ case DMA_TO_DEVICE:
+ return iscsit_handle_r2t_snack(cmd, buf, begrun, runlength);
+ case DMA_FROM_DEVICE:
+ return iscsit_handle_recovery_datain(cmd, buf, begrun,
+ runlength);
+ default:
+ pr_err("Unknown cmd->data_direction: 0x%02x\n",
+ cmd->data_direction);
+ return -1;
+ }
+
+ return 0;
+}
+
+/* #warning FIXME: Status SNACK needs to be dependent on OPCODE!!! */
+int iscsit_handle_status_snack(
+ struct iscsi_conn *conn,
+ u32 init_task_tag,
+ u32 targ_xfer_tag,
+ u32 begrun,
+ u32 runlength)
+{
+ struct iscsi_cmd *cmd = NULL;
+ u32 last_statsn;
+ int found_cmd;
+
+ if (conn->exp_statsn > begrun) {
+ pr_err("Got Status SNACK Begrun: 0x%08x, RunLength:"
+ " 0x%08x but already got ExpStatSN: 0x%08x on CID:"
+ " %hu.\n", begrun, runlength, conn->exp_statsn,
+ conn->cid);
+ return 0;
+ }
+
+ last_statsn = (!runlength) ? conn->stat_sn : (begrun + runlength);
+
+ while (begrun < last_statsn) {
+ found_cmd = 0;
+
+ spin_lock_bh(&conn->cmd_lock);
+ list_for_each_entry(cmd, &conn->conn_cmd_list, i_list) {
+ if (cmd->stat_sn == begrun) {
+ found_cmd = 1;
+ break;
+ }
+ }
+ spin_unlock_bh(&conn->cmd_lock);
+
+ if (!found_cmd) {
+ pr_err("Unable to find StatSN: 0x%08x for"
+ " a Status SNACK, assuming this was a"
+ " protactic SNACK for an untransmitted"
+ " StatSN, ignoring.\n", begrun);
+ begrun++;
+ continue;
+ }
+
+ spin_lock_bh(&cmd->istate_lock);
+ if (cmd->i_state == ISTATE_SEND_DATAIN) {
+ spin_unlock_bh(&cmd->istate_lock);
+ pr_err("Ignoring Status SNACK for BegRun:"
+ " 0x%08x, RunLength: 0x%08x, assuming this was"
+ " a protactic SNACK for an untransmitted"
+ " StatSN\n", begrun, runlength);
+ begrun++;
+ continue;
+ }
+ spin_unlock_bh(&cmd->istate_lock);
+
+ cmd->i_state = ISTATE_SEND_STATUS_RECOVERY;
+ iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
+ begrun++;
+ }
+
+ return 0;
+}
+
+int iscsit_handle_data_ack(
+ struct iscsi_conn *conn,
+ u32 targ_xfer_tag,
+ u32 begrun,
+ u32 runlength)
+{
+ struct iscsi_cmd *cmd = NULL;
+
+ cmd = iscsit_find_cmd_from_ttt(conn, targ_xfer_tag);
+ if (!cmd) {
+ pr_err("Data ACK SNACK for TTT: 0x%08x is"
+ " invalid.\n", targ_xfer_tag);
+ return -1;
+ }
+
+ if (begrun <= cmd->acked_data_sn) {
+ pr_err("ITT: 0x%08x Data ACK SNACK BegRUN: 0x%08x is"
+ " less than the already acked DataSN: 0x%08x.\n",
+ cmd->init_task_tag, begrun, cmd->acked_data_sn);
+ return -1;
+ }
+
+ /*
+ * For Data ACK SNACK, BegRun is the next expected DataSN.
+ * (see iSCSI v19: 10.16.6)
+ */
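+	/*
+	 * E.g. a Data ACK SNACK with BegRun 5 acknowledges DataSNs 0
+	 * through 4, so acked_data_sn is set to 4 below.
+	 */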
+ cmd->cmd_flags |= ICF_GOT_DATACK_SNACK;
+ cmd->acked_data_sn = (begrun - 1);
+
+ pr_debug("Received Data ACK SNACK for ITT: 0x%08x,"
+ " updated acked DataSN to 0x%08x.\n",
+ cmd->init_task_tag, cmd->acked_data_sn);
+
+ return 0;
+}
+
+static int iscsit_send_recovery_r2t(
+ struct iscsi_cmd *cmd,
+ u32 offset,
+ u32 xfer_len)
+{
+ int ret;
+
+ spin_lock_bh(&cmd->r2t_lock);
+ ret = iscsit_add_r2t_to_list(cmd, offset, xfer_len, 1, 0);
+ spin_unlock_bh(&cmd->r2t_lock);
+
+ return ret;
+}
+
+int iscsit_dataout_datapduinorder_no_fbit(
+ struct iscsi_cmd *cmd,
+ struct iscsi_pdu *pdu)
+{
+ int i, send_recovery_r2t = 0, recovery = 0;
+ u32 length = 0, offset = 0, pdu_count = 0, xfer_len = 0;
+ struct iscsi_conn *conn = cmd->conn;
+ struct iscsi_pdu *first_pdu = NULL;
+
+ /*
+	 * Get a struct iscsi_pdu pointer to the first PDU, and the total PDU count
+ * of the DataOUT sequence.
+ */
+ if (conn->sess->sess_ops->DataSequenceInOrder) {
+ for (i = 0; i < cmd->pdu_count; i++) {
+ if (cmd->pdu_list[i].seq_no == pdu->seq_no) {
+ if (!first_pdu)
+ first_pdu = &cmd->pdu_list[i];
+ xfer_len += cmd->pdu_list[i].length;
+ pdu_count++;
+ } else if (pdu_count)
+ break;
+ }
+ } else {
+ struct iscsi_seq *seq = cmd->seq_ptr;
+
+ first_pdu = &cmd->pdu_list[seq->pdu_start];
+ pdu_count = seq->pdu_count;
+ }
+
+ if (!first_pdu || !pdu_count)
+ return DATAOUT_CANNOT_RECOVER;
+
+ /*
+ * Loop through the ending DataOUT Sequence checking each struct iscsi_pdu.
+	 * The following logic batches runs of PDUs that were not received.
+ */
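+	/*
+	 * E.g. a sequence whose PDUs read RECEIVED_OK, NOT_RECEIVED,
+	 * NOT_RECEIVED, RECEIVED_OK produces a single recovery R2T
+	 * covering the offset and combined length of the two missing
+	 * PDUs.
+	 */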
+ for (i = 0; i < pdu_count; i++) {
+ if (first_pdu[i].status == ISCSI_PDU_RECEIVED_OK) {
+ if (!send_recovery_r2t)
+ continue;
+
+ if (iscsit_send_recovery_r2t(cmd, offset, length) < 0)
+ return DATAOUT_CANNOT_RECOVER;
+
+ send_recovery_r2t = length = offset = 0;
+ continue;
+ }
+ /*
+ * Set recovery = 1 for any missing, CRC failed, or timed
+ * out PDUs to let the DataOUT logic know that this sequence
+ * has not been completed yet.
+ *
+ * Also, only send a Recovery R2T for ISCSI_PDU_NOT_RECEIVED.
+ * We assume if the PDU either failed CRC or timed out
+ * that a Recovery R2T has already been sent.
+ */
+ recovery = 1;
+
+ if (first_pdu[i].status != ISCSI_PDU_NOT_RECEIVED)
+ continue;
+
+ if (!offset)
+ offset = first_pdu[i].offset;
+ length += first_pdu[i].length;
+
+ send_recovery_r2t = 1;
+ }
+
+ if (send_recovery_r2t)
+ if (iscsit_send_recovery_r2t(cmd, offset, length) < 0)
+ return DATAOUT_CANNOT_RECOVER;
+
+ return (!recovery) ? DATAOUT_NORMAL : DATAOUT_WITHIN_COMMAND_RECOVERY;
+}
+
+static int iscsit_recalculate_dataout_values(
+ struct iscsi_cmd *cmd,
+ u32 pdu_offset,
+ u32 pdu_length,
+ u32 *r2t_offset,
+ u32 *r2t_length)
+{
+ int i;
+ struct iscsi_conn *conn = cmd->conn;
+ struct iscsi_pdu *pdu = NULL;
+
+ if (conn->sess->sess_ops->DataSequenceInOrder) {
+ cmd->data_sn = 0;
+
+ if (conn->sess->sess_ops->DataPDUInOrder) {
+ *r2t_offset = cmd->write_data_done;
+ *r2t_length = (cmd->seq_end_offset -
+ cmd->write_data_done);
+ return 0;
+ }
+
+ *r2t_offset = cmd->seq_start_offset;
+ *r2t_length = (cmd->seq_end_offset - cmd->seq_start_offset);
+
+ for (i = 0; i < cmd->pdu_count; i++) {
+ pdu = &cmd->pdu_list[i];
+
+ if (pdu->status != ISCSI_PDU_RECEIVED_OK)
+ continue;
+
+ if ((pdu->offset >= cmd->seq_start_offset) &&
+ ((pdu->offset + pdu->length) <=
+ cmd->seq_end_offset)) {
+ if (!cmd->unsolicited_data)
+ cmd->next_burst_len -= pdu->length;
+ else
+ cmd->first_burst_len -= pdu->length;
+
+ cmd->write_data_done -= pdu->length;
+ pdu->status = ISCSI_PDU_NOT_RECEIVED;
+ }
+ }
+ } else {
+ struct iscsi_seq *seq = NULL;
+
+ seq = iscsit_get_seq_holder(cmd, pdu_offset, pdu_length);
+ if (!seq)
+ return -1;
+
+ *r2t_offset = seq->orig_offset;
+ *r2t_length = seq->xfer_len;
+
+ cmd->write_data_done -= (seq->offset - seq->orig_offset);
+ if (cmd->immediate_data)
+ cmd->first_burst_len = cmd->write_data_done;
+
+ seq->data_sn = 0;
+ seq->offset = seq->orig_offset;
+ seq->next_burst_len = 0;
+ seq->status = DATAOUT_SEQUENCE_WITHIN_COMMAND_RECOVERY;
+
+ if (conn->sess->sess_ops->DataPDUInOrder)
+ return 0;
+
+ for (i = 0; i < seq->pdu_count; i++) {
+ pdu = &cmd->pdu_list[i+seq->pdu_start];
+
+ if (pdu->status != ISCSI_PDU_RECEIVED_OK)
+ continue;
+
+ pdu->status = ISCSI_PDU_NOT_RECEIVED;
+ }
+ }
+
+ return 0;
+}
+
+int iscsit_recover_dataout_sequence(
+ struct iscsi_cmd *cmd,
+ u32 pdu_offset,
+ u32 pdu_length)
+{
+ u32 r2t_length = 0, r2t_offset = 0;
+
+ spin_lock_bh(&cmd->istate_lock);
+ cmd->cmd_flags |= ICF_WITHIN_COMMAND_RECOVERY;
+ spin_unlock_bh(&cmd->istate_lock);
+
+ if (iscsit_recalculate_dataout_values(cmd, pdu_offset, pdu_length,
+ &r2t_offset, &r2t_length) < 0)
+ return DATAOUT_CANNOT_RECOVER;
+
+ iscsit_send_recovery_r2t(cmd, r2t_offset, r2t_length);
+
+ return DATAOUT_WITHIN_COMMAND_RECOVERY;
+}
+
+static struct iscsi_ooo_cmdsn *iscsit_allocate_ooo_cmdsn(void)
+{
+ struct iscsi_ooo_cmdsn *ooo_cmdsn = NULL;
+
+ ooo_cmdsn = kmem_cache_zalloc(lio_ooo_cache, GFP_ATOMIC);
+ if (!ooo_cmdsn) {
+ pr_err("Unable to allocate memory for"
+ " struct iscsi_ooo_cmdsn.\n");
+ return NULL;
+ }
+ INIT_LIST_HEAD(&ooo_cmdsn->ooo_list);
+
+ return ooo_cmdsn;
+}
+
+/*
+ * Called with sess->cmdsn_mutex held.
+ */
+static int iscsit_attach_ooo_cmdsn(
+ struct iscsi_session *sess,
+ struct iscsi_ooo_cmdsn *ooo_cmdsn)
+{
+ struct iscsi_ooo_cmdsn *ooo_tail, *ooo_tmp;
+ /*
+ * We attach the struct iscsi_ooo_cmdsn entry to the out of order
+ * list in increasing CmdSN order.
+	 * This allows iscsit_execute_ooo_cmdsns() to detect any
+ * additional CmdSN holes while performing delayed execution.
+ */
+ if (list_empty(&sess->sess_ooo_cmdsn_list))
+ list_add_tail(&ooo_cmdsn->ooo_list,
+ &sess->sess_ooo_cmdsn_list);
+ else {
+ ooo_tail = list_entry(sess->sess_ooo_cmdsn_list.prev,
+ typeof(*ooo_tail), ooo_list);
+ /*
+ * CmdSN is greater than the tail of the list.
+ */
+ if (ooo_tail->cmdsn < ooo_cmdsn->cmdsn)
+ list_add_tail(&ooo_cmdsn->ooo_list,
+ &sess->sess_ooo_cmdsn_list);
+ else {
+ /*
+ * CmdSN is either lower than the head, or somewhere
+ * in the middle.
+ */
+ list_for_each_entry(ooo_tmp, &sess->sess_ooo_cmdsn_list,
+ ooo_list) {
+				if (ooo_tmp->cmdsn < ooo_cmdsn->cmdsn)
+					continue;
+
+				/*
+				 * Insert before the first entry whose CmdSN
+				 * is greater than or equal to the new entry,
+				 * keeping the list in increasing CmdSN order.
+				 */
+				list_add(&ooo_cmdsn->ooo_list,
+					ooo_tmp->ooo_list.prev);
+ break;
+ }
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Removes a struct iscsi_ooo_cmdsn from a session's list,
+ * called with struct iscsi_session->cmdsn_mutex held.
+ */
+void iscsit_remove_ooo_cmdsn(
+ struct iscsi_session *sess,
+ struct iscsi_ooo_cmdsn *ooo_cmdsn)
+{
+ list_del(&ooo_cmdsn->ooo_list);
+ kmem_cache_free(lio_ooo_cache, ooo_cmdsn);
+}
+
+void iscsit_clear_ooo_cmdsns_for_conn(struct iscsi_conn *conn)
+{
+ struct iscsi_ooo_cmdsn *ooo_cmdsn;
+ struct iscsi_session *sess = conn->sess;
+
+ mutex_lock(&sess->cmdsn_mutex);
+ list_for_each_entry(ooo_cmdsn, &sess->sess_ooo_cmdsn_list, ooo_list) {
+ if (ooo_cmdsn->cid != conn->cid)
+ continue;
+
+ ooo_cmdsn->cmd = NULL;
+ }
+ mutex_unlock(&sess->cmdsn_mutex);
+}
+
+/*
+ * Called with sess->cmdsn_mutex held.
+ */
+int iscsit_execute_ooo_cmdsns(struct iscsi_session *sess)
+{
+ int ooo_count = 0;
+ struct iscsi_cmd *cmd = NULL;
+ struct iscsi_ooo_cmdsn *ooo_cmdsn, *ooo_cmdsn_tmp;
+
+ list_for_each_entry_safe(ooo_cmdsn, ooo_cmdsn_tmp,
+ &sess->sess_ooo_cmdsn_list, ooo_list) {
+ if (ooo_cmdsn->cmdsn != sess->exp_cmd_sn)
+ continue;
+
+ if (!ooo_cmdsn->cmd) {
+ sess->exp_cmd_sn++;
+ iscsit_remove_ooo_cmdsn(sess, ooo_cmdsn);
+ continue;
+ }
+
+ cmd = ooo_cmdsn->cmd;
+ cmd->i_state = cmd->deferred_i_state;
+ ooo_count++;
+ sess->exp_cmd_sn++;
+ pr_debug("Executing out of order CmdSN: 0x%08x,"
+ " incremented ExpCmdSN to 0x%08x.\n",
+ cmd->cmd_sn, sess->exp_cmd_sn);
+
+ iscsit_remove_ooo_cmdsn(sess, ooo_cmdsn);
+
+ if (iscsit_execute_cmd(cmd, 1) < 0)
+ return -1;
+ }
+
+ return ooo_count;
+}
+
+/*
+ * Called either:
+ *
+ * 1. With sess->cmdsn_mutex held from iscsit_execute_ooo_cmdsns()
+ *    or iscsit_check_received_cmdsn().
+ * 2. With no locks held directly from iscsit_handle_XXX_pdu() functions
+ * for immediate commands.
+ */
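+/*
+ * Note: cmd->istate_lock is taken at the top of this function and every
+ * branch below is responsible for dropping it before calling out or
+ * returning.
+ */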
+int iscsit_execute_cmd(struct iscsi_cmd *cmd, int ooo)
+{
+ struct se_cmd *se_cmd = &cmd->se_cmd;
+ int lr = 0;
+
+ spin_lock_bh(&cmd->istate_lock);
+ if (ooo)
+ cmd->cmd_flags &= ~ICF_OOO_CMDSN;
+
+ switch (cmd->iscsi_opcode) {
+ case ISCSI_OP_SCSI_CMD:
+ /*
+ * Go ahead and send the CHECK_CONDITION status for
+		 * any SCSI CDB exceptions that may have occurred, and
+		 * handle the SCF_SCSI_RESERVATION_CONFLICT case here as well.
+ */
+ if (se_cmd->se_cmd_flags & SCF_SCSI_CDB_EXCEPTION) {
+ if (se_cmd->se_cmd_flags &
+ SCF_SCSI_RESERVATION_CONFLICT) {
+ cmd->i_state = ISTATE_SEND_STATUS;
+ spin_unlock_bh(&cmd->istate_lock);
+ iscsit_add_cmd_to_response_queue(cmd, cmd->conn,
+ cmd->i_state);
+ return 0;
+ }
+ spin_unlock_bh(&cmd->istate_lock);
+ /*
+ * Determine if delayed TASK_ABORTED status for WRITEs
+ * should be sent now if no unsolicited data out
+ * payloads are expected, or if the delayed status
+ * should be sent after unsolicited data out with
+			 * ISCSI_FLAG_CMD_FINAL set in iscsit_handle_data_out()
+ */
+ if (transport_check_aborted_status(se_cmd,
+ (cmd->unsolicited_data == 0)) != 0)
+ return 0;
+ /*
+ * Otherwise send CHECK_CONDITION and sense for
+ * exception
+ */
+ return transport_send_check_condition_and_sense(se_cmd,
+ se_cmd->scsi_sense_reason, 0);
+ }
+ /*
+ * Special case for delayed CmdSN with Immediate
+ * Data and/or Unsolicited Data Out attached.
+ */
+ if (cmd->immediate_data) {
+ if (cmd->cmd_flags & ICF_GOT_LAST_DATAOUT) {
+ spin_unlock_bh(&cmd->istate_lock);
+ return transport_generic_handle_data(
+ &cmd->se_cmd);
+ }
+ spin_unlock_bh(&cmd->istate_lock);
+
+ if (!(cmd->cmd_flags &
+ ICF_NON_IMMEDIATE_UNSOLICITED_DATA)) {
+ /*
+ * Send the delayed TASK_ABORTED status for
+				 * WRITEs if no more unsolicited data is
+ * expected.
+ */
+ if (transport_check_aborted_status(se_cmd, 1)
+ != 0)
+ return 0;
+
+ iscsit_set_dataout_sequence_values(cmd);
+ iscsit_build_r2ts_for_cmd(cmd, cmd->conn, 0);
+ }
+ return 0;
+ }
+ /*
+ * The default handler.
+ */
+ spin_unlock_bh(&cmd->istate_lock);
+
+ if ((cmd->data_direction == DMA_TO_DEVICE) &&
+ !(cmd->cmd_flags & ICF_NON_IMMEDIATE_UNSOLICITED_DATA)) {
+ /*
+ * Send the delayed TASK_ABORTED status for WRITEs if
+			 * no more unsolicited data is expected.
+ */
+ if (transport_check_aborted_status(se_cmd, 1) != 0)
+ return 0;
+
+ iscsit_set_dataout_sequence_values(cmd);
+ spin_lock_bh(&cmd->dataout_timeout_lock);
+ iscsit_start_dataout_timer(cmd, cmd->conn);
+ spin_unlock_bh(&cmd->dataout_timeout_lock);
+ }
+ return transport_handle_cdb_direct(&cmd->se_cmd);
+
+ case ISCSI_OP_NOOP_OUT:
+ case ISCSI_OP_TEXT:
+ spin_unlock_bh(&cmd->istate_lock);
+ iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
+ break;
+ case ISCSI_OP_SCSI_TMFUNC:
+ if (se_cmd->se_cmd_flags & SCF_SCSI_CDB_EXCEPTION) {
+ spin_unlock_bh(&cmd->istate_lock);
+ iscsit_add_cmd_to_response_queue(cmd, cmd->conn,
+ cmd->i_state);
+ return 0;
+ }
+ spin_unlock_bh(&cmd->istate_lock);
+
+ return transport_generic_handle_tmr(&cmd->se_cmd);
+ case ISCSI_OP_LOGOUT:
+ spin_unlock_bh(&cmd->istate_lock);
+ switch (cmd->logout_reason) {
+ case ISCSI_LOGOUT_REASON_CLOSE_SESSION:
+ lr = iscsit_logout_closesession(cmd, cmd->conn);
+ break;
+ case ISCSI_LOGOUT_REASON_CLOSE_CONNECTION:
+ lr = iscsit_logout_closeconnection(cmd, cmd->conn);
+ break;
+ case ISCSI_LOGOUT_REASON_RECOVERY:
+ lr = iscsit_logout_removeconnforrecovery(cmd, cmd->conn);
+ break;
+ default:
+ pr_err("Unknown iSCSI Logout Request Code:"
+ " 0x%02x\n", cmd->logout_reason);
+ return -1;
+ }
+
+ return lr;
+ default:
+ spin_unlock_bh(&cmd->istate_lock);
+ pr_err("Cannot perform out of order execution for"
+ " unknown iSCSI Opcode: 0x%02x\n", cmd->iscsi_opcode);
+ return -1;
+ }
+
+ return 0;
+}
+
+void iscsit_free_all_ooo_cmdsns(struct iscsi_session *sess)
+{
+ struct iscsi_ooo_cmdsn *ooo_cmdsn, *ooo_cmdsn_tmp;
+
+ mutex_lock(&sess->cmdsn_mutex);
+ list_for_each_entry_safe(ooo_cmdsn, ooo_cmdsn_tmp,
+ &sess->sess_ooo_cmdsn_list, ooo_list) {
+
+ list_del(&ooo_cmdsn->ooo_list);
+ kmem_cache_free(lio_ooo_cache, ooo_cmdsn);
+ }
+ mutex_unlock(&sess->cmdsn_mutex);
+}
+
+int iscsit_handle_ooo_cmdsn(
+ struct iscsi_session *sess,
+ struct iscsi_cmd *cmd,
+ u32 cmdsn)
+{
+ int batch = 0;
+ struct iscsi_ooo_cmdsn *ooo_cmdsn = NULL, *ooo_tail = NULL;
+
+ cmd->deferred_i_state = cmd->i_state;
+ cmd->i_state = ISTATE_DEFERRED_CMD;
+ cmd->cmd_flags |= ICF_OOO_CMDSN;
+
+ if (list_empty(&sess->sess_ooo_cmdsn_list))
+ batch = 1;
+ else {
+ ooo_tail = list_entry(sess->sess_ooo_cmdsn_list.prev,
+ typeof(*ooo_tail), ooo_list);
+ if (ooo_tail->cmdsn != (cmdsn - 1))
+ batch = 1;
+ }
+
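+	/*
+	 * E.g. with ExpCmdSN 5 and an arriving CmdSN 8 on an empty list,
+	 * batch is set above and batch_count below becomes 3, the number
+	 * of still missing CmdSNs (5, 6 and 7).
+	 */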
+ ooo_cmdsn = iscsit_allocate_ooo_cmdsn();
+ if (!ooo_cmdsn)
+ return CMDSN_ERROR_CANNOT_RECOVER;
+
+ ooo_cmdsn->cmd = cmd;
+ ooo_cmdsn->batch_count = (batch) ?
+ (cmdsn - sess->exp_cmd_sn) : 1;
+ ooo_cmdsn->cid = cmd->conn->cid;
+ ooo_cmdsn->exp_cmdsn = sess->exp_cmd_sn;
+ ooo_cmdsn->cmdsn = cmdsn;
+
+ if (iscsit_attach_ooo_cmdsn(sess, ooo_cmdsn) < 0) {
+ kmem_cache_free(lio_ooo_cache, ooo_cmdsn);
+ return CMDSN_ERROR_CANNOT_RECOVER;
+ }
+
+ return CMDSN_HIGHER_THAN_EXP;
+}
+
+static int iscsit_set_dataout_timeout_values(
+ struct iscsi_cmd *cmd,
+ u32 *offset,
+ u32 *length)
+{
+ struct iscsi_conn *conn = cmd->conn;
+ struct iscsi_r2t *r2t;
+
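+	/*
+	 * For unsolicited data the recovery window is the smaller of
+	 * FirstBurstLength and the total expected data length.
+	 */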
+ if (cmd->unsolicited_data) {
+ *offset = 0;
+ *length = (conn->sess->sess_ops->FirstBurstLength >
+ cmd->data_length) ?
+ cmd->data_length :
+ conn->sess->sess_ops->FirstBurstLength;
+ return 0;
+ }
+
+ spin_lock_bh(&cmd->r2t_lock);
+ if (list_empty(&cmd->cmd_r2t_list)) {
+ pr_err("cmd->cmd_r2t_list is empty!\n");
+ spin_unlock_bh(&cmd->r2t_lock);
+ return -1;
+ }
+
+ list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) {
+ if (r2t->sent_r2t && !r2t->recovery_r2t && !r2t->seq_complete) {
+ *offset = r2t->offset;
+ *length = r2t->xfer_len;
+ spin_unlock_bh(&cmd->r2t_lock);
+ return 0;
+ }
+ }
+ spin_unlock_bh(&cmd->r2t_lock);
+
+ pr_err("Unable to locate any incomplete DataOUT"
+ " sequences for ITT: 0x%08x.\n", cmd->init_task_tag);
+
+ return -1;
+}
+
+/*
+ * NOTE: Called from interrupt (timer) context.
+ */
+static void iscsit_handle_dataout_timeout(unsigned long data)
+{
+ u32 pdu_length = 0, pdu_offset = 0;
+ u32 r2t_length = 0, r2t_offset = 0;
+ struct iscsi_cmd *cmd = (struct iscsi_cmd *) data;
+ struct iscsi_conn *conn = cmd->conn;
+ struct iscsi_session *sess = NULL;
+ struct iscsi_node_attrib *na;
+
+ iscsit_inc_conn_usage_count(conn);
+
+ spin_lock_bh(&cmd->dataout_timeout_lock);
+ if (cmd->dataout_timer_flags & ISCSI_TF_STOP) {
+ spin_unlock_bh(&cmd->dataout_timeout_lock);
+ iscsit_dec_conn_usage_count(conn);
+ return;
+ }
+ cmd->dataout_timer_flags &= ~ISCSI_TF_RUNNING;
+ sess = conn->sess;
+ na = iscsit_tpg_get_node_attrib(sess);
+
+ if (!sess->sess_ops->ErrorRecoveryLevel) {
+ pr_debug("Unable to recover from DataOut timeout while"
+ " in ERL=0.\n");
+ goto failure;
+ }
+
+ if (++cmd->dataout_timeout_retries == na->dataout_timeout_retries) {
+ pr_debug("Command ITT: 0x%08x exceeded max retries"
+ " for DataOUT timeout %u, closing iSCSI connection.\n",
+ cmd->init_task_tag, na->dataout_timeout_retries);
+ goto failure;
+ }
+
+ cmd->cmd_flags |= ICF_WITHIN_COMMAND_RECOVERY;
+
+ if (conn->sess->sess_ops->DataSequenceInOrder) {
+ if (conn->sess->sess_ops->DataPDUInOrder) {
+ pdu_offset = cmd->write_data_done;
+ if ((pdu_offset + (conn->sess->sess_ops->MaxBurstLength -
+ cmd->next_burst_len)) > cmd->data_length)
+ pdu_length = (cmd->data_length -
+ cmd->write_data_done);
+ else
+ pdu_length = (conn->sess->sess_ops->MaxBurstLength -
+ cmd->next_burst_len);
+ } else {
+ pdu_offset = cmd->seq_start_offset;
+ pdu_length = (cmd->seq_end_offset -
+ cmd->seq_start_offset);
+ }
+ } else {
+ if (iscsit_set_dataout_timeout_values(cmd, &pdu_offset,
+ &pdu_length) < 0)
+ goto failure;
+ }
+
+ if (iscsit_recalculate_dataout_values(cmd, pdu_offset, pdu_length,
+ &r2t_offset, &r2t_length) < 0)
+ goto failure;
+
+ pr_debug("Command ITT: 0x%08x timed out waiting for"
+ " completion of %sDataOUT Sequence Offset: %u, Length: %u\n",
+ cmd->init_task_tag, (cmd->unsolicited_data) ? "Unsolicited " :
+ "", r2t_offset, r2t_length);
+
+ if (iscsit_send_recovery_r2t(cmd, r2t_offset, r2t_length) < 0)
+ goto failure;
+
+ iscsit_start_dataout_timer(cmd, conn);
+ spin_unlock_bh(&cmd->dataout_timeout_lock);
+ iscsit_dec_conn_usage_count(conn);
+
+ return;
+
+failure:
+ spin_unlock_bh(&cmd->dataout_timeout_lock);
+ iscsit_cause_connection_reinstatement(conn, 0);
+ iscsit_dec_conn_usage_count(conn);
+}
+
+void iscsit_mod_dataout_timer(struct iscsi_cmd *cmd)
+{
+ struct iscsi_conn *conn = cmd->conn;
+ struct iscsi_session *sess = conn->sess;
+	struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
+
+ spin_lock_bh(&cmd->dataout_timeout_lock);
+ if (!(cmd->dataout_timer_flags & ISCSI_TF_RUNNING)) {
+ spin_unlock_bh(&cmd->dataout_timeout_lock);
+ return;
+ }
+
+ mod_timer(&cmd->dataout_timer,
+ (get_jiffies_64() + na->dataout_timeout * HZ));
+ pr_debug("Updated DataOUT timer for ITT: 0x%08x",
+ cmd->init_task_tag);
+ spin_unlock_bh(&cmd->dataout_timeout_lock);
+}
+
+/*
+ * Called with cmd->dataout_timeout_lock held.
+ */
+void iscsit_start_dataout_timer(
+ struct iscsi_cmd *cmd,
+ struct iscsi_conn *conn)
+{
+ struct iscsi_session *sess = conn->sess;
+	struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
+
+ if (cmd->dataout_timer_flags & ISCSI_TF_RUNNING)
+ return;
+
+ pr_debug("Starting DataOUT timer for ITT: 0x%08x on"
+ " CID: %hu.\n", cmd->init_task_tag, conn->cid);
+
+ init_timer(&cmd->dataout_timer);
+ cmd->dataout_timer.expires = (get_jiffies_64() + na->dataout_timeout * HZ);
+ cmd->dataout_timer.data = (unsigned long)cmd;
+ cmd->dataout_timer.function = iscsit_handle_dataout_timeout;
+ cmd->dataout_timer_flags &= ~ISCSI_TF_STOP;
+ cmd->dataout_timer_flags |= ISCSI_TF_RUNNING;
+ add_timer(&cmd->dataout_timer);
+}
+
+void iscsit_stop_dataout_timer(struct iscsi_cmd *cmd)
+{
+ spin_lock_bh(&cmd->dataout_timeout_lock);
+ if (!(cmd->dataout_timer_flags & ISCSI_TF_RUNNING)) {
+ spin_unlock_bh(&cmd->dataout_timeout_lock);
+ return;
+ }
+ cmd->dataout_timer_flags |= ISCSI_TF_STOP;
+ spin_unlock_bh(&cmd->dataout_timeout_lock);
+
+ del_timer_sync(&cmd->dataout_timer);
+
+ spin_lock_bh(&cmd->dataout_timeout_lock);
+ cmd->dataout_timer_flags &= ~ISCSI_TF_RUNNING;
+ pr_debug("Stopped DataOUT Timer for ITT: 0x%08x\n",
+ cmd->init_task_tag);
+ spin_unlock_bh(&cmd->dataout_timeout_lock);
+}
diff --git a/drivers/target/iscsi/iscsi_target_erl1.h b/drivers/target/iscsi/iscsi_target_erl1.h
new file mode 100644
index 00000000000..85e67e29de6
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_erl1.h
@@ -0,0 +1,26 @@
+#ifndef ISCSI_TARGET_ERL1_H
+#define ISCSI_TARGET_ERL1_H
+
+extern int iscsit_dump_data_payload(struct iscsi_conn *, u32, int);
+extern int iscsit_create_recovery_datain_values_datasequenceinorder_yes(
+ struct iscsi_cmd *, struct iscsi_datain_req *);
+extern int iscsit_create_recovery_datain_values_datasequenceinorder_no(
+ struct iscsi_cmd *, struct iscsi_datain_req *);
+extern int iscsit_handle_recovery_datain_or_r2t(struct iscsi_conn *, unsigned char *,
+ u32, u32, u32, u32);
+extern int iscsit_handle_status_snack(struct iscsi_conn *, u32, u32,
+ u32, u32);
+extern int iscsit_handle_data_ack(struct iscsi_conn *, u32, u32, u32);
+extern int iscsit_dataout_datapduinorder_no_fbit(struct iscsi_cmd *, struct iscsi_pdu *);
+extern int iscsit_recover_dataout_sequence(struct iscsi_cmd *, u32, u32);
+extern void iscsit_clear_ooo_cmdsns_for_conn(struct iscsi_conn *);
+extern void iscsit_free_all_ooo_cmdsns(struct iscsi_session *);
+extern int iscsit_execute_ooo_cmdsns(struct iscsi_session *);
+extern int iscsit_execute_cmd(struct iscsi_cmd *, int);
+extern int iscsit_handle_ooo_cmdsn(struct iscsi_session *, struct iscsi_cmd *, u32);
+extern void iscsit_remove_ooo_cmdsn(struct iscsi_session *, struct iscsi_ooo_cmdsn *);
+extern void iscsit_mod_dataout_timer(struct iscsi_cmd *);
+extern void iscsit_start_dataout_timer(struct iscsi_cmd *, struct iscsi_conn *);
+extern void iscsit_stop_dataout_timer(struct iscsi_cmd *);
+
+#endif /* ISCSI_TARGET_ERL1_H */
diff --git a/drivers/target/iscsi/iscsi_target_erl2.c b/drivers/target/iscsi/iscsi_target_erl2.c
new file mode 100644
index 00000000000..91a4d170bda
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_erl2.c
@@ -0,0 +1,474 @@
+/*******************************************************************************
+ * This file contains error recovery level two functions used by
+ * the iSCSI Target driver.
+ *
+ * © Copyright 2007-2011 RisingTide Systems LLC.
+ *
+ * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ *
+ * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ ******************************************************************************/
+
+#include <scsi/iscsi_proto.h>
+#include <target/target_core_base.h>
+#include <target/target_core_transport.h>
+
+#include "iscsi_target_core.h"
+#include "iscsi_target_datain_values.h"
+#include "iscsi_target_util.h"
+#include "iscsi_target_erl0.h"
+#include "iscsi_target_erl1.h"
+#include "iscsi_target_erl2.h"
+#include "iscsi_target.h"
+
+/*
+ * FIXME: Does RData SNACK apply here as well?
+ */
+void iscsit_create_conn_recovery_datain_values(
+ struct iscsi_cmd *cmd,
+ u32 exp_data_sn)
+{
+ u32 data_sn = 0;
+ struct iscsi_conn *conn = cmd->conn;
+
+ cmd->next_burst_len = 0;
+ cmd->read_data_done = 0;
+
+ while (exp_data_sn > data_sn) {
+ if ((cmd->next_burst_len +
+ conn->conn_ops->MaxRecvDataSegmentLength) <
+ conn->sess->sess_ops->MaxBurstLength) {
+ cmd->read_data_done +=
+ conn->conn_ops->MaxRecvDataSegmentLength;
+ cmd->next_burst_len +=
+ conn->conn_ops->MaxRecvDataSegmentLength;
+ } else {
+ cmd->read_data_done +=
+ (conn->sess->sess_ops->MaxBurstLength -
+ cmd->next_burst_len);
+ cmd->next_burst_len = 0;
+ }
+ data_sn++;
+ }
+}
+
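+/*
+ * E.g. with MaxBurstLength=64k a write_data_done of 100k is rounded
+ * down below to 64k, restarting DataOUT from the last fully completed
+ * burst boundary.
+ */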
+void iscsit_create_conn_recovery_dataout_values(
+ struct iscsi_cmd *cmd)
+{
+ u32 write_data_done = 0;
+ struct iscsi_conn *conn = cmd->conn;
+
+ cmd->data_sn = 0;
+ cmd->next_burst_len = 0;
+
+ while (cmd->write_data_done > write_data_done) {
+ if ((write_data_done + conn->sess->sess_ops->MaxBurstLength) <=
+ cmd->write_data_done)
+ write_data_done += conn->sess->sess_ops->MaxBurstLength;
+ else
+ break;
+ }
+
+ cmd->write_data_done = write_data_done;
+}
+
+static int iscsit_attach_active_connection_recovery_entry(
+ struct iscsi_session *sess,
+ struct iscsi_conn_recovery *cr)
+{
+ spin_lock(&sess->cr_a_lock);
+ list_add_tail(&cr->cr_list, &sess->cr_active_list);
+ spin_unlock(&sess->cr_a_lock);
+
+ return 0;
+}
+
+static int iscsit_attach_inactive_connection_recovery_entry(
+ struct iscsi_session *sess,
+ struct iscsi_conn_recovery *cr)
+{
+ spin_lock(&sess->cr_i_lock);
+ list_add_tail(&cr->cr_list, &sess->cr_inactive_list);
+
+ sess->conn_recovery_count++;
+ pr_debug("Incremented connection recovery count to %u for"
+ " SID: %u\n", sess->conn_recovery_count, sess->sid);
+ spin_unlock(&sess->cr_i_lock);
+
+ return 0;
+}
+
+struct iscsi_conn_recovery *iscsit_get_inactive_connection_recovery_entry(
+ struct iscsi_session *sess,
+ u16 cid)
+{
+ struct iscsi_conn_recovery *cr;
+
+ spin_lock(&sess->cr_i_lock);
+ list_for_each_entry(cr, &sess->cr_inactive_list, cr_list) {
+ if (cr->cid == cid) {
+ spin_unlock(&sess->cr_i_lock);
+ return cr;
+ }
+ }
+ spin_unlock(&sess->cr_i_lock);
+
+ return NULL;
+}
+
+void iscsit_free_connection_recovery_entires(struct iscsi_session *sess)
+{
+ struct iscsi_cmd *cmd, *cmd_tmp;
+ struct iscsi_conn_recovery *cr, *cr_tmp;
+
+ spin_lock(&sess->cr_a_lock);
+ list_for_each_entry_safe(cr, cr_tmp, &sess->cr_active_list, cr_list) {
+ list_del(&cr->cr_list);
+ spin_unlock(&sess->cr_a_lock);
+
+ spin_lock(&cr->conn_recovery_cmd_lock);
+ list_for_each_entry_safe(cmd, cmd_tmp,
+ &cr->conn_recovery_cmd_list, i_list) {
+
+ list_del(&cmd->i_list);
+ cmd->conn = NULL;
+ spin_unlock(&cr->conn_recovery_cmd_lock);
+ if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) ||
+ !(cmd->se_cmd.transport_wait_for_tasks))
+ iscsit_release_cmd(cmd);
+ else
+ cmd->se_cmd.transport_wait_for_tasks(
+ &cmd->se_cmd, 1, 1);
+ spin_lock(&cr->conn_recovery_cmd_lock);
+ }
+ spin_unlock(&cr->conn_recovery_cmd_lock);
+ spin_lock(&sess->cr_a_lock);
+
+ kfree(cr);
+ }
+ spin_unlock(&sess->cr_a_lock);
+
+ spin_lock(&sess->cr_i_lock);
+ list_for_each_entry_safe(cr, cr_tmp, &sess->cr_inactive_list, cr_list) {
+ list_del(&cr->cr_list);
+ spin_unlock(&sess->cr_i_lock);
+
+ spin_lock(&cr->conn_recovery_cmd_lock);
+ list_for_each_entry_safe(cmd, cmd_tmp,
+ &cr->conn_recovery_cmd_list, i_list) {
+
+ list_del(&cmd->i_list);
+ cmd->conn = NULL;
+ spin_unlock(&cr->conn_recovery_cmd_lock);
+ if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) ||
+ !(cmd->se_cmd.transport_wait_for_tasks))
+ iscsit_release_cmd(cmd);
+ else
+ cmd->se_cmd.transport_wait_for_tasks(
+ &cmd->se_cmd, 1, 1);
+ spin_lock(&cr->conn_recovery_cmd_lock);
+ }
+ spin_unlock(&cr->conn_recovery_cmd_lock);
+ spin_lock(&sess->cr_i_lock);
+
+ kfree(cr);
+ }
+ spin_unlock(&sess->cr_i_lock);
+}
+
+int iscsit_remove_active_connection_recovery_entry(
+ struct iscsi_conn_recovery *cr,
+ struct iscsi_session *sess)
+{
+ spin_lock(&sess->cr_a_lock);
+ list_del(&cr->cr_list);
+
+ sess->conn_recovery_count--;
+ pr_debug("Decremented connection recovery count to %u for"
+ " SID: %u\n", sess->conn_recovery_count, sess->sid);
+ spin_unlock(&sess->cr_a_lock);
+
+ kfree(cr);
+
+ return 0;
+}
+
+int iscsit_remove_inactive_connection_recovery_entry(
+ struct iscsi_conn_recovery *cr,
+ struct iscsi_session *sess)
+{
+ spin_lock(&sess->cr_i_lock);
+ list_del(&cr->cr_list);
+ spin_unlock(&sess->cr_i_lock);
+
+ return 0;
+}
+
+/*
+ * Called with cr->conn_recovery_cmd_lock held.
+ */
+int iscsit_remove_cmd_from_connection_recovery(
+ struct iscsi_cmd *cmd,
+ struct iscsi_session *sess)
+{
+ struct iscsi_conn_recovery *cr;
+
+ if (!cmd->cr) {
+ pr_err("struct iscsi_conn_recovery pointer for ITT: 0x%08x"
+ " is NULL!\n", cmd->init_task_tag);
+ BUG();
+ }
+ cr = cmd->cr;
+
+ list_del(&cmd->i_list);
+ return --cr->cmd_count;
+}
+
+void iscsit_discard_cr_cmds_by_expstatsn(
+ struct iscsi_conn_recovery *cr,
+ u32 exp_statsn)
+{
+ u32 dropped_count = 0;
+ struct iscsi_cmd *cmd, *cmd_tmp;
+ struct iscsi_session *sess = cr->sess;
+
+ spin_lock(&cr->conn_recovery_cmd_lock);
+ list_for_each_entry_safe(cmd, cmd_tmp,
+ &cr->conn_recovery_cmd_list, i_list) {
+
+ if (((cmd->deferred_i_state != ISTATE_SENT_STATUS) &&
+ (cmd->deferred_i_state != ISTATE_REMOVE)) ||
+ (cmd->stat_sn >= exp_statsn)) {
+ continue;
+ }
+
+ dropped_count++;
+ pr_debug("Dropping Acknowledged ITT: 0x%08x, StatSN:"
+ " 0x%08x, CID: %hu.\n", cmd->init_task_tag,
+ cmd->stat_sn, cr->cid);
+
+ iscsit_remove_cmd_from_connection_recovery(cmd, sess);
+
+ spin_unlock(&cr->conn_recovery_cmd_lock);
+ if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) ||
+ !(cmd->se_cmd.transport_wait_for_tasks))
+ iscsit_release_cmd(cmd);
+ else
+ cmd->se_cmd.transport_wait_for_tasks(
+ &cmd->se_cmd, 1, 0);
+ spin_lock(&cr->conn_recovery_cmd_lock);
+ }
+ spin_unlock(&cr->conn_recovery_cmd_lock);
+
+ pr_debug("Dropped %u total acknowledged commands on"
+ " CID: %hu less than old ExpStatSN: 0x%08x\n",
+ dropped_count, cr->cid, exp_statsn);
+
+ if (!cr->cmd_count) {
+ pr_debug("No commands to be reassigned for failed"
+ " connection CID: %hu on SID: %u\n",
+ cr->cid, sess->sid);
+ iscsit_remove_inactive_connection_recovery_entry(cr, sess);
+ iscsit_attach_active_connection_recovery_entry(sess, cr);
+ pr_debug("iSCSI connection recovery successful for CID:"
+ " %hu on SID: %u\n", cr->cid, sess->sid);
+ iscsit_remove_active_connection_recovery_entry(cr, sess);
+ } else {
+ iscsit_remove_inactive_connection_recovery_entry(cr, sess);
+ iscsit_attach_active_connection_recovery_entry(sess, cr);
+ }
+}
+
+int iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(struct iscsi_conn *conn)
+{
+ u32 dropped_count = 0;
+ struct iscsi_cmd *cmd, *cmd_tmp;
+ struct iscsi_ooo_cmdsn *ooo_cmdsn, *ooo_cmdsn_tmp;
+ struct iscsi_session *sess = conn->sess;
+
+ mutex_lock(&sess->cmdsn_mutex);
+ list_for_each_entry_safe(ooo_cmdsn, ooo_cmdsn_tmp,
+ &sess->sess_ooo_cmdsn_list, ooo_list) {
+
+ if (ooo_cmdsn->cid != conn->cid)
+ continue;
+
+ dropped_count++;
+ pr_debug("Dropping unacknowledged CmdSN:"
+ " 0x%08x during connection recovery on CID: %hu\n",
+ ooo_cmdsn->cmdsn, conn->cid);
+ iscsit_remove_ooo_cmdsn(sess, ooo_cmdsn);
+ }
+ mutex_unlock(&sess->cmdsn_mutex);
+
+ spin_lock_bh(&conn->cmd_lock);
+ list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_list) {
+ if (!(cmd->cmd_flags & ICF_OOO_CMDSN))
+ continue;
+
+ list_del(&cmd->i_list);
+
+ spin_unlock_bh(&conn->cmd_lock);
+ if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) ||
+ !(cmd->se_cmd.transport_wait_for_tasks))
+ iscsit_release_cmd(cmd);
+ else
+ cmd->se_cmd.transport_wait_for_tasks(
+ &cmd->se_cmd, 1, 1);
+ spin_lock_bh(&conn->cmd_lock);
+ }
+ spin_unlock_bh(&conn->cmd_lock);
+
+ pr_debug("Dropped %u total unacknowledged commands on CID:"
+ " %hu for ExpCmdSN: 0x%08x.\n", dropped_count, conn->cid,
+ sess->exp_cmd_sn);
+ return 0;
+}
+
+int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
+{
+ u32 cmd_count = 0;
+ struct iscsi_cmd *cmd, *cmd_tmp;
+ struct iscsi_conn_recovery *cr;
+
+ /*
+	 * Allocate a struct iscsi_conn_recovery for this connection.
+	 * Each struct iscsi_cmd contains a struct iscsi_conn_recovery pointer
+ * (struct iscsi_cmd->cr) so we need to allocate this before preparing the
+ * connection's command list for connection recovery.
+ */
+ cr = kzalloc(sizeof(struct iscsi_conn_recovery), GFP_KERNEL);
+ if (!cr) {
+ pr_err("Unable to allocate memory for"
+ " struct iscsi_conn_recovery.\n");
+ return -1;
+ }
+ INIT_LIST_HEAD(&cr->cr_list);
+ INIT_LIST_HEAD(&cr->conn_recovery_cmd_list);
+ spin_lock_init(&cr->conn_recovery_cmd_lock);
+ /*
+ * Only perform connection recovery on ISCSI_OP_SCSI_CMD or
+ * ISCSI_OP_NOOP_OUT opcodes. For all other opcodes call
+ * list_del(&cmd->i_list); to release the command to the
+ * session pool and remove it from the connection's list.
+ *
+ * Also stop the DataOUT timer, which will be restarted after
+ * sending the TMR response.
+ */
+ spin_lock_bh(&conn->cmd_lock);
+ list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_list) {
+
+ if ((cmd->iscsi_opcode != ISCSI_OP_SCSI_CMD) &&
+ (cmd->iscsi_opcode != ISCSI_OP_NOOP_OUT)) {
+ pr_debug("Not performing realligence on"
+ " Opcode: 0x%02x, ITT: 0x%08x, CmdSN: 0x%08x,"
+ " CID: %hu\n", cmd->iscsi_opcode,
+ cmd->init_task_tag, cmd->cmd_sn, conn->cid);
+
+ list_del(&cmd->i_list);
+ spin_unlock_bh(&conn->cmd_lock);
+
+ if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) ||
+ !(cmd->se_cmd.transport_wait_for_tasks))
+ iscsit_release_cmd(cmd);
+ else
+ cmd->se_cmd.transport_wait_for_tasks(
+ &cmd->se_cmd, 1, 0);
+ spin_lock_bh(&conn->cmd_lock);
+ continue;
+ }
+
+ /*
+ * Special case where commands greater than or equal to
+ * the session's ExpCmdSN are attached to the connection
+ * list but not to the out of order CmdSN list. The one
+ * obvious case is when a command with immediate data
+ * attached must only check the CmdSN against ExpCmdSN
+ * after the data is received. The special case below
+ * is when the connection fails before data is received,
+ * but also may apply to other PDUs, so it has been
+ * made generic here.
+ */
+ if (!(cmd->cmd_flags & ICF_OOO_CMDSN) && !cmd->immediate_cmd &&
+ (cmd->cmd_sn >= conn->sess->exp_cmd_sn)) {
+ list_del(&cmd->i_list);
+ spin_unlock_bh(&conn->cmd_lock);
+
+ if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) ||
+ !(cmd->se_cmd.transport_wait_for_tasks))
+ iscsit_release_cmd(cmd);
+ else
+ cmd->se_cmd.transport_wait_for_tasks(
+ &cmd->se_cmd, 1, 1);
+ spin_lock_bh(&conn->cmd_lock);
+ continue;
+ }
+
+ cmd_count++;
+ pr_debug("Preparing Opcode: 0x%02x, ITT: 0x%08x,"
+ " CmdSN: 0x%08x, StatSN: 0x%08x, CID: %hu for"
+ " realligence.\n", cmd->iscsi_opcode,
+ cmd->init_task_tag, cmd->cmd_sn, cmd->stat_sn,
+ conn->cid);
+
+ cmd->deferred_i_state = cmd->i_state;
+ cmd->i_state = ISTATE_IN_CONNECTION_RECOVERY;
+
+ if (cmd->data_direction == DMA_TO_DEVICE)
+ iscsit_stop_dataout_timer(cmd);
+
+ cmd->sess = conn->sess;
+
+ list_del(&cmd->i_list);
+ spin_unlock_bh(&conn->cmd_lock);
+
+ iscsit_free_all_datain_reqs(cmd);
+
+ if ((cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) &&
+ cmd->se_cmd.transport_wait_for_tasks)
+ cmd->se_cmd.transport_wait_for_tasks(&cmd->se_cmd,
+ 0, 0);
+ /*
+ * Add the struct iscsi_cmd to the connection recovery cmd list
+ */
+ spin_lock(&cr->conn_recovery_cmd_lock);
+ list_add_tail(&cmd->i_list, &cr->conn_recovery_cmd_list);
+ spin_unlock(&cr->conn_recovery_cmd_lock);
+
+ spin_lock_bh(&conn->cmd_lock);
+ cmd->cr = cr;
+ cmd->conn = NULL;
+ }
+ spin_unlock_bh(&conn->cmd_lock);
+ /*
+ * Fill in the various values in the preallocated struct iscsi_conn_recovery.
+ */
+ cr->cid = conn->cid;
+ cr->cmd_count = cmd_count;
+ cr->maxrecvdatasegmentlength = conn->conn_ops->MaxRecvDataSegmentLength;
+ cr->sess = conn->sess;
+
+ iscsit_attach_inactive_connection_recovery_entry(conn->sess, cr);
+
+ return 0;
+}
+
+int iscsit_connection_recovery_transport_reset(struct iscsi_conn *conn)
+{
+ atomic_set(&conn->connection_recovery, 1);
+
+ if (iscsit_close_connection(conn) < 0)
+ return -1;
+
+ return 0;
+}
diff --git a/drivers/target/iscsi/iscsi_target_erl2.h b/drivers/target/iscsi/iscsi_target_erl2.h
new file mode 100644
index 00000000000..22f8d24780a
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_erl2.h
@@ -0,0 +1,18 @@
+#ifndef ISCSI_TARGET_ERL2_H
+#define ISCSI_TARGET_ERL2_H
+
+extern void iscsit_create_conn_recovery_datain_values(struct iscsi_cmd *, u32);
+extern void iscsit_create_conn_recovery_dataout_values(struct iscsi_cmd *);
+extern struct iscsi_conn_recovery *iscsit_get_inactive_connection_recovery_entry(
+ struct iscsi_session *, u16);
+extern void iscsit_free_connection_recovery_entires(struct iscsi_session *);
+extern int iscsit_remove_active_connection_recovery_entry(
+ struct iscsi_conn_recovery *, struct iscsi_session *);
+extern int iscsit_remove_cmd_from_connection_recovery(struct iscsi_cmd *,
+ struct iscsi_session *);
+extern void iscsit_discard_cr_cmds_by_expstatsn(struct iscsi_conn_recovery *, u32);
+extern int iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(struct iscsi_conn *);
+extern int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *);
+extern int iscsit_connection_recovery_transport_reset(struct iscsi_conn *);
+
+#endif /* ISCSI_TARGET_ERL2_H */
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
new file mode 100644
index 00000000000..bcaf82f4703
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -0,0 +1,1232 @@
+/*******************************************************************************
+ * This file contains the login functions used by the iSCSI Target driver.
+ *
+ * © Copyright 2007-2011 RisingTide Systems LLC.
+ *
+ * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ *
+ * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ ******************************************************************************/
+
+#include <linux/string.h>
+#include <linux/kthread.h>
+#include <linux/crypto.h>
+#include <scsi/iscsi_proto.h>
+#include <target/target_core_base.h>
+#include <target/target_core_transport.h>
+
+#include "iscsi_target_core.h"
+#include "iscsi_target_tq.h"
+#include "iscsi_target_device.h"
+#include "iscsi_target_nego.h"
+#include "iscsi_target_erl0.h"
+#include "iscsi_target_erl2.h"
+#include "iscsi_target_login.h"
+#include "iscsi_target_stat.h"
+#include "iscsi_target_tpg.h"
+#include "iscsi_target_util.h"
+#include "iscsi_target.h"
+#include "iscsi_target_parameters.h"
+
+extern struct idr sess_idr;
+extern struct mutex auth_id_lock;
+extern spinlock_t sess_idr_lock;
+
+static int iscsi_login_init_conn(struct iscsi_conn *conn)
+{
+ INIT_LIST_HEAD(&conn->conn_list);
+ INIT_LIST_HEAD(&conn->conn_cmd_list);
+ INIT_LIST_HEAD(&conn->immed_queue_list);
+ INIT_LIST_HEAD(&conn->response_queue_list);
+ init_completion(&conn->conn_post_wait_comp);
+ init_completion(&conn->conn_wait_comp);
+ init_completion(&conn->conn_wait_rcfr_comp);
+ init_completion(&conn->conn_waiting_on_uc_comp);
+ init_completion(&conn->conn_logout_comp);
+ init_completion(&conn->rx_half_close_comp);
+ init_completion(&conn->tx_half_close_comp);
+ spin_lock_init(&conn->cmd_lock);
+ spin_lock_init(&conn->conn_usage_lock);
+ spin_lock_init(&conn->immed_queue_lock);
+ spin_lock_init(&conn->nopin_timer_lock);
+ spin_lock_init(&conn->response_queue_lock);
+ spin_lock_init(&conn->state_lock);
+
+ if (!zalloc_cpumask_var(&conn->conn_cpumask, GFP_KERNEL)) {
+ pr_err("Unable to allocate conn->conn_cpumask\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/*
+ * Used by iscsi_target_nego.c:iscsi_target_locate_portal() to setup
+ * per struct iscsi_conn libcrypto contexts for crc32c and crc32-intel
+ */
+int iscsi_login_setup_crypto(struct iscsi_conn *conn)
+{
+ /*
+ * Setup slicing by CRC32C algorithm for RX and TX libcrypto contexts
+ * which will default to crc32c_intel.ko for cpu_has_xmm4_2, or fallback
+ * to software 1x8 byte slicing from crc32c.ko
+ */
+ conn->conn_rx_hash.flags = 0;
+ conn->conn_rx_hash.tfm = crypto_alloc_hash("crc32c", 0,
+ CRYPTO_ALG_ASYNC);
+ if (IS_ERR(conn->conn_rx_hash.tfm)) {
+ pr_err("crypto_alloc_hash() failed for conn_rx_tfm\n");
+ return -ENOMEM;
+ }
+
+ conn->conn_tx_hash.flags = 0;
+ conn->conn_tx_hash.tfm = crypto_alloc_hash("crc32c", 0,
+ CRYPTO_ALG_ASYNC);
+ if (IS_ERR(conn->conn_tx_hash.tfm)) {
+ pr_err("crypto_alloc_hash() failed for conn_tx_tfm\n");
+ crypto_free_hash(conn->conn_rx_hash.tfm);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int iscsi_login_check_initiator_version(
+ struct iscsi_conn *conn,
+ u8 version_max,
+ u8 version_min)
+{
+ if ((version_max != 0x00) || (version_min != 0x00)) {
+ pr_err("Unsupported iSCSI IETF Pre-RFC Revision,"
+ " version Min/Max 0x%02x/0x%02x, rejecting login.\n",
+ version_min, version_max);
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+ ISCSI_LOGIN_STATUS_NO_VERSION);
+ return -1;
+ }
+
+ return 0;
+}
+
+int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn)
+{
+ int sessiontype;
+ struct iscsi_param *initiatorname_param = NULL, *sessiontype_param = NULL;
+ struct iscsi_portal_group *tpg = conn->tpg;
+ struct iscsi_session *sess = NULL, *sess_p = NULL;
+ struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
+ struct se_session *se_sess, *se_sess_tmp;
+
+ initiatorname_param = iscsi_find_param_from_key(
+ INITIATORNAME, conn->param_list);
+ if (!initiatorname_param)
+ return -1;
+
+ sessiontype_param = iscsi_find_param_from_key(
+ SESSIONTYPE, conn->param_list);
+ if (!sessiontype_param)
+ return -1;
+
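+	/*
+	 * SessionType=Normal yields 0, SessionType=Discovery yields 1.
+	 */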
+ sessiontype = (strncmp(sessiontype_param->value, NORMAL, 6)) ? 1 : 0;
+
+ spin_lock_bh(&se_tpg->session_lock);
+ list_for_each_entry_safe(se_sess, se_sess_tmp, &se_tpg->tpg_sess_list,
+ sess_list) {
+
+ sess_p = (struct iscsi_session *)se_sess->fabric_sess_ptr;
+ spin_lock(&sess_p->conn_lock);
+ if (atomic_read(&sess_p->session_fall_back_to_erl0) ||
+ atomic_read(&sess_p->session_logout) ||
+ (sess_p->time2retain_timer_flags & ISCSI_TF_EXPIRED)) {
+ spin_unlock(&sess_p->conn_lock);
+ continue;
+ }
+ if (!memcmp((void *)sess_p->isid, (void *)conn->sess->isid, 6) &&
+ (!strcmp((void *)sess_p->sess_ops->InitiatorName,
+ (void *)initiatorname_param->value) &&
+ (sess_p->sess_ops->SessionType == sessiontype))) {
+ atomic_set(&sess_p->session_reinstatement, 1);
+ spin_unlock(&sess_p->conn_lock);
+ iscsit_inc_session_usage_count(sess_p);
+ iscsit_stop_time2retain_timer(sess_p);
+ sess = sess_p;
+ break;
+ }
+ spin_unlock(&sess_p->conn_lock);
+ }
+ spin_unlock_bh(&se_tpg->session_lock);
+ /*
+ * If the Time2Retain handler has expired, the session is already gone.
+ */
+ if (!sess)
+ return 0;
+
+ pr_debug("%s iSCSI Session SID %u is still active for %s,"
+ " preforming session reinstatement.\n", (sessiontype) ?
+ "Discovery" : "Normal", sess->sid,
+ sess->sess_ops->InitiatorName);
+
+ spin_lock_bh(&sess->conn_lock);
+ if (sess->session_state == TARG_SESS_STATE_FAILED) {
+ spin_unlock_bh(&sess->conn_lock);
+ iscsit_dec_session_usage_count(sess);
+ return iscsit_close_session(sess);
+ }
+ spin_unlock_bh(&sess->conn_lock);
+
+ iscsit_stop_session(sess, 1, 1);
+ iscsit_dec_session_usage_count(sess);
+
+ return iscsit_close_session(sess);
+}
+
+static void iscsi_login_set_conn_values(
+ struct iscsi_session *sess,
+ struct iscsi_conn *conn,
+ u16 cid)
+{
+ conn->sess = sess;
+ conn->cid = cid;
+ /*
+ * Generate a random Status sequence number (statsn) for the new
+ * iSCSI connection.
+ */
+ get_random_bytes(&conn->stat_sn, sizeof(u32));
+
+ mutex_lock(&auth_id_lock);
+ conn->auth_id = iscsit_global->auth_id++;
+ mutex_unlock(&auth_id_lock);
+}
+
+/*
+ * This is the leading connection of a new session,
+ * or session reinstatement.
+ */
+static int iscsi_login_zero_tsih_s1(
+ struct iscsi_conn *conn,
+ unsigned char *buf)
+{
+ struct iscsi_session *sess = NULL;
+ struct iscsi_login_req *pdu = (struct iscsi_login_req *)buf;
+
+ sess = kzalloc(sizeof(struct iscsi_session), GFP_KERNEL);
+ if (!sess) {
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+ ISCSI_LOGIN_STATUS_NO_RESOURCES);
+ pr_err("Could not allocate memory for session\n");
+ return -1;
+ }
+
+ iscsi_login_set_conn_values(sess, conn, pdu->cid);
+ sess->init_task_tag = pdu->itt;
+ memcpy((void *)&sess->isid, (void *)pdu->isid, 6);
+ sess->exp_cmd_sn = pdu->cmdsn;
+ INIT_LIST_HEAD(&sess->sess_conn_list);
+ INIT_LIST_HEAD(&sess->sess_ooo_cmdsn_list);
+ INIT_LIST_HEAD(&sess->cr_active_list);
+ INIT_LIST_HEAD(&sess->cr_inactive_list);
+ init_completion(&sess->async_msg_comp);
+ init_completion(&sess->reinstatement_comp);
+ init_completion(&sess->session_wait_comp);
+ init_completion(&sess->session_waiting_on_uc_comp);
+ mutex_init(&sess->cmdsn_mutex);
+ spin_lock_init(&sess->conn_lock);
+ spin_lock_init(&sess->cr_a_lock);
+ spin_lock_init(&sess->cr_i_lock);
+ spin_lock_init(&sess->session_usage_lock);
+ spin_lock_init(&sess->ttt_lock);
+
+ if (!idr_pre_get(&sess_idr, GFP_KERNEL)) {
+ pr_err("idr_pre_get() for sess_idr failed\n");
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+ ISCSI_LOGIN_STATUS_NO_RESOURCES);
+ return -1;
+ }
+ spin_lock(&sess_idr_lock);
+ idr_get_new(&sess_idr, NULL, &sess->session_index);
+ spin_unlock(&sess_idr_lock);
+
+ sess->creation_time = get_jiffies_64();
+ spin_lock_init(&sess->session_stats_lock);
+ /*
+ * The FFP CmdSN window values will be allocated from the TPG's
+ * Initiator Node's ACL once the login has been successfully completed.
+ */
+ sess->max_cmd_sn = pdu->cmdsn;
+
+ sess->sess_ops = kzalloc(sizeof(struct iscsi_sess_ops), GFP_KERNEL);
+ if (!sess->sess_ops) {
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+ ISCSI_LOGIN_STATUS_NO_RESOURCES);
+ pr_err("Unable to allocate memory for"
+ " struct iscsi_sess_ops.\n");
+ return -1;
+ }
+
+ sess->se_sess = transport_init_session();
+ if (!sess->se_sess) {
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+ ISCSI_LOGIN_STATUS_NO_RESOURCES);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int iscsi_login_zero_tsih_s2(
+ struct iscsi_conn *conn)
+{
+ struct iscsi_node_attrib *na;
+ struct iscsi_session *sess = conn->sess;
+ unsigned char buf[32];
+
+ sess->tpg = conn->tpg;
+
+ /*
+ * Assign a new TPG Session Handle. Note this is protected with
+ * struct iscsi_portal_group->np_login_sem from iscsit_access_np().
+ */
+ sess->tsih = ++ISCSI_TPG_S(sess)->ntsih;
+ if (!sess->tsih)
+ sess->tsih = ++ISCSI_TPG_S(sess)->ntsih;
+
+ /*
+	 * Create the default params from user defined values.
+ */
+ if (iscsi_copy_param_list(&conn->param_list,
+ ISCSI_TPG_C(conn)->param_list, 1) < 0) {
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+ ISCSI_LOGIN_STATUS_NO_RESOURCES);
+ return -1;
+ }
+
+ iscsi_set_keys_to_negotiate(0, conn->param_list);
+
+ if (sess->sess_ops->SessionType)
+ return iscsi_set_keys_irrelevant_for_discovery(
+ conn->param_list);
+
+ na = iscsit_tpg_get_node_attrib(sess);
+
+ /*
+ * Need to send TargetPortalGroupTag back in first login response
+ * on any iSCSI connection where the Initiator provides TargetName.
+ * See 5.3.1. Login Phase Start
+ *
+ * In our case, we have already located the struct iscsi_tiqn at this point.
+ */
+ memset(buf, 0, 32);
+ sprintf(buf, "TargetPortalGroupTag=%hu", ISCSI_TPG_S(sess)->tpgt);
+ if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) {
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+ ISCSI_LOGIN_STATUS_NO_RESOURCES);
+ return -1;
+ }
+
+ /*
+ * Workaround for Initiators that have broken connection recovery logic.
+ *
+ * "We would really like to get rid of this." Linux-iSCSI.org team
+ */
+ memset(buf, 0, 32);
+ sprintf(buf, "ErrorRecoveryLevel=%d", na->default_erl);
+ if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) {
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+ ISCSI_LOGIN_STATUS_NO_RESOURCES);
+ return -1;
+ }
+
+ if (iscsi_login_disable_FIM_keys(conn->param_list, conn) < 0)
+ return -1;
+
+ return 0;
+}
+
+/*
+ * Remove PSTATE_NEGOTIATE for the four FIM related keys.
+ * The Initiator node will be able to enable FIM by proposing them itself.
+ */
+int iscsi_login_disable_FIM_keys(
+ struct iscsi_param_list *param_list,
+ struct iscsi_conn *conn)
+{
+ struct iscsi_param *param;
+
+ param = iscsi_find_param_from_key("OFMarker", param_list);
+ if (!param) {
+ pr_err("iscsi_find_param_from_key() for"
+ " OFMarker failed\n");
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+ ISCSI_LOGIN_STATUS_NO_RESOURCES);
+ return -1;
+ }
+ param->state &= ~PSTATE_NEGOTIATE;
+
+ param = iscsi_find_param_from_key("OFMarkInt", param_list);
+ if (!param) {
+ pr_err("iscsi_find_param_from_key() for"
+ " IFMarker failed\n");
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+ ISCSI_LOGIN_STATUS_NO_RESOURCES);
+ return -1;
+ }
+ param->state &= ~PSTATE_NEGOTIATE;
+
+ param = iscsi_find_param_from_key("IFMarker", param_list);
+ if (!param) {
+ pr_err("iscsi_find_param_from_key() for"
+ " IFMarker failed\n");
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+ ISCSI_LOGIN_STATUS_NO_RESOURCES);
+ return -1;
+ }
+ param->state &= ~PSTATE_NEGOTIATE;
+
+ param = iscsi_find_param_from_key("IFMarkInt", param_list);
+ if (!param) {
+ pr_err("iscsi_find_param_from_key() for"
+ " IFMarker failed\n");
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+ ISCSI_LOGIN_STATUS_NO_RESOURCES);
+ return -1;
+ }
+ param->state &= ~PSTATE_NEGOTIATE;
+
+ return 0;
+}
+
+static int iscsi_login_non_zero_tsih_s1(
+ struct iscsi_conn *conn,
+ unsigned char *buf)
+{
+ struct iscsi_login_req *pdu = (struct iscsi_login_req *)buf;
+
+ iscsi_login_set_conn_values(NULL, conn, pdu->cid);
+ return 0;
+}
+
+/*
+ * Add a new connection to an existing session.
+ */
+static int iscsi_login_non_zero_tsih_s2(
+ struct iscsi_conn *conn,
+ unsigned char *buf)
+{
+ struct iscsi_portal_group *tpg = conn->tpg;
+ struct iscsi_session *sess = NULL, *sess_p = NULL;
+ struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
+ struct se_session *se_sess, *se_sess_tmp;
+ struct iscsi_login_req *pdu = (struct iscsi_login_req *)buf;
+
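+ /*
+ * Locate the existing session matching the login PDU's ISID+TSIH,
+ * skipping any session that is already falling back to ERL=0,
+ * logging out, or whose Time2Retain timer has expired.
+ */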
+ spin_lock_bh(&se_tpg->session_lock);
+ list_for_each_entry_safe(se_sess, se_sess_tmp, &se_tpg->tpg_sess_list,
+ sess_list) {
+
+ sess_p = (struct iscsi_session *)se_sess->fabric_sess_ptr;
+ if (atomic_read(&sess_p->session_fall_back_to_erl0) ||
+ atomic_read(&sess_p->session_logout) ||
+ (sess_p->time2retain_timer_flags & ISCSI_TF_EXPIRED))
+ continue;
+ if (!memcmp((const void *)sess_p->isid,
+ (const void *)pdu->isid, 6) &&
+ (sess_p->tsih == pdu->tsih)) {
+ iscsit_inc_session_usage_count(sess_p);
+ iscsit_stop_time2retain_timer(sess_p);
+ sess = sess_p;
+ break;
+ }
+ }
+ spin_unlock_bh(&se_tpg->session_lock);
+
+ /*
+ * If the Time2Retain handler has expired, the session is already gone.
+ */
+ if (!sess) {
+ pr_err("Initiator attempting to add a connection to"
+ " a non-existent session, rejecting iSCSI Login.\n");
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+ ISCSI_LOGIN_STATUS_NO_SESSION);
+ return -1;
+ }
+
+ /*
+ * Stop the Time2Retain timer if this is a failed session, we restart
+ * the timer if the login is not successful.
+ */
+ spin_lock_bh(&sess->conn_lock);
+ if (sess->session_state == TARG_SESS_STATE_FAILED)
+ atomic_set(&sess->session_continuation, 1);
+ spin_unlock_bh(&sess->conn_lock);
+
+ iscsi_login_set_conn_values(sess, conn, pdu->cid);
+
+ if (iscsi_copy_param_list(&conn->param_list,
+ ISCSI_TPG_C(conn)->param_list, 0) < 0) {
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+ ISCSI_LOGIN_STATUS_NO_RESOURCES);
+ return -1;
+ }
+
+ iscsi_set_keys_to_negotiate(0, conn->param_list);
+ /*
+ * Need to send TargetPortalGroupTag back in first login response
+ * on any iSCSI connection where the Initiator provides TargetName.
+ * See 5.3.1. Login Phase Start
+ *
+ * In our case, we have already located the struct iscsi_tiqn at this point.
+ */
+ memset(buf, 0, 32);
+ sprintf(buf, "TargetPortalGroupTag=%hu", ISCSI_TPG_S(sess)->tpgt);
+ if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) {
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+ ISCSI_LOGIN_STATUS_NO_RESOURCES);
+ return -1;
+ }
+
+ return iscsi_login_disable_FIM_keys(conn->param_list, conn);
+}
+
+int iscsi_login_post_auth_non_zero_tsih(
+ struct iscsi_conn *conn,
+ u16 cid,
+ u32 exp_statsn)
+{
+ struct iscsi_conn *conn_ptr = NULL;
+ struct iscsi_conn_recovery *cr = NULL;
+ struct iscsi_session *sess = conn->sess;
+
+ /*
+ * By following item 5 in the login table, if we have found
+ * an existing ISID and a valid/existing TSIH and an existing
+ * CID we do connection reinstatement. Currently we do not
+ * support it, so we send back a non-zero status class to the
+ * initiator and release the new connection.
+ */
+ conn_ptr = iscsit_get_conn_from_cid_rcfr(sess, cid);
+ if (conn_ptr) {
+ pr_err("Connection exists with CID %hu for %s,"
+ " performing connection reinstatement.\n",
+ conn_ptr->cid, sess->sess_ops->InitiatorName);
+
+ iscsit_connection_reinstatement_rcfr(conn_ptr);
+ iscsit_dec_conn_usage_count(conn_ptr);
+ }
+
+ /*
+ * Check for any connection recovery entries containing CID.
+ * We use the original ExpStatSN sent in the first login request
+ * to acknowledge commands for the failed connection.
+ *
+ * Also note that an explicit logout may have already been sent,
+ * but the response may not be sent due to additional connection
+ * loss.
+ */
+ if (sess->sess_ops->ErrorRecoveryLevel == 2) {
+ cr = iscsit_get_inactive_connection_recovery_entry(
+ sess, cid);
+ if (cr) {
+ pr_debug("Performing implicit logout"
+ " for connection recovery on CID: %hu\n",
+ conn->cid);
+ iscsit_discard_cr_cmds_by_expstatsn(cr, exp_statsn);
+ }
+ }
+
+ /*
+ * Else we follow item 4 from the login table in that we have
+ * found an existing ISID and a valid/existing TSIH and a new
+ * CID we go ahead and continue to add a new connection to the
+ * session.
+ */
+ pr_debug("Adding CID %hu to existing session for %s.\n",
+ cid, sess->sess_ops->InitiatorName);
+
+ if ((atomic_read(&sess->nconn) + 1) > sess->sess_ops->MaxConnections) {
+ pr_err("Adding additional connection to this session"
+ " would exceed MaxConnections %d, login failed.\n",
+ sess->sess_ops->MaxConnections);
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+ ISCSI_LOGIN_STATUS_ISID_ERROR);
+ return -1;
+ }
+
+ return 0;
+}
+
+static void iscsi_post_login_start_timers(struct iscsi_conn *conn)
+{
+ struct iscsi_session *sess = conn->sess;
+
+ if (!sess->sess_ops->SessionType)
+ iscsit_start_nopin_timer(conn);
+}
+
+static int iscsi_post_login_handler(
+ struct iscsi_np *np,
+ struct iscsi_conn *conn,
+ u8 zero_tsih)
+{
+ int stop_timer = 0;
+ struct iscsi_session *sess = conn->sess;
+ struct se_session *se_sess = sess->se_sess;
+ struct iscsi_portal_group *tpg = ISCSI_TPG_S(sess);
+ struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
+ struct iscsi_thread_set *ts;
+
+ iscsit_inc_conn_usage_count(conn);
+
+ iscsit_collect_login_stats(conn, ISCSI_STATUS_CLS_SUCCESS,
+ ISCSI_LOGIN_STATUS_ACCEPT);
+
+ pr_debug("Moving to TARG_CONN_STATE_LOGGED_IN.\n");
+ conn->conn_state = TARG_CONN_STATE_LOGGED_IN;
+
+ iscsi_set_connection_parameters(conn->conn_ops, conn->param_list);
+ iscsit_set_sync_and_steering_values(conn);
+ /*
+ * SCSI Initiator -> SCSI Target Port Mapping
+ */
+ ts = iscsi_get_thread_set();
+ if (!zero_tsih) {
+ iscsi_set_session_parameters(sess->sess_ops,
+ conn->param_list, 0);
+ iscsi_release_param_list(conn->param_list);
+ conn->param_list = NULL;
+
+ spin_lock_bh(&sess->conn_lock);
+ atomic_set(&sess->session_continuation, 0);
+ if (sess->session_state == TARG_SESS_STATE_FAILED) {
+ pr_debug("Moving to"
+ " TARG_SESS_STATE_LOGGED_IN.\n");
+ sess->session_state = TARG_SESS_STATE_LOGGED_IN;
+ stop_timer = 1;
+ }
+
+ pr_debug("iSCSI Login successful on CID: %hu from %s to"
+ " %s:%hu,%hu\n", conn->cid, conn->login_ip, np->np_ip,
+ np->np_port, tpg->tpgt);
+
+ list_add_tail(&conn->conn_list, &sess->sess_conn_list);
+ atomic_inc(&sess->nconn);
+ pr_debug("Incremented iSCSI Connection count to %hu"
+ " from node: %s\n", atomic_read(&sess->nconn),
+ sess->sess_ops->InitiatorName);
+ spin_unlock_bh(&sess->conn_lock);
+
+ iscsi_post_login_start_timers(conn);
+ iscsi_activate_thread_set(conn, ts);
+ /*
+ * Determine CPU mask to ensure connection's RX and TX kthreads
+ * are scheduled on the same CPU.
+ */
+ iscsit_thread_get_cpumask(conn);
+ conn->conn_rx_reset_cpumask = 1;
+ conn->conn_tx_reset_cpumask = 1;
+
+ iscsit_dec_conn_usage_count(conn);
+ if (stop_timer) {
+ spin_lock_bh(&se_tpg->session_lock);
+ iscsit_stop_time2retain_timer(sess);
+ spin_unlock_bh(&se_tpg->session_lock);
+ }
+ iscsit_dec_session_usage_count(sess);
+ return 0;
+ }
+
+ iscsi_set_session_parameters(sess->sess_ops, conn->param_list, 1);
+ iscsi_release_param_list(conn->param_list);
+ conn->param_list = NULL;
+
+ iscsit_determine_maxcmdsn(sess);
+
+ spin_lock_bh(&se_tpg->session_lock);
+ __transport_register_session(&sess->tpg->tpg_se_tpg,
+ se_sess->se_node_acl, se_sess, (void *)sess);
+ pr_debug("Moving to TARG_SESS_STATE_LOGGED_IN.\n");
+ sess->session_state = TARG_SESS_STATE_LOGGED_IN;
+
+ pr_debug("iSCSI Login successful on CID: %hu from %s to %s:%hu,%hu\n",
+ conn->cid, conn->login_ip, np->np_ip, np->np_port, tpg->tpgt);
+
+ spin_lock_bh(&sess->conn_lock);
+ list_add_tail(&conn->conn_list, &sess->sess_conn_list);
+ atomic_inc(&sess->nconn);
+ pr_debug("Incremented iSCSI Connection count to %hu from node:"
+ " %s\n", atomic_read(&sess->nconn),
+ sess->sess_ops->InitiatorName);
+ spin_unlock_bh(&sess->conn_lock);
+
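+ /* A SID of zero is never handed out; skip it when tpg->sid wraps. */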
+ sess->sid = tpg->sid++;
+ if (!sess->sid)
+ sess->sid = tpg->sid++;
+ pr_debug("Established iSCSI session from node: %s\n",
+ sess->sess_ops->InitiatorName);
+
+ tpg->nsessions++;
+ if (tpg->tpg_tiqn)
+ tpg->tpg_tiqn->tiqn_nsessions++;
+
+ pr_debug("Incremented number of active iSCSI sessions to %u on"
+ " iSCSI Target Portal Group: %hu\n", tpg->nsessions, tpg->tpgt);
+ spin_unlock_bh(&se_tpg->session_lock);
+
+ iscsi_post_login_start_timers(conn);
+ iscsi_activate_thread_set(conn, ts);
+ /*
+ * Determine CPU mask to ensure connection's RX and TX kthreads
+ * are scheduled on the same CPU.
+ */
+ iscsit_thread_get_cpumask(conn);
+ conn->conn_rx_reset_cpumask = 1;
+ conn->conn_tx_reset_cpumask = 1;
+
+ iscsit_dec_conn_usage_count(conn);
+
+ return 0;
+}
+
+static void iscsi_handle_login_thread_timeout(unsigned long data)
+{
+ struct iscsi_np *np = (struct iscsi_np *) data;
+
+ spin_lock_bh(&np->np_thread_lock);
+ pr_err("iSCSI Login timeout on Network Portal %s:%hu\n",
+ np->np_ip, np->np_port);
+
+ if (np->np_login_timer_flags & ISCSI_TF_STOP) {
+ spin_unlock_bh(&np->np_thread_lock);
+ return;
+ }
+
+ if (np->np_thread)
+ send_sig(SIGINT, np->np_thread, 1);
+
+ np->np_login_timer_flags &= ~ISCSI_TF_RUNNING;
+ spin_unlock_bh(&np->np_thread_lock);
+}
+
+static void iscsi_start_login_thread_timer(struct iscsi_np *np)
+{
+ /*
+ * This uses the TA_LOGIN_TIMEOUT constant because at this
+ * point we do not yet have access to ISCSI_TPG_ATTRIB(tpg)->login_timeout.
+ */
+ spin_lock_bh(&np->np_thread_lock);
+ init_timer(&np->np_login_timer);
+ np->np_login_timer.expires = (get_jiffies_64() + TA_LOGIN_TIMEOUT * HZ);
+ np->np_login_timer.data = (unsigned long)np;
+ np->np_login_timer.function = iscsi_handle_login_thread_timeout;
+ np->np_login_timer_flags &= ~ISCSI_TF_STOP;
+ np->np_login_timer_flags |= ISCSI_TF_RUNNING;
+ add_timer(&np->np_login_timer);
+
+ pr_debug("Added timeout timer to iSCSI login request for"
+ " %u seconds.\n", TA_LOGIN_TIMEOUT);
+ spin_unlock_bh(&np->np_thread_lock);
+}
+
+static void iscsi_stop_login_thread_timer(struct iscsi_np *np)
+{
+ spin_lock_bh(&np->np_thread_lock);
+ if (!(np->np_login_timer_flags & ISCSI_TF_RUNNING)) {
+ spin_unlock_bh(&np->np_thread_lock);
+ return;
+ }
+ np->np_login_timer_flags |= ISCSI_TF_STOP;
+ spin_unlock_bh(&np->np_thread_lock);
+
+ del_timer_sync(&np->np_login_timer);
+
+ spin_lock_bh(&np->np_thread_lock);
+ np->np_login_timer_flags &= ~ISCSI_TF_RUNNING;
+ spin_unlock_bh(&np->np_thread_lock);
+}
+
+int iscsi_target_setup_login_socket(
+ struct iscsi_np *np,
+ struct __kernel_sockaddr_storage *sockaddr)
+{
+ struct socket *sock;
+ int backlog = 5, ret, opt = 0, len;
+
+ switch (np->np_network_transport) {
+ case ISCSI_TCP:
+ np->np_ip_proto = IPPROTO_TCP;
+ np->np_sock_type = SOCK_STREAM;
+ break;
+ case ISCSI_SCTP_TCP:
+ np->np_ip_proto = IPPROTO_SCTP;
+ np->np_sock_type = SOCK_STREAM;
+ break;
+ case ISCSI_SCTP_UDP:
+ np->np_ip_proto = IPPROTO_SCTP;
+ np->np_sock_type = SOCK_SEQPACKET;
+ break;
+ case ISCSI_IWARP_TCP:
+ case ISCSI_IWARP_SCTP:
+ case ISCSI_INFINIBAND:
+ default:
+ pr_err("Unsupported network_transport: %d\n",
+ np->np_network_transport);
+ return -EINVAL;
+ }
+
+ ret = sock_create(sockaddr->ss_family, np->np_sock_type,
+ np->np_ip_proto, &sock);
+ if (ret < 0) {
+ pr_err("sock_create() failed.\n");
+ return ret;
+ }
+ np->np_socket = sock;
+ /*
+ * The SCTP stack needs struct socket->file.
+ */
+ if ((np->np_network_transport == ISCSI_SCTP_TCP) ||
+ (np->np_network_transport == ISCSI_SCTP_UDP)) {
+ if (!sock->file) {
+ sock->file = kzalloc(sizeof(struct file), GFP_KERNEL);
+ if (!sock->file) {
+ pr_err("Unable to allocate struct"
+ " file for SCTP\n");
+ ret = -ENOMEM;
+ goto fail;
+ }
+ np->np_flags |= NPF_SCTP_STRUCT_FILE;
+ }
+ }
+ /*
+ * Set up np->np_sockaddr from the sockaddr passed in from
+ * the iscsi_target_configfs.c code.
+ */
+ memcpy((void *)&np->np_sockaddr, (void *)sockaddr,
+ sizeof(struct __kernel_sockaddr_storage));
+
+ if (sockaddr->ss_family == AF_INET6)
+ len = sizeof(struct sockaddr_in6);
+ else
+ len = sizeof(struct sockaddr_in);
+ /*
+ * Set SO_REUSEADDR, and disable the Nagle algorithm with TCP_NODELAY.
+ */
+ opt = 1;
+ if (np->np_network_transport == ISCSI_TCP) {
+ ret = kernel_setsockopt(sock, IPPROTO_TCP, TCP_NODELAY,
+ (char *)&opt, sizeof(opt));
+ if (ret < 0) {
+ pr_err("kernel_setsockopt() for TCP_NODELAY"
+ " failed: %d\n", ret);
+ goto fail;
+ }
+ }
+
+ ret = kernel_setsockopt(sock, SOL_SOCKET, SO_REUSEADDR,
+ (char *)&opt, sizeof(opt));
+ if (ret < 0) {
+ pr_err("kernel_setsockopt() for SO_REUSEADDR"
+ " failed\n");
+ goto fail;
+ }
+
+ ret = kernel_bind(sock, (struct sockaddr *)&np->np_sockaddr, len);
+ if (ret < 0) {
+ pr_err("kernel_bind() failed: %d\n", ret);
+ goto fail;
+ }
+
+ ret = kernel_listen(sock, backlog);
+ if (ret != 0) {
+ pr_err("kernel_listen() failed: %d\n", ret);
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ np->np_socket = NULL;
+ if (sock) {
+ if (np->np_flags & NPF_SCTP_STRUCT_FILE) {
+ kfree(sock->file);
+ sock->file = NULL;
+ }
+
+ sock_release(sock);
+ }
+ return ret;
+}
+
+static int __iscsi_target_login_thread(struct iscsi_np *np)
+{
+ u8 buffer[ISCSI_HDR_LEN], iscsi_opcode, zero_tsih = 0;
+ int err, ret = 0, ip_proto, sock_type, set_sctp_conn_flag, stop;
+ struct iscsi_conn *conn = NULL;
+ struct iscsi_login *login;
+ struct iscsi_portal_group *tpg = NULL;
+ struct socket *new_sock, *sock;
+ struct kvec iov;
+ struct iscsi_login_req *pdu;
+ struct sockaddr_in sock_in;
+ struct sockaddr_in6 sock_in6;
+
+ flush_signals(current);
+ set_sctp_conn_flag = 0;
+ sock = np->np_socket;
+ ip_proto = np->np_ip_proto;
+ sock_type = np->np_sock_type;
+
+ spin_lock_bh(&np->np_thread_lock);
+ if (np->np_thread_state == ISCSI_NP_THREAD_RESET) {
+ np->np_thread_state = ISCSI_NP_THREAD_ACTIVE;
+ complete(&np->np_restart_comp);
+ } else {
+ np->np_thread_state = ISCSI_NP_THREAD_ACTIVE;
+ }
+ spin_unlock_bh(&np->np_thread_lock);
+
+ if (kernel_accept(sock, &new_sock, 0) < 0) {
+ spin_lock_bh(&np->np_thread_lock);
+ if (np->np_thread_state == ISCSI_NP_THREAD_RESET) {
+ spin_unlock_bh(&np->np_thread_lock);
+ complete(&np->np_restart_comp);
+ /* Get another socket */
+ return 1;
+ }
+ spin_unlock_bh(&np->np_thread_lock);
+ goto out;
+ }
+ /*
+ * The SCTP stack needs struct socket->file.
+ */
+ if ((np->np_network_transport == ISCSI_SCTP_TCP) ||
+ (np->np_network_transport == ISCSI_SCTP_UDP)) {
+ if (!new_sock->file) {
+ new_sock->file = kzalloc(
+ sizeof(struct file), GFP_KERNEL);
+ if (!new_sock->file) {
+ pr_err("Unable to allocate struct"
+ " file for SCTP\n");
+ sock_release(new_sock);
+ /* Get another socket */
+ return 1;
+ }
+ set_sctp_conn_flag = 1;
+ }
+ }
+
+ iscsi_start_login_thread_timer(np);
+
+ conn = kzalloc(sizeof(struct iscsi_conn), GFP_KERNEL);
+ if (!conn) {
+ pr_err("Could not allocate memory for"
+ " new connection\n");
+ if (set_sctp_conn_flag) {
+ kfree(new_sock->file);
+ new_sock->file = NULL;
+ }
+ sock_release(new_sock);
+ /* Get another socket */
+ return 1;
+ }
+
+ pr_debug("Moving to TARG_CONN_STATE_FREE.\n");
+ conn->conn_state = TARG_CONN_STATE_FREE;
+ conn->sock = new_sock;
+
+ if (set_sctp_conn_flag)
+ conn->conn_flags |= CONNFLAG_SCTP_STRUCT_FILE;
+
+ pr_debug("Moving to TARG_CONN_STATE_XPT_UP.\n");
+ conn->conn_state = TARG_CONN_STATE_XPT_UP;
+
+ /*
+ * Allocate conn->conn_ops early as a failure calling
+ * iscsit_tx_login_rsp() below will call tx_data().
+ */
+ conn->conn_ops = kzalloc(sizeof(struct iscsi_conn_ops), GFP_KERNEL);
+ if (!conn->conn_ops) {
+ pr_err("Unable to allocate memory for"
+ " struct iscsi_conn_ops.\n");
+ goto new_sess_out;
+ }
+ /*
+ * Perform the remaining iSCSI connection initialization items.
+ */
+ if (iscsi_login_init_conn(conn) < 0)
+ goto new_sess_out;
+
+ memset(buffer, 0, ISCSI_HDR_LEN);
+ memset(&iov, 0, sizeof(struct kvec));
+ iov.iov_base = buffer;
+ iov.iov_len = ISCSI_HDR_LEN;
+
+ if (rx_data(conn, &iov, 1, ISCSI_HDR_LEN) <= 0) {
+ pr_err("rx_data() returned an error.\n");
+ goto new_sess_out;
+ }
+
+ iscsi_opcode = (buffer[0] & ISCSI_OPCODE_MASK);
+ if (!(iscsi_opcode & ISCSI_OP_LOGIN)) {
+ pr_err("First opcode is not login request,"
+ " failing login request.\n");
+ goto new_sess_out;
+ }
+
+ pdu = (struct iscsi_login_req *) buffer;
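+ /* Convert the big-endian login header fields to CPU byte order in place. */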
+ pdu->cid = be16_to_cpu(pdu->cid);
+ pdu->tsih = be16_to_cpu(pdu->tsih);
+ pdu->itt = be32_to_cpu(pdu->itt);
+ pdu->cmdsn = be32_to_cpu(pdu->cmdsn);
+ pdu->exp_statsn = be32_to_cpu(pdu->exp_statsn);
+ /*
+ * Used by iscsit_tx_login_rsp() for Login Response PDUs
+ * when Status-Class != 0.
+ */
+ conn->login_itt = pdu->itt;
+
+ spin_lock_bh(&np->np_thread_lock);
+ if (np->np_thread_state != ISCSI_NP_THREAD_ACTIVE) {
+ spin_unlock_bh(&np->np_thread_lock);
+ pr_err("iSCSI Network Portal on %s:%hu currently not"
+ " active.\n", np->np_ip, np->np_port);
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+ ISCSI_LOGIN_STATUS_SVC_UNAVAILABLE);
+ goto new_sess_out;
+ }
+ spin_unlock_bh(&np->np_thread_lock);
+
+ if (np->np_sockaddr.ss_family == AF_INET6) {
+ memset(&sock_in6, 0, sizeof(struct sockaddr_in6));
+
+ if (conn->sock->ops->getname(conn->sock,
+ (struct sockaddr *)&sock_in6, &err, 1) < 0) {
+ pr_err("sock_ops->getname() failed.\n");
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+ ISCSI_LOGIN_STATUS_TARGET_ERROR);
+ goto new_sess_out;
+ }
+#if 0
+ if (!iscsi_ntop6((const unsigned char *)
+ &sock_in6.sin6_addr.in6_u,
+ (char *)&conn->ipv6_login_ip[0],
+ IPV6_ADDRESS_SPACE)) {
+ pr_err("iscsi_ntop6() failed\n");
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+ ISCSI_LOGIN_STATUS_TARGET_ERROR);
+ goto new_sess_out;
+ }
+#else
+ pr_debug("Skipping iscsi_ntop6()\n");
+#endif
+ } else {
+ memset(&sock_in, 0, sizeof(struct sockaddr_in));
+
+ if (conn->sock->ops->getname(conn->sock,
+ (struct sockaddr *)&sock_in, &err, 1) < 0) {
+ pr_err("sock_ops->getname() failed.\n");
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+ ISCSI_LOGIN_STATUS_TARGET_ERROR);
+ goto new_sess_out;
+ }
+ sprintf(conn->login_ip, "%pI4", &sock_in.sin_addr.s_addr);
+ conn->login_port = ntohs(sock_in.sin_port);
+ }
+
+ conn->network_transport = np->np_network_transport;
+
+ pr_debug("Received iSCSI login request from %s on %s Network"
+ " Portal %s:%hu\n", conn->login_ip,
+ (conn->network_transport == ISCSI_TCP) ? "TCP" : "SCTP",
+ np->np_ip, np->np_port);
+
+ pr_debug("Moving to TARG_CONN_STATE_IN_LOGIN.\n");
+ conn->conn_state = TARG_CONN_STATE_IN_LOGIN;
+
+ if (iscsi_login_check_initiator_version(conn, pdu->max_version,
+ pdu->min_version) < 0)
+ goto new_sess_out;
+
+ zero_tsih = (pdu->tsih == 0x0000);
+ if (zero_tsih) {
+ /*
+ * This is the leading connection of a new session.
+ * We wait until after authentication to check for
+ * session reinstatement.
+ */
+ if (iscsi_login_zero_tsih_s1(conn, buffer) < 0)
+ goto new_sess_out;
+ } else {
+ /*
+ * Add a new connection to an existing session.
+ * We check for a non-existent session in
+ * iscsi_login_non_zero_tsih_s2() below based
+ * on ISID/TSIH, but wait until after authentication
+ * to check for connection reinstatement, etc.
+ */
+ if (iscsi_login_non_zero_tsih_s1(conn, buffer) < 0)
+ goto new_sess_out;
+ }
+
+ /*
+ * This will process the first login request, and call
+ * iscsi_target_locate_portal(), and return a valid struct iscsi_login.
+ */
+ login = iscsi_target_init_negotiation(np, conn, buffer);
+ if (!login) {
+ tpg = conn->tpg;
+ goto new_sess_out;
+ }
+
+ tpg = conn->tpg;
+ if (!tpg) {
+ pr_err("Unable to locate struct iscsi_conn->tpg\n");
+ goto new_sess_out;
+ }
+
+ if (zero_tsih) {
+ if (iscsi_login_zero_tsih_s2(conn) < 0) {
+ iscsi_target_nego_release(login, conn);
+ goto new_sess_out;
+ }
+ } else {
+ if (iscsi_login_non_zero_tsih_s2(conn, buffer) < 0) {
+ iscsi_target_nego_release(login, conn);
+ goto old_sess_out;
+ }
+ }
+
+ if (iscsi_target_start_negotiation(login, conn) < 0)
+ goto new_sess_out;
+
+ if (!conn->sess) {
+ pr_err("struct iscsi_conn session pointer is NULL!\n");
+ goto new_sess_out;
+ }
+
+ iscsi_stop_login_thread_timer(np);
+
+ if (signal_pending(current))
+ goto new_sess_out;
+
+ ret = iscsi_post_login_handler(np, conn, zero_tsih);
+
+ if (ret < 0)
+ goto new_sess_out;
+
+ iscsit_deaccess_np(np, tpg);
+ tpg = NULL;
+ /* Get another socket */
+ return 1;
+
+new_sess_out:
+ pr_err("iSCSI Login negotiation failed.\n");
+ iscsit_collect_login_stats(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+ ISCSI_LOGIN_STATUS_INIT_ERR);
+ if (!zero_tsih || !conn->sess)
+ goto old_sess_out;
+ if (conn->sess->se_sess)
+ transport_free_session(conn->sess->se_sess);
+ if (conn->sess->session_index != 0) {
+ spin_lock_bh(&sess_idr_lock);
+ idr_remove(&sess_idr, conn->sess->session_index);
+ spin_unlock_bh(&sess_idr_lock);
+ }
+ if (conn->sess->sess_ops)
+ kfree(conn->sess->sess_ops);
+ if (conn->sess)
+ kfree(conn->sess);
+old_sess_out:
+ iscsi_stop_login_thread_timer(np);
+ /*
+ * If login negotiation fails, check if the Time2Retain timer
+ * needs to be restarted.
+ */
+ if (!zero_tsih && conn->sess) {
+ spin_lock_bh(&conn->sess->conn_lock);
+ if (conn->sess->session_state == TARG_SESS_STATE_FAILED) {
+ struct se_portal_group *se_tpg =
+ &ISCSI_TPG_C(conn)->tpg_se_tpg;
+
+ atomic_set(&conn->sess->session_continuation, 0);
+ spin_unlock_bh(&conn->sess->conn_lock);
+ spin_lock_bh(&se_tpg->session_lock);
+ iscsit_start_time2retain_handler(conn->sess);
+ spin_unlock_bh(&se_tpg->session_lock);
+ } else
+ spin_unlock_bh(&conn->sess->conn_lock);
+ iscsit_dec_session_usage_count(conn->sess);
+ }
+
+ if (!IS_ERR(conn->conn_rx_hash.tfm))
+ crypto_free_hash(conn->conn_rx_hash.tfm);
+ if (!IS_ERR(conn->conn_tx_hash.tfm))
+ crypto_free_hash(conn->conn_tx_hash.tfm);
+
+ if (conn->conn_cpumask)
+ free_cpumask_var(conn->conn_cpumask);
+
+ kfree(conn->conn_ops);
+
+ if (conn->param_list) {
+ iscsi_release_param_list(conn->param_list);
+ conn->param_list = NULL;
+ }
+ if (conn->sock) {
+ if (conn->conn_flags & CONNFLAG_SCTP_STRUCT_FILE) {
+ kfree(conn->sock->file);
+ conn->sock->file = NULL;
+ }
+ sock_release(conn->sock);
+ }
+ kfree(conn);
+
+ if (tpg) {
+ iscsit_deaccess_np(np, tpg);
+ tpg = NULL;
+ }
+
+out:
+ stop = kthread_should_stop();
+ if (!stop && signal_pending(current)) {
+ spin_lock_bh(&np->np_thread_lock);
+ stop = (np->np_thread_state == ISCSI_NP_THREAD_SHUTDOWN);
+ spin_unlock_bh(&np->np_thread_lock);
+ }
+ /* Wait for another socket.. */
+ if (!stop)
+ return 1;
+
+ iscsi_stop_login_thread_timer(np);
+ spin_lock_bh(&np->np_thread_lock);
+ np->np_thread_state = ISCSI_NP_THREAD_EXIT;
+ spin_unlock_bh(&np->np_thread_lock);
+ return 0;
+}
+
+int iscsi_target_login_thread(void *arg)
+{
+ struct iscsi_np *np = (struct iscsi_np *)arg;
+ int ret;
+
+ allow_signal(SIGINT);
+
+ while (!kthread_should_stop()) {
+ ret = __iscsi_target_login_thread(np);
+ /*
+ * We break and exit here unless another sock_accept() call
+ * is expected.
+ */
+ if (ret != 1)
+ break;
+ }
+
+ return 0;
+}
diff --git a/drivers/target/iscsi/iscsi_target_login.h b/drivers/target/iscsi/iscsi_target_login.h
new file mode 100644
index 00000000000..091dcae2532
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_login.h
@@ -0,0 +1,12 @@
+#ifndef ISCSI_TARGET_LOGIN_H
+#define ISCSI_TARGET_LOGIN_H
+
+extern int iscsi_login_setup_crypto(struct iscsi_conn *);
+extern int iscsi_check_for_session_reinstatement(struct iscsi_conn *);
+extern int iscsi_login_post_auth_non_zero_tsih(struct iscsi_conn *, u16, u32);
+extern int iscsi_target_setup_login_socket(struct iscsi_np *,
+ struct __kernel_sockaddr_storage *);
+extern int iscsi_target_login_thread(void *);
+extern int iscsi_login_disable_FIM_keys(struct iscsi_param_list *, struct iscsi_conn *);
+
+#endif /*** ISCSI_TARGET_LOGIN_H ***/
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
new file mode 100644
index 00000000000..4d087ac1106
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -0,0 +1,1067 @@
+/*******************************************************************************
+ * This file contains main functions related to iSCSI Parameter negotiation.
+ *
+ * © Copyright 2007-2011 RisingTide Systems LLC.
+ *
+ * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ *
+ * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ ******************************************************************************/
+
+#include <linux/ctype.h>
+#include <scsi/iscsi_proto.h>
+#include <target/target_core_base.h>
+#include <target/target_core_tpg.h>
+
+#include "iscsi_target_core.h"
+#include "iscsi_target_parameters.h"
+#include "iscsi_target_login.h"
+#include "iscsi_target_nego.h"
+#include "iscsi_target_tpg.h"
+#include "iscsi_target_util.h"
+#include "iscsi_target.h"
+#include "iscsi_target_auth.h"
+
+#define MAX_LOGIN_PDUS 7
+#define TEXT_LEN 4096
+
+void convert_null_to_semi(char *buf, int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++)
+ if (buf[i] == '\0')
+ buf[i] = ';';
+}
+
+int strlen_semi(char *buf)
+{
+ int i = 0;
+
+ while (buf[i] != '\0') {
+ if (buf[i] == ';')
+ return i;
+ i++;
+ }
+
+ return -1;
+}
+
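+/*
+ * Extract the value of a "key=value" pair from a login text buffer that
+ * has first been run through convert_null_to_semi().  A minimal sketch,
+ * using a hypothetical buffer:
+ *
+ *	char buf[] = "CHAP_A=5;CHAP_I=0x2a;";
+ *	char id[4];
+ *	unsigned char type;
+ *
+ *	if (extract_param(buf, "CHAP_I", sizeof(id), id, &type) < 0)
+ *		return -1;
+ *
+ * On success id holds "2a" and type == HEX; the leading "0x" is consumed.
+ */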
+int extract_param(
+ const char *in_buf,
+ const char *pattern,
+ unsigned int max_length,
+ char *out_buf,
+ unsigned char *type)
+{
+ char *ptr;
+ int len;
+
+ if (!in_buf || !pattern || !out_buf || !type)
+ return -1;
+
+ ptr = strstr(in_buf, pattern);
+ if (!ptr)
+ return -1;
+
+ ptr = strstr(ptr, "=");
+ if (!ptr)
+ return -1;
+
+ ptr += 1;
+ if (*ptr == '0' && (*(ptr+1) == 'x' || *(ptr+1) == 'X')) {
+ ptr += 2; /* skip 0x */
+ *type = HEX;
+ } else
+ *type = DECIMAL;
+
+ len = strlen_semi(ptr);
+ if (len < 0)
+ return -1;
+
+ if (len > max_length) {
+ pr_err("Length of input: %d exeeds max_length:"
+ " %d\n", len, max_length);
+ return -1;
+ }
+ memcpy(out_buf, ptr, len);
+ out_buf[len] = '\0';
+
+ return 0;
+}
+
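+/*
+ * Dispatch the negotiated AuthMethod.  Return values follow the switch
+ * in iscsi_target_do_authentication(): 0 to continue the exchange,
+ * 1 when authentication has completed, 2 on authentication failure,
+ * and -1 on an internal error.
+ */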
+static u32 iscsi_handle_authentication(
+ struct iscsi_conn *conn,
+ char *in_buf,
+ char *out_buf,
+ int in_length,
+ int *out_length,
+ unsigned char *authtype)
+{
+ struct iscsi_session *sess = conn->sess;
+ struct iscsi_node_auth *auth;
+ struct iscsi_node_acl *iscsi_nacl;
+ struct se_node_acl *se_nacl;
+
+ if (!sess->sess_ops->SessionType) {
+ /*
+ * For SessionType=Normal
+ */
+ se_nacl = conn->sess->se_sess->se_node_acl;
+ if (!se_nacl) {
+ pr_err("Unable to locate struct se_node_acl for"
+ " CHAP auth\n");
+ return -1;
+ }
+ iscsi_nacl = container_of(se_nacl, struct iscsi_node_acl,
+ se_node_acl);
+ if (!iscsi_nacl) {
+ pr_err("Unable to locate struct iscsi_node_acl for"
+ " CHAP auth\n");
+ return -1;
+ }
+
+ auth = ISCSI_NODE_AUTH(iscsi_nacl);
+ } else {
+ /*
+ * For SessionType=Discovery
+ */
+ auth = &iscsit_global->discovery_acl.node_auth;
+ }
+
+ if (strstr("CHAP", authtype))
+ strcpy(conn->sess->auth_type, "CHAP");
+ else
+ strcpy(conn->sess->auth_type, NONE);
+
+ if (strstr("None", authtype))
+ return 1;
+#ifdef CANSRP
+ else if (strstr("SRP", authtype))
+ return srp_main_loop(conn, auth, in_buf, out_buf,
+ &in_length, out_length);
+#endif
+ else if (strstr("CHAP", authtype))
+ return chap_main_loop(conn, auth, in_buf, out_buf,
+ &in_length, out_length);
+ else if (strstr("SPKM1", authtype))
+ return 2;
+ else if (strstr("SPKM2", authtype))
+ return 2;
+ else if (strstr("KRB5", authtype))
+ return 2;
+ else
+ return 2;
+}
+
+static void iscsi_remove_failed_auth_entry(struct iscsi_conn *conn)
+{
+ kfree(conn->auth_protocol);
+}
+
+static int iscsi_target_check_login_request(
+ struct iscsi_conn *conn,
+ struct iscsi_login *login)
+{
+ int req_csg, req_nsg, rsp_csg, rsp_nsg;
+ u32 payload_length;
+ struct iscsi_login_req *login_req;
+ struct iscsi_login_rsp *login_rsp;
+
+ login_req = (struct iscsi_login_req *) login->req;
+ login_rsp = (struct iscsi_login_rsp *) login->rsp;
+ payload_length = ntoh24(login_req->dlength);
+
+ switch (login_req->opcode & ISCSI_OPCODE_MASK) {
+ case ISCSI_OP_LOGIN:
+ break;
+ default:
+ pr_err("Received unknown opcode 0x%02x.\n",
+ login_req->opcode & ISCSI_OPCODE_MASK);
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+ ISCSI_LOGIN_STATUS_INIT_ERR);
+ return -1;
+ }
+
+ if ((login_req->flags & ISCSI_FLAG_LOGIN_CONTINUE) &&
+ (login_req->flags & ISCSI_FLAG_LOGIN_TRANSIT)) {
+ pr_err("Login request has both ISCSI_FLAG_LOGIN_CONTINUE"
+ " and ISCSI_FLAG_LOGIN_TRANSIT set, protocol error.\n");
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+ ISCSI_LOGIN_STATUS_INIT_ERR);
+ return -1;
+ }
+
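+ /*
+ * CSG occupies bits 2-3 and NSG bits 0-1 of the login flags byte,
+ * hence the shift by two for the current stage.
+ */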
+ req_csg = (login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK) >> 2;
+ rsp_csg = (login_rsp->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK) >> 2;
+ req_nsg = (login_req->flags & ISCSI_FLAG_LOGIN_NEXT_STAGE_MASK);
+ rsp_nsg = (login_rsp->flags & ISCSI_FLAG_LOGIN_NEXT_STAGE_MASK);
+
+ if (req_csg != login->current_stage) {
+ pr_err("Initiator unexpectedly changed login stage"
+ " from %d to %d, login failed.\n", login->current_stage,
+ req_csg);
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+ ISCSI_LOGIN_STATUS_INIT_ERR);
+ return -1;
+ }
+
+ if ((req_nsg == 2) || (req_csg >= 2) ||
+ ((login_req->flags & ISCSI_FLAG_LOGIN_TRANSIT) &&
+ (req_nsg <= req_csg))) {
+ pr_err("Illegal login_req->flags Combination, CSG: %d,"
+ " NSG: %d, ISCSI_FLAG_LOGIN_TRANSIT: %d.\n", req_csg,
+ req_nsg, (login_req->flags & ISCSI_FLAG_LOGIN_TRANSIT));
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+ ISCSI_LOGIN_STATUS_INIT_ERR);
+ return -1;
+ }
+
+ if ((login_req->max_version != login->version_max) ||
+ (login_req->min_version != login->version_min)) {
+ pr_err("Login request changed Version Max/Nin"
+ " unexpectedly to 0x%02x/0x%02x, protocol error\n",
+ login_req->max_version, login_req->min_version);
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+ ISCSI_LOGIN_STATUS_INIT_ERR);
+ return -1;
+ }
+
+ if (memcmp(login_req->isid, login->isid, 6) != 0) {
+ pr_err("Login request changed ISID unexpectedly,"
+ " protocol error.\n");
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+ ISCSI_LOGIN_STATUS_INIT_ERR);
+ return -1;
+ }
+
+ if (login_req->itt != login->init_task_tag) {
+ pr_err("Login request changed ITT unexpectedly to"
+ " 0x%08x, protocol error.\n", login_req->itt);
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+ ISCSI_LOGIN_STATUS_INIT_ERR);
+ return -1;
+ }
+
+ if (payload_length > MAX_KEY_VALUE_PAIRS) {
+ pr_err("Login request payload exceeds default"
+ " MaxRecvDataSegmentLength: %u, protocol error.\n",
+ MAX_KEY_VALUE_PAIRS);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int iscsi_target_check_first_request(
+ struct iscsi_conn *conn,
+ struct iscsi_login *login)
+{
+ struct iscsi_param *param = NULL;
+ struct se_node_acl *se_nacl;
+
+ login->first_request = 0;
+
+ list_for_each_entry(param, &conn->param_list->param_list, p_list) {
+ if (!strncmp(param->name, SESSIONTYPE, 11)) {
+ if (!IS_PSTATE_ACCEPTOR(param)) {
+ pr_err("SessionType key not received"
+ " in first login request.\n");
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+ ISCSI_LOGIN_STATUS_MISSING_FIELDS);
+ return -1;
+ }
+ if (!strncmp(param->value, DISCOVERY, 9))
+ return 0;
+ }
+
+ if (!strncmp(param->name, INITIATORNAME, 13)) {
+ if (!IS_PSTATE_ACCEPTOR(param)) {
+ if (!login->leading_connection)
+ continue;
+
+ pr_err("InitiatorName key not received"
+ " in first login request.\n");
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+ ISCSI_LOGIN_STATUS_MISSING_FIELDS);
+ return -1;
+ }
+
+ /*
+ * For non-leading connections, double check that the
+ * received InitiatorName matches the existing session's
+ * struct iscsi_node_acl.
+ */
+ if (!login->leading_connection) {
+ se_nacl = conn->sess->se_sess->se_node_acl;
+ if (!se_nacl) {
+ pr_err("Unable to locate"
+ " struct se_node_acl\n");
+ iscsit_tx_login_rsp(conn,
+ ISCSI_STATUS_CLS_INITIATOR_ERR,
+ ISCSI_LOGIN_STATUS_TGT_NOT_FOUND);
+ return -1;
+ }
+
+ if (strcmp(param->value,
+ se_nacl->initiatorname)) {
+ pr_err("Incorrect"
+ " InitiatorName: %s for this"
+ " iSCSI Initiator Node.\n",
+ param->value);
+ iscsit_tx_login_rsp(conn,
+ ISCSI_STATUS_CLS_INITIATOR_ERR,
+ ISCSI_LOGIN_STATUS_TGT_NOT_FOUND);
+ return -1;
+ }
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int iscsi_target_do_tx_login_io(struct iscsi_conn *conn, struct iscsi_login *login)
+{
+ u32 padding = 0;
+ struct iscsi_session *sess = conn->sess;
+ struct iscsi_login_rsp *login_rsp;
+
+ login_rsp = (struct iscsi_login_rsp *) login->rsp;
+
+ login_rsp->opcode = ISCSI_OP_LOGIN_RSP;
+ hton24(login_rsp->dlength, login->rsp_length);
+ memcpy(login_rsp->isid, login->isid, 6);
+ login_rsp->tsih = cpu_to_be16(login->tsih);
+ login_rsp->itt = cpu_to_be32(login->init_task_tag);
+ login_rsp->statsn = cpu_to_be32(conn->stat_sn++);
+ login_rsp->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
+ login_rsp->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
+
+ pr_debug("Sending Login Response, Flags: 0x%02x, ITT: 0x%08x,"
+ " ExpCmdSN; 0x%08x, MaxCmdSN: 0x%08x, StatSN: 0x%08x, Length:"
+ " %u\n", login_rsp->flags, ntohl(login_rsp->itt),
+ ntohl(login_rsp->exp_cmdsn), ntohl(login_rsp->max_cmdsn),
+ ntohl(login_rsp->statsn), login->rsp_length);
+
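+ /*
+ * iSCSI data segments are padded out to a 4-byte boundary;
+ * (-len) & 3 yields the number of pad bytes required.
+ */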
+ padding = ((-login->rsp_length) & 3);
+
+ if (iscsi_login_tx_data(
+ conn,
+ login->rsp,
+ login->rsp_buf,
+ login->rsp_length + padding) < 0)
+ return -1;
+
+ login->rsp_length = 0;
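+ /* Restore CPU byte order so the header can be checked on the next pass. */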
+ login_rsp->tsih = be16_to_cpu(login_rsp->tsih);
+ login_rsp->itt = be32_to_cpu(login_rsp->itt);
+ login_rsp->statsn = be32_to_cpu(login_rsp->statsn);
+ mutex_lock(&sess->cmdsn_mutex);
+ login_rsp->exp_cmdsn = be32_to_cpu(sess->exp_cmd_sn);
+ login_rsp->max_cmdsn = be32_to_cpu(sess->max_cmd_sn);
+ mutex_unlock(&sess->cmdsn_mutex);
+
+ return 0;
+}
+
+static int iscsi_target_do_rx_login_io(struct iscsi_conn *conn, struct iscsi_login *login)
+{
+ u32 padding = 0, payload_length;
+ struct iscsi_login_req *login_req;
+
+ if (iscsi_login_rx_data(conn, login->req, ISCSI_HDR_LEN) < 0)
+ return -1;
+
+ login_req = (struct iscsi_login_req *) login->req;
+ payload_length = ntoh24(login_req->dlength);
+ login_req->tsih = be16_to_cpu(login_req->tsih);
+ login_req->itt = be32_to_cpu(login_req->itt);
+ login_req->cid = be16_to_cpu(login_req->cid);
+ login_req->cmdsn = be32_to_cpu(login_req->cmdsn);
+ login_req->exp_statsn = be32_to_cpu(login_req->exp_statsn);
+
+ pr_debug("Got Login Command, Flags 0x%02x, ITT: 0x%08x,"
+ " CmdSN: 0x%08x, ExpStatSN: 0x%08x, CID: %hu, Length: %u\n",
+ login_req->flags, login_req->itt, login_req->cmdsn,
+ login_req->exp_statsn, login_req->cid, payload_length);
+
+ if (iscsi_target_check_login_request(conn, login) < 0)
+ return -1;
+
+ padding = ((-payload_length) & 3);
+ memset(login->req_buf, 0, MAX_KEY_VALUE_PAIRS);
+
+ if (iscsi_login_rx_data(
+ conn,
+ login->req_buf,
+ payload_length + padding) < 0)
+ return -1;
+
+ return 0;
+}
+
+static int iscsi_target_do_login_io(struct iscsi_conn *conn, struct iscsi_login *login)
+{
+ if (iscsi_target_do_tx_login_io(conn, login) < 0)
+ return -1;
+
+ if (iscsi_target_do_rx_login_io(conn, login) < 0)
+ return -1;
+
+ return 0;
+}
+
+static int iscsi_target_get_initial_payload(
+ struct iscsi_conn *conn,
+ struct iscsi_login *login)
+{
+ u32 padding = 0, payload_length;
+ struct iscsi_login_req *login_req;
+
+ login_req = (struct iscsi_login_req *) login->req;
+ payload_length = ntoh24(login_req->dlength);
+
+ pr_debug("Got Login Command, Flags 0x%02x, ITT: 0x%08x,"
+ " CmdSN: 0x%08x, ExpStatSN: 0x%08x, Length: %u\n",
+ login_req->flags, login_req->itt, login_req->cmdsn,
+ login_req->exp_statsn, payload_length);
+
+ if (iscsi_target_check_login_request(conn, login) < 0)
+ return -1;
+
+ padding = ((-payload_length) & 3);
+
+ if (iscsi_login_rx_data(
+ conn,
+ login->req_buf,
+ payload_length + padding) < 0)
+ return -1;
+
+ return 0;
+}
+
+/*
+ * NOTE: We check for existing sessions or connections AFTER the initiator
+ * has been successfully authenticated in order to protect against faked
+ * ISID/TSIH combinations.
+ */
+static int iscsi_target_check_for_existing_instances(
+ struct iscsi_conn *conn,
+ struct iscsi_login *login)
+{
+ if (login->checked_for_existing)
+ return 0;
+
+ login->checked_for_existing = 1;
+
+ if (!login->tsih)
+ return iscsi_check_for_session_reinstatement(conn);
+ else
+ return iscsi_login_post_auth_non_zero_tsih(conn, login->cid,
+ login->initial_exp_statsn);
+}
+
+static int iscsi_target_do_authentication(
+ struct iscsi_conn *conn,
+ struct iscsi_login *login)
+{
+ int authret;
+ u32 payload_length;
+ struct iscsi_param *param;
+ struct iscsi_login_req *login_req;
+ struct iscsi_login_rsp *login_rsp;
+
+ login_req = (struct iscsi_login_req *) login->req;
+ login_rsp = (struct iscsi_login_rsp *) login->rsp;
+ payload_length = ntoh24(login_req->dlength);
+
+ param = iscsi_find_param_from_key(AUTHMETHOD, conn->param_list);
+ if (!param)
+ return -1;
+
+ authret = iscsi_handle_authentication(
+ conn,
+ login->req_buf,
+ login->rsp_buf,
+ payload_length,
+ &login->rsp_length,
+ param->value);
+ switch (authret) {
+ case 0:
+ pr_debug("Received OK response"
+ " from LIO Authentication, continuing.\n");
+ break;
+ case 1:
+ pr_debug("iSCSI security negotiation"
+ " completed sucessfully.\n");
+ login->auth_complete = 1;
+ if ((login_req->flags & ISCSI_FLAG_LOGIN_NEXT_STAGE1) &&
+ (login_req->flags & ISCSI_FLAG_LOGIN_TRANSIT)) {
+ login_rsp->flags |= (ISCSI_FLAG_LOGIN_NEXT_STAGE1 |
+ ISCSI_FLAG_LOGIN_TRANSIT);
+ login->current_stage = 1;
+ }
+ return iscsi_target_check_for_existing_instances(
+ conn, login);
+ case 2:
+ pr_err("Security negotiation"
+ " failed.\n");
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+ ISCSI_LOGIN_STATUS_AUTH_FAILED);
+ return -1;
+ default:
+ pr_err("Received unknown error %d from LIO"
+ " Authentication\n", authret);
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+ ISCSI_LOGIN_STATUS_TARGET_ERROR);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int iscsi_target_handle_csg_zero(
+ struct iscsi_conn *conn,
+ struct iscsi_login *login)
+{
+ int ret;
+ u32 payload_length;
+ struct iscsi_param *param;
+ struct iscsi_login_req *login_req;
+ struct iscsi_login_rsp *login_rsp;
+
+ login_req = (struct iscsi_login_req *) login->req;
+ login_rsp = (struct iscsi_login_rsp *) login->rsp;
+ payload_length = ntoh24(login_req->dlength);
+
+ param = iscsi_find_param_from_key(AUTHMETHOD, conn->param_list);
+ if (!param)
+ return -1;
+
+ ret = iscsi_decode_text_input(
+ PHASE_SECURITY|PHASE_DECLARATIVE,
+ SENDER_INITIATOR|SENDER_RECEIVER,
+ login->req_buf,
+ payload_length,
+ conn->param_list);
+ if (ret < 0)
+ return -1;
+
+ if (ret > 0) {
+ if (login->auth_complete) {
+ pr_err("Initiator has already been"
+ " successfully authenticated, but is still"
+ " sending %s keys.\n", param->value);
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+ ISCSI_LOGIN_STATUS_INIT_ERR);
+ return -1;
+ }
+
+ goto do_auth;
+ }
+
+ if (login->first_request)
+ if (iscsi_target_check_first_request(conn, login) < 0)
+ return -1;
+
+ ret = iscsi_encode_text_output(
+ PHASE_SECURITY|PHASE_DECLARATIVE,
+ SENDER_TARGET,
+ login->rsp_buf,
+ &login->rsp_length,
+ conn->param_list);
+ if (ret < 0)
+ return -1;
+
+ if (!iscsi_check_negotiated_keys(conn->param_list)) {
+ if (ISCSI_TPG_ATTRIB(ISCSI_TPG_C(conn))->authentication &&
+ !strncmp(param->value, NONE, 4)) {
+ pr_err("Initiator sent AuthMethod=None but"
+ " Target is enforcing iSCSI Authentication,"
+ " login failed.\n");
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+ ISCSI_LOGIN_STATUS_AUTH_FAILED);
+ return -1;
+ }
+
+ if (ISCSI_TPG_ATTRIB(ISCSI_TPG_C(conn))->authentication &&
+ !login->auth_complete)
+ return 0;
+
+ if (strncmp(param->value, NONE, 4) && !login->auth_complete)
+ return 0;
+
+ if ((login_req->flags & ISCSI_FLAG_LOGIN_NEXT_STAGE1) &&
+ (login_req->flags & ISCSI_FLAG_LOGIN_TRANSIT)) {
+ login_rsp->flags |= ISCSI_FLAG_LOGIN_NEXT_STAGE1 |
+ ISCSI_FLAG_LOGIN_TRANSIT;
+ login->current_stage = 1;
+ }
+ }
+
+ return 0;
+do_auth:
+ return iscsi_target_do_authentication(conn, login);
+}
+
+static int iscsi_target_handle_csg_one(struct iscsi_conn *conn, struct iscsi_login *login)
+{
+ int ret;
+ u32 payload_length;
+ struct iscsi_login_req *login_req;
+ struct iscsi_login_rsp *login_rsp;
+
+ login_req = (struct iscsi_login_req *) login->req;
+ login_rsp = (struct iscsi_login_rsp *) login->rsp;
+ payload_length = ntoh24(login_req->dlength);
+
+ ret = iscsi_decode_text_input(
+ PHASE_OPERATIONAL|PHASE_DECLARATIVE,
+ SENDER_INITIATOR|SENDER_RECEIVER,
+ login->req_buf,
+ payload_length,
+ conn->param_list);
+ if (ret < 0)
+ return -1;
+
+ if (login->first_request)
+ if (iscsi_target_check_first_request(conn, login) < 0)
+ return -1;
+
+ if (iscsi_target_check_for_existing_instances(conn, login) < 0)
+ return -1;
+
+ ret = iscsi_encode_text_output(
+ PHASE_OPERATIONAL|PHASE_DECLARATIVE,
+ SENDER_TARGET,
+ login->rsp_buf,
+ &login->rsp_length,
+ conn->param_list);
+ if (ret < 0)
+ return -1;
+
+ if (!login->auth_complete &&
+ ISCSI_TPG_ATTRIB(ISCSI_TPG_C(conn))->authentication) {
+ pr_err("Initiator is requesting CSG: 1, has not been"
+ " successfully authenticated, and the Target is"
+ " enforcing iSCSI Authentication, login failed.\n");
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+ ISCSI_LOGIN_STATUS_AUTH_FAILED);
+ return -1;
+ }
+
+ if (!iscsi_check_negotiated_keys(conn->param_list))
+ if ((login_req->flags & ISCSI_FLAG_LOGIN_NEXT_STAGE3) &&
+ (login_req->flags & ISCSI_FLAG_LOGIN_TRANSIT))
+ login_rsp->flags |= ISCSI_FLAG_LOGIN_NEXT_STAGE3 |
+ ISCSI_FLAG_LOGIN_TRANSIT;
+
+ return 0;
+}
+
+static int iscsi_target_do_login(struct iscsi_conn *conn, struct iscsi_login *login)
+{
+ int pdu_count = 0;
+ struct iscsi_login_req *login_req;
+ struct iscsi_login_rsp *login_rsp;
+
+ login_req = (struct iscsi_login_req *) login->req;
+ login_rsp = (struct iscsi_login_rsp *) login->rsp;
+
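+ /*
+ * Drive the login phase state machine: CSG 0 is security
+ * negotiation, CSG 1 operational parameter negotiation.  The loop
+ * exits when the initiator transits to Full Feature Phase, or
+ * errors out after MAX_LOGIN_PDUS exchanges.
+ */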
+ while (1) {
+ if (++pdu_count > MAX_LOGIN_PDUS) {
+ pr_err("MAX_LOGIN_PDUS count reached.\n");
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+ ISCSI_LOGIN_STATUS_TARGET_ERROR);
+ return -1;
+ }
+
+ switch ((login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK) >> 2) {
+ case 0:
+ login_rsp->flags |= (0 & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK);
+ if (iscsi_target_handle_csg_zero(conn, login) < 0)
+ return -1;
+ break;
+ case 1:
+ login_rsp->flags |= ISCSI_FLAG_LOGIN_CURRENT_STAGE1;
+ if (iscsi_target_handle_csg_one(conn, login) < 0)
+ return -1;
+ if (login_rsp->flags & ISCSI_FLAG_LOGIN_TRANSIT) {
+ login->tsih = conn->sess->tsih;
+ if (iscsi_target_do_tx_login_io(conn,
+ login) < 0)
+ return -1;
+ return 0;
+ }
+ break;
+ default:
+ pr_err("Illegal CSG: %d received from"
+ " Initiator, protocol error.\n",
+ (login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK)
+ >> 2);
+ break;
+ }
+
+ if (iscsi_target_do_login_io(conn, login) < 0)
+ return -1;
+
+ if (login_rsp->flags & ISCSI_FLAG_LOGIN_TRANSIT) {
+ login_rsp->flags &= ~ISCSI_FLAG_LOGIN_TRANSIT;
+ login_rsp->flags &= ~ISCSI_FLAG_LOGIN_NEXT_STAGE_MASK;
+ }
+ }
+
+ return 0;
+}
+
+static void iscsi_initiatorname_tolower(
+ char *param_buf)
+{
+ char *c;
+ u32 iqn_size = strlen(param_buf), i;
+
+ for (i = 0; i < iqn_size; i++) {
+ c = (char *)&param_buf[i];
+ if (!isupper(*c))
+ continue;
+
+ *c = tolower(*c);
+ }
+}
+
+/*
+ * Processes the first Login Request.
+ */
+static int iscsi_target_locate_portal(
+ struct iscsi_np *np,
+ struct iscsi_conn *conn,
+ struct iscsi_login *login)
+{
+ char *i_buf = NULL, *s_buf = NULL, *t_buf = NULL;
+ char *tmpbuf, *start = NULL, *end = NULL, *key, *value;
+ struct iscsi_session *sess = conn->sess;
+ struct iscsi_tiqn *tiqn;
+ struct iscsi_login_req *login_req;
+ struct iscsi_targ_login_rsp *login_rsp;
+ u32 payload_length;
+ int sessiontype = 0, ret = 0;
+
+ login_req = (struct iscsi_login_req *) login->req;
+ login_rsp = (struct iscsi_targ_login_rsp *) login->rsp;
+ payload_length = ntoh24(login_req->dlength);
+
+ login->first_request = 1;
+ login->leading_connection = (!login_req->tsih) ? 1 : 0;
+ login->current_stage =
+ (login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK) >> 2;
+ login->version_min = login_req->min_version;
+ login->version_max = login_req->max_version;
+ memcpy(login->isid, login_req->isid, 6);
+ login->cmd_sn = login_req->cmdsn;
+ login->init_task_tag = login_req->itt;
+ login->initial_exp_statsn = login_req->exp_statsn;
+ login->cid = login_req->cid;
+ login->tsih = login_req->tsih;
+
+ if (iscsi_target_get_initial_payload(conn, login) < 0)
+ return -1;
+
+ tmpbuf = kzalloc(payload_length + 1, GFP_KERNEL);
+ if (!tmpbuf) {
+ pr_err("Unable to allocate memory for tmpbuf.\n");
+ return -1;
+ }
+
+ memcpy(tmpbuf, login->req_buf, payload_length);
+ tmpbuf[payload_length] = '\0';
+ start = tmpbuf;
+ end = (start + payload_length);
+
+ /*
+ * Locate the initial keys expected from the Initiator node in
+ * the first login request in order to progress with the login phase.
+ */
+ while (start < end) {
+ if (iscsi_extract_key_value(start, &key, &value) < 0) {
+ ret = -1;
+ goto out;
+ }
+
+ if (!strncmp(key, "InitiatorName", 13))
+ i_buf = value;
+ else if (!strncmp(key, "SessionType", 11))
+ s_buf = value;
+ else if (!strncmp(key, "TargetName", 10))
+ t_buf = value;
+
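+ /* Advance past "key=value" plus the '=' and NULL separators. */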
+ start += strlen(key) + strlen(value) + 2;
+ }
+
+ /*
+ * See 5.3. Login Phase.
+ */
+ if (!i_buf) {
+ pr_err("InitiatorName key not received"
+ " in first login request.\n");
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+ ISCSI_LOGIN_STATUS_MISSING_FIELDS);
+ ret = -1;
+ goto out;
+ }
+ /*
+ * Convert the incoming InitiatorName to lowercase following
+ * RFC-3720 3.2.6.1. section c) that says that iSCSI IQNs
+ * are NOT case sensitive.
+ */
+ iscsi_initiatorname_tolower(i_buf);
+
+ if (!s_buf) {
+ if (!login->leading_connection)
+ goto get_target;
+
+ pr_err("SessionType key not received"
+ " in first login request.\n");
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+ ISCSI_LOGIN_STATUS_MISSING_FIELDS);
+ ret = -1;
+ goto out;
+ }
+
+ /*
+ * Use default portal group for discovery sessions.
+ */
+ sessiontype = strncmp(s_buf, DISCOVERY, 9);
+ if (!sessiontype) {
+ conn->tpg = iscsit_global->discovery_tpg;
+ if (!login->leading_connection)
+ goto get_target;
+
+ sess->sess_ops->SessionType = 1;
+ /*
+ * Setup crc32c modules from libcrypto
+ */
+ if (iscsi_login_setup_crypto(conn) < 0) {
+ pr_err("iscsi_login_setup_crypto() failed\n");
+ ret = -1;
+ goto out;
+ }
+ /*
+ * Serialize access across the discovery struct iscsi_portal_group to
+ * process login attempt.
+ */
+ if (iscsit_access_np(np, conn->tpg) < 0) {
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+ ISCSI_LOGIN_STATUS_SVC_UNAVAILABLE);
+ ret = -1;
+ goto out;
+ }
+ ret = 0;
+ goto out;
+ }
+
+get_target:
+ if (!t_buf) {
+ pr_err("TargetName key not received"
+ " in first login request while"
+ " SessionType=Normal.\n");
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+ ISCSI_LOGIN_STATUS_MISSING_FIELDS);
+ ret = -1;
+ goto out;
+ }
+
+ /*
+ * Locate Target IQN from Storage Node.
+ */
+ tiqn = iscsit_get_tiqn_for_login(t_buf);
+ if (!tiqn) {
+ pr_err("Unable to locate Target IQN: %s in"
+ " Storage Node\n", t_buf);
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+ ISCSI_LOGIN_STATUS_SVC_UNAVAILABLE);
+ ret = -1;
+ goto out;
+ }
+ pr_debug("Located Storage Object: %s\n", tiqn->tiqn);
+
+ /*
+ * Locate Target Portal Group from Storage Node.
+ */
+ conn->tpg = iscsit_get_tpg_from_np(tiqn, np);
+ if (!conn->tpg) {
+ pr_err("Unable to locate Target Portal Group"
+ " on %s\n", tiqn->tiqn);
+ iscsit_put_tiqn_for_login(tiqn);
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+ ISCSI_LOGIN_STATUS_SVC_UNAVAILABLE);
+ ret = -1;
+ goto out;
+ }
+ pr_debug("Located Portal Group Object: %hu\n", conn->tpg->tpgt);
+ /*
+ * Setup crc32c modules from libcrypto
+ */
+ if (iscsi_login_setup_crypto(conn) < 0) {
+ pr_err("iscsi_login_setup_crypto() failed\n");
+ ret = -1;
+ goto out;
+ }
+ /*
+ * Serialize access across the struct iscsi_portal_group to
+ * process login attempt.
+ */
+ if (iscsit_access_np(np, conn->tpg) < 0) {
+ iscsit_put_tiqn_for_login(tiqn);
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+ ISCSI_LOGIN_STATUS_SVC_UNAVAILABLE);
+ ret = -1;
+ conn->tpg = NULL;
+ goto out;
+ }
+
+ /*
+ * conn->sess->node_acl will be set when the referenced
+ * struct iscsi_session is located from received ISID+TSIH in
+ * iscsi_login_non_zero_tsih_s2().
+ */
+ if (!login->leading_connection) {
+ ret = 0;
+ goto out;
+ }
+
+ /*
+ * This value is required in iscsi_login_zero_tsih_s2()
+ */
+ sess->sess_ops->SessionType = 0;
+
+ /*
+ * Locate incoming Initiator IQN reference from Storage Node.
+ */
+ sess->se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
+ &conn->tpg->tpg_se_tpg, i_buf);
+ if (!sess->se_sess->se_node_acl) {
+ pr_err("iSCSI Initiator Node: %s is not authorized to"
+ " access iSCSI target portal group: %hu.\n",
+ i_buf, conn->tpg->tpgt);
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+ ISCSI_LOGIN_STATUS_TGT_FORBIDDEN);
+ ret = -1;
+ goto out;
+ }
+
+ ret = 0;
+out:
+ kfree(tmpbuf);
+ return ret;
+}
+
+struct iscsi_login *iscsi_target_init_negotiation(
+ struct iscsi_np *np,
+ struct iscsi_conn *conn,
+ char *login_pdu)
+{
+ struct iscsi_login *login;
+
+ login = kzalloc(sizeof(struct iscsi_login), GFP_KERNEL);
+ if (!login) {
+ pr_err("Unable to allocate memory for struct iscsi_login.\n");
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+ ISCSI_LOGIN_STATUS_NO_RESOURCES);
+ return NULL;
+ }
+
+ login->req = kzalloc(ISCSI_HDR_LEN, GFP_KERNEL);
+ if (!login->req) {
+ pr_err("Unable to allocate memory for Login Request.\n");
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+ ISCSI_LOGIN_STATUS_NO_RESOURCES);
+ goto out;
+ }
+ memcpy(login->req, login_pdu, ISCSI_HDR_LEN);
+
+ login->req_buf = kzalloc(MAX_KEY_VALUE_PAIRS, GFP_KERNEL);
+ if (!login->req_buf) {
+ pr_err("Unable to allocate memory for response buffer.\n");
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+ ISCSI_LOGIN_STATUS_NO_RESOURCES);
+ goto out;
+ }
+ /*
+ * SessionType: Discovery
+ *
+ * Locates Default Portal
+ *
+ * SessionType: Normal
+ *
+ * Locates Target Portal from NP -> Target IQN
+ */
+ if (iscsi_target_locate_portal(np, conn, login) < 0) {
+ pr_err("iSCSI Login negotiation failed.\n");
+ goto out;
+ }
+
+ return login;
+out:
+ kfree(login->req);
+ kfree(login->req_buf);
+ kfree(login);
+
+ return NULL;
+}
+
+int iscsi_target_start_negotiation(
+ struct iscsi_login *login,
+ struct iscsi_conn *conn)
+{
+ int ret = -1;
+
+ login->rsp = kzalloc(ISCSI_HDR_LEN, GFP_KERNEL);
+ if (!login->rsp) {
+ pr_err("Unable to allocate memory for"
+ " Login Response.\n");
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+ ISCSI_LOGIN_STATUS_NO_RESOURCES);
+ ret = -1;
+ goto out;
+ }
+
+ login->rsp_buf = kzalloc(MAX_KEY_VALUE_PAIRS, GFP_KERNEL);
+ if (!login->rsp_buf) {
+ pr_err("Unable to allocate memory for"
+ " request buffer.\n");
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+ ISCSI_LOGIN_STATUS_NO_RESOURCES);
+ ret = -1;
+ goto out;
+ }
+
+ ret = iscsi_target_do_login(conn, login);
+out:
+ if (ret != 0)
+ iscsi_remove_failed_auth_entry(conn);
+
+ iscsi_target_nego_release(login, conn);
+ return ret;
+}
+
+void iscsi_target_nego_release(
+ struct iscsi_login *login,
+ struct iscsi_conn *conn)
+{
+ kfree(login->req);
+ kfree(login->rsp);
+ kfree(login->req_buf);
+ kfree(login->rsp_buf);
+ kfree(login);
+}
diff --git a/drivers/target/iscsi/iscsi_target_nego.h b/drivers/target/iscsi/iscsi_target_nego.h
new file mode 100644
index 00000000000..92e133a5158
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_nego.h
@@ -0,0 +1,17 @@
+#ifndef ISCSI_TARGET_NEGO_H
+#define ISCSI_TARGET_NEGO_H
+
+#define DECIMAL 0
+#define HEX 1
+
+extern void convert_null_to_semi(char *, int);
+extern int extract_param(const char *, const char *, unsigned int, char *,
+ unsigned char *);
+extern struct iscsi_login *iscsi_target_init_negotiation(
+ struct iscsi_np *, struct iscsi_conn *, char *);
+extern int iscsi_target_start_negotiation(
+ struct iscsi_login *, struct iscsi_conn *);
+extern void iscsi_target_nego_release(
+ struct iscsi_login *, struct iscsi_conn *);
+
+#endif /* ISCSI_TARGET_NEGO_H */
diff --git a/drivers/target/iscsi/iscsi_target_nodeattrib.c b/drivers/target/iscsi/iscsi_target_nodeattrib.c
new file mode 100644
index 00000000000..aeafbe0cd7d
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_nodeattrib.c
@@ -0,0 +1,263 @@
+/*******************************************************************************
+ * This file contains the main functions related to Initiator Node Attributes.
+ *
+ * © Copyright 2007-2011 RisingTide Systems LLC.
+ *
+ * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ *
+ * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ ******************************************************************************/
+
+#include <target/target_core_base.h>
+#include <target/target_core_transport.h>
+
+#include "iscsi_target_core.h"
+#include "iscsi_target_device.h"
+#include "iscsi_target_tpg.h"
+#include "iscsi_target_util.h"
+#include "iscsi_target_nodeattrib.h"
+
+static inline char *iscsit_na_get_initiatorname(
+ struct iscsi_node_acl *nacl)
+{
+ struct se_node_acl *se_nacl = &nacl->se_node_acl;
+
+ return &se_nacl->initiatorname[0];
+}
+
+void iscsit_set_default_node_attribues(
+ struct iscsi_node_acl *acl)
+{
+ struct iscsi_node_attrib *a = &acl->node_attrib;
+
+ a->dataout_timeout = NA_DATAOUT_TIMEOUT;
+ a->dataout_timeout_retries = NA_DATAOUT_TIMEOUT_RETRIES;
+ a->nopin_timeout = NA_NOPIN_TIMEOUT;
+ a->nopin_response_timeout = NA_NOPIN_RESPONSE_TIMEOUT;
+ a->random_datain_pdu_offsets = NA_RANDOM_DATAIN_PDU_OFFSETS;
+ a->random_datain_seq_offsets = NA_RANDOM_DATAIN_SEQ_OFFSETS;
+ a->random_r2t_offsets = NA_RANDOM_R2T_OFFSETS;
+ a->default_erl = NA_DEFAULT_ERL;
+}
+
+int iscsit_na_dataout_timeout(
+ struct iscsi_node_acl *acl,
+ u32 dataout_timeout)
+{
+ struct iscsi_node_attrib *a = &acl->node_attrib;
+
+ if (dataout_timeout > NA_DATAOUT_TIMEOUT_MAX) {
+ pr_err("Requested DataOut Timeout %u larger than"
+ " maximum %u\n", dataout_timeout,
+ NA_DATAOUT_TIMEOUT_MAX);
+ return -EINVAL;
+ } else if (dataout_timeout < NA_DATAOUT_TIMEOUT_MIX) {
+ pr_err("Requested DataOut Timeout %u smaller than"
+ " minimum %u\n", dataout_timeout,
+ NA_DATAOUT_TIMEOUT_MIX);
+ return -EINVAL;
+ }
+
+ a->dataout_timeout = dataout_timeout;
+ pr_debug("Set DataOut Timeout to %u for Initiator Node"
+ " %s\n", a->dataout_timeout, iscsit_na_get_initiatorname(acl));
+
+ return 0;
+}
+
+int iscsit_na_dataout_timeout_retries(
+ struct iscsi_node_acl *acl,
+ u32 dataout_timeout_retries)
+{
+ struct iscsi_node_attrib *a = &acl->node_attrib;
+
+ if (dataout_timeout_retries > NA_DATAOUT_TIMEOUT_RETRIES_MAX) {
+ pr_err("Requested DataOut Timeout Retries %u larger"
+ " than maximum %u", dataout_timeout_retries,
+ NA_DATAOUT_TIMEOUT_RETRIES_MAX);
+ return -EINVAL;
+ } else if (dataout_timeout_retries < NA_DATAOUT_TIMEOUT_RETRIES_MIN) {
+ pr_err("Requested DataOut Timeout Retries %u smaller"
+ " than minimum %u", dataout_timeout_retries,
+ NA_DATAOUT_TIMEOUT_RETRIES_MIN);
+ return -EINVAL;
+ }
+
+ a->dataout_timeout_retries = dataout_timeout_retries;
+ pr_debug("Set DataOut Timeout Retries to %u for"
+ " Initiator Node %s\n", a->dataout_timeout_retries,
+ iscsit_na_get_initiatorname(acl));
+
+ return 0;
+}
+
+int iscsit_na_nopin_timeout(
+ struct iscsi_node_acl *acl,
+ u32 nopin_timeout)
+{
+ struct iscsi_node_attrib *a = &acl->node_attrib;
+ struct iscsi_session *sess;
+ struct iscsi_conn *conn;
+ struct se_node_acl *se_nacl = &a->nacl->se_node_acl;
+ struct se_session *se_sess;
+ u32 orig_nopin_timeout = a->nopin_timeout;
+
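+ /*
+ * A value of 0 is accepted and disables NopIn keepalives for this
+ * node; the re-enable block further below restarts the connection
+ * timers when a previously disabled timeout becomes non-zero.
+ */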
+ if (nopin_timeout > NA_NOPIN_TIMEOUT_MAX) {
+ pr_err("Requested NopIn Timeout %u larger than maximum"
+ " %u\n", nopin_timeout, NA_NOPIN_TIMEOUT_MAX);
+ return -EINVAL;
+ } else if ((nopin_timeout < NA_NOPIN_TIMEOUT_MIN) &&
+ (nopin_timeout != 0)) {
+ pr_err("Requested NopIn Timeout %u smaller than"
+ " minimum %u and not 0\n", nopin_timeout,
+ NA_NOPIN_TIMEOUT_MIN);
+ return -EINVAL;
+ }
+
+ a->nopin_timeout = nopin_timeout;
+ pr_debug("Set NopIn Timeout to %u for Initiator"
+ " Node %s\n", a->nopin_timeout,
+ iscsit_na_get_initiatorname(acl));
+ /*
+ * Reenable disabled nopin_timeout timer for all iSCSI connections.
+ */
+ if (!orig_nopin_timeout) {
+ spin_lock_bh(&se_nacl->nacl_sess_lock);
+ se_sess = se_nacl->nacl_sess;
+ if (se_sess) {
+ sess = se_sess->fabric_sess_ptr;
+
+ spin_lock(&sess->conn_lock);
+ list_for_each_entry(conn, &sess->sess_conn_list,
+ conn_list) {
+ if (conn->conn_state !=
+ TARG_CONN_STATE_LOGGED_IN)
+ continue;
+
+ spin_lock(&conn->nopin_timer_lock);
+ __iscsit_start_nopin_timer(conn);
+ spin_unlock(&conn->nopin_timer_lock);
+ }
+ spin_unlock(&sess->conn_lock);
+ }
+ spin_unlock_bh(&se_nacl->nacl_sess_lock);
+ }
+
+ return 0;
+}
+
+int iscsit_na_nopin_response_timeout(
+ struct iscsi_node_acl *acl,
+ u32 nopin_response_timeout)
+{
+ struct iscsi_node_attrib *a = &acl->node_attrib;
+
+ if (nopin_response_timeout > NA_NOPIN_RESPONSE_TIMEOUT_MAX) {
+ pr_err("Requested NopIn Response Timeout %u larger"
+ " than maximum %u\n", nopin_response_timeout,
+ NA_NOPIN_RESPONSE_TIMEOUT_MAX);
+ return -EINVAL;
+ } else if (nopin_response_timeout < NA_NOPIN_RESPONSE_TIMEOUT_MIN) {
+ pr_err("Requested NopIn Response Timeout %u smaller"
+ " than minimum %u\n", nopin_response_timeout,
+ NA_NOPIN_RESPONSE_TIMEOUT_MIN);
+ return -EINVAL;
+ }
+
+ a->nopin_response_timeout = nopin_response_timeout;
+ pr_debug("Set NopIn Response Timeout to %u for"
+ " Initiator Node %s\n", a->nopin_timeout,
+ iscsit_na_get_initiatorname(acl));
+
+ return 0;
+}
+
+int iscsit_na_random_datain_pdu_offsets(
+ struct iscsi_node_acl *acl,
+ u32 random_datain_pdu_offsets)
+{
+ struct iscsi_node_attrib *a = &acl->node_attrib;
+
+ if (random_datain_pdu_offsets != 0 && random_datain_pdu_offsets != 1) {
+ pr_err("Requested Random DataIN PDU Offsets: %u not"
+ " 0 or 1\n", random_datain_pdu_offsets);
+ return -EINVAL;
+ }
+
+ a->random_datain_pdu_offsets = random_datain_pdu_offsets;
+ pr_debug("Set Random DataIN PDU Offsets to %u for"
+ " Initiator Node %s\n", a->random_datain_pdu_offsets,
+ iscsit_na_get_initiatorname(acl));
+
+ return 0;
+}
+
+int iscsit_na_random_datain_seq_offsets(
+ struct iscsi_node_acl *acl,
+ u32 random_datain_seq_offsets)
+{
+ struct iscsi_node_attrib *a = &acl->node_attrib;
+
+ if (random_datain_seq_offsets != 0 && random_datain_seq_offsets != 1) {
+ pr_err("Requested Random DataIN Sequence Offsets: %u"
+ " not 0 or 1\n", random_datain_seq_offsets);
+ return -EINVAL;
+ }
+
+ a->random_datain_seq_offsets = random_datain_seq_offsets;
+ pr_debug("Set Random DataIN Sequence Offsets to %u for"
+ " Initiator Node %s\n", a->random_datain_seq_offsets,
+ iscsit_na_get_initiatorname(acl));
+
+ return 0;
+}
+
+int iscsit_na_random_r2t_offsets(
+ struct iscsi_node_acl *acl,
+ u32 random_r2t_offsets)
+{
+ struct iscsi_node_attrib *a = &acl->node_attrib;
+
+ if (random_r2t_offsets != 0 && random_r2t_offsets != 1) {
+ pr_err("Requested Random R2T Offsets: %u not"
+ " 0 or 1\n", random_r2t_offsets);
+ return -EINVAL;
+ }
+
+ a->random_r2t_offsets = random_r2t_offsets;
+ pr_debug("Set Random R2T Offsets to %u for"
+ " Initiator Node %s\n", a->random_r2t_offsets,
+ iscsit_na_get_initiatorname(acl));
+
+ return 0;
+}
+
+int iscsit_na_default_erl(
+ struct iscsi_node_acl *acl,
+ u32 default_erl)
+{
+ struct iscsi_node_attrib *a = &acl->node_attrib;
+
+ if (default_erl != 0 && default_erl != 1 && default_erl != 2) {
+ pr_err("Requested default ERL: %u not 0, 1, or 2\n",
+ default_erl);
+ return -EINVAL;
+ }
+
+ a->default_erl = default_erl;
+ pr_debug("Set use ERL0 flag to %u for Initiator"
+ " Node %s\n", a->default_erl,
+ iscsit_na_get_initiatorname(acl));
+
+ return 0;
+}
diff --git a/drivers/target/iscsi/iscsi_target_nodeattrib.h b/drivers/target/iscsi/iscsi_target_nodeattrib.h
new file mode 100644
index 00000000000..c970b326ef2
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_nodeattrib.h
@@ -0,0 +1,14 @@
+#ifndef ISCSI_TARGET_NODEATTRIB_H
+#define ISCSI_TARGET_NODEATTRIB_H
+
+extern void iscsit_set_default_node_attribues(struct iscsi_node_acl *);
+extern int iscsit_na_dataout_timeout(struct iscsi_node_acl *, u32);
+extern int iscsit_na_dataout_timeout_retries(struct iscsi_node_acl *, u32);
+extern int iscsit_na_nopin_timeout(struct iscsi_node_acl *, u32);
+extern int iscsit_na_nopin_response_timeout(struct iscsi_node_acl *, u32);
+extern int iscsit_na_random_datain_pdu_offsets(struct iscsi_node_acl *, u32);
+extern int iscsit_na_random_datain_seq_offsets(struct iscsi_node_acl *, u32);
+extern int iscsit_na_random_r2t_offsets(struct iscsi_node_acl *, u32);
+extern int iscsit_na_default_erl(struct iscsi_node_acl *, u32);
+
+#endif /* ISCSI_TARGET_NODEATTRIB_H */
diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c
new file mode 100644
index 00000000000..252e246cf51
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_parameters.c
@@ -0,0 +1,1905 @@
+/*******************************************************************************
+ * This file contains main functions related to iSCSI Parameter negotiation.
+ *
+ * © Copyright 2007-2011 RisingTide Systems LLC.
+ *
+ * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ *
+ * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ ******************************************************************************/
+
+#include <linux/slab.h>
+
+#include "iscsi_target_core.h"
+#include "iscsi_target_util.h"
+#include "iscsi_target_parameters.h"
+
+int iscsi_login_rx_data(
+ struct iscsi_conn *conn,
+ char *buf,
+ int length)
+{
+ int rx_got;
+ struct kvec iov;
+
+ memset(&iov, 0, sizeof(struct kvec));
+ iov.iov_len = length;
+ iov.iov_base = buf;
+
+ /*
+ * Initial Marker-less Interval.
+ * Add the values regardless of IFMarker/OFMarker, considering
+ * it may not be negotiated yet.
+ */
+ conn->of_marker += length;
+
+ rx_got = rx_data(conn, &iov, 1, length);
+ if (rx_got != length) {
+ pr_err("rx_data returned %d, expecting %d.\n",
+ rx_got, length);
+ return -1;
+ }
+
+ return 0;
+}
+
+int iscsi_login_tx_data(
+ struct iscsi_conn *conn,
+ char *pdu_buf,
+ char *text_buf,
+ int text_length)
+{
+ int length, tx_sent;
+ struct kvec iov[2];
+
+ length = (ISCSI_HDR_LEN + text_length);
+
+ memset(&iov[0], 0, 2 * sizeof(struct kvec));
+ iov[0].iov_len = ISCSI_HDR_LEN;
+ iov[0].iov_base = pdu_buf;
+ iov[1].iov_len = text_length;
+ iov[1].iov_base = text_buf;
+
+ /*
+ * Initial Marker-less Interval.
+ * Add the values regardless of IFMarker/OFMarker, considering
+ * it may not be negotiated yet.
+ */
+ conn->if_marker += length;
+
+ tx_sent = tx_data(conn, &iov[0], 2, length);
+ if (tx_sent != length) {
+ pr_err("tx_data returned %d, expecting %d.\n",
+ tx_sent, length);
+ return -1;
+ }
+
+ return 0;
+}
+
+void iscsi_dump_conn_ops(struct iscsi_conn_ops *conn_ops)
+{
+ pr_debug("HeaderDigest: %s\n", (conn_ops->HeaderDigest) ?
+ "CRC32C" : "None");
+ pr_debug("DataDigest: %s\n", (conn_ops->DataDigest) ?
+ "CRC32C" : "None");
+ pr_debug("MaxRecvDataSegmentLength: %u\n",
+ conn_ops->MaxRecvDataSegmentLength);
+ pr_debug("OFMarker: %s\n", (conn_ops->OFMarker) ? "Yes" : "No");
+ pr_debug("IFMarker: %s\n", (conn_ops->IFMarker) ? "Yes" : "No");
+ if (conn_ops->OFMarker)
+ pr_debug("OFMarkInt: %u\n", conn_ops->OFMarkInt);
+ if (conn_ops->IFMarker)
+ pr_debug("IFMarkInt: %u\n", conn_ops->IFMarkInt);
+}
+
+void iscsi_dump_sess_ops(struct iscsi_sess_ops *sess_ops)
+{
+ pr_debug("InitiatorName: %s\n", sess_ops->InitiatorName);
+ pr_debug("InitiatorAlias: %s\n", sess_ops->InitiatorAlias);
+ pr_debug("TargetName: %s\n", sess_ops->TargetName);
+ pr_debug("TargetAlias: %s\n", sess_ops->TargetAlias);
+ pr_debug("TargetPortalGroupTag: %hu\n",
+ sess_ops->TargetPortalGroupTag);
+ pr_debug("MaxConnections: %hu\n", sess_ops->MaxConnections);
+ pr_debug("InitialR2T: %s\n",
+ (sess_ops->InitialR2T) ? "Yes" : "No");
+ pr_debug("ImmediateData: %s\n", (sess_ops->ImmediateData) ?
+ "Yes" : "No");
+ pr_debug("MaxBurstLength: %u\n", sess_ops->MaxBurstLength);
+ pr_debug("FirstBurstLength: %u\n", sess_ops->FirstBurstLength);
+ pr_debug("DefaultTime2Wait: %hu\n", sess_ops->DefaultTime2Wait);
+ pr_debug("DefaultTime2Retain: %hu\n",
+ sess_ops->DefaultTime2Retain);
+ pr_debug("MaxOutstandingR2T: %hu\n",
+ sess_ops->MaxOutstandingR2T);
+ pr_debug("DataPDUInOrder: %s\n",
+ (sess_ops->DataPDUInOrder) ? "Yes" : "No");
+ pr_debug("DataSequenceInOrder: %s\n",
+ (sess_ops->DataSequenceInOrder) ? "Yes" : "No");
+ pr_debug("ErrorRecoveryLevel: %hu\n",
+ sess_ops->ErrorRecoveryLevel);
+ pr_debug("SessionType: %s\n", (sess_ops->SessionType) ?
+ "Discovery" : "Normal");
+}
+
+void iscsi_print_params(struct iscsi_param_list *param_list)
+{
+ struct iscsi_param *param;
+
+ list_for_each_entry(param, &param_list->param_list, p_list)
+ pr_debug("%s: %s\n", param->name, param->value);
+}
+
+static struct iscsi_param *iscsi_set_default_param(struct iscsi_param_list *param_list,
+ char *name, char *value, u8 phase, u8 scope, u8 sender,
+ u16 type_range, u8 use)
+{
+ struct iscsi_param *param = NULL;
+
+ param = kzalloc(sizeof(struct iscsi_param), GFP_KERNEL);
+ if (!param) {
+ pr_err("Unable to allocate memory for parameter.\n");
+ goto out;
+ }
+ INIT_LIST_HEAD(&param->p_list);
+
+ param->name = kstrdup(name, GFP_KERNEL);
+ if (!param->name) {
+ pr_err("Unable to allocate memory for parameter name.\n");
+ goto out;
+ }
+
+ param->value = kstrdup(value, GFP_KERNEL);
+ if (!param->value) {
+ pr_err("Unable to allocate memory for parameter value.\n");
+ goto out;
+ }
+
+ param->phase = phase;
+ param->scope = scope;
+ param->sender = sender;
+ param->use = use;
+ param->type_range = type_range;
+
+ switch (param->type_range) {
+ case TYPERANGE_BOOL_AND:
+ param->type = TYPE_BOOL_AND;
+ break;
+ case TYPERANGE_BOOL_OR:
+ param->type = TYPE_BOOL_OR;
+ break;
+ case TYPERANGE_0_TO_2:
+ case TYPERANGE_0_TO_3600:
+ case TYPERANGE_0_TO_32767:
+ case TYPERANGE_0_TO_65535:
+ case TYPERANGE_1_TO_65535:
+ case TYPERANGE_2_TO_3600:
+ case TYPERANGE_512_TO_16777215:
+ param->type = TYPE_NUMBER;
+ break;
+ case TYPERANGE_AUTH:
+ case TYPERANGE_DIGEST:
+ param->type = TYPE_VALUE_LIST | TYPE_STRING;
+ break;
+ case TYPERANGE_MARKINT:
+ param->type = TYPE_NUMBER_RANGE;
+ param->type_range |= TYPERANGE_1_TO_65535;
+ break;
+ case TYPERANGE_ISCSINAME:
+ case TYPERANGE_SESSIONTYPE:
+ case TYPERANGE_TARGETADDRESS:
+ case TYPERANGE_UTF8:
+ param->type = TYPE_STRING;
+ break;
+ default:
+ pr_err("Unknown type_range 0x%02x\n",
+ param->type_range);
+ goto out;
+ }
+ list_add_tail(&param->p_list, &param_list->param_list);
+
+ return param;
+out:
+ if (param) {
+ kfree(param->value);
+ kfree(param->name);
+ kfree(param);
+ }
+
+ return NULL;
+}
+
+/* #warning Add extension keys */
+int iscsi_create_default_params(struct iscsi_param_list **param_list_ptr)
+{
+ struct iscsi_param *param = NULL;
+ struct iscsi_param_list *pl;
+
+ pl = kzalloc(sizeof(struct iscsi_param_list), GFP_KERNEL);
+ if (!pl) {
+ pr_err("Unable to allocate memory for"
+ " struct iscsi_param_list.\n");
+ return -1;
+ }
+ INIT_LIST_HEAD(&pl->param_list);
+ INIT_LIST_HEAD(&pl->extra_response_list);
+
+ /*
+ * The format for setting the initial parameter definitions are:
+ *
+ * Parameter name:
+ * Initial value:
+ * Allowable phase:
+ * Scope:
+ * Allowable senders:
+ * Typerange:
+ * Use:
+ */
+ param = iscsi_set_default_param(pl, AUTHMETHOD, INITIAL_AUTHMETHOD,
+ PHASE_SECURITY, SCOPE_CONNECTION_ONLY, SENDER_BOTH,
+ TYPERANGE_AUTH, USE_INITIAL_ONLY);
+ if (!param)
+ goto out;
+
+ param = iscsi_set_default_param(pl, HEADERDIGEST, INITIAL_HEADERDIGEST,
+ PHASE_OPERATIONAL, SCOPE_CONNECTION_ONLY, SENDER_BOTH,
+ TYPERANGE_DIGEST, USE_INITIAL_ONLY);
+ if (!param)
+ goto out;
+
+ param = iscsi_set_default_param(pl, DATADIGEST, INITIAL_DATADIGEST,
+ PHASE_OPERATIONAL, SCOPE_CONNECTION_ONLY, SENDER_BOTH,
+ TYPERANGE_DIGEST, USE_INITIAL_ONLY);
+ if (!param)
+ goto out;
+
+ param = iscsi_set_default_param(pl, MAXCONNECTIONS,
+ INITIAL_MAXCONNECTIONS, PHASE_OPERATIONAL,
+ SCOPE_SESSION_WIDE, SENDER_BOTH,
+ TYPERANGE_1_TO_65535, USE_LEADING_ONLY);
+ if (!param)
+ goto out;
+
+ param = iscsi_set_default_param(pl, SENDTARGETS, INITIAL_SENDTARGETS,
+ PHASE_FFP0, SCOPE_SESSION_WIDE, SENDER_INITIATOR,
+ TYPERANGE_UTF8, 0);
+ if (!param)
+ goto out;
+
+ param = iscsi_set_default_param(pl, TARGETNAME, INITIAL_TARGETNAME,
+ PHASE_DECLARATIVE, SCOPE_SESSION_WIDE, SENDER_BOTH,
+ TYPERANGE_ISCSINAME, USE_ALL);
+ if (!param)
+ goto out;
+
+ param = iscsi_set_default_param(pl, INITIATORNAME,
+ INITIAL_INITIATORNAME, PHASE_DECLARATIVE,
+ SCOPE_SESSION_WIDE, SENDER_INITIATOR,
+ TYPERANGE_ISCSINAME, USE_INITIAL_ONLY);
+ if (!param)
+ goto out;
+
+ param = iscsi_set_default_param(pl, TARGETALIAS, INITIAL_TARGETALIAS,
+ PHASE_DECLARATIVE, SCOPE_SESSION_WIDE, SENDER_TARGET,
+ TYPERANGE_UTF8, USE_ALL);
+ if (!param)
+ goto out;
+
+ param = iscsi_set_default_param(pl, INITIATORALIAS,
+ INITIAL_INITIATORALIAS, PHASE_DECLARATIVE,
+ SCOPE_SESSION_WIDE, SENDER_INITIATOR, TYPERANGE_UTF8,
+ USE_ALL);
+ if (!param)
+ goto out;
+
+ param = iscsi_set_default_param(pl, TARGETADDRESS,
+ INITIAL_TARGETADDRESS, PHASE_DECLARATIVE,
+ SCOPE_SESSION_WIDE, SENDER_TARGET,
+ TYPERANGE_TARGETADDRESS, USE_ALL);
+ if (!param)
+ goto out;
+
+ param = iscsi_set_default_param(pl, TARGETPORTALGROUPTAG,
+ INITIAL_TARGETPORTALGROUPTAG,
+ PHASE_DECLARATIVE, SCOPE_SESSION_WIDE, SENDER_TARGET,
+ TYPERANGE_0_TO_65535, USE_INITIAL_ONLY);
+ if (!param)
+ goto out;
+
+ param = iscsi_set_default_param(pl, INITIALR2T, INITIAL_INITIALR2T,
+ PHASE_OPERATIONAL, SCOPE_SESSION_WIDE, SENDER_BOTH,
+ TYPERANGE_BOOL_OR, USE_LEADING_ONLY);
+ if (!param)
+ goto out;
+
+ param = iscsi_set_default_param(pl, IMMEDIATEDATA,
+ INITIAL_IMMEDIATEDATA, PHASE_OPERATIONAL,
+ SCOPE_SESSION_WIDE, SENDER_BOTH, TYPERANGE_BOOL_AND,
+ USE_LEADING_ONLY);
+ if (!param)
+ goto out;
+
+ param = iscsi_set_default_param(pl, MAXRECVDATASEGMENTLENGTH,
+ INITIAL_MAXRECVDATASEGMENTLENGTH,
+ PHASE_OPERATIONAL, SCOPE_CONNECTION_ONLY, SENDER_BOTH,
+ TYPERANGE_512_TO_16777215, USE_ALL);
+ if (!param)
+ goto out;
+
+ param = iscsi_set_default_param(pl, MAXBURSTLENGTH,
+ INITIAL_MAXBURSTLENGTH, PHASE_OPERATIONAL,
+ SCOPE_SESSION_WIDE, SENDER_BOTH,
+ TYPERANGE_512_TO_16777215, USE_LEADING_ONLY);
+ if (!param)
+ goto out;
+
+ param = iscsi_set_default_param(pl, FIRSTBURSTLENGTH,
+ INITIAL_FIRSTBURSTLENGTH,
+ PHASE_OPERATIONAL, SCOPE_SESSION_WIDE, SENDER_BOTH,
+ TYPERANGE_512_TO_16777215, USE_LEADING_ONLY);
+ if (!param)
+ goto out;
+
+ param = iscsi_set_default_param(pl, DEFAULTTIME2WAIT,
+ INITIAL_DEFAULTTIME2WAIT,
+ PHASE_OPERATIONAL, SCOPE_SESSION_WIDE, SENDER_BOTH,
+ TYPERANGE_0_TO_3600, USE_LEADING_ONLY);
+ if (!param)
+ goto out;
+
+ param = iscsi_set_default_param(pl, DEFAULTTIME2RETAIN,
+ INITIAL_DEFAULTTIME2RETAIN,
+ PHASE_OPERATIONAL, SCOPE_SESSION_WIDE, SENDER_BOTH,
+ TYPERANGE_0_TO_3600, USE_LEADING_ONLY);
+ if (!param)
+ goto out;
+
+ param = iscsi_set_default_param(pl, MAXOUTSTANDINGR2T,
+ INITIAL_MAXOUTSTANDINGR2T,
+ PHASE_OPERATIONAL, SCOPE_SESSION_WIDE, SENDER_BOTH,
+ TYPERANGE_1_TO_65535, USE_LEADING_ONLY);
+ if (!param)
+ goto out;
+
+ param = iscsi_set_default_param(pl, DATAPDUINORDER,
+ INITIAL_DATAPDUINORDER, PHASE_OPERATIONAL,
+ SCOPE_SESSION_WIDE, SENDER_BOTH, TYPERANGE_BOOL_OR,
+ USE_LEADING_ONLY);
+ if (!param)
+ goto out;
+
+ param = iscsi_set_default_param(pl, DATASEQUENCEINORDER,
+ INITIAL_DATASEQUENCEINORDER,
+ PHASE_OPERATIONAL, SCOPE_SESSION_WIDE, SENDER_BOTH,
+ TYPERANGE_BOOL_OR, USE_LEADING_ONLY);
+ if (!param)
+ goto out;
+
+ param = iscsi_set_default_param(pl, ERRORRECOVERYLEVEL,
+ INITIAL_ERRORRECOVERYLEVEL,
+ PHASE_OPERATIONAL, SCOPE_SESSION_WIDE, SENDER_BOTH,
+ TYPERANGE_0_TO_2, USE_LEADING_ONLY);
+ if (!param)
+ goto out;
+
+ param = iscsi_set_default_param(pl, SESSIONTYPE, INITIAL_SESSIONTYPE,
+ PHASE_DECLARATIVE, SCOPE_SESSION_WIDE, SENDER_INITIATOR,
+ TYPERANGE_SESSIONTYPE, USE_LEADING_ONLY);
+ if (!param)
+ goto out;
+
+ param = iscsi_set_default_param(pl, IFMARKER, INITIAL_IFMARKER,
+ PHASE_OPERATIONAL, SCOPE_CONNECTION_ONLY, SENDER_BOTH,
+ TYPERANGE_BOOL_AND, USE_INITIAL_ONLY);
+ if (!param)
+ goto out;
+
+ param = iscsi_set_default_param(pl, OFMARKER, INITIAL_OFMARKER,
+ PHASE_OPERATIONAL, SCOPE_CONNECTION_ONLY, SENDER_BOTH,
+ TYPERANGE_BOOL_AND, USE_INITIAL_ONLY);
+ if (!param)
+ goto out;
+
+ param = iscsi_set_default_param(pl, IFMARKINT, INITIAL_IFMARKINT,
+ PHASE_OPERATIONAL, SCOPE_CONNECTION_ONLY, SENDER_BOTH,
+ TYPERANGE_MARKINT, USE_INITIAL_ONLY);
+ if (!param)
+ goto out;
+
+ param = iscsi_set_default_param(pl, OFMARKINT, INITIAL_OFMARKINT,
+ PHASE_OPERATIONAL, SCOPE_CONNECTION_ONLY, SENDER_BOTH,
+ TYPERANGE_MARKINT, USE_INITIAL_ONLY);
+ if (!param)
+ goto out;
+
+ *param_list_ptr = pl;
+ return 0;
+out:
+ iscsi_release_param_list(pl);
+ return -1;
+}
+
+int iscsi_set_keys_to_negotiate(
+ int sessiontype,
+ struct iscsi_param_list *param_list)
+{
+ struct iscsi_param *param;
+
+ list_for_each_entry(param, &param_list->param_list, p_list) {
+ param->state = 0;
+ if (!strcmp(param->name, AUTHMETHOD)) {
+ SET_PSTATE_NEGOTIATE(param);
+ } else if (!strcmp(param->name, HEADERDIGEST)) {
+ SET_PSTATE_NEGOTIATE(param);
+ } else if (!strcmp(param->name, DATADIGEST)) {
+ SET_PSTATE_NEGOTIATE(param);
+ } else if (!strcmp(param->name, MAXCONNECTIONS)) {
+ SET_PSTATE_NEGOTIATE(param);
+ } else if (!strcmp(param->name, TARGETNAME)) {
+ continue;
+ } else if (!strcmp(param->name, INITIATORNAME)) {
+ continue;
+ } else if (!strcmp(param->name, TARGETALIAS)) {
+ if (param->value)
+ SET_PSTATE_NEGOTIATE(param);
+ } else if (!strcmp(param->name, INITIATORALIAS)) {
+ continue;
+ } else if (!strcmp(param->name, TARGETPORTALGROUPTAG)) {
+ SET_PSTATE_NEGOTIATE(param);
+ } else if (!strcmp(param->name, INITIALR2T)) {
+ SET_PSTATE_NEGOTIATE(param);
+ } else if (!strcmp(param->name, IMMEDIATEDATA)) {
+ SET_PSTATE_NEGOTIATE(param);
+ } else if (!strcmp(param->name, MAXRECVDATASEGMENTLENGTH)) {
+ SET_PSTATE_NEGOTIATE(param);
+ } else if (!strcmp(param->name, MAXBURSTLENGTH)) {
+ SET_PSTATE_NEGOTIATE(param);
+ } else if (!strcmp(param->name, FIRSTBURSTLENGTH)) {
+ SET_PSTATE_NEGOTIATE(param);
+ } else if (!strcmp(param->name, DEFAULTTIME2WAIT)) {
+ SET_PSTATE_NEGOTIATE(param);
+ } else if (!strcmp(param->name, DEFAULTTIME2RETAIN)) {
+ SET_PSTATE_NEGOTIATE(param);
+ } else if (!strcmp(param->name, MAXOUTSTANDINGR2T)) {
+ SET_PSTATE_NEGOTIATE(param);
+ } else if (!strcmp(param->name, DATAPDUINORDER)) {
+ SET_PSTATE_NEGOTIATE(param);
+ } else if (!strcmp(param->name, DATASEQUENCEINORDER)) {
+ SET_PSTATE_NEGOTIATE(param);
+ } else if (!strcmp(param->name, ERRORRECOVERYLEVEL)) {
+ SET_PSTATE_NEGOTIATE(param);
+ } else if (!strcmp(param->name, SESSIONTYPE)) {
+ SET_PSTATE_NEGOTIATE(param);
+ } else if (!strcmp(param->name, IFMARKER)) {
+ SET_PSTATE_NEGOTIATE(param);
+ } else if (!strcmp(param->name, OFMARKER)) {
+ SET_PSTATE_NEGOTIATE(param);
+ } else if (!strcmp(param->name, IFMARKINT)) {
+ SET_PSTATE_NEGOTIATE(param);
+ } else if (!strcmp(param->name, OFMARKINT)) {
+ SET_PSTATE_NEGOTIATE(param);
+ }
+ }
+
+ return 0;
+}
+
+int iscsi_set_keys_irrelevant_for_discovery(
+ struct iscsi_param_list *param_list)
+{
+ struct iscsi_param *param;
+
+ list_for_each_entry(param, &param_list->param_list, p_list) {
+ if (!strcmp(param->name, MAXCONNECTIONS))
+ param->state &= ~PSTATE_NEGOTIATE;
+ else if (!strcmp(param->name, INITIALR2T))
+ param->state &= ~PSTATE_NEGOTIATE;
+ else if (!strcmp(param->name, IMMEDIATEDATA))
+ param->state &= ~PSTATE_NEGOTIATE;
+ else if (!strcmp(param->name, MAXBURSTLENGTH))
+ param->state &= ~PSTATE_NEGOTIATE;
+ else if (!strcmp(param->name, FIRSTBURSTLENGTH))
+ param->state &= ~PSTATE_NEGOTIATE;
+ else if (!strcmp(param->name, MAXOUTSTANDINGR2T))
+ param->state &= ~PSTATE_NEGOTIATE;
+ else if (!strcmp(param->name, DATAPDUINORDER))
+ param->state &= ~PSTATE_NEGOTIATE;
+ else if (!strcmp(param->name, DATASEQUENCEINORDER))
+ param->state &= ~PSTATE_NEGOTIATE;
+ else if (!strcmp(param->name, ERRORRECOVERYLEVEL))
+ param->state &= ~PSTATE_NEGOTIATE;
+ else if (!strcmp(param->name, DEFAULTTIME2WAIT))
+ param->state &= ~PSTATE_NEGOTIATE;
+ else if (!strcmp(param->name, DEFAULTTIME2RETAIN))
+ param->state &= ~PSTATE_NEGOTIATE;
+ else if (!strcmp(param->name, IFMARKER))
+ param->state &= ~PSTATE_NEGOTIATE;
+ else if (!strcmp(param->name, OFMARKER))
+ param->state &= ~PSTATE_NEGOTIATE;
+ else if (!strcmp(param->name, IFMARKINT))
+ param->state &= ~PSTATE_NEGOTIATE;
+ else if (!strcmp(param->name, OFMARKINT))
+ param->state &= ~PSTATE_NEGOTIATE;
+ }
+
+ return 0;
+}
+
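+/*
+ * Build a connection-local copy of the leading connection's parameter
+ * list.  For a non-leading connection within an existing session, only
+ * connection-scoped keys plus the three session-identifying keys
+ * (TargetName, InitiatorName, TargetPortalGroupTag) are copied, since
+ * all other session-wide keys were fixed during the leading login.
+ */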
+int iscsi_copy_param_list(
+ struct iscsi_param_list **dst_param_list,
+ struct iscsi_param_list *src_param_list,
+ int leading)
+{
+ struct iscsi_param *new_param = NULL, *param = NULL;
+ struct iscsi_param_list *param_list = NULL;
+
+ param_list = kzalloc(sizeof(struct iscsi_param_list), GFP_KERNEL);
+ if (!param_list) {
+ pr_err("Unable to allocate memory for"
+ " struct iscsi_param_list.\n");
+ goto err_out;
+ }
+ INIT_LIST_HEAD(&param_list->param_list);
+ INIT_LIST_HEAD(&param_list->extra_response_list);
+
+ list_for_each_entry(param, &src_param_list->param_list, p_list) {
+ if (!leading && (param->scope & SCOPE_SESSION_WIDE)) {
+ if ((strcmp(param->name, "TargetName") != 0) &&
+ (strcmp(param->name, "InitiatorName") != 0) &&
+ (strcmp(param->name, "TargetPortalGroupTag") != 0))
+ continue;
+ }
+
+ new_param = kzalloc(sizeof(struct iscsi_param), GFP_KERNEL);
+ if (!new_param) {
+ pr_err("Unable to allocate memory for"
+ " struct iscsi_param.\n");
+ goto err_out;
+ }
+
+ new_param->set_param = param->set_param;
+ new_param->phase = param->phase;
+ new_param->scope = param->scope;
+ new_param->sender = param->sender;
+ new_param->type = param->type;
+ new_param->use = param->use;
+ new_param->type_range = param->type_range;
+
+ new_param->name = kstrdup(param->name, GFP_KERNEL);
+ new_param->value = kstrdup(param->value, GFP_KERNEL);
+ if (!new_param->name || !new_param->value) {
+ pr_err("Unable to allocate memory for"
+ " parameter name and value.\n");
+ kfree(new_param->name);
+ kfree(new_param->value);
+ kfree(new_param);
+ goto err_out;
+ }
+
+ list_add_tail(&new_param->p_list, &param_list->param_list);
+ }
+
+ if (!list_empty(&param_list->param_list))
+ *dst_param_list = param_list;
+ else {
+ pr_err("No parameters allocated.\n");
+ goto err_out;
+ }
+
+ return 0;
+
+err_out:
+ iscsi_release_param_list(param_list);
+ return -1;
+}
+
+static void iscsi_release_extra_responses(struct iscsi_param_list *param_list)
+{
+ struct iscsi_extra_response *er, *er_tmp;
+
+ list_for_each_entry_safe(er, er_tmp, &param_list->extra_response_list,
+ er_list) {
+ list_del(&er->er_list);
+ kfree(er);
+ }
+}
+
+void iscsi_release_param_list(struct iscsi_param_list *param_list)
+{
+ struct iscsi_param *param, *param_tmp;
+
+ list_for_each_entry_safe(param, param_tmp, &param_list->param_list,
+ p_list) {
+ list_del(&param->p_list);
+
+ kfree(param->name);
+ param->name = NULL;
+ kfree(param->value);
+ param->value = NULL;
+ kfree(param);
+ param = NULL;
+ }
+
+ iscsi_release_extra_responses(param_list);
+
+ kfree(param_list);
+}
+
+struct iscsi_param *iscsi_find_param_from_key(
+ char *key,
+ struct iscsi_param_list *param_list)
+{
+ struct iscsi_param *param;
+
+ if (!key || !param_list) {
+ pr_err("Key or parameter list pointer is NULL.\n");
+ return NULL;
+ }
+
+ list_for_each_entry(param, &param_list->param_list, p_list) {
+ if (!strcmp(key, param->name))
+ return param;
+ }
+
+ pr_err("Unable to locate key \"%s\".\n", key);
+ return NULL;
+}
+
+int iscsi_extract_key_value(char *textbuf, char **key, char **value)
+{
+ *value = strchr(textbuf, '=');
+ if (!*value) {
+ pr_err("Unable to locate \"=\" seperator for key,"
+ " ignoring request.\n");
+ return -1;
+ }
+
+ *key = textbuf;
+ **value = '\0';
+ *value = *value + 1;
+
+ return 0;
+}
+
+int iscsi_update_param_value(struct iscsi_param *param, char *value)
+{
+ kfree(param->value);
+
+ param->value = kstrdup(value, GFP_KERNEL);
+ if (!param->value) {
+ pr_err("Unable to allocate memory for value.\n");
+ return -1;
+ }
+
+ pr_debug("iSCSI Parameter updated to %s=%s\n",
+ param->name, param->value);
+ return 0;
+}
+
+static int iscsi_add_notunderstood_response(
+ char *key,
+ char *value,
+ struct iscsi_param_list *param_list)
+{
+ struct iscsi_extra_response *extra_response;
+
+ if (strlen(key) > KEY_MAXLEN) {
+ pr_err("Length of notunderstood key \"%s\" exceeds %d,"
+ " protocol error.\n", key, KEY_MAXLEN);
+ return -1;
+ }
+
+ if (strlen(value) > VALUE_MAXLEN) {
+ pr_err("Value for notunderstood key \"%s\" exceeds %d,"
+ " protocol error.\n", key, VALUE_MAXLEN);
+ return -1;
+ }
+
+ extra_response = kzalloc(sizeof(struct iscsi_extra_response), GFP_KERNEL);
+ if (!extra_response) {
+ pr_err("Unable to allocate memory for"
+ " struct iscsi_extra_response.\n");
+ return -1;
+ }
+ INIT_LIST_HEAD(&extra_response->er_list);
+
+ strlcpy(extra_response->key, key, sizeof(extra_response->key));
+ strlcpy(extra_response->value, NOTUNDERSTOOD,
+ sizeof(extra_response->value));
+
+ list_add_tail(&extra_response->er_list,
+ &param_list->extra_response_list);
+ return 0;
+}
+
+static int iscsi_check_for_auth_key(char *key)
+{
+ /*
+ * RFC 1994
+ */
+ if (!strcmp(key, "CHAP_A") || !strcmp(key, "CHAP_I") ||
+ !strcmp(key, "CHAP_C") || !strcmp(key, "CHAP_N") ||
+ !strcmp(key, "CHAP_R"))
+ return 1;
+
+ /*
+ * RFC 2945
+ */
+ if (!strcmp(key, "SRP_U") || !strcmp(key, "SRP_N") ||
+ !strcmp(key, "SRP_g") || !strcmp(key, "SRP_s") ||
+ !strcmp(key, "SRP_A") || !strcmp(key, "SRP_B") ||
+ !strcmp(key, "SRP_M") || !strcmp(key, "SRP_HM"))
+ return 1;
+
+ return 0;
+}
+
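+/*
+ * Mark proposed keys whose outcome cannot be changed by the peer's
+ * answer as reply-optional: a boolean AND proposed as "No" and a
+ * boolean OR proposed as "Yes" already fix the negotiation result,
+ * so no response is insisted upon.  The remaining cases below are
+ * workarounds for known non-compliant initiators, as noted inline.
+ */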
+static void iscsi_check_proposer_for_optional_reply(struct iscsi_param *param)
+{
+ if (IS_TYPE_BOOL_AND(param)) {
+ if (!strcmp(param->value, NO))
+ SET_PSTATE_REPLY_OPTIONAL(param);
+ } else if (IS_TYPE_BOOL_OR(param)) {
+ if (!strcmp(param->value, YES))
+ SET_PSTATE_REPLY_OPTIONAL(param);
+ /*
+ * Required for gPXE iSCSI boot client
+ */
+ if (!strcmp(param->name, IMMEDIATEDATA))
+ SET_PSTATE_REPLY_OPTIONAL(param);
+ } else if (IS_TYPE_NUMBER(param)) {
+ if (!strcmp(param->name, MAXRECVDATASEGMENTLENGTH))
+ SET_PSTATE_REPLY_OPTIONAL(param);
+ /*
+ * The GlobalSAN iSCSI Initiator for Mac OS X does
+ * not respond to MaxBurstLength, FirstBurstLength,
+ * DefaultTime2Wait or DefaultTime2Retain parameter keys.
+ * So, we set them to 'reply optional' here, and assume the
+ * defaults from iscsi_target_parameters.h if the initiator
+ * is not RFC compliant and the keys are not negotiated.
+ */
+ if (!strcmp(param->name, MAXBURSTLENGTH))
+ SET_PSTATE_REPLY_OPTIONAL(param);
+ if (!strcmp(param->name, FIRSTBURSTLENGTH))
+ SET_PSTATE_REPLY_OPTIONAL(param);
+ if (!strcmp(param->name, DEFAULTTIME2WAIT))
+ SET_PSTATE_REPLY_OPTIONAL(param);
+ if (!strcmp(param->name, DEFAULTTIME2RETAIN))
+ SET_PSTATE_REPLY_OPTIONAL(param);
+ /*
+ * Required for gPXE iSCSI boot client
+ */
+ if (!strcmp(param->name, MAXCONNECTIONS))
+ SET_PSTATE_REPLY_OPTIONAL(param);
+ } else if (IS_PHASE_DECLARATIVE(param))
+ SET_PSTATE_REPLY_OPTIONAL(param);
+}
+
+static int iscsi_check_boolean_value(struct iscsi_param *param, char *value)
+{
+ if (strcmp(value, YES) && strcmp(value, NO)) {
+ pr_err("Illegal value for \"%s\", must be either"
+ " \"%s\" or \"%s\".\n", param->name, YES, NO);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int iscsi_check_numerical_value(struct iscsi_param *param, char *value_ptr)
+{
+ char *tmpptr;
+ int value = simple_strtoul(value_ptr, &tmpptr, 0);
+
+/* #warning FIXME: Fix this */
+#if 0
+ if (strspn(tmpptr, WHITE_SPACE) != strlen(tmpptr)) {
+ pr_err("Illegal value \"%s\" for \"%s\".\n",
+ value_ptr, param->name);
+ return -1;
+ }
+#endif
+ if (IS_TYPERANGE_0_TO_2(param)) {
+ if ((value < 0) || (value > 2)) {
+ pr_err("Illegal value for \"%s\", must be"
+ " between 0 and 2.\n", param->name);
+ return -1;
+ }
+ return 0;
+ }
+ if (IS_TYPERANGE_0_TO_3600(param)) {
+ if ((value < 0) || (value > 3600)) {
+ pr_err("Illegal value for \"%s\", must be"
+ " between 0 and 3600.\n", param->name);
+ return -1;
+ }
+ return 0;
+ }
+ if (IS_TYPERANGE_0_TO_32767(param)) {
+ if ((value < 0) || (value > 32767)) {
+ pr_err("Illegal value for \"%s\", must be"
+ " between 0 and 32767.\n", param->name);
+ return -1;
+ }
+ return 0;
+ }
+ if (IS_TYPERANGE_0_TO_65535(param)) {
+ if ((value < 0) || (value > 65535)) {
+ pr_err("Illegal value for \"%s\", must be"
+ " between 0 and 65535.\n", param->name);
+ return -1;
+ }
+ return 0;
+ }
+ if (IS_TYPERANGE_1_TO_65535(param)) {
+ if ((value < 1) || (value > 65535)) {
+ pr_err("Illegal value for \"%s\", must be"
+ " between 1 and 65535.\n", param->name);
+ return -1;
+ }
+ return 0;
+ }
+ if (IS_TYPERANGE_2_TO_3600(param)) {
+ if ((value < 2) || (value > 3600)) {
+ pr_err("Illegal value for \"%s\", must be"
+ " between 2 and 3600.\n", param->name);
+ return -1;
+ }
+ return 0;
+ }
+ if (IS_TYPERANGE_512_TO_16777215(param)) {
+ if ((value < 512) || (value > 16777215)) {
+ pr_err("Illegal value for \"%s\", must be"
+ " between 512 and 16777215.\n", param->name);
+ return -1;
+ }
+ return 0;
+ }
+
+ return 0;
+}
+
+static int iscsi_check_numerical_range_value(struct iscsi_param *param, char *value)
+{
+ char *left_val_ptr = NULL, *right_val_ptr = NULL;
+ char *tilde_ptr = NULL, *tmp_ptr = NULL;
+ u32 left_val, right_val, local_left_val, local_right_val;
+
+ if (strcmp(param->name, IFMARKINT) &&
+ strcmp(param->name, OFMARKINT)) {
+ pr_err("Only parameters \"%s\" or \"%s\" may contain a"
+ " numerical range value.\n", IFMARKINT, OFMARKINT);
+ return -1;
+ }
+
+ if (IS_PSTATE_PROPOSER(param))
+ return 0;
+
+ tilde_ptr = strchr(value, '~');
+ if (!tilde_ptr) {
+ pr_err("Unable to locate numerical range indicator"
+ " \"~\" for \"%s\".\n", param->name);
+ return -1;
+ }
+ *tilde_ptr = '\0';
+
+ left_val_ptr = value;
+ right_val_ptr = value + strlen(left_val_ptr) + 1;
+
+ if (iscsi_check_numerical_value(param, left_val_ptr) < 0)
+ return -1;
+ if (iscsi_check_numerical_value(param, right_val_ptr) < 0)
+ return -1;
+
+ left_val = simple_strtoul(left_val_ptr, &tmp_ptr, 0);
+ right_val = simple_strtoul(right_val_ptr, &tmp_ptr, 0);
+ *tilde_ptr = '~';
+
+ if (right_val < left_val) {
+ pr_err("Numerical range for parameter \"%s\" contains"
+ " a right value which is less than the left.\n",
+ param->name);
+ return -1;
+ }
+
+ /*
+ * For now, enforce reasonable defaults for [I,O]FMarkInt.
+ */
+ tilde_ptr = strchr(param->value, '~');
+ if (!tilde_ptr) {
+ pr_err("Unable to locate numerical range indicator"
+ " \"~\" for \"%s\".\n", param->name);
+ return -1;
+ }
+ *tilde_ptr = '\0';
+
+ left_val_ptr = param->value;
+ right_val_ptr = param->value + strlen(left_val_ptr) + 1;
+
+ local_left_val = simple_strtoul(left_val_ptr, &tmp_ptr, 0);
+ local_right_val = simple_strtoul(right_val_ptr, &tmp_ptr, 0);
+ *tilde_ptr = '~';
+
+ if (param->set_param) {
+ if ((left_val < local_left_val) ||
+ (right_val < local_left_val)) {
+ pr_err("Passed value range \"%u~%u\" is below"
+ " minimum left value \"%u\" for key \"%s\","
+ " rejecting.\n", left_val, right_val,
+ local_left_val, param->name);
+ return -1;
+ }
+ } else {
+ if ((left_val < local_left_val) &&
+ (right_val < local_left_val)) {
+ pr_err("Received value range \"%u~%u\" is"
+ " below minimum left value \"%u\" for key"
+ " \"%s\", rejecting.\n", left_val, right_val,
+ local_left_val, param->name);
+ SET_PSTATE_REJECT(param);
+ if (iscsi_update_param_value(param, REJECT) < 0)
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static int iscsi_check_string_or_list_value(struct iscsi_param *param, char *value)
+{
+ if (IS_PSTATE_PROPOSER(param))
+ return 0;
+
+ if (IS_TYPERANGE_AUTH_PARAM(param)) {
+ if (strcmp(value, KRB5) && strcmp(value, SPKM1) &&
+ strcmp(value, SPKM2) && strcmp(value, SRP) &&
+ strcmp(value, CHAP) && strcmp(value, NONE)) {
+ pr_err("Illegal value for \"%s\", must be"
+ " \"%s\", \"%s\", \"%s\", \"%s\", \"%s\""
+ " or \"%s\".\n", param->name, KRB5,
+ SPKM1, SPKM2, SRP, CHAP, NONE);
+ return -1;
+ }
+ }
+ if (IS_TYPERANGE_DIGEST_PARAM(param)) {
+ if (strcmp(value, CRC32C) && strcmp(value, NONE)) {
+ pr_err("Illegal value for \"%s\", must be"
+ " \"%s\" or \"%s\".\n", param->name,
+ CRC32C, NONE);
+ return -1;
+ }
+ }
+ if (IS_TYPERANGE_SESSIONTYPE(param)) {
+ if (strcmp(value, DISCOVERY) && strcmp(value, NORMAL)) {
+ pr_err("Illegal value for \"%s\", must be"
+ " \"%s\" or \"%s\".\n", param->name,
+ DISCOVERY, NORMAL);
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * This function is used to pick a value range number, currently just
+ * returns the lesser of both right values.
+ */
+static char *iscsi_get_value_from_number_range(
+ struct iscsi_param *param,
+ char *value)
+{
+ char *end_ptr, *tilde_ptr1 = NULL, *tilde_ptr2 = NULL;
+ u32 acceptor_right_value, proposer_right_value;
+
+ tilde_ptr1 = strchr(value, '~');
+ if (!tilde_ptr1)
+ return NULL;
+ *tilde_ptr1++ = '\0';
+ proposer_right_value = simple_strtoul(tilde_ptr1, &end_ptr, 0);
+
+ tilde_ptr2 = strchr(param->value, '~');
+ if (!tilde_ptr2)
+ return NULL;
+ *tilde_ptr2++ = '\0';
+ acceptor_right_value = simple_strtoul(tilde_ptr2, &end_ptr, 0);
+
+ return (acceptor_right_value >= proposer_right_value) ?
+ tilde_ptr1 : tilde_ptr2;
+}
+
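+/*
+ * Walk the proposer's comma separated value list in order, comparing
+ * each entry against every entry in the acceptor's list (list order
+ * expresses the proposer's preference).  On success the matching
+ * proposer entry is returned as a NUL-terminated string, with the
+ * comma that followed it left as the terminator; on failure any
+ * temporarily NUL-ed commas are restored and NULL is returned.
+ */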
+static char *iscsi_check_valuelist_for_support(
+ struct iscsi_param *param,
+ char *value)
+{
+ char *tmp1 = NULL, *tmp2 = NULL;
+ char *acceptor_values = NULL, *proposer_values = NULL;
+
+ acceptor_values = param->value;
+ proposer_values = value;
+
+ do {
+ if (!proposer_values)
+ return NULL;
+ tmp1 = strchr(proposer_values, ',');
+ if (tmp1)
+ *tmp1 = '\0';
+ acceptor_values = param->value;
+ do {
+ if (!acceptor_values) {
+ if (tmp1)
+ *tmp1 = ',';
+ return NULL;
+ }
+ tmp2 = strchr(acceptor_values, ',');
+ if (tmp2)
+ *tmp2 = '\0';
+ if (!acceptor_values || !proposer_values) {
+ if (tmp1)
+ *tmp1 = ',';
+ if (tmp2)
+ *tmp2 = ',';
+ return NULL;
+ }
+ if (!strcmp(acceptor_values, proposer_values)) {
+ if (tmp2)
+ *tmp2 = ',';
+ goto out;
+ }
+ if (tmp2)
+ *tmp2++ = ',';
+
+ acceptor_values = tmp2;
+ if (!acceptor_values)
+ break;
+ } while (acceptor_values);
+ if (tmp1)
+ *tmp1++ = ',';
+ proposer_values = tmp1;
+ } while (proposer_values);
+
+out:
+ return proposer_values;
+}
+
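+/*
+ * Apply the result function for a key the peer has offered: boolean
+ * AND yields Yes only if both sides say Yes, boolean OR yields Yes if
+ * either side does, the clamped numeric keys take the smaller of the
+ * two values (DefaultTime2Wait takes the larger), numeric ranges pick
+ * a value from the proposed range, and value lists select the first
+ * mutually supported entry.
+ */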
+static int iscsi_check_acceptor_state(struct iscsi_param *param, char *value)
+{
+ u8 acceptor_boolean_value = 0, proposer_boolean_value = 0;
+ char *negotiated_value = NULL;
+
+ if (IS_PSTATE_ACCEPTOR(param)) {
+ pr_err("Received key \"%s\" twice, protocol error.\n",
+ param->name);
+ return -1;
+ }
+
+ if (IS_PSTATE_REJECT(param))
+ return 0;
+
+ if (IS_TYPE_BOOL_AND(param)) {
+ if (!strcmp(value, YES))
+ proposer_boolean_value = 1;
+ if (!strcmp(param->value, YES))
+ acceptor_boolean_value = 1;
+ if (acceptor_boolean_value && proposer_boolean_value)
+ do {} while (0);
+ else {
+ if (iscsi_update_param_value(param, NO) < 0)
+ return -1;
+ if (!proposer_boolean_value)
+ SET_PSTATE_REPLY_OPTIONAL(param);
+ }
+ } else if (IS_TYPE_BOOL_OR(param)) {
+ if (!strcmp(value, YES))
+ proposer_boolean_value = 1;
+ if (!strcmp(param->value, YES))
+ acceptor_boolean_value = 1;
+ if (acceptor_boolean_value || proposer_boolean_value) {
+ if (iscsi_update_param_value(param, YES) < 0)
+ return -1;
+ if (proposer_boolean_value)
+ SET_PSTATE_REPLY_OPTIONAL(param);
+ }
+ } else if (IS_TYPE_NUMBER(param)) {
+ char *tmpptr, buf[11];
+ u32 acceptor_value = simple_strtoul(param->value, &tmpptr, 0);
+ u32 proposer_value = simple_strtoul(value, &tmpptr, 0);
+
+ memset(buf, 0, sizeof(buf));
+
+ if (!strcmp(param->name, MAXCONNECTIONS) ||
+ !strcmp(param->name, MAXBURSTLENGTH) ||
+ !strcmp(param->name, FIRSTBURSTLENGTH) ||
+ !strcmp(param->name, MAXOUTSTANDINGR2T) ||
+ !strcmp(param->name, DEFAULTTIME2RETAIN) ||
+ !strcmp(param->name, ERRORRECOVERYLEVEL)) {
+ if (proposer_value > acceptor_value) {
+ sprintf(buf, "%u", acceptor_value);
+ if (iscsi_update_param_value(param,
+ &buf[0]) < 0)
+ return -1;
+ } else {
+ if (iscsi_update_param_value(param, value) < 0)
+ return -1;
+ }
+ } else if (!strcmp(param->name, DEFAULTTIME2WAIT)) {
+ if (acceptor_value > proposer_value) {
+ sprintf(buf, "%u", acceptor_value);
+ if (iscsi_update_param_value(param,
+ &buf[0]) < 0)
+ return -1;
+ } else {
+ if (iscsi_update_param_value(param, value) < 0)
+ return -1;
+ }
+ } else {
+ if (iscsi_update_param_value(param, value) < 0)
+ return -1;
+ }
+
+ if (!strcmp(param->name, MAXRECVDATASEGMENTLENGTH))
+ SET_PSTATE_REPLY_OPTIONAL(param);
+ } else if (IS_TYPE_NUMBER_RANGE(param)) {
+ negotiated_value = iscsi_get_value_from_number_range(
+ param, value);
+ if (!negotiated_value)
+ return -1;
+ if (iscsi_update_param_value(param, negotiated_value) < 0)
+ return -1;
+ } else if (IS_TYPE_VALUE_LIST(param)) {
+ negotiated_value = iscsi_check_valuelist_for_support(
+ param, value);
+ if (!negotiated_value) {
+ pr_err("Proposer's value list \"%s\" contains"
+ " no valid values from Acceptor's value list"
+ " \"%s\".\n", value, param->value);
+ return -1;
+ }
+ if (iscsi_update_param_value(param, negotiated_value) < 0)
+ return -1;
+ } else if (IS_PHASE_DECLARATIVE(param)) {
+ if (iscsi_update_param_value(param, value) < 0)
+ return -1;
+ SET_PSTATE_REPLY_OPTIONAL(param);
+ }
+
+ return 0;
+}
+
+static int iscsi_check_proposer_state(struct iscsi_param *param, char *value)
+{
+ if (IS_PSTATE_RESPONSE_GOT(param)) {
+ pr_err("Received key \"%s\" twice, protocol error.\n",
+ param->name);
+ return -1;
+ }
+
+ if (IS_TYPE_NUMBER_RANGE(param)) {
+ u32 left_val = 0, right_val = 0, received_value = 0;
+ char *left_val_ptr = NULL, *right_val_ptr = NULL;
+ char *tilde_ptr = NULL, *tmp_ptr = NULL;
+
+ if (!strcmp(value, IRRELEVANT) || !strcmp(value, REJECT)) {
+ if (iscsi_update_param_value(param, value) < 0)
+ return -1;
+ return 0;
+ }
+
+ tilde_ptr = strchr(value, '~');
+ if (tilde_ptr) {
+ pr_err("Illegal \"~\" in response for \"%s\".\n",
+ param->name);
+ return -1;
+ }
+ tilde_ptr = strchr(param->value, '~');
+ if (!tilde_ptr) {
+ pr_err("Unable to locate numerical range"
+ " indicator \"~\" for \"%s\".\n", param->name);
+ return -1;
+ }
+ *tilde_ptr = '\0';
+
+ left_val_ptr = param->value;
+ right_val_ptr = param->value + strlen(left_val_ptr) + 1;
+ left_val = simple_strtoul(left_val_ptr, &tmp_ptr, 0);
+ right_val = simple_strtoul(right_val_ptr, &tmp_ptr, 0);
+ recieved_value = simple_strtoul(value, &tmp_ptr, 0);
+
+ *tilde_ptr = '~';
+
+ if ((recieved_value < left_val) ||
+ (recieved_value > right_val)) {
+ pr_err("Illegal response \"%s=%u\", value must"
+ " be between %u and %u.\n", param->name,
+ recieved_value, left_val, right_val);
+ return -1;
+ }
+ } else if (IS_TYPE_VALUE_LIST(param)) {
+ char *comma_ptr = NULL, *tmp_ptr = NULL;
+
+ comma_ptr = strchr(value, ',');
+ if (comma_ptr) {
+ pr_err("Illegal \",\" in response for \"%s\".\n",
+ param->name);
+ return -1;
+ }
+
+ tmp_ptr = iscsi_check_valuelist_for_support(param, value);
+ if (!tmp_ptr)
+ return -1;
+ }
+
+ if (iscsi_update_param_value(param, value) < 0)
+ return -1;
+
+ return 0;
+}
+
+static int iscsi_check_value(struct iscsi_param *param, char *value)
+{
+ char *comma_ptr = NULL;
+
+ if (!strcmp(value, REJECT)) {
+ if (!strcmp(param->name, IFMARKINT) ||
+ !strcmp(param->name, OFMARKINT)) {
+ /*
+ * Reject is not fatal for [I,O]FMarkInt, and causes
+ * [I,O]FMarker to be reset to No. (See iSCSI v20 A.3.2)
+ */
+ SET_PSTATE_REJECT(param);
+ return 0;
+ }
+ pr_err("Received %s=%s\n", param->name, value);
+ return -1;
+ }
+ if (!strcmp(value, IRRELEVANT)) {
+ pr_debug("Received %s=%s\n", param->name, value);
+ SET_PSTATE_IRRELEVANT(param);
+ return 0;
+ }
+ if (!strcmp(value, NOTUNDERSTOOD)) {
+ if (!IS_PSTATE_PROPOSER(param)) {
+ pr_err("Received illegal offer %s=%s\n",
+ param->name, value);
+ return -1;
+ }
+
+/* #warning FIXME: Add check for X-ExtensionKey here */
+ pr_err("Standard iSCSI key \"%s\" cannot be answered"
+ " with \"%s\", protocol error.\n", param->name, value);
+ return -1;
+ }
+
+ do {
+ comma_ptr = strchr(value, ',');
+
+ if (comma_ptr && !IS_TYPE_VALUE_LIST(param)) {
+ pr_err("Detected value seperator \",\", but"
+ " key \"%s\" does not allow a value list,"
+ " protocol error.\n", param->name);
+ return -1;
+ }
+ if (comma_ptr)
+ *comma_ptr = '\0';
+
+ if (strlen(value) > VALUE_MAXLEN) {
+ pr_err("Value for key \"%s\" exceeds %d,"
+ " protocol error.\n", param->name,
+ VALUE_MAXLEN);
+ return -1;
+ }
+
+ if (IS_TYPE_BOOL_AND(param) || IS_TYPE_BOOL_OR(param)) {
+ if (iscsi_check_boolean_value(param, value) < 0)
+ return -1;
+ } else if (IS_TYPE_NUMBER(param)) {
+ if (iscsi_check_numerical_value(param, value) < 0)
+ return -1;
+ } else if (IS_TYPE_NUMBER_RANGE(param)) {
+ if (iscsi_check_numerical_range_value(param, value) < 0)
+ return -1;
+ } else if (IS_TYPE_STRING(param) || IS_TYPE_VALUE_LIST(param)) {
+ if (iscsi_check_string_or_list_value(param, value) < 0)
+ return -1;
+ } else {
+ pr_err("Huh? 0x%02x\n", param->type);
+ return -1;
+ }
+
+ if (comma_ptr)
+ *comma_ptr++ = ',';
+
+ value = comma_ptr;
+ } while (value);
+
+ return 0;
+}
+
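+/*
+ * Lighter-weight variant of iscsi_check_key(): validates key length
+ * and sender, but skips the phase and duplicate-key checks that only
+ * apply while a login negotiation is in progress.  Used by
+ * iscsi_change_param_value() when check_key is not set.
+ */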
+static struct iscsi_param *__iscsi_check_key(
+ char *key,
+ int sender,
+ struct iscsi_param_list *param_list)
+{
+ struct iscsi_param *param;
+
+ if (strlen(key) > KEY_MAXLEN) {
+ pr_err("Length of key name \"%s\" exceeds %d.\n",
+ key, KEY_MAXLEN);
+ return NULL;
+ }
+
+ param = iscsi_find_param_from_key(key, param_list);
+ if (!param)
+ return NULL;
+
+ if ((sender & SENDER_INITIATOR) && !IS_SENDER_INITIATOR(param)) {
+ pr_err("Key \"%s\" may not be sent to %s,"
+ " protocol error.\n", param->name,
+ (sender & SENDER_RECEIVER) ? "target" : "initiator");
+ return NULL;
+ }
+
+ if ((sender & SENDER_TARGET) && !IS_SENDER_TARGET(param)) {
+ pr_err("Key \"%s\" may not be sent to %s,"
+ " protocol error.\n", param->name,
+ (sender & SENDER_RECEIVER) ? "initiator" : "target");
+ return NULL;
+ }
+
+ return param;
+}
+
+static struct iscsi_param *iscsi_check_key(
+ char *key,
+ int phase,
+ int sender,
+ struct iscsi_param_list *param_list)
+{
+ struct iscsi_param *param;
+ /*
+ * Key name length must not exceed 63 bytes. (See iSCSI v20 5.1)
+ */
+ if (strlen(key) > KEY_MAXLEN) {
+ pr_err("Length of key name \"%s\" exceeds %d.\n",
+ key, KEY_MAXLEN);
+ return NULL;
+ }
+
+ param = iscsi_find_param_from_key(key, param_list);
+ if (!param)
+ return NULL;
+
+ if ((sender & SENDER_INITIATOR) && !IS_SENDER_INITIATOR(param)) {
+ pr_err("Key \"%s\" may not be sent to %s,"
+ " protocol error.\n", param->name,
+ (sender & SENDER_RECEIVER) ? "target" : "initiator");
+ return NULL;
+ }
+ if ((sender & SENDER_TARGET) && !IS_SENDER_TARGET(param)) {
+ pr_err("Key \"%s\" may not be sent to %s,"
+ " protocol error.\n", param->name,
+ (sender & SENDER_RECEIVER) ? "initiator" : "target");
+ return NULL;
+ }
+
+ if (IS_PSTATE_ACCEPTOR(param)) {
+ pr_err("Key \"%s\" received twice, protocol error.\n",
+ key);
+ return NULL;
+ }
+
+ if (!phase)
+ return param;
+
+ if (!(param->phase & phase)) {
+ pr_err("Key \"%s\" may not be negotiated during ",
+ param->name);
+ switch (phase) {
+ case PHASE_SECURITY:
+ pr_debug("Security phase.\n");
+ break;
+ case PHASE_OPERATIONAL:
+ pr_debug("Operational phase.\n");
+ break;
+ default:
+ pr_debug("Unknown phase.\n");
+ }
+ return NULL;
+ }
+
+ return param;
+}
+
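+/*
+ * Two passes over the negotiated list: the first collects values that
+ * constrain other keys (SessionType, ErrorRecoveryLevel,
+ * DataSequenceInOrder, MaxBurstLength and the marker keys), the second
+ * clamps the dependent keys accordingly, e.g. forcing
+ * MaxOutstandingR2T to 1 when in-order data with ERL > 0 is in effect,
+ * MaxConnections to 1 for discovery sessions, and FirstBurstLength to
+ * at most MaxBurstLength.
+ */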
+static int iscsi_enforce_integrity_rules(
+ u8 phase,
+ struct iscsi_param_list *param_list)
+{
+ char *tmpptr;
+ u8 DataSequenceInOrder = 0;
+ u8 ErrorRecoveryLevel = 0, SessionType = 0;
+ u8 IFMarker = 0, OFMarker = 0;
+ u8 IFMarkInt_Reject = 0, OFMarkInt_Reject = 0;
+ u32 FirstBurstLength = 0, MaxBurstLength = 0;
+ struct iscsi_param *param = NULL;
+
+ list_for_each_entry(param, &param_list->param_list, p_list) {
+ if (!(param->phase & phase))
+ continue;
+ if (!strcmp(param->name, SESSIONTYPE))
+ if (!strcmp(param->value, NORMAL))
+ SessionType = 1;
+ if (!strcmp(param->name, ERRORRECOVERYLEVEL))
+ ErrorRecoveryLevel = simple_strtoul(param->value,
+ &tmpptr, 0);
+ if (!strcmp(param->name, DATASEQUENCEINORDER))
+ if (!strcmp(param->value, YES))
+ DataSequenceInOrder = 1;
+ if (!strcmp(param->name, MAXBURSTLENGTH))
+ MaxBurstLength = simple_strtoul(param->value,
+ &tmpptr, 0);
+ if (!strcmp(param->name, IFMARKER))
+ if (!strcmp(param->value, YES))
+ IFMarker = 1;
+ if (!strcmp(param->name, OFMARKER))
+ if (!strcmp(param->value, YES))
+ OFMarker = 1;
+ if (!strcmp(param->name, IFMARKINT))
+ if (!strcmp(param->value, REJECT))
+ IFMarkInt_Reject = 1;
+ if (!strcmp(param->name, OFMARKINT))
+ if (!strcmp(param->value, REJECT))
+ OFMarkInt_Reject = 1;
+ }
+
+ list_for_each_entry(param, &param_list->param_list, p_list) {
+ if (!(param->phase & phase))
+ continue;
+ if (!SessionType && (!IS_PSTATE_ACCEPTOR(param) &&
+ (strcmp(param->name, IFMARKER) &&
+ strcmp(param->name, OFMARKER) &&
+ strcmp(param->name, IFMARKINT) &&
+ strcmp(param->name, OFMARKINT))))
+ continue;
+ if (!strcmp(param->name, MAXOUTSTANDINGR2T) &&
+ DataSequenceInOrder && (ErrorRecoveryLevel > 0)) {
+ if (strcmp(param->value, "1")) {
+ if (iscsi_update_param_value(param, "1") < 0)
+ return -1;
+ pr_debug("Reset \"%s\" to \"%s\".\n",
+ param->name, param->value);
+ }
+ }
+ if (!strcmp(param->name, MAXCONNECTIONS) && !SessionType) {
+ if (strcmp(param->value, "1")) {
+ if (iscsi_update_param_value(param, "1") < 0)
+ return -1;
+ pr_debug("Reset \"%s\" to \"%s\".\n",
+ param->name, param->value);
+ }
+ }
+ if (!strcmp(param->name, FIRSTBURSTLENGTH)) {
+ FirstBurstLength = simple_strtoul(param->value,
+ &tmpptr, 0);
+ if (FirstBurstLength > MaxBurstLength) {
+ char tmpbuf[11];
+ memset(tmpbuf, 0, sizeof(tmpbuf));
+ sprintf(tmpbuf, "%u", MaxBurstLength);
+ if (iscsi_update_param_value(param, tmpbuf))
+ return -1;
+ pr_debug("Reset \"%s\" to \"%s\".\n",
+ param->name, param->value);
+ }
+ }
+ if (!strcmp(param->name, IFMARKER) && IFMarkInt_Reject) {
+ if (iscsi_update_param_value(param, NO) < 0)
+ return -1;
+ IFMarker = 0;
+ pr_debug("Reset \"%s\" to \"%s\".\n",
+ param->name, param->value);
+ }
+ if (!strcmp(param->name, OFMARKER) && OFMarkInt_Reject) {
+ if (iscsi_update_param_value(param, NO) < 0)
+ return -1;
+ OFMarker = 0;
+ pr_debug("Reset \"%s\" to \"%s\".\n",
+ param->name, param->value);
+ }
+ if (!strcmp(param->name, IFMARKINT) && !IFMarker) {
+ if (!strcmp(param->value, REJECT))
+ continue;
+ param->state &= ~PSTATE_NEGOTIATE;
+ if (iscsi_update_param_value(param, IRRELEVANT) < 0)
+ return -1;
+ pr_debug("Reset \"%s\" to \"%s\".\n",
+ param->name, param->value);
+ }
+ if (!strcmp(param->name, OFMARKINT) && !OFMarker) {
+ if (!strcmp(param->value, REJECT))
+ continue;
+ param->state &= ~PSTATE_NEGOTIATE;
+ if (iscsi_update_param_value(param, IRRELEVANT) < 0)
+ return -1;
+ pr_debug("Reset \"%s\" to \"%s\".\n",
+ param->name, param->value);
+ }
+ }
+
+ return 0;
+}
+
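+/*
+ * Parse a login/text PDU payload: a sequence of key=value pairs, each
+ * terminated by a NUL byte.  iscsi_extract_key_value() overwrites the
+ * '=' with a NUL, so each pair occupies strlen(key) + strlen(value)
+ * + 2 bytes, which is how the scan pointer advances below.
+ */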
+int iscsi_decode_text_input(
+ u8 phase,
+ u8 sender,
+ char *textbuf,
+ u32 length,
+ struct iscsi_param_list *param_list)
+{
+ char *tmpbuf, *start = NULL, *end = NULL;
+
+ tmpbuf = kzalloc(length + 1, GFP_KERNEL);
+ if (!tmpbuf) {
+ pr_err("Unable to allocate memory for tmpbuf.\n");
+ return -1;
+ }
+
+ memcpy(tmpbuf, textbuf, length);
+ tmpbuf[length] = '\0';
+ start = tmpbuf;
+ end = (start + length);
+
+ while (start < end) {
+ char *key, *value;
+ struct iscsi_param *param;
+
+ if (iscsi_extract_key_value(start, &key, &value) < 0) {
+ kfree(tmpbuf);
+ return -1;
+ }
+
+ pr_debug("Got key: %s=%s\n", key, value);
+
+ if (phase & PHASE_SECURITY) {
+ if (iscsi_check_for_auth_key(key) > 0) {
+ char *tmpptr = key + strlen(key);
+ *tmpptr = '=';
+ kfree(tmpbuf);
+ return 1;
+ }
+ }
+
+ param = iscsi_check_key(key, phase, sender, param_list);
+ if (!param) {
+ if (iscsi_add_notunderstood_response(key,
+ value, param_list) < 0) {
+ kfree(tmpbuf);
+ return -1;
+ }
+ start += strlen(key) + strlen(value) + 2;
+ continue;
+ }
+ if (iscsi_check_value(param, value) < 0) {
+ kfree(tmpbuf);
+ return -1;
+ }
+
+ start += strlen(key) + strlen(value) + 2;
+
+ if (IS_PSTATE_PROPOSER(param)) {
+ if (iscsi_check_proposer_state(param, value) < 0) {
+ kfree(tmpbuf);
+ return -1;
+ }
+ SET_PSTATE_RESPONSE_GOT(param);
+ } else {
+ if (iscsi_check_acceptor_state(param, value) < 0) {
+ kfree(tmpbuf);
+ return -1;
+ }
+ SET_PSTATE_ACCEPTOR(param);
+ }
+ }
+
+ kfree(tmpbuf);
+ return 0;
+}
+
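+/*
+ * Append outgoing key=value pairs to the text buffer in two classes:
+ * answers for keys the peer offered (ACCEPTOR state, reply not yet
+ * sent and not optional) and this side's own proposals (NEGOTIATE
+ * state, not yet proposed).  sprintf() returns the length excluding
+ * the terminating NUL, so *length is bumped by one extra byte per key
+ * to account for the NUL separator the wire format requires.
+ */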
+int iscsi_encode_text_output(
+ u8 phase,
+ u8 sender,
+ char *textbuf,
+ u32 *length,
+ struct iscsi_param_list *param_list)
+{
+ char *output_buf = NULL;
+ struct iscsi_extra_response *er;
+ struct iscsi_param *param;
+
+ output_buf = textbuf + *length;
+
+ if (iscsi_enforce_integrity_rules(phase, param_list) < 0)
+ return -1;
+
+ list_for_each_entry(param, &param_list->param_list, p_list) {
+ if (!(param->sender & sender))
+ continue;
+ if (IS_PSTATE_ACCEPTOR(param) &&
+ !IS_PSTATE_RESPONSE_SENT(param) &&
+ !IS_PSTATE_REPLY_OPTIONAL(param) &&
+ (param->phase & phase)) {
+ *length += sprintf(output_buf, "%s=%s",
+ param->name, param->value);
+ *length += 1;
+ output_buf = textbuf + *length;
+ SET_PSTATE_RESPONSE_SENT(param);
+ pr_debug("Sending key: %s=%s\n",
+ param->name, param->value);
+ continue;
+ }
+ if (IS_PSTATE_NEGOTIATE(param) &&
+ !IS_PSTATE_ACCEPTOR(param) &&
+ !IS_PSTATE_PROPOSER(param) &&
+ (param->phase & phase)) {
+ *length += sprintf(output_buf, "%s=%s",
+ param->name, param->value);
+ *length += 1;
+ output_buf = textbuf + *length;
+ SET_PSTATE_PROPOSER(param);
+ iscsi_check_proposer_for_optional_reply(param);
+ pr_debug("Sending key: %s=%s\n",
+ param->name, param->value);
+ }
+ }
+
+ list_for_each_entry(er, &param_list->extra_response_list, er_list) {
+ *length += sprintf(output_buf, "%s=%s", er->key, er->value);
+ *length += 1;
+ output_buf = textbuf + *length;
+ pr_debug("Sending key: %s=%s\n", er->key, er->value);
+ }
+ iscsi_release_extra_responses(param_list);
+
+ return 0;
+}
+
+int iscsi_check_negotiated_keys(struct iscsi_param_list *param_list)
+{
+ int ret = 0;
+ struct iscsi_param *param;
+
+ list_for_each_entry(param, &param_list->param_list, p_list) {
+ if (IS_PSTATE_NEGOTIATE(param) &&
+ IS_PSTATE_PROPOSER(param) &&
+ !IS_PSTATE_RESPONSE_GOT(param) &&
+ !IS_PSTATE_REPLY_OPTIONAL(param) &&
+ !IS_PHASE_DECLARATIVE(param)) {
+ pr_err("No response for proposed key \"%s\".\n",
+ param->name);
+ ret = -1;
+ }
+ }
+
+ return ret;
+}
+
+int iscsi_change_param_value(
+ char *keyvalue,
+ struct iscsi_param_list *param_list,
+ int check_key)
+{
+ char *key = NULL, *value = NULL;
+ struct iscsi_param *param;
+ int sender = 0;
+
+ if (iscsi_extract_key_value(keyvalue, &key, &value) < 0)
+ return -1;
+
+ if (!check_key) {
+ param = __iscsi_check_key(key, sender, param_list);
+ if (!param)
+ return -1;
+ } else {
+ param = iscsi_check_key(key, 0, sender, param_list);
+ if (!param)
+ return -1;
+
+ param->set_param = 1;
+ if (iscsi_check_value(param, value) < 0) {
+ param->set_param = 0;
+ return -1;
+ }
+ param->set_param = 0;
+ }
+
+ if (iscsi_update_param_value(param, value) < 0)
+ return -1;
+
+ return 0;
+}
+
+void iscsi_set_connection_parameters(
+ struct iscsi_conn_ops *ops,
+ struct iscsi_param_list *param_list)
+{
+ char *tmpptr;
+ struct iscsi_param *param;
+
+ pr_debug("---------------------------------------------------"
+ "---------------\n");
+ list_for_each_entry(param, &param_list->param_list, p_list) {
+ if (!IS_PSTATE_ACCEPTOR(param) && !IS_PSTATE_PROPOSER(param))
+ continue;
+ if (!strcmp(param->name, AUTHMETHOD)) {
+ pr_debug("AuthMethod: %s\n",
+ param->value);
+ } else if (!strcmp(param->name, HEADERDIGEST)) {
+ ops->HeaderDigest = !strcmp(param->value, CRC32C);
+ pr_debug("HeaderDigest: %s\n",
+ param->value);
+ } else if (!strcmp(param->name, DATADIGEST)) {
+ ops->DataDigest = !strcmp(param->value, CRC32C);
+ pr_debug("DataDigest: %s\n",
+ param->value);
+ } else if (!strcmp(param->name, MAXRECVDATASEGMENTLENGTH)) {
+ ops->MaxRecvDataSegmentLength =
+ simple_strtoul(param->value, &tmpptr, 0);
+ pr_debug("MaxRecvDataSegmentLength: %s\n",
+ param->value);
+ } else if (!strcmp(param->name, OFMARKER)) {
+ ops->OFMarker = !strcmp(param->value, YES);
+ pr_debug("OFMarker: %s\n",
+ param->value);
+ } else if (!strcmp(param->name, IFMARKER)) {
+ ops->IFMarker = !strcmp(param->value, YES);
+ pr_debug("IFMarker: %s\n",
+ param->value);
+ } else if (!strcmp(param->name, OFMARKINT)) {
+ ops->OFMarkInt =
+ simple_strtoul(param->value, &tmpptr, 0);
+ pr_debug("OFMarkInt: %s\n",
+ param->value);
+ } else if (!strcmp(param->name, IFMARKINT)) {
+ ops->IFMarkInt =
+ simple_strtoul(param->value, &tmpptr, 0);
+ pr_debug("IFMarkInt: %s\n",
+ param->value);
+ }
+ }
+ pr_debug("----------------------------------------------------"
+ "--------------\n");
+}
+
+void iscsi_set_session_parameters(
+ struct iscsi_sess_ops *ops,
+ struct iscsi_param_list *param_list,
+ int leading)
+{
+ char *tmpptr;
+ struct iscsi_param *param;
+
+ pr_debug("----------------------------------------------------"
+ "--------------\n");
+ list_for_each_entry(param, &param_list->param_list, p_list) {
+ if (!IS_PSTATE_ACCEPTOR(param) && !IS_PSTATE_PROPOSER(param))
+ continue;
+ if (!strcmp(param->name, INITIATORNAME)) {
+ if (!param->value)
+ continue;
+ if (leading)
+ snprintf(ops->InitiatorName,
+ sizeof(ops->InitiatorName),
+ "%s", param->value);
+ pr_debug("InitiatorName: %s\n",
+ param->value);
+ } else if (!strcmp(param->name, INITIATORALIAS)) {
+ if (!param->value)
+ continue;
+ snprintf(ops->InitiatorAlias,
+ sizeof(ops->InitiatorAlias),
+ "%s", param->value);
+ pr_debug("InitiatorAlias: %s\n",
+ param->value);
+ } else if (!strcmp(param->name, TARGETNAME)) {
+ if (!param->value)
+ continue;
+ if (leading)
+ snprintf(ops->TargetName,
+ sizeof(ops->TargetName),
+ "%s", param->value);
+ pr_debug("TargetName: %s\n",
+ param->value);
+ } else if (!strcmp(param->name, TARGETALIAS)) {
+ if (!param->value)
+ continue;
+ snprintf(ops->TargetAlias, sizeof(ops->TargetAlias),
+ "%s", param->value);
+ pr_debug("TargetAlias: %s\n",
+ param->value);
+ } else if (!strcmp(param->name, TARGETPORTALGROUPTAG)) {
+ ops->TargetPortalGroupTag =
+ simple_strtoul(param->value, &tmpptr, 0);
+ pr_debug("TargetPortalGroupTag: %s\n",
+ param->value);
+ } else if (!strcmp(param->name, MAXCONNECTIONS)) {
+ ops->MaxConnections =
+ simple_strtoul(param->value, &tmpptr, 0);
+ pr_debug("MaxConnections: %s\n",
+ param->value);
+ } else if (!strcmp(param->name, INITIALR2T)) {
+ ops->InitialR2T = !strcmp(param->value, YES);
+ pr_debug("InitialR2T: %s\n",
+ param->value);
+ } else if (!strcmp(param->name, IMMEDIATEDATA)) {
+ ops->ImmediateData = !strcmp(param->value, YES);
+ pr_debug("ImmediateData: %s\n",
+ param->value);
+ } else if (!strcmp(param->name, MAXBURSTLENGTH)) {
+ ops->MaxBurstLength =
+ simple_strtoul(param->value, &tmpptr, 0);
+ pr_debug("MaxBurstLength: %s\n",
+ param->value);
+ } else if (!strcmp(param->name, FIRSTBURSTLENGTH)) {
+ ops->FirstBurstLength =
+ simple_strtoul(param->value, &tmpptr, 0);
+ pr_debug("FirstBurstLength: %s\n",
+ param->value);
+ } else if (!strcmp(param->name, DEFAULTTIME2WAIT)) {
+ ops->DefaultTime2Wait =
+ simple_strtoul(param->value, &tmpptr, 0);
+ pr_debug("DefaultTime2Wait: %s\n",
+ param->value);
+ } else if (!strcmp(param->name, DEFAULTTIME2RETAIN)) {
+ ops->DefaultTime2Retain =
+ simple_strtoul(param->value, &tmpptr, 0);
+ pr_debug("DefaultTime2Retain: %s\n",
+ param->value);
+ } else if (!strcmp(param->name, MAXOUTSTANDINGR2T)) {
+ ops->MaxOutstandingR2T =
+ simple_strtoul(param->value, &tmpptr, 0);
+ pr_debug("MaxOutstandingR2T: %s\n",
+ param->value);
+ } else if (!strcmp(param->name, DATAPDUINORDER)) {
+ ops->DataPDUInOrder = !strcmp(param->value, YES);
+ pr_debug("DataPDUInOrder: %s\n",
+ param->value);
+ } else if (!strcmp(param->name, DATASEQUENCEINORDER)) {
+ ops->DataSequenceInOrder = !strcmp(param->value, YES);
+ pr_debug("DataSequenceInOrder: %s\n",
+ param->value);
+ } else if (!strcmp(param->name, ERRORRECOVERYLEVEL)) {
+ ops->ErrorRecoveryLevel =
+ simple_strtoul(param->value, &tmpptr, 0);
+ pr_debug("ErrorRecoveryLevel: %s\n",
+ param->value);
+ } else if (!strcmp(param->name, SESSIONTYPE)) {
+ ops->SessionType = !strcmp(param->value, DISCOVERY);
+ pr_debug("SessionType: %s\n",
+ param->value);
+ }
+ }
+ pr_debug("----------------------------------------------------"
+ "--------------\n");
+}
diff --git a/drivers/target/iscsi/iscsi_target_parameters.h b/drivers/target/iscsi/iscsi_target_parameters.h
new file mode 100644
index 00000000000..6a37fd6f128
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_parameters.h
@@ -0,0 +1,269 @@
+#ifndef ISCSI_PARAMETERS_H
+#define ISCSI_PARAMETERS_H
+
+struct iscsi_extra_response {
+ char key[64];
+ char value[32];
+ struct list_head er_list;
+} ____cacheline_aligned;
+
+struct iscsi_param {
+ char *name;
+ char *value;
+ u8 set_param;
+ u8 phase;
+ u8 scope;
+ u8 sender;
+ u8 type;
+ u8 use;
+ u16 type_range;
+ u32 state;
+ struct list_head p_list;
+} ____cacheline_aligned;
+
+extern int iscsi_login_rx_data(struct iscsi_conn *, char *, int);
+extern int iscsi_login_tx_data(struct iscsi_conn *, char *, char *, int);
+extern void iscsi_dump_conn_ops(struct iscsi_conn_ops *);
+extern void iscsi_dump_sess_ops(struct iscsi_sess_ops *);
+extern void iscsi_print_params(struct iscsi_param_list *);
+extern int iscsi_create_default_params(struct iscsi_param_list **);
+extern int iscsi_set_keys_to_negotiate(int, struct iscsi_param_list *);
+extern int iscsi_set_keys_irrelevant_for_discovery(struct iscsi_param_list *);
+extern int iscsi_copy_param_list(struct iscsi_param_list **,
+ struct iscsi_param_list *, int);
+extern int iscsi_change_param_value(char *, struct iscsi_param_list *, int);
+extern void iscsi_release_param_list(struct iscsi_param_list *);
+extern struct iscsi_param *iscsi_find_param_from_key(char *, struct iscsi_param_list *);
+extern int iscsi_extract_key_value(char *, char **, char **);
+extern int iscsi_update_param_value(struct iscsi_param *, char *);
+extern int iscsi_decode_text_input(u8, u8, char *, u32, struct iscsi_param_list *);
+extern int iscsi_encode_text_output(u8, u8, char *, u32 *,
+ struct iscsi_param_list *);
+extern int iscsi_check_negotiated_keys(struct iscsi_param_list *);
+extern void iscsi_set_connection_parameters(struct iscsi_conn_ops *,
+ struct iscsi_param_list *);
+extern void iscsi_set_session_parameters(struct iscsi_sess_ops *,
+ struct iscsi_param_list *, int);
+
+#define YES "Yes"
+#define NO "No"
+#define ALL "All"
+#define IRRELEVANT "Irrelevant"
+#define NONE "None"
+#define NOTUNDERSTOOD "NotUnderstood"
+#define REJECT "Reject"
+
+/*
+ * The Parameter Names.
+ */
+#define AUTHMETHOD "AuthMethod"
+#define HEADERDIGEST "HeaderDigest"
+#define DATADIGEST "DataDigest"
+#define MAXCONNECTIONS "MaxConnections"
+#define SENDTARGETS "SendTargets"
+#define TARGETNAME "TargetName"
+#define INITIATORNAME "InitiatorName"
+#define TARGETALIAS "TargetAlias"
+#define INITIATORALIAS "InitiatorAlias"
+#define TARGETADDRESS "TargetAddress"
+#define TARGETPORTALGROUPTAG "TargetPortalGroupTag"
+#define INITIALR2T "InitialR2T"
+#define IMMEDIATEDATA "ImmediateData"
+#define MAXRECVDATASEGMENTLENGTH "MaxRecvDataSegmentLength"
+#define MAXBURSTLENGTH "MaxBurstLength"
+#define FIRSTBURSTLENGTH "FirstBurstLength"
+#define DEFAULTTIME2WAIT "DefaultTime2Wait"
+#define DEFAULTTIME2RETAIN "DefaultTime2Retain"
+#define MAXOUTSTANDINGR2T "MaxOutstandingR2T"
+#define DATAPDUINORDER "DataPDUInOrder"
+#define DATASEQUENCEINORDER "DataSequenceInOrder"
+#define ERRORRECOVERYLEVEL "ErrorRecoveryLevel"
+#define SESSIONTYPE "SessionType"
+#define IFMARKER "IFMarker"
+#define OFMARKER "OFMarker"
+#define IFMARKINT "IFMarkInt"
+#define OFMARKINT "OFMarkInt"
+#define X_EXTENSIONKEY "X-com.sbei.version"
+#define X_EXTENSIONKEY_CISCO_NEW "X-com.cisco.protocol"
+#define X_EXTENSIONKEY_CISCO_OLD "X-com.cisco.iscsi.draft"
+
+/*
+ * For AuthMethod.
+ */
+#define KRB5 "KRB5"
+#define SPKM1 "SPKM1"
+#define SPKM2 "SPKM2"
+#define SRP "SRP"
+#define CHAP "CHAP"
+
+/*
+ * Initial values for Parameter Negotiation.
+ */
+#define INITIAL_AUTHMETHOD CHAP
+#define INITIAL_HEADERDIGEST "CRC32C,None"
+#define INITIAL_DATADIGEST "CRC32C,None"
+#define INITIAL_MAXCONNECTIONS "1"
+#define INITIAL_SENDTARGETS ALL
+#define INITIAL_TARGETNAME "LIO.Target"
+#define INITIAL_INITIATORNAME "LIO.Initiator"
+#define INITIAL_TARGETALIAS "LIO Target"
+#define INITIAL_INITIATORALIAS "LIO Initiator"
+#define INITIAL_TARGETADDRESS "0.0.0.0:0000,0"
+#define INITIAL_TARGETPORTALGROUPTAG "1"
+#define INITIAL_INITIALR2T YES
+#define INITIAL_IMMEDIATEDATA YES
+#define INITIAL_MAXRECVDATASEGMENTLENGTH "8192"
+#define INITIAL_MAXBURSTLENGTH "262144"
+#define INITIAL_FIRSTBURSTLENGTH "65536"
+#define INITIAL_DEFAULTTIME2WAIT "2"
+#define INITIAL_DEFAULTTIME2RETAIN "20"
+#define INITIAL_MAXOUTSTANDINGR2T "1"
+#define INITIAL_DATAPDUINORDER YES
+#define INITIAL_DATASEQUENCEINORDER YES
+#define INITIAL_ERRORRECOVERYLEVEL "0"
+#define INITIAL_SESSIONTYPE NORMAL
+#define INITIAL_IFMARKER NO
+#define INITIAL_OFMARKER NO
+#define INITIAL_IFMARKINT "2048~65535"
+#define INITIAL_OFMARKINT "2048~65535"
+
+/*
+ * For [Header,Data]Digests.
+ */
+#define CRC32C "CRC32C"
+
+/*
+ * For SessionType.
+ */
+#define DISCOVERY "Discovery"
+#define NORMAL "Normal"
+
+/*
+ * struct iscsi_param->use
+ */
+#define USE_LEADING_ONLY 0x01
+#define USE_INITIAL_ONLY 0x02
+#define USE_ALL 0x04
+
+#define IS_USE_LEADING_ONLY(p) ((p)->use & USE_LEADING_ONLY)
+#define IS_USE_INITIAL_ONLY(p) ((p)->use & USE_INITIAL_ONLY)
+#define IS_USE_ALL(p) ((p)->use & USE_ALL)
+
+#define SET_USE_INITIAL_ONLY(p) ((p)->use |= USE_INITIAL_ONLY)
+
+/*
+ * struct iscsi_param->sender
+ */
+#define SENDER_INITIATOR 0x01
+#define SENDER_TARGET 0x02
+#define SENDER_BOTH 0x03
+/* Used in iscsi_check_key() */
+#define SENDER_RECEIVER 0x04
+
+#define IS_SENDER_INITIATOR(p) ((p)->sender & SENDER_INITIATOR)
+#define IS_SENDER_TARGET(p) ((p)->sender & SENDER_TARGET)
+#define IS_SENDER_BOTH(p) ((p)->sender & SENDER_BOTH)
+
+/*
+ * struct iscsi_param->scope
+ */
+#define SCOPE_CONNECTION_ONLY 0x01
+#define SCOPE_SESSION_WIDE 0x02
+
+#define IS_SCOPE_CONNECTION_ONLY(p) ((p)->scope & SCOPE_CONNECTION_ONLY)
+#define IS_SCOPE_SESSION_WIDE(p) ((p)->scope & SCOPE_SESSION_WIDE)
+
+/*
+ * struct iscsi_param->phase
+ */
+#define PHASE_SECURITY 0x01
+#define PHASE_OPERATIONAL 0x02
+#define PHASE_DECLARATIVE 0x04
+#define PHASE_FFP0 0x08
+
+#define IS_PHASE_SECURITY(p) ((p)->phase & PHASE_SECURITY)
+#define IS_PHASE_OPERATIONAL(p) ((p)->phase & PHASE_OPERATIONAL)
+#define IS_PHASE_DECLARATIVE(p) ((p)->phase & PHASE_DECLARATIVE)
+#define IS_PHASE_FFP0(p) ((p)->phase & PHASE_FFP0)
+
+/*
+ * struct iscsi_param->type
+ */
+#define TYPE_BOOL_AND 0x01
+#define TYPE_BOOL_OR 0x02
+#define TYPE_NUMBER 0x04
+#define TYPE_NUMBER_RANGE 0x08
+#define TYPE_STRING 0x10
+#define TYPE_VALUE_LIST 0x20
+
+#define IS_TYPE_BOOL_AND(p) ((p)->type & TYPE_BOOL_AND)
+#define IS_TYPE_BOOL_OR(p) ((p)->type & TYPE_BOOL_OR)
+#define IS_TYPE_NUMBER(p) ((p)->type & TYPE_NUMBER)
+#define IS_TYPE_NUMBER_RANGE(p) ((p)->type & TYPE_NUMBER_RANGE)
+#define IS_TYPE_STRING(p) ((p)->type & TYPE_STRING)
+#define IS_TYPE_VALUE_LIST(p) ((p)->type & TYPE_VALUE_LIST)
+
+/*
+ * struct iscsi_param->type_range
+ */
+#define TYPERANGE_BOOL_AND 0x0001
+#define TYPERANGE_BOOL_OR 0x0002
+#define TYPERANGE_0_TO_2 0x0004
+#define TYPERANGE_0_TO_3600 0x0008
+#define TYPERANGE_0_TO_32767 0x0010
+#define TYPERANGE_0_TO_65535 0x0020
+#define TYPERANGE_1_TO_65535 0x0040
+#define TYPERANGE_2_TO_3600 0x0080
+#define TYPERANGE_512_TO_16777215 0x0100
+#define TYPERANGE_AUTH 0x0200
+#define TYPERANGE_DIGEST 0x0400
+#define TYPERANGE_ISCSINAME 0x0800
+#define TYPERANGE_MARKINT 0x1000
+#define TYPERANGE_SESSIONTYPE 0x2000
+#define TYPERANGE_TARGETADDRESS 0x4000
+#define TYPERANGE_UTF8 0x8000
+
+#define IS_TYPERANGE_0_TO_2(p) ((p)->type_range & TYPERANGE_0_TO_2)
+#define IS_TYPERANGE_0_TO_3600(p) ((p)->type_range & TYPERANGE_0_TO_3600)
+#define IS_TYPERANGE_0_TO_32767(p) ((p)->type_range & TYPERANGE_0_TO_32767)
+#define IS_TYPERANGE_0_TO_65535(p) ((p)->type_range & TYPERANGE_0_TO_65535)
+#define IS_TYPERANGE_1_TO_65535(p) ((p)->type_range & TYPERANGE_1_TO_65535)
+#define IS_TYPERANGE_2_TO_3600(p) ((p)->type_range & TYPERANGE_2_TO_3600)
+#define IS_TYPERANGE_512_TO_16777215(p) ((p)->type_range & \
+ TYPERANGE_512_TO_16777215)
+#define IS_TYPERANGE_AUTH_PARAM(p) ((p)->type_range & TYPERANGE_AUTH)
+#define IS_TYPERANGE_DIGEST_PARAM(p) ((p)->type_range & TYPERANGE_DIGEST)
+#define IS_TYPERANGE_SESSIONTYPE(p) ((p)->type_range & \
+ TYPERANGE_SESSIONTYPE)
+
+/*
+ * struct iscsi_param->state
+ */
+#define PSTATE_ACCEPTOR 0x01
+#define PSTATE_NEGOTIATE 0x02
+#define PSTATE_PROPOSER 0x04
+#define PSTATE_IRRELEVANT 0x08
+#define PSTATE_REJECT 0x10
+#define PSTATE_REPLY_OPTIONAL 0x20
+#define PSTATE_RESPONSE_GOT 0x40
+#define PSTATE_RESPONSE_SENT 0x80
+
+#define IS_PSTATE_ACCEPTOR(p) ((p)->state & PSTATE_ACCEPTOR)
+#define IS_PSTATE_NEGOTIATE(p) ((p)->state & PSTATE_NEGOTIATE)
+#define IS_PSTATE_PROPOSER(p) ((p)->state & PSTATE_PROPOSER)
+#define IS_PSTATE_IRRELEVANT(p) ((p)->state & PSTATE_IRRELEVANT)
+#define IS_PSTATE_REJECT(p) ((p)->state & PSTATE_REJECT)
+#define IS_PSTATE_REPLY_OPTIONAL(p) ((p)->state & PSTATE_REPLY_OPTIONAL)
+#define IS_PSTATE_RESPONSE_GOT(p) ((p)->state & PSTATE_RESPONSE_GOT)
+#define IS_PSTATE_RESPONSE_SENT(p) ((p)->state & PSTATE_RESPONSE_SENT)
+
+#define SET_PSTATE_ACCEPTOR(p) ((p)->state |= PSTATE_ACCEPTOR)
+#define SET_PSTATE_NEGOTIATE(p) ((p)->state |= PSTATE_NEGOTIATE)
+#define SET_PSTATE_PROPOSER(p) ((p)->state |= PSTATE_PROPOSER)
+#define SET_PSTATE_IRRELEVANT(p) ((p)->state |= PSTATE_IRRELEVANT)
+#define SET_PSTATE_REJECT(p) ((p)->state |= PSTATE_REJECT)
+#define SET_PSTATE_REPLY_OPTIONAL(p) ((p)->state |= PSTATE_REPLY_OPTIONAL)
+#define SET_PSTATE_RESPONSE_GOT(p) ((p)->state |= PSTATE_RESPONSE_GOT)
+#define SET_PSTATE_RESPONSE_SENT(p) ((p)->state |= PSTATE_RESPONSE_SENT)
+
+#endif /* ISCSI_PARAMETERS_H */
diff --git a/drivers/target/iscsi/iscsi_target_seq_pdu_list.c b/drivers/target/iscsi/iscsi_target_seq_pdu_list.c
new file mode 100644
index 00000000000..fc694082bfc
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_seq_pdu_list.c
@@ -0,0 +1,664 @@
+/*******************************************************************************
+ * This file contains the main functions related to iSCSI DataSequenceInOrder=No
+ * and DataPDUInOrder=No.
+ *
+ * © Copyright 2007-2011 RisingTide Systems LLC.
+ *
+ * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ *
+ * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ ******************************************************************************/
+
+#include <linux/slab.h>
+#include <linux/random.h>
+
+#include "iscsi_target_core.h"
+#include "iscsi_target_util.h"
+#include "iscsi_target_seq_pdu_list.h"
+
+#define OFFLOAD_BUF_SIZE 32768
+
+void iscsit_dump_seq_list(struct iscsi_cmd *cmd)
+{
+ int i;
+ struct iscsi_seq *seq;
+
+ pr_debug("Dumping Sequence List for ITT: 0x%08x:\n",
+ cmd->init_task_tag);
+
+ for (i = 0; i < cmd->seq_count; i++) {
+ seq = &cmd->seq_list[i];
+ pr_debug("i: %d, pdu_start: %d, pdu_count: %d,"
+ " offset: %d, xfer_len: %d, seq_send_order: %d,"
+ " seq_no: %d\n", i, seq->pdu_start, seq->pdu_count,
+ seq->offset, seq->xfer_len, seq->seq_send_order,
+ seq->seq_no);
+ }
+}
+
+void iscsit_dump_pdu_list(struct iscsi_cmd *cmd)
+{
+ int i;
+ struct iscsi_pdu *pdu;
+
+ pr_debug("Dumping PDU List for ITT: 0x%08x:\n",
+ cmd->init_task_tag);
+
+ for (i = 0; i < cmd->pdu_count; i++) {
+ pdu = &cmd->pdu_list[i];
+ pr_debug("i: %d, offset: %d, length: %d,"
+ " pdu_send_order: %d, seq_no: %d\n", i, pdu->offset,
+ pdu->length, pdu->pdu_send_order, pdu->seq_no);
+ }
+}
+
+static void iscsit_ordered_seq_lists(
+ struct iscsi_cmd *cmd,
+ u8 type)
+{
+ u32 i, seq_count = 0;
+
+ for (i = 0; i < cmd->seq_count; i++) {
+ if (cmd->seq_list[i].type != SEQTYPE_NORMAL)
+ continue;
+ cmd->seq_list[i].seq_send_order = seq_count++;
+ }
+}
+
+static void iscsit_ordered_pdu_lists(
+ struct iscsi_cmd *cmd,
+ u8 type)
+{
+ u32 i, pdu_send_order = 0, seq_no = 0;
+
+ for (i = 0; i < cmd->pdu_count; i++) {
+redo:
+ if (cmd->pdu_list[i].seq_no == seq_no) {
+ cmd->pdu_list[i].pdu_send_order = pdu_send_order++;
+ continue;
+ }
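+		/*
+		 * This PDU belongs to a later sequence: advance seq_no,
+		 * restart the per-sequence send order at zero, and
+		 * re-examine the same PDU entry.
+		 */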
+ seq_no++;
+ pdu_send_order = 0;
+ goto redo;
+ }
+}
+
+/*
+ * Generate count random values into array[].
+ * Bit 0x80000000 marks entries that have already been generated.
+ */
+static void iscsit_create_random_array(u32 *array, u32 count)
+{
+ int i, j, k;
+
+ if (count == 1) {
+ array[0] = 0;
+ return;
+ }
+
+ for (i = 0; i < count; i++) {
+redo:
+ get_random_bytes(&j, sizeof(u32));
+ j = (1 + (int) (9999 + 1) - j) % count;
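+		/*
+		 * Check the candidate against all previously generated
+		 * entries; both sides carry the 0x80000000 marker bit, so
+		 * a match means a duplicate and forces another attempt.
+		 */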
+ for (k = 0; k < i + 1; k++) {
+ j |= 0x80000000;
+ if ((array[k] & 0x80000000) && (array[k] == j))
+ goto redo;
+ }
+ array[i] = j;
+ }
+
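+	/* All values are unique; strip the marker bit. */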
+ for (i = 0; i < count; i++)
+ array[i] &= ~0x80000000;
+}
+
+static int iscsit_randomize_pdu_lists(
+ struct iscsi_cmd *cmd,
+ u8 type)
+{
+ int i = 0;
+ u32 *array, pdu_count, seq_count = 0, seq_no = 0, seq_offset = 0;
+
+ for (pdu_count = 0; pdu_count < cmd->pdu_count; pdu_count++) {
+redo:
+ if (cmd->pdu_list[pdu_count].seq_no == seq_no) {
+ seq_count++;
+ continue;
+ }
+ array = kzalloc(seq_count * sizeof(u32), GFP_KERNEL);
+ if (!array) {
+ pr_err("Unable to allocate memory"
+ " for random array.\n");
+ return -1;
+ }
+ iscsit_create_random_array(array, seq_count);
+
+ for (i = 0; i < seq_count; i++)
+ cmd->pdu_list[seq_offset+i].pdu_send_order = array[i];
+
+ kfree(array);
+
+ seq_offset += seq_count;
+ seq_count = 0;
+ seq_no++;
+ goto redo;
+ }
+
+ if (seq_count) {
+ array = kzalloc(seq_count * sizeof(u32), GFP_KERNEL);
+ if (!array) {
+ pr_err("Unable to allocate memory for"
+ " random array.\n");
+ return -1;
+ }
+ iscsit_create_random_array(array, seq_count);
+
+ for (i = 0; i < seq_count; i++)
+ cmd->pdu_list[seq_offset+i].pdu_send_order = array[i];
+
+ kfree(array);
+ }
+
+ return 0;
+}
+
+static int iscsit_randomize_seq_lists(
+ struct iscsi_cmd *cmd,
+ u8 type)
+{
+ int i, j = 0;
+ u32 *array, seq_count = cmd->seq_count;
+
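+	/*
+	 * Immediate and unsolicited sequences keep their fixed positions,
+	 * so they are excluded here; only SEQTYPE_NORMAL sequences are
+	 * given a randomized seq_send_order below.
+	 */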
+ if ((type == PDULIST_IMMEDIATE) || (type == PDULIST_UNSOLICITED))
+ seq_count--;
+ else if (type == PDULIST_IMMEDIATE_AND_UNSOLICITED)
+ seq_count -= 2;
+
+ if (!seq_count)
+ return 0;
+
+ array = kzalloc(seq_count * sizeof(u32), GFP_KERNEL);
+ if (!array) {
+ pr_err("Unable to allocate memory for random array.\n");
+ return -1;
+ }
+ iscsit_create_random_array(array, seq_count);
+
+ for (i = 0; i < cmd->seq_count; i++) {
+ if (cmd->seq_list[i].type != SEQTYPE_NORMAL)
+ continue;
+ cmd->seq_list[i].seq_send_order = array[j++];
+ }
+
+ kfree(array);
+ return 0;
+}
+
+static void iscsit_determine_counts_for_list(
+ struct iscsi_cmd *cmd,
+ struct iscsi_build_list *bl,
+ u32 *seq_count,
+ u32 *pdu_count)
+{
+ int check_immediate = 0;
+ u32 burstlength = 0, offset = 0;
+ u32 unsolicited_data_length = 0;
+ struct iscsi_conn *conn = cmd->conn;
+
+ if ((bl->type == PDULIST_IMMEDIATE) ||
+ (bl->type == PDULIST_IMMEDIATE_AND_UNSOLICITED))
+ check_immediate = 1;
+
+ if ((bl->type == PDULIST_UNSOLICITED) ||
+ (bl->type == PDULIST_IMMEDIATE_AND_UNSOLICITED))
+ unsolicited_data_length = (cmd->data_length >
+ conn->sess->sess_ops->FirstBurstLength) ?
+ conn->sess->sess_ops->FirstBurstLength : cmd->data_length;
+
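+	/*
+	 * Walk the full transfer once, mirroring the splitting logic in
+	 * iscsit_build_pdu_and_seq_list(), so the caller can size the
+	 * seq_list[] and pdu_list[] allocations up front.
+	 */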
+ while (offset < cmd->data_length) {
+ *pdu_count += 1;
+
+ if (check_immediate) {
+ check_immediate = 0;
+ offset += bl->immediate_data_length;
+ *seq_count += 1;
+ if (unsolicited_data_length)
+ unsolicited_data_length -=
+ bl->immediate_data_length;
+ continue;
+ }
+ if (unsolicited_data_length > 0) {
+ if ((offset + conn->conn_ops->MaxRecvDataSegmentLength)
+ >= cmd->data_length) {
+ unsolicited_data_length -=
+ (cmd->data_length - offset);
+ offset += (cmd->data_length - offset);
+ continue;
+ }
+ if ((offset + conn->conn_ops->MaxRecvDataSegmentLength)
+ >= conn->sess->sess_ops->FirstBurstLength) {
+ unsolicited_data_length -=
+ (conn->sess->sess_ops->FirstBurstLength -
+ offset);
+ offset += (conn->sess->sess_ops->FirstBurstLength -
+ offset);
+ burstlength = 0;
+ *seq_count += 1;
+ continue;
+ }
+
+ offset += conn->conn_ops->MaxRecvDataSegmentLength;
+ unsolicited_data_length -=
+ conn->conn_ops->MaxRecvDataSegmentLength;
+ continue;
+ }
+ if ((offset + conn->conn_ops->MaxRecvDataSegmentLength) >=
+ cmd->data_length) {
+ offset += (cmd->data_length - offset);
+ continue;
+ }
+ if ((burstlength + conn->conn_ops->MaxRecvDataSegmentLength) >=
+ conn->sess->sess_ops->MaxBurstLength) {
+ offset += (conn->sess->sess_ops->MaxBurstLength -
+ burstlength);
+ burstlength = 0;
+ *seq_count += 1;
+ continue;
+ }
+
+ burstlength += conn->conn_ops->MaxRecvDataSegmentLength;
+ offset += conn->conn_ops->MaxRecvDataSegmentLength;
+ }
+}
+
+
+/*
+ * Builds the PDU and/or Sequence lists, called when DataSequenceInOrder=No
+ * and/or DataPDUInOrder=No.
+ */
+static int iscsit_build_pdu_and_seq_list(
+ struct iscsi_cmd *cmd,
+ struct iscsi_build_list *bl)
+{
+ int check_immediate = 0, datapduinorder, datasequenceinorder;
+ u32 burstlength = 0, offset = 0, i = 0;
+ u32 pdu_count = 0, seq_no = 0, unsolicited_data_length = 0;
+ struct iscsi_conn *conn = cmd->conn;
+ struct iscsi_pdu *pdu = cmd->pdu_list;
+ struct iscsi_seq *seq = cmd->seq_list;
+
+ datapduinorder = conn->sess->sess_ops->DataPDUInOrder;
+ datasequenceinorder = conn->sess->sess_ops->DataSequenceInOrder;
+
+ if ((bl->type == PDULIST_IMMEDIATE) ||
+ (bl->type == PDULIST_IMMEDIATE_AND_UNSOLICITED))
+ check_immediate = 1;
+
+ if ((bl->type == PDULIST_UNSOLICITED) ||
+ (bl->type == PDULIST_IMMEDIATE_AND_UNSOLICITED))
+ unsolicited_data_length = (cmd->data_length >
+ conn->sess->sess_ops->FirstBurstLength) ?
+ conn->sess->sess_ops->FirstBurstLength : cmd->data_length;
+
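+	/*
+	 * First pass: carve the transfer into immediate, unsolicited and
+	 * normal PDUs/sequences, bounded by MaxRecvDataSegmentLength,
+	 * FirstBurstLength and MaxBurstLength.
+	 */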
+ while (offset < cmd->data_length) {
+ pdu_count++;
+ if (!datapduinorder) {
+ pdu[i].offset = offset;
+ pdu[i].seq_no = seq_no;
+ }
+ if (!datasequenceinorder && (pdu_count == 1)) {
+ seq[seq_no].pdu_start = i;
+ seq[seq_no].seq_no = seq_no;
+ seq[seq_no].offset = offset;
+ seq[seq_no].orig_offset = offset;
+ }
+
+ if (check_immediate) {
+ check_immediate = 0;
+ if (!datapduinorder) {
+ pdu[i].type = PDUTYPE_IMMEDIATE;
+ pdu[i++].length = bl->immediate_data_length;
+ }
+ if (!datasequenceinorder) {
+ seq[seq_no].type = SEQTYPE_IMMEDIATE;
+ seq[seq_no].pdu_count = 1;
+ seq[seq_no].xfer_len =
+ bl->immediate_data_length;
+ }
+ offset += bl->immediate_data_length;
+ pdu_count = 0;
+ seq_no++;
+ if (unsolicited_data_length)
+ unsolicited_data_length -=
+ bl->immediate_data_length;
+ continue;
+ }
+ if (unsolicited_data_length > 0) {
+ if ((offset +
+ conn->conn_ops->MaxRecvDataSegmentLength) >=
+ cmd->data_length) {
+ if (!datapduinorder) {
+ pdu[i].type = PDUTYPE_UNSOLICITED;
+ pdu[i].length =
+ (cmd->data_length - offset);
+ }
+ if (!datasequenceinorder) {
+ seq[seq_no].type = SEQTYPE_UNSOLICITED;
+ seq[seq_no].pdu_count = pdu_count;
+ seq[seq_no].xfer_len = (burstlength +
+ (cmd->data_length - offset));
+ }
+ unsolicited_data_length -=
+ (cmd->data_length - offset);
+ offset += (cmd->data_length - offset);
+ continue;
+ }
+ if ((offset +
+ conn->conn_ops->MaxRecvDataSegmentLength) >=
+ conn->sess->sess_ops->FirstBurstLength) {
+ if (!datapduinorder) {
+ pdu[i].type = PDUTYPE_UNSOLICITED;
+ pdu[i++].length =
+ (conn->sess->sess_ops->FirstBurstLength -
+ offset);
+ }
+ if (!datasequenceinorder) {
+ seq[seq_no].type = SEQTYPE_UNSOLICITED;
+ seq[seq_no].pdu_count = pdu_count;
+ seq[seq_no].xfer_len = (burstlength +
+ (conn->sess->sess_ops->FirstBurstLength -
+ offset));
+ }
+ unsolicited_data_length -=
+ (conn->sess->sess_ops->FirstBurstLength -
+ offset);
+ offset += (conn->sess->sess_ops->FirstBurstLength -
+ offset);
+ burstlength = 0;
+ pdu_count = 0;
+ seq_no++;
+ continue;
+ }
+
+ if (!datapduinorder) {
+ pdu[i].type = PDUTYPE_UNSOLICITED;
+ pdu[i++].length =
+ conn->conn_ops->MaxRecvDataSegmentLength;
+ }
+ burstlength += conn->conn_ops->MaxRecvDataSegmentLength;
+ offset += conn->conn_ops->MaxRecvDataSegmentLength;
+ unsolicited_data_length -=
+ conn->conn_ops->MaxRecvDataSegmentLength;
+ continue;
+ }
+ if ((offset + conn->conn_ops->MaxRecvDataSegmentLength) >=
+ cmd->data_length) {
+ if (!datapduinorder) {
+ pdu[i].type = PDUTYPE_NORMAL;
+ pdu[i].length = (cmd->data_length - offset);
+ }
+ if (!datasequenceinorder) {
+ seq[seq_no].type = SEQTYPE_NORMAL;
+ seq[seq_no].pdu_count = pdu_count;
+ seq[seq_no].xfer_len = (burstlength +
+ (cmd->data_length - offset));
+ }
+ offset += (cmd->data_length - offset);
+ continue;
+ }
+ if ((burstlength + conn->conn_ops->MaxRecvDataSegmentLength) >=
+ conn->sess->sess_ops->MaxBurstLength) {
+ if (!datapduinorder) {
+ pdu[i].type = PDUTYPE_NORMAL;
+ pdu[i++].length =
+ (conn->sess->sess_ops->MaxBurstLength -
+ burstlength);
+ }
+ if (!datasequenceinorder) {
+ seq[seq_no].type = SEQTYPE_NORMAL;
+ seq[seq_no].pdu_count = pdu_count;
+ seq[seq_no].xfer_len = (burstlength +
+ (conn->sess->sess_ops->MaxBurstLength -
+ burstlength));
+ }
+ offset += (conn->sess->sess_ops->MaxBurstLength -
+ burstlength);
+ burstlength = 0;
+ pdu_count = 0;
+ seq_no++;
+ continue;
+ }
+
+ if (!datapduinorder) {
+ pdu[i].type = PDUTYPE_NORMAL;
+ pdu[i++].length =
+ conn->conn_ops->MaxRecvDataSegmentLength;
+ }
+ burstlength += conn->conn_ops->MaxRecvDataSegmentLength;
+ offset += conn->conn_ops->MaxRecvDataSegmentLength;
+ }
+
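+	/*
+	 * Second pass: assign send orders, randomized according to the
+	 * build list flags or strictly ascending otherwise.
+	 */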
+ if (!datasequenceinorder) {
+ if (bl->data_direction & ISCSI_PDU_WRITE) {
+ if (bl->randomize & RANDOM_R2T_OFFSETS) {
+ if (iscsit_randomize_seq_lists(cmd, bl->type)
+ < 0)
+ return -1;
+ } else
+ iscsit_ordered_seq_lists(cmd, bl->type);
+ } else if (bl->data_direction & ISCSI_PDU_READ) {
+ if (bl->randomize & RANDOM_DATAIN_SEQ_OFFSETS) {
+ if (iscsit_randomize_seq_lists(cmd, bl->type)
+ < 0)
+ return -1;
+ } else
+ iscsit_ordered_seq_lists(cmd, bl->type);
+ }
+#if 0
+ iscsit_dump_seq_list(cmd);
+#endif
+ }
+ if (!datapduinorder) {
+ if (bl->data_direction & ISCSI_PDU_WRITE) {
+ if (bl->randomize & RANDOM_DATAOUT_PDU_OFFSETS) {
+ if (iscsit_randomize_pdu_lists(cmd, bl->type)
+ < 0)
+ return -1;
+ } else
+ iscsit_ordered_pdu_lists(cmd, bl->type);
+ } else if (bl->data_direction & ISCSI_PDU_READ) {
+ if (bl->randomize & RANDOM_DATAIN_PDU_OFFSETS) {
+ if (iscsit_randomize_pdu_lists(cmd, bl->type)
+ < 0)
+ return -1;
+ } else
+ iscsit_ordered_pdu_lists(cmd, bl->type);
+ }
+#if 0
+ iscsit_dump_pdu_list(cmd);
+#endif
+ }
+
+ return 0;
+}
+
+/*
+ * Only called when DataSequenceInOrder=No or DataPDUInOrder=No.
+ */
+int iscsit_do_build_list(
+ struct iscsi_cmd *cmd,
+ struct iscsi_build_list *bl)
+{
+ u32 pdu_count = 0, seq_count = 1;
+ struct iscsi_conn *conn = cmd->conn;
+ struct iscsi_pdu *pdu = NULL;
+ struct iscsi_seq *seq = NULL;
+
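+	/* Pre-compute how many sequence and PDU descriptors are needed. */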
+ iscsit_determine_counts_for_list(cmd, bl, &seq_count, &pdu_count);
+
+ if (!conn->sess->sess_ops->DataSequenceInOrder) {
+ seq = kzalloc(seq_count * sizeof(struct iscsi_seq), GFP_ATOMIC);
+ if (!seq) {
+ pr_err("Unable to allocate struct iscsi_seq list\n");
+ return -1;
+ }
+ cmd->seq_list = seq;
+ cmd->seq_count = seq_count;
+ }
+
+ if (!conn->sess->sess_ops->DataPDUInOrder) {
+ pdu = kzalloc(pdu_count * sizeof(struct iscsi_pdu), GFP_ATOMIC);
+ if (!pdu) {
+ pr_err("Unable to allocate struct iscsi_pdu list.\n");
+ kfree(seq);
+ return -1;
+ }
+ cmd->pdu_list = pdu;
+ cmd->pdu_count = pdu_count;
+ }
+
+ return iscsit_build_pdu_and_seq_list(cmd, bl);
+}
+
+struct iscsi_pdu *iscsit_get_pdu_holder(
+ struct iscsi_cmd *cmd,
+ u32 offset,
+ u32 length)
+{
+ u32 i;
+ struct iscsi_pdu *pdu = NULL;
+
+ if (!cmd->pdu_list) {
+ pr_err("struct iscsi_cmd->pdu_list is NULL!\n");
+ return NULL;
+ }
+
+ pdu = &cmd->pdu_list[0];
+
+ for (i = 0; i < cmd->pdu_count; i++)
+ if ((pdu[i].offset == offset) && (pdu[i].length == length))
+ return &pdu[i];
+
+ pr_err("Unable to locate PDU holder for ITT: 0x%08x, Offset:"
+ " %u, Length: %u\n", cmd->init_task_tag, offset, length);
+ return NULL;
+}
+
+struct iscsi_pdu *iscsit_get_pdu_holder_for_seq(
+ struct iscsi_cmd *cmd,
+ struct iscsi_seq *seq)
+{
+ u32 i;
+ struct iscsi_conn *conn = cmd->conn;
+ struct iscsi_pdu *pdu = NULL;
+
+ if (!cmd->pdu_list) {
+ pr_err("struct iscsi_cmd->pdu_list is NULL!\n");
+ return NULL;
+ }
+
+ if (conn->sess->sess_ops->DataSequenceInOrder) {
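+		/*
+		 * Sequences are sent in order: scan the current window of
+		 * the PDU list for the entry matching cmd->pdu_send_order,
+		 * sliding the window forward one sequence at a time.
+		 */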
+redo:
+ pdu = &cmd->pdu_list[cmd->pdu_start];
+
+ for (i = 0; pdu[i].seq_no != cmd->seq_no; i++) {
+#if 0
+ pr_debug("pdu[i].seq_no: %d, pdu[i].pdu"
+ "_send_order: %d, pdu[i].offset: %d,"
+ " pdu[i].length: %d\n", pdu[i].seq_no,
+ pdu[i].pdu_send_order, pdu[i].offset,
+ pdu[i].length);
+#endif
+ if (pdu[i].pdu_send_order == cmd->pdu_send_order) {
+ cmd->pdu_send_order++;
+ return &pdu[i];
+ }
+ }
+
+ cmd->pdu_start += cmd->pdu_send_order;
+ cmd->pdu_send_order = 0;
+ cmd->seq_no++;
+
+ if (cmd->pdu_start < cmd->pdu_count)
+ goto redo;
+
+ pr_err("Command ITT: 0x%08x unable to locate"
+ " struct iscsi_pdu for cmd->pdu_send_order: %u.\n",
+ cmd->init_task_tag, cmd->pdu_send_order);
+ return NULL;
+ } else {
+ if (!seq) {
+ pr_err("struct iscsi_seq is NULL!\n");
+ return NULL;
+ }
+#if 0
+ pr_debug("seq->pdu_start: %d, seq->pdu_count: %d,"
+ " seq->seq_no: %d\n", seq->pdu_start, seq->pdu_count,
+ seq->seq_no);
+#endif
+ pdu = &cmd->pdu_list[seq->pdu_start];
+
+ if (seq->pdu_send_order == seq->pdu_count) {
+ pr_err("Command ITT: 0x%08x seq->pdu_send"
+ "_order: %u equals seq->pdu_count: %u\n",
+ cmd->init_task_tag, seq->pdu_send_order,
+ seq->pdu_count);
+ return NULL;
+ }
+
+ for (i = 0; i < seq->pdu_count; i++) {
+ if (pdu[i].pdu_send_order == seq->pdu_send_order) {
+ seq->pdu_send_order++;
+ return &pdu[i];
+ }
+ }
+
+		pr_err("Command ITT: 0x%08x unable to locate struct"
+			" iscsi_pdu for seq->pdu_send_order: %u.\n",
+ cmd->init_task_tag, seq->pdu_send_order);
+ return NULL;
+ }
+
+ return NULL;
+}
+
+struct iscsi_seq *iscsit_get_seq_holder(
+ struct iscsi_cmd *cmd,
+ u32 offset,
+ u32 length)
+{
+ u32 i;
+
+ if (!cmd->seq_list) {
+ pr_err("struct iscsi_cmd->seq_list is NULL!\n");
+ return NULL;
+ }
+
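+	/*
+	 * Return the first sequence whose end (orig_offset + xfer_len)
+	 * reaches at least offset + length.
+	 */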
+ for (i = 0; i < cmd->seq_count; i++) {
+#if 0
+ pr_debug("seq_list[i].orig_offset: %d, seq_list[i]."
+ "xfer_len: %d, seq_list[i].seq_no %u\n",
+ cmd->seq_list[i].orig_offset, cmd->seq_list[i].xfer_len,
+ cmd->seq_list[i].seq_no);
+#endif
+ if ((cmd->seq_list[i].orig_offset +
+ cmd->seq_list[i].xfer_len) >=
+ (offset + length))
+ return &cmd->seq_list[i];
+ }
+
+ pr_err("Unable to locate Sequence holder for ITT: 0x%08x,"
+ " Offset: %u, Length: %u\n", cmd->init_task_tag, offset,
+ length);
+ return NULL;
+}
diff --git a/drivers/target/iscsi/iscsi_target_seq_pdu_list.h b/drivers/target/iscsi/iscsi_target_seq_pdu_list.h
new file mode 100644
index 00000000000..0d52a10e306
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_seq_pdu_list.h
@@ -0,0 +1,86 @@
+#ifndef ISCSI_SEQ_AND_PDU_LIST_H
+#define ISCSI_SEQ_AND_PDU_LIST_H
+
+/* struct iscsi_pdu->status */
+#define DATAOUT_PDU_SENT 1
+
+/* struct iscsi_seq->type */
+#define SEQTYPE_IMMEDIATE 1
+#define SEQTYPE_UNSOLICITED 2
+#define SEQTYPE_NORMAL 3
+
+/* struct iscsi_seq->status */
+#define DATAOUT_SEQUENCE_GOT_R2T 1
+#define DATAOUT_SEQUENCE_WITHIN_COMMAND_RECOVERY 2
+#define DATAOUT_SEQUENCE_COMPLETE 3
+
+/* iscsit_determine_counts_for_list() type */
+#define PDULIST_NORMAL 1
+#define PDULIST_IMMEDIATE 2
+#define PDULIST_UNSOLICITED 3
+#define PDULIST_IMMEDIATE_AND_UNSOLICITED 4
+
+/* struct iscsi_pdu->type */
+#define PDUTYPE_IMMEDIATE 1
+#define PDUTYPE_UNSOLICITED 2
+#define PDUTYPE_NORMAL 3
+
+/* struct iscsi_pdu->status */
+#define ISCSI_PDU_NOT_RECEIVED 0
+#define ISCSI_PDU_RECEIVED_OK 1
+#define ISCSI_PDU_CRC_FAILED 2
+#define ISCSI_PDU_TIMED_OUT 3
+
+/* struct iscsi_build_list->randomize */
+#define RANDOM_DATAIN_PDU_OFFSETS 0x01
+#define RANDOM_DATAIN_SEQ_OFFSETS 0x02
+#define RANDOM_DATAOUT_PDU_OFFSETS 0x04
+#define RANDOM_R2T_OFFSETS 0x08
+
+/* struct iscsi_build_list->data_direction */
+#define ISCSI_PDU_READ 0x01
+#define ISCSI_PDU_WRITE 0x02
+
+struct iscsi_build_list {
+ int data_direction;
+ int randomize;
+ int type;
+ int immediate_data_length;
+};
+
+struct iscsi_pdu {
+ int status;
+ int type;
+ u8 flags;
+ u32 data_sn;
+ u32 length;
+ u32 offset;
+ u32 pdu_send_order;
+ u32 seq_no;
+} ____cacheline_aligned;
+
+struct iscsi_seq {
+ int sent;
+ int status;
+ int type;
+ u32 data_sn;
+ u32 first_datasn;
+ u32 last_datasn;
+ u32 next_burst_len;
+ u32 pdu_start;
+ u32 pdu_count;
+ u32 offset;
+ u32 orig_offset;
+ u32 pdu_send_order;
+ u32 r2t_sn;
+ u32 seq_send_order;
+ u32 seq_no;
+ u32 xfer_len;
+} ____cacheline_aligned;
+
+extern void iscsit_dump_seq_list(struct iscsi_cmd *);
+extern void iscsit_dump_pdu_list(struct iscsi_cmd *);
+extern int iscsit_do_build_list(struct iscsi_cmd *, struct iscsi_build_list *);
+extern struct iscsi_pdu *iscsit_get_pdu_holder(struct iscsi_cmd *, u32, u32);
+extern struct iscsi_pdu *iscsit_get_pdu_holder_for_seq(struct iscsi_cmd *, struct iscsi_seq *);
+extern struct iscsi_seq *iscsit_get_seq_holder(struct iscsi_cmd *, u32, u32);
+
+#endif /* ISCSI_SEQ_AND_PDU_LIST_H */
diff --git a/drivers/target/iscsi/iscsi_target_stat.c b/drivers/target/iscsi/iscsi_target_stat.c
new file mode 100644
index 00000000000..bbdbe9301b2
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_stat.c
@@ -0,0 +1,950 @@
+/*******************************************************************************
+ * Modern ConfigFS group context-specific iSCSI statistics, based on the
+ * original iscsi_target_mib.c code
+ *
+ * Copyright (c) 2011 Rising Tide Systems
+ *
+ * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ *
+ * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ ******************************************************************************/
+
+#include <linux/configfs.h>
+#include <scsi/iscsi_proto.h>
+#include <target/target_core_base.h>
+#include <target/target_core_transport.h>
+#include <target/configfs_macros.h>
+
+#include "iscsi_target_core.h"
+#include "iscsi_target_parameters.h"
+#include "iscsi_target_device.h"
+#include "iscsi_target_tpg.h"
+#include "iscsi_target_util.h"
+#include "iscsi_target_stat.h"
+
+#ifndef INITIAL_JIFFIES
+#define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ))
+#endif
+
+/* Instance Attributes Table */
+#define ISCSI_INST_NUM_NODES 1
+#define ISCSI_INST_DESCR "Storage Engine Target"
+#define ISCSI_INST_LAST_FAILURE_TYPE 0
+#define ISCSI_DISCONTINUITY_TIME 0
+
+#define ISCSI_NODE_INDEX 1
+
+#define ISPRINT(a)	(((a) >= ' ') && ((a) <= '~'))
+
+/****************************************************************************
+ * iSCSI MIB Tables
+ ****************************************************************************/
+/*
+ * Instance Attributes Table
+ */
+CONFIGFS_EATTR_STRUCT(iscsi_stat_instance, iscsi_wwn_stat_grps);
+#define ISCSI_STAT_INSTANCE_ATTR(_name, _mode) \
+static struct iscsi_stat_instance_attribute \
+ iscsi_stat_instance_##_name = \
+ __CONFIGFS_EATTR(_name, _mode, \
+ iscsi_stat_instance_show_attr_##_name, \
+ iscsi_stat_instance_store_attr_##_name);
+
+#define ISCSI_STAT_INSTANCE_ATTR_RO(_name) \
+static struct iscsi_stat_instance_attribute \
+ iscsi_stat_instance_##_name = \
+ __CONFIGFS_EATTR_RO(_name, \
+ iscsi_stat_instance_show_attr_##_name);
+
+static ssize_t iscsi_stat_instance_show_attr_inst(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ struct iscsi_tiqn *tiqn = container_of(igrps,
+ struct iscsi_tiqn, tiqn_stat_grps);
+
+ return snprintf(page, PAGE_SIZE, "%u\n", tiqn->tiqn_index);
+}
+ISCSI_STAT_INSTANCE_ATTR_RO(inst);
+
+static ssize_t iscsi_stat_instance_show_attr_min_ver(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_DRAFT20_VERSION);
+}
+ISCSI_STAT_INSTANCE_ATTR_RO(min_ver);
+
+static ssize_t iscsi_stat_instance_show_attr_max_ver(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_DRAFT20_VERSION);
+}
+ISCSI_STAT_INSTANCE_ATTR_RO(max_ver);
+
+static ssize_t iscsi_stat_instance_show_attr_portals(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ struct iscsi_tiqn *tiqn = container_of(igrps,
+ struct iscsi_tiqn, tiqn_stat_grps);
+
+ return snprintf(page, PAGE_SIZE, "%u\n", tiqn->tiqn_num_tpg_nps);
+}
+ISCSI_STAT_INSTANCE_ATTR_RO(portals);
+
+static ssize_t iscsi_stat_instance_show_attr_nodes(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_INST_NUM_NODES);
+}
+ISCSI_STAT_INSTANCE_ATTR_RO(nodes);
+
+static ssize_t iscsi_stat_instance_show_attr_sessions(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ struct iscsi_tiqn *tiqn = container_of(igrps,
+ struct iscsi_tiqn, tiqn_stat_grps);
+
+ return snprintf(page, PAGE_SIZE, "%u\n", tiqn->tiqn_nsessions);
+}
+ISCSI_STAT_INSTANCE_ATTR_RO(sessions);
+
+static ssize_t iscsi_stat_instance_show_attr_fail_sess(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ struct iscsi_tiqn *tiqn = container_of(igrps,
+ struct iscsi_tiqn, tiqn_stat_grps);
+ struct iscsi_sess_err_stats *sess_err = &tiqn->sess_err_stats;
+ u32 sess_err_count;
+
+ spin_lock_bh(&sess_err->lock);
+ sess_err_count = (sess_err->digest_errors +
+ sess_err->cxn_timeout_errors +
+ sess_err->pdu_format_errors);
+ spin_unlock_bh(&sess_err->lock);
+
+ return snprintf(page, PAGE_SIZE, "%u\n", sess_err_count);
+}
+ISCSI_STAT_INSTANCE_ATTR_RO(fail_sess);
+
+static ssize_t iscsi_stat_instance_show_attr_fail_type(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ struct iscsi_tiqn *tiqn = container_of(igrps,
+ struct iscsi_tiqn, tiqn_stat_grps);
+ struct iscsi_sess_err_stats *sess_err = &tiqn->sess_err_stats;
+
+ return snprintf(page, PAGE_SIZE, "%u\n",
+ sess_err->last_sess_failure_type);
+}
+ISCSI_STAT_INSTANCE_ATTR_RO(fail_type);
+
+static ssize_t iscsi_stat_instance_show_attr_fail_rem_name(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ struct iscsi_tiqn *tiqn = container_of(igrps,
+ struct iscsi_tiqn, tiqn_stat_grps);
+ struct iscsi_sess_err_stats *sess_err = &tiqn->sess_err_stats;
+
+ return snprintf(page, PAGE_SIZE, "%s\n",
+ sess_err->last_sess_fail_rem_name[0] ?
+ sess_err->last_sess_fail_rem_name : NONE);
+}
+ISCSI_STAT_INSTANCE_ATTR_RO(fail_rem_name);
+
+static ssize_t iscsi_stat_instance_show_attr_disc_time(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_DISCONTINUITY_TIME);
+}
+ISCSI_STAT_INSTANCE_ATTR_RO(disc_time);
+
+static ssize_t iscsi_stat_instance_show_attr_description(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%s\n", ISCSI_INST_DESCR);
+}
+ISCSI_STAT_INSTANCE_ATTR_RO(description);
+
+static ssize_t iscsi_stat_instance_show_attr_vendor(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ return snprintf(page, PAGE_SIZE, "RisingTide Systems iSCSI-Target\n");
+}
+ISCSI_STAT_INSTANCE_ATTR_RO(vendor);
+
+static ssize_t iscsi_stat_instance_show_attr_version(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%s\n", ISCSIT_VERSION);
+}
+ISCSI_STAT_INSTANCE_ATTR_RO(version);
+
+CONFIGFS_EATTR_OPS(iscsi_stat_instance, iscsi_wwn_stat_grps,
+ iscsi_instance_group);
+
+static struct configfs_attribute *iscsi_stat_instance_attrs[] = {
+ &iscsi_stat_instance_inst.attr,
+ &iscsi_stat_instance_min_ver.attr,
+ &iscsi_stat_instance_max_ver.attr,
+ &iscsi_stat_instance_portals.attr,
+ &iscsi_stat_instance_nodes.attr,
+ &iscsi_stat_instance_sessions.attr,
+ &iscsi_stat_instance_fail_sess.attr,
+ &iscsi_stat_instance_fail_type.attr,
+ &iscsi_stat_instance_fail_rem_name.attr,
+ &iscsi_stat_instance_disc_time.attr,
+ &iscsi_stat_instance_description.attr,
+ &iscsi_stat_instance_vendor.attr,
+ &iscsi_stat_instance_version.attr,
+ NULL,
+};
+
+static struct configfs_item_operations iscsi_stat_instance_item_ops = {
+ .show_attribute = iscsi_stat_instance_attr_show,
+ .store_attribute = iscsi_stat_instance_attr_store,
+};
+
+struct config_item_type iscsi_stat_instance_cit = {
+ .ct_item_ops = &iscsi_stat_instance_item_ops,
+ .ct_attrs = iscsi_stat_instance_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+/*
+ * Instance Session Failure Stats Table
+ */
+CONFIGFS_EATTR_STRUCT(iscsi_stat_sess_err, iscsi_wwn_stat_grps);
+#define ISCSI_STAT_SESS_ERR_ATTR(_name, _mode) \
+static struct iscsi_stat_sess_err_attribute \
+ iscsi_stat_sess_err_##_name = \
+ __CONFIGFS_EATTR(_name, _mode, \
+ iscsi_stat_sess_err_show_attr_##_name, \
+ iscsi_stat_sess_err_store_attr_##_name);
+
+#define ISCSI_STAT_SESS_ERR_ATTR_RO(_name) \
+static struct iscsi_stat_sess_err_attribute \
+ iscsi_stat_sess_err_##_name = \
+ __CONFIGFS_EATTR_RO(_name, \
+ iscsi_stat_sess_err_show_attr_##_name);
+
+static ssize_t iscsi_stat_sess_err_show_attr_inst(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ struct iscsi_tiqn *tiqn = container_of(igrps,
+ struct iscsi_tiqn, tiqn_stat_grps);
+
+ return snprintf(page, PAGE_SIZE, "%u\n", tiqn->tiqn_index);
+}
+ISCSI_STAT_SESS_ERR_ATTR_RO(inst);
+
+static ssize_t iscsi_stat_sess_err_show_attr_digest_errors(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ struct iscsi_tiqn *tiqn = container_of(igrps,
+ struct iscsi_tiqn, tiqn_stat_grps);
+ struct iscsi_sess_err_stats *sess_err = &tiqn->sess_err_stats;
+
+ return snprintf(page, PAGE_SIZE, "%u\n", sess_err->digest_errors);
+}
+ISCSI_STAT_SESS_ERR_ATTR_RO(digest_errors);
+
+static ssize_t iscsi_stat_sess_err_show_attr_cxn_errors(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ struct iscsi_tiqn *tiqn = container_of(igrps,
+ struct iscsi_tiqn, tiqn_stat_grps);
+ struct iscsi_sess_err_stats *sess_err = &tiqn->sess_err_stats;
+
+ return snprintf(page, PAGE_SIZE, "%u\n", sess_err->cxn_timeout_errors);
+}
+ISCSI_STAT_SESS_ERR_ATTR_RO(cxn_errors);
+
+static ssize_t iscsi_stat_sess_err_show_attr_format_errors(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ struct iscsi_tiqn *tiqn = container_of(igrps,
+ struct iscsi_tiqn, tiqn_stat_grps);
+ struct iscsi_sess_err_stats *sess_err = &tiqn->sess_err_stats;
+
+ return snprintf(page, PAGE_SIZE, "%u\n", sess_err->pdu_format_errors);
+}
+ISCSI_STAT_SESS_ERR_ATTR_RO(format_errors);
+
+CONFIGFS_EATTR_OPS(iscsi_stat_sess_err, iscsi_wwn_stat_grps,
+ iscsi_sess_err_group);
+
+static struct configfs_attribute *iscsi_stat_sess_err_attrs[] = {
+ &iscsi_stat_sess_err_inst.attr,
+ &iscsi_stat_sess_err_digest_errors.attr,
+ &iscsi_stat_sess_err_cxn_errors.attr,
+ &iscsi_stat_sess_err_format_errors.attr,
+ NULL,
+};
+
+static struct configfs_item_operations iscsi_stat_sess_err_item_ops = {
+ .show_attribute = iscsi_stat_sess_err_attr_show,
+ .store_attribute = iscsi_stat_sess_err_attr_store,
+};
+
+struct config_item_type iscsi_stat_sess_err_cit = {
+ .ct_item_ops = &iscsi_stat_sess_err_item_ops,
+ .ct_attrs = iscsi_stat_sess_err_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+/*
+ * Target Attributes Table
+ */
+CONFIGFS_EATTR_STRUCT(iscsi_stat_tgt_attr, iscsi_wwn_stat_grps);
+#define ISCSI_STAT_TGT_ATTR(_name, _mode) \
+static struct iscsi_stat_tgt_attr_attribute \
+ iscsi_stat_tgt_attr_##_name = \
+ __CONFIGFS_EATTR(_name, _mode, \
+			iscsi_stat_tgt_attr_show_attr_##_name,	\
+ iscsi_stat_tgt_attr_store_attr_##_name);
+
+#define ISCSI_STAT_TGT_ATTR_RO(_name) \
+static struct iscsi_stat_tgt_attr_attribute \
+ iscsi_stat_tgt_attr_##_name = \
+ __CONFIGFS_EATTR_RO(_name, \
+ iscsi_stat_tgt_attr_show_attr_##_name);
+
+static ssize_t iscsi_stat_tgt_attr_show_attr_inst(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ struct iscsi_tiqn *tiqn = container_of(igrps,
+ struct iscsi_tiqn, tiqn_stat_grps);
+
+ return snprintf(page, PAGE_SIZE, "%u\n", tiqn->tiqn_index);
+}
+ISCSI_STAT_TGT_ATTR_RO(inst);
+
+static ssize_t iscsi_stat_tgt_attr_show_attr_indx(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_NODE_INDEX);
+}
+ISCSI_STAT_TGT_ATTR_RO(indx);
+
+static ssize_t iscsi_stat_tgt_attr_show_attr_login_fails(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ struct iscsi_tiqn *tiqn = container_of(igrps,
+ struct iscsi_tiqn, tiqn_stat_grps);
+ struct iscsi_login_stats *lstat = &tiqn->login_stats;
+ u32 fail_count;
+
+ spin_lock(&lstat->lock);
+ fail_count = (lstat->redirects + lstat->authorize_fails +
+ lstat->authenticate_fails + lstat->negotiate_fails +
+ lstat->other_fails);
+ spin_unlock(&lstat->lock);
+
+ return snprintf(page, PAGE_SIZE, "%u\n", fail_count);
+}
+ISCSI_STAT_TGT_ATTR_RO(login_fails);
+
+static ssize_t iscsi_stat_tgt_attr_show_attr_last_fail_time(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ struct iscsi_tiqn *tiqn = container_of(igrps,
+ struct iscsi_tiqn, tiqn_stat_grps);
+ struct iscsi_login_stats *lstat = &tiqn->login_stats;
+ u32 last_fail_time;
+
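+	/*
+	 * Report the failure time in hundredths of a second since boot
+	 * (SNMP TimeTicks), offset by INITIAL_JIFFIES.
+	 */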
+ spin_lock(&lstat->lock);
+ last_fail_time = lstat->last_fail_time ?
+ (u32)(((u32)lstat->last_fail_time -
+ INITIAL_JIFFIES) * 100 / HZ) : 0;
+ spin_unlock(&lstat->lock);
+
+ return snprintf(page, PAGE_SIZE, "%u\n", last_fail_time);
+}
+ISCSI_STAT_TGT_ATTR_RO(last_fail_time);
+
+static ssize_t iscsi_stat_tgt_attr_show_attr_last_fail_type(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ struct iscsi_tiqn *tiqn = container_of(igrps,
+ struct iscsi_tiqn, tiqn_stat_grps);
+ struct iscsi_login_stats *lstat = &tiqn->login_stats;
+ u32 last_fail_type;
+
+ spin_lock(&lstat->lock);
+ last_fail_type = lstat->last_fail_type;
+ spin_unlock(&lstat->lock);
+
+ return snprintf(page, PAGE_SIZE, "%u\n", last_fail_type);
+}
+ISCSI_STAT_TGT_ATTR_RO(last_fail_type);
+
+static ssize_t iscsi_stat_tgt_attr_show_attr_fail_intr_name(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ struct iscsi_tiqn *tiqn = container_of(igrps,
+ struct iscsi_tiqn, tiqn_stat_grps);
+ struct iscsi_login_stats *lstat = &tiqn->login_stats;
+ unsigned char buf[224];
+
+ spin_lock(&lstat->lock);
+ snprintf(buf, 224, "%s", lstat->last_intr_fail_name[0] ?
+ lstat->last_intr_fail_name : NONE);
+ spin_unlock(&lstat->lock);
+
+ return snprintf(page, PAGE_SIZE, "%s\n", buf);
+}
+ISCSI_STAT_TGT_ATTR_RO(fail_intr_name);
+
+static ssize_t iscsi_stat_tgt_attr_show_attr_fail_intr_addr_type(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ struct iscsi_tiqn *tiqn = container_of(igrps,
+ struct iscsi_tiqn, tiqn_stat_grps);
+ struct iscsi_login_stats *lstat = &tiqn->login_stats;
+ unsigned char buf[8];
+
+ spin_lock(&lstat->lock);
+	snprintf(buf, 8, "%s", (lstat->last_intr_fail_ip_family == AF_INET6) ?
+				"ipv6" : "ipv4");
+ spin_unlock(&lstat->lock);
+
+ return snprintf(page, PAGE_SIZE, "%s\n", buf);
+}
+ISCSI_STAT_TGT_ATTR_RO(fail_intr_addr_type);
+
+static ssize_t iscsi_stat_tgt_attr_show_attr_fail_intr_addr(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ struct iscsi_tiqn *tiqn = container_of(igrps,
+ struct iscsi_tiqn, tiqn_stat_grps);
+ struct iscsi_login_stats *lstat = &tiqn->login_stats;
+ unsigned char buf[32];
+
+ spin_lock(&lstat->lock);
+ if (lstat->last_intr_fail_ip_family == AF_INET6)
+ snprintf(buf, 32, "[%s]", lstat->last_intr_fail_ip_addr);
+ else
+ snprintf(buf, 32, "%s", lstat->last_intr_fail_ip_addr);
+ spin_unlock(&lstat->lock);
+
+ return snprintf(page, PAGE_SIZE, "%s\n", buf);
+}
+ISCSI_STAT_TGT_ATTR_RO(fail_intr_addr);
+
+CONFIGFS_EATTR_OPS(iscsi_stat_tgt_attr, iscsi_wwn_stat_grps,
+ iscsi_tgt_attr_group);
+
+static struct configfs_attribute *iscsi_stat_tgt_attr_attrs[] = {
+ &iscsi_stat_tgt_attr_inst.attr,
+ &iscsi_stat_tgt_attr_indx.attr,
+ &iscsi_stat_tgt_attr_login_fails.attr,
+ &iscsi_stat_tgt_attr_last_fail_time.attr,
+ &iscsi_stat_tgt_attr_last_fail_type.attr,
+ &iscsi_stat_tgt_attr_fail_intr_name.attr,
+ &iscsi_stat_tgt_attr_fail_intr_addr_type.attr,
+ &iscsi_stat_tgt_attr_fail_intr_addr.attr,
+ NULL,
+};
+
+static struct configfs_item_operations iscsi_stat_tgt_attr_item_ops = {
+ .show_attribute = iscsi_stat_tgt_attr_attr_show,
+ .store_attribute = iscsi_stat_tgt_attr_attr_store,
+};
+
+struct config_item_type iscsi_stat_tgt_attr_cit = {
+ .ct_item_ops = &iscsi_stat_tgt_attr_item_ops,
+ .ct_attrs = iscsi_stat_tgt_attr_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+/*
+ * Target Login Stats Table
+ */
+CONFIGFS_EATTR_STRUCT(iscsi_stat_login, iscsi_wwn_stat_grps);
+#define ISCSI_STAT_LOGIN(_name, _mode) \
+static struct iscsi_stat_login_attribute \
+ iscsi_stat_login_##_name = \
+ __CONFIGFS_EATTR(_name, _mode, \
+ iscsi_stat_login_show_attr_##_name, \
+ iscsi_stat_login_store_attr_##_name);
+
+#define ISCSI_STAT_LOGIN_RO(_name) \
+static struct iscsi_stat_login_attribute \
+ iscsi_stat_login_##_name = \
+ __CONFIGFS_EATTR_RO(_name, \
+ iscsi_stat_login_show_attr_##_name);
+
+static ssize_t iscsi_stat_login_show_attr_inst(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ struct iscsi_tiqn *tiqn = container_of(igrps,
+ struct iscsi_tiqn, tiqn_stat_grps);
+
+ return snprintf(page, PAGE_SIZE, "%u\n", tiqn->tiqn_index);
+}
+ISCSI_STAT_LOGIN_RO(inst);
+
+static ssize_t iscsi_stat_login_show_attr_indx(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_NODE_INDEX);
+}
+ISCSI_STAT_LOGIN_RO(indx);
+
+static ssize_t iscsi_stat_login_show_attr_accepts(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ struct iscsi_tiqn *tiqn = container_of(igrps,
+ struct iscsi_tiqn, tiqn_stat_grps);
+ struct iscsi_login_stats *lstat = &tiqn->login_stats;
+ ssize_t ret;
+
+ spin_lock(&lstat->lock);
+ ret = snprintf(page, PAGE_SIZE, "%u\n", lstat->accepts);
+ spin_unlock(&lstat->lock);
+
+ return ret;
+}
+ISCSI_STAT_LOGIN_RO(accepts);
+
+static ssize_t iscsi_stat_login_show_attr_other_fails(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ struct iscsi_tiqn *tiqn = container_of(igrps,
+ struct iscsi_tiqn, tiqn_stat_grps);
+ struct iscsi_login_stats *lstat = &tiqn->login_stats;
+ ssize_t ret;
+
+ spin_lock(&lstat->lock);
+ ret = snprintf(page, PAGE_SIZE, "%u\n", lstat->other_fails);
+ spin_unlock(&lstat->lock);
+
+ return ret;
+}
+ISCSI_STAT_LOGIN_RO(other_fails);
+
+static ssize_t iscsi_stat_login_show_attr_redirects(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ struct iscsi_tiqn *tiqn = container_of(igrps,
+ struct iscsi_tiqn, tiqn_stat_grps);
+ struct iscsi_login_stats *lstat = &tiqn->login_stats;
+ ssize_t ret;
+
+ spin_lock(&lstat->lock);
+ ret = snprintf(page, PAGE_SIZE, "%u\n", lstat->redirects);
+ spin_unlock(&lstat->lock);
+
+ return ret;
+}
+ISCSI_STAT_LOGIN_RO(redirects);
+
+static ssize_t iscsi_stat_login_show_attr_authorize_fails(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ struct iscsi_tiqn *tiqn = container_of(igrps,
+ struct iscsi_tiqn, tiqn_stat_grps);
+ struct iscsi_login_stats *lstat = &tiqn->login_stats;
+ ssize_t ret;
+
+ spin_lock(&lstat->lock);
+ ret = snprintf(page, PAGE_SIZE, "%u\n", lstat->authorize_fails);
+ spin_unlock(&lstat->lock);
+
+ return ret;
+}
+ISCSI_STAT_LOGIN_RO(authorize_fails);
+
+static ssize_t iscsi_stat_login_show_attr_authenticate_fails(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ struct iscsi_tiqn *tiqn = container_of(igrps,
+ struct iscsi_tiqn, tiqn_stat_grps);
+ struct iscsi_login_stats *lstat = &tiqn->login_stats;
+ ssize_t ret;
+
+ spin_lock(&lstat->lock);
+ ret = snprintf(page, PAGE_SIZE, "%u\n", lstat->authenticate_fails);
+ spin_unlock(&lstat->lock);
+
+ return ret;
+}
+ISCSI_STAT_LOGIN_RO(authenticate_fails);
+
+static ssize_t iscsi_stat_login_show_attr_negotiate_fails(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ struct iscsi_tiqn *tiqn = container_of(igrps,
+ struct iscsi_tiqn, tiqn_stat_grps);
+ struct iscsi_login_stats *lstat = &tiqn->login_stats;
+ ssize_t ret;
+
+ spin_lock(&lstat->lock);
+ ret = snprintf(page, PAGE_SIZE, "%u\n", lstat->negotiate_fails);
+ spin_unlock(&lstat->lock);
+
+ return ret;
+}
+ISCSI_STAT_LOGIN_RO(negotiate_fails);
+
+CONFIGFS_EATTR_OPS(iscsi_stat_login, iscsi_wwn_stat_grps,
+ iscsi_login_stats_group);
+
+static struct configfs_attribute *iscsi_stat_login_stats_attrs[] = {
+ &iscsi_stat_login_inst.attr,
+ &iscsi_stat_login_indx.attr,
+ &iscsi_stat_login_accepts.attr,
+ &iscsi_stat_login_other_fails.attr,
+ &iscsi_stat_login_redirects.attr,
+ &iscsi_stat_login_authorize_fails.attr,
+ &iscsi_stat_login_authenticate_fails.attr,
+ &iscsi_stat_login_negotiate_fails.attr,
+ NULL,
+};
+
+static struct configfs_item_operations iscsi_stat_login_stats_item_ops = {
+ .show_attribute = iscsi_stat_login_attr_show,
+ .store_attribute = iscsi_stat_login_attr_store,
+};
+
+struct config_item_type iscsi_stat_login_cit = {
+ .ct_item_ops = &iscsi_stat_login_stats_item_ops,
+ .ct_attrs = iscsi_stat_login_stats_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+/*
+ * Target Logout Stats Table
+ */
+
+CONFIGFS_EATTR_STRUCT(iscsi_stat_logout, iscsi_wwn_stat_grps);
+#define ISCSI_STAT_LOGOUT(_name, _mode) \
+static struct iscsi_stat_logout_attribute \
+ iscsi_stat_logout_##_name = \
+ __CONFIGFS_EATTR(_name, _mode, \
+ iscsi_stat_logout_show_attr_##_name, \
+ iscsi_stat_logout_store_attr_##_name);
+
+#define ISCSI_STAT_LOGOUT_RO(_name) \
+static struct iscsi_stat_logout_attribute \
+ iscsi_stat_logout_##_name = \
+ __CONFIGFS_EATTR_RO(_name, \
+ iscsi_stat_logout_show_attr_##_name);
+
+static ssize_t iscsi_stat_logout_show_attr_inst(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ struct iscsi_tiqn *tiqn = container_of(igrps,
+ struct iscsi_tiqn, tiqn_stat_grps);
+
+ return snprintf(page, PAGE_SIZE, "%u\n", tiqn->tiqn_index);
+}
+ISCSI_STAT_LOGOUT_RO(inst);
+
+static ssize_t iscsi_stat_logout_show_attr_indx(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_NODE_INDEX);
+}
+ISCSI_STAT_LOGOUT_RO(indx);
+
+static ssize_t iscsi_stat_logout_show_attr_normal_logouts(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ struct iscsi_tiqn *tiqn = container_of(igrps,
+ struct iscsi_tiqn, tiqn_stat_grps);
+ struct iscsi_logout_stats *lstats = &tiqn->logout_stats;
+
+ return snprintf(page, PAGE_SIZE, "%u\n", lstats->normal_logouts);
+}
+ISCSI_STAT_LOGOUT_RO(normal_logouts);
+
+static ssize_t iscsi_stat_logout_show_attr_abnormal_logouts(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ struct iscsi_tiqn *tiqn = container_of(igrps,
+ struct iscsi_tiqn, tiqn_stat_grps);
+ struct iscsi_logout_stats *lstats = &tiqn->logout_stats;
+
+ return snprintf(page, PAGE_SIZE, "%u\n", lstats->abnormal_logouts);
+}
+ISCSI_STAT_LOGOUT_RO(abnormal_logouts);
+
+CONFIGFS_EATTR_OPS(iscsi_stat_logout, iscsi_wwn_stat_grps,
+ iscsi_logout_stats_group);
+
+static struct configfs_attribute *iscsi_stat_logout_stats_attrs[] = {
+ &iscsi_stat_logout_inst.attr,
+ &iscsi_stat_logout_indx.attr,
+ &iscsi_stat_logout_normal_logouts.attr,
+ &iscsi_stat_logout_abnormal_logouts.attr,
+ NULL,
+};
+
+static struct configfs_item_operations iscsi_stat_logout_stats_item_ops = {
+ .show_attribute = iscsi_stat_logout_attr_show,
+ .store_attribute = iscsi_stat_logout_attr_store,
+};
+
+struct config_item_type iscsi_stat_logout_cit = {
+ .ct_item_ops = &iscsi_stat_logout_stats_item_ops,
+ .ct_attrs = iscsi_stat_logout_stats_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+/*
+ * Session Stats Table
+ */
+
+CONFIGFS_EATTR_STRUCT(iscsi_stat_sess, iscsi_node_stat_grps);
+#define ISCSI_STAT_SESS(_name, _mode) \
+static struct iscsi_stat_sess_attribute \
+ iscsi_stat_sess_##_name = \
+ __CONFIGFS_EATTR(_name, _mode, \
+ iscsi_stat_sess_show_attr_##_name, \
+ iscsi_stat_sess_store_attr_##_name);
+
+#define ISCSI_STAT_SESS_RO(_name) \
+static struct iscsi_stat_sess_attribute \
+ iscsi_stat_sess_##_name = \
+ __CONFIGFS_EATTR_RO(_name, \
+ iscsi_stat_sess_show_attr_##_name);
+
+static ssize_t iscsi_stat_sess_show_attr_inst(
+ struct iscsi_node_stat_grps *igrps, char *page)
+{
+ struct iscsi_node_acl *acl = container_of(igrps,
+ struct iscsi_node_acl, node_stat_grps);
+ struct se_wwn *wwn = acl->se_node_acl.se_tpg->se_tpg_wwn;
+ struct iscsi_tiqn *tiqn = container_of(wwn,
+ struct iscsi_tiqn, tiqn_wwn);
+
+ return snprintf(page, PAGE_SIZE, "%u\n", tiqn->tiqn_index);
+}
+ISCSI_STAT_SESS_RO(inst);
+
+static ssize_t iscsi_stat_sess_show_attr_node(
+ struct iscsi_node_stat_grps *igrps, char *page)
+{
+ struct iscsi_node_acl *acl = container_of(igrps,
+ struct iscsi_node_acl, node_stat_grps);
+ struct se_node_acl *se_nacl = &acl->se_node_acl;
+ struct iscsi_session *sess;
+ struct se_session *se_sess;
+ ssize_t ret = 0;
+
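+	/* Discovery sessions report a node index of 0. */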
+ spin_lock_bh(&se_nacl->nacl_sess_lock);
+ se_sess = se_nacl->nacl_sess;
+ if (se_sess) {
+ sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
+ if (sess)
+ ret = snprintf(page, PAGE_SIZE, "%u\n",
+ sess->sess_ops->SessionType ? 0 : ISCSI_NODE_INDEX);
+ }
+ spin_unlock_bh(&se_nacl->nacl_sess_lock);
+
+ return ret;
+}
+ISCSI_STAT_SESS_RO(node);
+
+static ssize_t iscsi_stat_sess_show_attr_indx(
+ struct iscsi_node_stat_grps *igrps, char *page)
+{
+ struct iscsi_node_acl *acl = container_of(igrps,
+ struct iscsi_node_acl, node_stat_grps);
+ struct se_node_acl *se_nacl = &acl->se_node_acl;
+ struct iscsi_session *sess;
+ struct se_session *se_sess;
+ ssize_t ret = 0;
+
+ spin_lock_bh(&se_nacl->nacl_sess_lock);
+ se_sess = se_nacl->nacl_sess;
+ if (se_sess) {
+ sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
+ if (sess)
+ ret = snprintf(page, PAGE_SIZE, "%u\n",
+ sess->session_index);
+ }
+ spin_unlock_bh(&se_nacl->nacl_sess_lock);
+
+ return ret;
+}
+ISCSI_STAT_SESS_RO(indx);
+
+static ssize_t iscsi_stat_sess_show_attr_cmd_pdus(
+ struct iscsi_node_stat_grps *igrps, char *page)
+{
+ struct iscsi_node_acl *acl = container_of(igrps,
+ struct iscsi_node_acl, node_stat_grps);
+ struct se_node_acl *se_nacl = &acl->se_node_acl;
+ struct iscsi_session *sess;
+ struct se_session *se_sess;
+ ssize_t ret = 0;
+
+ spin_lock_bh(&se_nacl->nacl_sess_lock);
+ se_sess = se_nacl->nacl_sess;
+ if (se_sess) {
+ sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
+ if (sess)
+ ret = snprintf(page, PAGE_SIZE, "%u\n", sess->cmd_pdus);
+ }
+ spin_unlock_bh(&se_nacl->nacl_sess_lock);
+
+ return ret;
+}
+ISCSI_STAT_SESS_RO(cmd_pdus);
+
+static ssize_t iscsi_stat_sess_show_attr_rsp_pdus(
+ struct iscsi_node_stat_grps *igrps, char *page)
+{
+ struct iscsi_node_acl *acl = container_of(igrps,
+ struct iscsi_node_acl, node_stat_grps);
+ struct se_node_acl *se_nacl = &acl->se_node_acl;
+ struct iscsi_session *sess;
+ struct se_session *se_sess;
+ ssize_t ret = 0;
+
+ spin_lock_bh(&se_nacl->nacl_sess_lock);
+ se_sess = se_nacl->nacl_sess;
+ if (se_sess) {
+ sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
+ if (sess)
+ ret = snprintf(page, PAGE_SIZE, "%u\n", sess->rsp_pdus);
+ }
+ spin_unlock_bh(&se_nacl->nacl_sess_lock);
+
+ return ret;
+}
+ISCSI_STAT_SESS_RO(rsp_pdus);
+
+static ssize_t iscsi_stat_sess_show_attr_txdata_octs(
+ struct iscsi_node_stat_grps *igrps, char *page)
+{
+ struct iscsi_node_acl *acl = container_of(igrps,
+ struct iscsi_node_acl, node_stat_grps);
+ struct se_node_acl *se_nacl = &acl->se_node_acl;
+ struct iscsi_session *sess;
+ struct se_session *se_sess;
+ ssize_t ret = 0;
+
+ spin_lock_bh(&se_nacl->nacl_sess_lock);
+ se_sess = se_nacl->nacl_sess;
+ if (se_sess) {
+ sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
+ if (sess)
+ ret = snprintf(page, PAGE_SIZE, "%llu\n",
+ (unsigned long long)sess->tx_data_octets);
+ }
+ spin_unlock_bh(&se_nacl->nacl_sess_lock);
+
+ return ret;
+}
+ISCSI_STAT_SESS_RO(txdata_octs);
+
+static ssize_t iscsi_stat_sess_show_attr_rxdata_octs(
+ struct iscsi_node_stat_grps *igrps, char *page)
+{
+ struct iscsi_node_acl *acl = container_of(igrps,
+ struct iscsi_node_acl, node_stat_grps);
+ struct se_node_acl *se_nacl = &acl->se_node_acl;
+ struct iscsi_session *sess;
+ struct se_session *se_sess;
+ ssize_t ret = 0;
+
+ spin_lock_bh(&se_nacl->nacl_sess_lock);
+ se_sess = se_nacl->nacl_sess;
+ if (se_sess) {
+ sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
+ if (sess)
+ ret = snprintf(page, PAGE_SIZE, "%llu\n",
+ (unsigned long long)sess->rx_data_octets);
+ }
+ spin_unlock_bh(&se_nacl->nacl_sess_lock);
+
+ return ret;
+}
+ISCSI_STAT_SESS_RO(rxdata_octs);
+
+static ssize_t iscsi_stat_sess_show_attr_conn_digest_errors(
+ struct iscsi_node_stat_grps *igrps, char *page)
+{
+ struct iscsi_node_acl *acl = container_of(igrps,
+ struct iscsi_node_acl, node_stat_grps);
+ struct se_node_acl *se_nacl = &acl->se_node_acl;
+ struct iscsi_session *sess;
+ struct se_session *se_sess;
+ ssize_t ret = 0;
+
+ spin_lock_bh(&se_nacl->nacl_sess_lock);
+ se_sess = se_nacl->nacl_sess;
+ if (se_sess) {
+ sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
+ if (sess)
+ ret = snprintf(page, PAGE_SIZE, "%u\n",
+ sess->conn_digest_errors);
+ }
+ spin_unlock_bh(&se_nacl->nacl_sess_lock);
+
+ return ret;
+}
+ISCSI_STAT_SESS_RO(conn_digest_errors);
+
+static ssize_t iscsi_stat_sess_show_attr_conn_timeout_errors(
+ struct iscsi_node_stat_grps *igrps, char *page)
+{
+ struct iscsi_node_acl *acl = container_of(igrps,
+ struct iscsi_node_acl, node_stat_grps);
+ struct se_node_acl *se_nacl = &acl->se_node_acl;
+ struct iscsi_session *sess;
+ struct se_session *se_sess;
+ ssize_t ret = 0;
+
+ spin_lock_bh(&se_nacl->nacl_sess_lock);
+ se_sess = se_nacl->nacl_sess;
+ if (se_sess) {
+ sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
+ if (sess)
+ ret = snprintf(page, PAGE_SIZE, "%u\n",
+ sess->conn_timeout_errors);
+ }
+ spin_unlock_bh(&se_nacl->nacl_sess_lock);
+
+ return ret;
+}
+ISCSI_STAT_SESS_RO(conn_timeout_errors);
+
+CONFIGFS_EATTR_OPS(iscsi_stat_sess, iscsi_node_stat_grps,
+ iscsi_sess_stats_group);
+
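+/*
+ * CONFIGFS_EATTR_OPS() above generates the iscsi_stat_sess_attr_show()
+ * and iscsi_stat_sess_attr_store() dispatchers referenced in the
+ * item_ops below.
+ */
+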
+static struct configfs_attribute *iscsi_stat_sess_stats_attrs[] = {
+ &iscsi_stat_sess_inst.attr,
+ &iscsi_stat_sess_node.attr,
+ &iscsi_stat_sess_indx.attr,
+ &iscsi_stat_sess_cmd_pdus.attr,
+ &iscsi_stat_sess_rsp_pdus.attr,
+ &iscsi_stat_sess_txdata_octs.attr,
+ &iscsi_stat_sess_rxdata_octs.attr,
+ &iscsi_stat_sess_conn_digest_errors.attr,
+ &iscsi_stat_sess_conn_timeout_errors.attr,
+ NULL,
+};
+
+static struct configfs_item_operations iscsi_stat_sess_stats_item_ops = {
+ .show_attribute = iscsi_stat_sess_attr_show,
+ .store_attribute = iscsi_stat_sess_attr_store,
+};
+
+struct config_item_type iscsi_stat_sess_cit = {
+ .ct_item_ops = &iscsi_stat_sess_stats_item_ops,
+ .ct_attrs = iscsi_stat_sess_stats_attrs,
+ .ct_owner = THIS_MODULE,
+};
diff --git a/drivers/target/iscsi/iscsi_target_stat.h b/drivers/target/iscsi/iscsi_target_stat.h
new file mode 100644
index 00000000000..3ff76b4faad
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_stat.h
@@ -0,0 +1,64 @@
+#ifndef ISCSI_TARGET_STAT_H
+#define ISCSI_TARGET_STAT_H
+
+/*
+ * For struct iscsi_tiqn->tiqn_wwn default groups
+ */
+extern struct config_item_type iscsi_stat_instance_cit;
+extern struct config_item_type iscsi_stat_sess_err_cit;
+extern struct config_item_type iscsi_stat_tgt_attr_cit;
+extern struct config_item_type iscsi_stat_login_cit;
+extern struct config_item_type iscsi_stat_logout_cit;
+
+/*
+ * For struct iscsi_session->se_sess default groups
+ */
+extern struct config_item_type iscsi_stat_sess_cit;
+
+/* iSCSI session error types */
+#define ISCSI_SESS_ERR_UNKNOWN 0
+#define ISCSI_SESS_ERR_DIGEST 1
+#define ISCSI_SESS_ERR_CXN_TIMEOUT 2
+#define ISCSI_SESS_ERR_PDU_FORMAT 3
+
+/* iSCSI session error stats */
+struct iscsi_sess_err_stats {
+ spinlock_t lock;
+ u32 digest_errors;
+ u32 cxn_timeout_errors;
+ u32 pdu_format_errors;
+ u32 last_sess_failure_type;
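+	/* presumably sized for a maximum-length iSCSI name (223 bytes
+	 * per RFC 3720) plus a terminating NUL */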
+ char last_sess_fail_rem_name[224];
+} ____cacheline_aligned;
+
+/* iSCSI login failure types (sub oids) */
+#define ISCSI_LOGIN_FAIL_OTHER 2
+#define ISCSI_LOGIN_FAIL_REDIRECT 3
+#define ISCSI_LOGIN_FAIL_AUTHORIZE 4
+#define ISCSI_LOGIN_FAIL_AUTHENTICATE 5
+#define ISCSI_LOGIN_FAIL_NEGOTIATE 6
+
+/* iSCSI login stats */
+struct iscsi_login_stats {
+ spinlock_t lock;
+ u32 accepts;
+ u32 other_fails;
+ u32 redirects;
+ u32 authorize_fails;
+ u32 authenticate_fails;
+ u32 negotiate_fails; /* used for notifications */
+ u64 last_fail_time; /* time stamp (jiffies) */
+ u32 last_fail_type;
+ int last_intr_fail_ip_family;
+ unsigned char last_intr_fail_ip_addr[IPV6_ADDRESS_SPACE];
+ char last_intr_fail_name[224];
+} ____cacheline_aligned;
+
+/* iSCSI logout stats */
+struct iscsi_logout_stats {
+ spinlock_t lock;
+ u32 normal_logouts;
+ u32 abnormal_logouts;
+} ____cacheline_aligned;
+
+#endif /*** ISCSI_TARGET_STAT_H ***/
diff --git a/drivers/target/iscsi/iscsi_target_tmr.c b/drivers/target/iscsi/iscsi_target_tmr.c
new file mode 100644
index 00000000000..db1fe1ec84d
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_tmr.c
@@ -0,0 +1,849 @@
+/*******************************************************************************
+ * This file contains the iSCSI Target specific Task Management functions.
+ *
+ * © Copyright 2007-2011 RisingTide Systems LLC.
+ *
+ * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ *
+ * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ ******************************************************************************/
+
+#include <asm/unaligned.h>
+#include <scsi/iscsi_proto.h>
+#include <target/target_core_base.h>
+#include <target/target_core_transport.h>
+
+#include "iscsi_target_core.h"
+#include "iscsi_target_seq_pdu_list.h"
+#include "iscsi_target_datain_values.h"
+#include "iscsi_target_device.h"
+#include "iscsi_target_erl0.h"
+#include "iscsi_target_erl1.h"
+#include "iscsi_target_erl2.h"
+#include "iscsi_target_tmr.h"
+#include "iscsi_target_tpg.h"
+#include "iscsi_target_util.h"
+#include "iscsi_target.h"
+
+u8 iscsit_tmr_abort_task(
+ struct iscsi_cmd *cmd,
+ unsigned char *buf)
+{
+ struct iscsi_cmd *ref_cmd;
+ struct iscsi_conn *conn = cmd->conn;
+ struct iscsi_tmr_req *tmr_req = cmd->tmr_req;
+ struct se_tmr_req *se_tmr = cmd->se_cmd.se_tmr_req;
+ struct iscsi_tm *hdr = (struct iscsi_tm *) buf;
+
+ ref_cmd = iscsit_find_cmd_from_itt(conn, hdr->rtt);
+ if (!ref_cmd) {
+ pr_err("Unable to locate RefTaskTag: 0x%08x on CID:"
+ " %hu.\n", hdr->rtt, conn->cid);
+ return ((hdr->refcmdsn >= conn->sess->exp_cmd_sn) &&
+ (hdr->refcmdsn <= conn->sess->max_cmd_sn)) ?
+ ISCSI_TMF_RSP_COMPLETE : ISCSI_TMF_RSP_NO_TASK;
+ }
+ if (ref_cmd->cmd_sn != hdr->refcmdsn) {
+ pr_err("RefCmdSN 0x%08x does not equal"
+ " task's CmdSN 0x%08x. Rejecting ABORT_TASK.\n",
+ hdr->refcmdsn, ref_cmd->cmd_sn);
+ return ISCSI_TMF_RSP_REJECTED;
+ }
+
+ se_tmr->ref_task_tag = hdr->rtt;
+ se_tmr->ref_cmd = &ref_cmd->se_cmd;
+ tmr_req->ref_cmd_sn = hdr->refcmdsn;
+ tmr_req->exp_data_sn = hdr->exp_datasn;
+
+ return ISCSI_TMF_RSP_COMPLETE;
+}
+
+/*
+ * Called from iscsit_handle_task_mgt_cmd().
+ */
+int iscsit_tmr_task_warm_reset(
+ struct iscsi_conn *conn,
+ struct iscsi_tmr_req *tmr_req,
+ unsigned char *buf)
+{
+ struct iscsi_session *sess = conn->sess;
+ struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
+#if 0
+ struct iscsi_init_task_mgt_cmnd *hdr =
+ (struct iscsi_init_task_mgt_cmnd *) buf;
+#endif
+ if (!na->tmr_warm_reset) {
+ pr_err("TMR Opcode TARGET_WARM_RESET authorization"
+ " failed for Initiator Node: %s\n",
+ sess->se_sess->se_node_acl->initiatorname);
+ return -1;
+ }
+ /*
+ * Do the real work in transport_generic_do_tmr().
+ */
+ return 0;
+}
+
+int iscsit_tmr_task_cold_reset(
+ struct iscsi_conn *conn,
+ struct iscsi_tmr_req *tmr_req,
+ unsigned char *buf)
+{
+ struct iscsi_session *sess = conn->sess;
+ struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
+
+ if (!na->tmr_cold_reset) {
+ pr_err("TMR Opcode TARGET_COLD_RESET authorization"
+ " failed for Initiator Node: %s\n",
+ sess->se_sess->se_node_acl->initiatorname);
+ return -1;
+ }
+ /*
+ * Do the real work in transport_generic_do_tmr().
+ */
+ return 0;
+}
+
+u8 iscsit_tmr_task_reassign(
+ struct iscsi_cmd *cmd,
+ unsigned char *buf)
+{
+ struct iscsi_cmd *ref_cmd = NULL;
+ struct iscsi_conn *conn = cmd->conn;
+ struct iscsi_conn_recovery *cr = NULL;
+ struct iscsi_tmr_req *tmr_req = cmd->tmr_req;
+ struct se_tmr_req *se_tmr = cmd->se_cmd.se_tmr_req;
+ struct iscsi_tm *hdr = (struct iscsi_tm *) buf;
+ int ret;
+
+ pr_debug("Got TASK_REASSIGN TMR ITT: 0x%08x,"
+ " RefTaskTag: 0x%08x, ExpDataSN: 0x%08x, CID: %hu\n",
+ hdr->itt, hdr->rtt, hdr->exp_datasn, conn->cid);
+
+ if (conn->sess->sess_ops->ErrorRecoveryLevel != 2) {
+ pr_err("TMR TASK_REASSIGN not supported in ERL<2,"
+ " ignoring request.\n");
+ return ISCSI_TMF_RSP_NOT_SUPPORTED;
+ }
+
+ ret = iscsit_find_cmd_for_recovery(conn->sess, &ref_cmd, &cr, hdr->rtt);
+ if (ret == -2) {
+ pr_err("Command ITT: 0x%08x is still alligent to CID:"
+ " %hu\n", ref_cmd->init_task_tag, cr->cid);
+ return ISCSI_TMF_RSP_TASK_ALLEGIANT;
+ } else if (ret == -1) {
+ pr_err("Unable to locate RefTaskTag: 0x%08x in"
+ " connection recovery command list.\n", hdr->rtt);
+ return ISCSI_TMF_RSP_NO_TASK;
+ }
+ /*
+ * Temporary check to prevent connection recovery for
+ * connections with a differing MaxRecvDataSegmentLength.
+ */
+ if (cr->maxrecvdatasegmentlength !=
+ conn->conn_ops->MaxRecvDataSegmentLength) {
+ pr_err("Unable to perform connection recovery for"
+ " differing MaxRecvDataSegmentLength, rejecting"
+ " TMR TASK_REASSIGN.\n");
+ return ISCSI_TMF_RSP_REJECTED;
+ }
+
+ se_tmr->ref_task_tag = hdr->rtt;
+ se_tmr->ref_cmd = &ref_cmd->se_cmd;
+ se_tmr->ref_task_lun = get_unaligned_le64(&hdr->lun);
+ tmr_req->ref_cmd_sn = hdr->refcmdsn;
+ tmr_req->exp_data_sn = hdr->exp_datasn;
+ tmr_req->conn_recovery = cr;
+ tmr_req->task_reassign = 1;
+ /*
+ * Command can now be reassigned to a new connection.
+ * The task management response must be sent before the
+ * reassignment actually happens. See iscsi_tmr_post_handler().
+ */
+ return ISCSI_TMF_RSP_COMPLETE;
+}
+
+static void iscsit_task_reassign_remove_cmd(
+ struct iscsi_cmd *cmd,
+ struct iscsi_conn_recovery *cr,
+ struct iscsi_session *sess)
+{
+ int ret;
+
+ spin_lock(&cr->conn_recovery_cmd_lock);
+ ret = iscsit_remove_cmd_from_connection_recovery(cmd, sess);
+ spin_unlock(&cr->conn_recovery_cmd_lock);
+ if (!ret) {
+ pr_debug("iSCSI connection recovery successful for CID:"
+ " %hu on SID: %u\n", cr->cid, sess->sid);
+ iscsit_remove_active_connection_recovery_entry(cr, sess);
+ }
+}
+
+static int iscsit_task_reassign_complete_nop_out(
+ struct iscsi_tmr_req *tmr_req,
+ struct iscsi_conn *conn)
+{
+ struct se_tmr_req *se_tmr = tmr_req->se_tmr_req;
+ struct se_cmd *se_cmd = se_tmr->ref_cmd;
+ struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+ struct iscsi_conn_recovery *cr;
+
+ if (!cmd->cr) {
+ pr_err("struct iscsi_conn_recovery pointer for ITT: 0x%08x"
+ " is NULL!\n", cmd->init_task_tag);
+ return -1;
+ }
+ cr = cmd->cr;
+
+ /*
+	 * Reset the StatSN so a new one for this command's new connection
+ * will be assigned.
+ * Reset the ExpStatSN as well so we may receive Status SNACKs.
+ */
+ cmd->stat_sn = cmd->exp_stat_sn = 0;
+
+ iscsit_task_reassign_remove_cmd(cmd, cr, conn->sess);
+
+ spin_lock_bh(&conn->cmd_lock);
+ list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
+ spin_unlock_bh(&conn->cmd_lock);
+
+ cmd->i_state = ISTATE_SEND_NOPIN;
+ iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
+ return 0;
+}
+
+static int iscsit_task_reassign_complete_write(
+ struct iscsi_cmd *cmd,
+ struct iscsi_tmr_req *tmr_req)
+{
+ int no_build_r2ts = 0;
+ u32 length = 0, offset = 0;
+ struct iscsi_conn *conn = cmd->conn;
+ struct se_cmd *se_cmd = &cmd->se_cmd;
+ /*
+	 * The Initiator must not send an R2T SNACK with a BegRun less than
+ * the TMR TASK_REASSIGN's ExpDataSN.
+ */
+ if (!tmr_req->exp_data_sn) {
+ cmd->cmd_flags &= ~ICF_GOT_DATACK_SNACK;
+ cmd->acked_data_sn = 0;
+ } else {
+ cmd->cmd_flags |= ICF_GOT_DATACK_SNACK;
+ cmd->acked_data_sn = (tmr_req->exp_data_sn - 1);
+ }
+
+ /*
+ * The TMR TASK_REASSIGN's ExpDataSN contains the next R2TSN the
+ * Initiator is expecting. The Target controls all WRITE operations
+	 * so if we have received all DataOUT we can safely ignore the Initiator.
+ */
+ if (cmd->cmd_flags & ICF_GOT_LAST_DATAOUT) {
+ if (!atomic_read(&cmd->transport_sent)) {
+ pr_debug("WRITE ITT: 0x%08x: t_state: %d"
+ " never sent to transport\n",
+ cmd->init_task_tag, cmd->se_cmd.t_state);
+ return transport_generic_handle_data(se_cmd);
+ }
+
+ cmd->i_state = ISTATE_SEND_STATUS;
+ iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
+ return 0;
+ }
+
+ /*
+	 * Special case to deal with DataSequenceInOrder=No and Non-Immediate
+ * Unsolicited DataOut.
+ */
+ if (cmd->unsolicited_data) {
+ cmd->unsolicited_data = 0;
+
+ offset = cmd->next_burst_len = cmd->write_data_done;
+
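+		/*
+		 * If the unsolicited first burst can absorb the remaining
+		 * data, only the remainder is re-requested and no further
+		 * R2Ts are built.
+		 */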
+ if ((conn->sess->sess_ops->FirstBurstLength - offset) >=
+ cmd->data_length) {
+ no_build_r2ts = 1;
+ length = (cmd->data_length - offset);
+ } else
+ length = (conn->sess->sess_ops->FirstBurstLength - offset);
+
+ spin_lock_bh(&cmd->r2t_lock);
+ if (iscsit_add_r2t_to_list(cmd, offset, length, 0, 0) < 0) {
+ spin_unlock_bh(&cmd->r2t_lock);
+ return -1;
+ }
+ cmd->outstanding_r2ts++;
+ spin_unlock_bh(&cmd->r2t_lock);
+
+ if (no_build_r2ts)
+ return 0;
+ }
+ /*
+ * iscsit_build_r2ts_for_cmd() can handle the rest from here.
+ */
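+	/*
+	 * Note: the final argument (2) appears to select recovery-style R2T
+	 * generation; see iscsit_build_r2ts_for_cmd() for its exact meaning.
+	 */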
+ return iscsit_build_r2ts_for_cmd(cmd, conn, 2);
+}
+
+static int iscsit_task_reassign_complete_read(
+ struct iscsi_cmd *cmd,
+ struct iscsi_tmr_req *tmr_req)
+{
+ struct iscsi_conn *conn = cmd->conn;
+ struct iscsi_datain_req *dr;
+ struct se_cmd *se_cmd = &cmd->se_cmd;
+ /*
+ * The Initiator must not send a Data SNACK with a BegRun less than
+ * the TMR TASK_REASSIGN's ExpDataSN.
+ */
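+	/*
+	 * When ExpDataSN is set, DataSNs up to ExpDataSN - 1 are treated as
+	 * acknowledged, just as if the Initiator had covered them with a
+	 * DataACK SNACK.
+	 */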
+ if (!tmr_req->exp_data_sn) {
+ cmd->cmd_flags &= ~ICF_GOT_DATACK_SNACK;
+ cmd->acked_data_sn = 0;
+ } else {
+ cmd->cmd_flags |= ICF_GOT_DATACK_SNACK;
+ cmd->acked_data_sn = (tmr_req->exp_data_sn - 1);
+ }
+
+ if (!atomic_read(&cmd->transport_sent)) {
+ pr_debug("READ ITT: 0x%08x: t_state: %d never sent to"
+ " transport\n", cmd->init_task_tag,
+ cmd->se_cmd.t_state);
+ transport_generic_handle_cdb(se_cmd);
+ return 0;
+ }
+
+ if (!atomic_read(&se_cmd->t_transport_complete)) {
+ pr_err("READ ITT: 0x%08x: t_state: %d, never returned"
+ " from transport\n", cmd->init_task_tag,
+ cmd->se_cmd.t_state);
+ return -1;
+ }
+
+ dr = iscsit_allocate_datain_req();
+ if (!dr)
+ return -1;
+ /*
+ * The TMR TASK_REASSIGN's ExpDataSN contains the next DataSN the
+ * Initiator is expecting.
+ */
+ dr->data_sn = dr->begrun = tmr_req->exp_data_sn;
+ dr->runlength = 0;
+ dr->generate_recovery_values = 1;
+ dr->recovery = DATAIN_CONNECTION_RECOVERY;
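+	/*
+	 * generate_recovery_values presumably directs the DataIN code to
+	 * rebuild its sequence state for the replayed run beginning at the
+	 * Initiator's ExpDataSN.
+	 */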
+
+ iscsit_attach_datain_req(cmd, dr);
+
+ cmd->i_state = ISTATE_SEND_DATAIN;
+ iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
+ return 0;
+}
+
+static int iscsit_task_reassign_complete_none(
+ struct iscsi_cmd *cmd,
+ struct iscsi_tmr_req *tmr_req)
+{
+ struct iscsi_conn *conn = cmd->conn;
+
+ cmd->i_state = ISTATE_SEND_STATUS;
+ iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
+ return 0;
+}
+
+static int iscsit_task_reassign_complete_scsi_cmnd(
+ struct iscsi_tmr_req *tmr_req,
+ struct iscsi_conn *conn)
+{
+ struct se_tmr_req *se_tmr = tmr_req->se_tmr_req;
+ struct se_cmd *se_cmd = se_tmr->ref_cmd;
+ struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+ struct iscsi_conn_recovery *cr;
+
+ if (!cmd->cr) {
+ pr_err("struct iscsi_conn_recovery pointer for ITT: 0x%08x"
+ " is NULL!\n", cmd->init_task_tag);
+ return -1;
+ }
+ cr = cmd->cr;
+
+ /*
+	 * Reset the StatSN so a new one for this command's new connection
+ * will be assigned.
+ * Reset the ExpStatSN as well so we may receive Status SNACKs.
+ */
+ cmd->stat_sn = cmd->exp_stat_sn = 0;
+
+ iscsit_task_reassign_remove_cmd(cmd, cr, conn->sess);
+
+ spin_lock_bh(&conn->cmd_lock);
+ list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
+ spin_unlock_bh(&conn->cmd_lock);
+
+ if (se_cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
+ cmd->i_state = ISTATE_SEND_STATUS;
+ iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
+ return 0;
+ }
+
+ switch (cmd->data_direction) {
+ case DMA_TO_DEVICE:
+ return iscsit_task_reassign_complete_write(cmd, tmr_req);
+ case DMA_FROM_DEVICE:
+ return iscsit_task_reassign_complete_read(cmd, tmr_req);
+ case DMA_NONE:
+ return iscsit_task_reassign_complete_none(cmd, tmr_req);
+ default:
+ pr_err("Unknown cmd->data_direction: 0x%02x\n",
+ cmd->data_direction);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int iscsit_task_reassign_complete(
+ struct iscsi_tmr_req *tmr_req,
+ struct iscsi_conn *conn)
+{
+ struct se_tmr_req *se_tmr = tmr_req->se_tmr_req;
+ struct se_cmd *se_cmd;
+ struct iscsi_cmd *cmd;
+ int ret = 0;
+
+ if (!se_tmr->ref_cmd) {
+ pr_err("TMR Request is missing a RefCmd struct iscsi_cmd.\n");
+ return -1;
+ }
+ se_cmd = se_tmr->ref_cmd;
+ cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+
+ cmd->conn = conn;
+
+ switch (cmd->iscsi_opcode) {
+ case ISCSI_OP_NOOP_OUT:
+ ret = iscsit_task_reassign_complete_nop_out(tmr_req, conn);
+ break;
+ case ISCSI_OP_SCSI_CMD:
+ ret = iscsit_task_reassign_complete_scsi_cmnd(tmr_req, conn);
+ break;
+ default:
+ pr_err("Illegal iSCSI Opcode 0x%02x during"
+ " command realligence\n", cmd->iscsi_opcode);
+ return -1;
+ }
+
+ if (ret != 0)
+ return ret;
+
+ pr_debug("Completed connection realligence for Opcode: 0x%02x,"
+ " ITT: 0x%08x to CID: %hu.\n", cmd->iscsi_opcode,
+ cmd->init_task_tag, conn->cid);
+
+ return 0;
+}
+
+/*
+ * Handles special after-the-fact actions related to TMRs.
+ * Right now the only one it is really needed for is
+ * connection recovery related TASK_REASSIGN.
+ */
+int iscsit_tmr_post_handler(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
+{
+ struct iscsi_tmr_req *tmr_req = cmd->tmr_req;
+ struct se_tmr_req *se_tmr = cmd->se_cmd.se_tmr_req;
+
+ if (tmr_req->task_reassign &&
+ (se_tmr->response == ISCSI_TMF_RSP_COMPLETE))
+ return iscsit_task_reassign_complete(tmr_req, conn);
+
+ return 0;
+}
+
+/*
+ * Nothing to do here, but leave it for good measure. :-)
+ */
+int iscsit_task_reassign_prepare_read(
+ struct iscsi_tmr_req *tmr_req,
+ struct iscsi_conn *conn)
+{
+ return 0;
+}
+
+static void iscsit_task_reassign_prepare_unsolicited_dataout(
+ struct iscsi_cmd *cmd,
+ struct iscsi_conn *conn)
+{
+ int i, j;
+ struct iscsi_pdu *pdu = NULL;
+ struct iscsi_seq *seq = NULL;
+
+ if (conn->sess->sess_ops->DataSequenceInOrder) {
+ cmd->data_sn = 0;
+
+ if (cmd->immediate_data)
+ cmd->r2t_offset += (cmd->first_burst_len -
+ cmd->seq_start_offset);
+
+ if (conn->sess->sess_ops->DataPDUInOrder) {
+ cmd->write_data_done -= (cmd->immediate_data) ?
+ (cmd->first_burst_len -
+ cmd->seq_start_offset) :
+ cmd->first_burst_len;
+ cmd->first_burst_len = 0;
+ return;
+ }
+
+ for (i = 0; i < cmd->pdu_count; i++) {
+ pdu = &cmd->pdu_list[i];
+
+ if (pdu->status != ISCSI_PDU_RECEIVED_OK)
+ continue;
+
+ if ((pdu->offset >= cmd->seq_start_offset) &&
+ ((pdu->offset + pdu->length) <=
+ cmd->seq_end_offset)) {
+ cmd->first_burst_len -= pdu->length;
+ cmd->write_data_done -= pdu->length;
+ pdu->status = ISCSI_PDU_NOT_RECEIVED;
+ }
+ }
+ } else {
+ for (i = 0; i < cmd->seq_count; i++) {
+ seq = &cmd->seq_list[i];
+
+ if (seq->type != SEQTYPE_UNSOLICITED)
+ continue;
+
+ cmd->write_data_done -=
+ (seq->offset - seq->orig_offset);
+ cmd->first_burst_len = 0;
+ seq->data_sn = 0;
+ seq->offset = seq->orig_offset;
+ seq->next_burst_len = 0;
+ seq->status = DATAOUT_SEQUENCE_WITHIN_COMMAND_RECOVERY;
+
+ if (conn->sess->sess_ops->DataPDUInOrder)
+ continue;
+
+ for (j = 0; j < seq->pdu_count; j++) {
+ pdu = &cmd->pdu_list[j+seq->pdu_start];
+
+ if (pdu->status != ISCSI_PDU_RECEIVED_OK)
+ continue;
+
+ pdu->status = ISCSI_PDU_NOT_RECEIVED;
+ }
+ }
+ }
+}
+
+int iscsit_task_reassign_prepare_write(
+ struct iscsi_tmr_req *tmr_req,
+ struct iscsi_conn *conn)
+{
+ struct se_tmr_req *se_tmr = tmr_req->se_tmr_req;
+ struct se_cmd *se_cmd = se_tmr->ref_cmd;
+ struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+ struct iscsi_pdu *pdu = NULL;
+ struct iscsi_r2t *r2t = NULL, *r2t_tmp;
+ int first_incomplete_r2t = 1, i = 0;
+
+ /*
+ * The command was in the process of receiving Unsolicited DataOUT when
+ * the connection failed.
+ */
+ if (cmd->unsolicited_data)
+ iscsit_task_reassign_prepare_unsolicited_dataout(cmd, conn);
+
+ /*
+ * The Initiator is requesting R2Ts starting from zero, skip
+ * checking acknowledged R2Ts and start checking struct iscsi_r2ts
+ * greater than zero.
+ */
+ if (!tmr_req->exp_data_sn)
+ goto drop_unacknowledged_r2ts;
+
+ /*
+ * We now check that the PDUs in DataOUT sequences below
+ * the TMR TASK_REASSIGN ExpDataSN (R2TSN the Initiator is
+ * expecting next) have all the DataOUT they require to complete
+ * the DataOUT sequence. First scan from R2TSN 0 to TMR
+ * TASK_REASSIGN ExpDataSN-1.
+ *
+ * If we have not received all DataOUT in question, we must
+ * make sure to make the appropriate changes to values in
+ * struct iscsi_cmd (and elsewhere depending on session parameters)
+ * so iscsit_build_r2ts_for_cmd() in iscsit_task_reassign_complete_write()
+ * will resend a new R2T for the DataOUT sequences in question.
+ */
+ spin_lock_bh(&cmd->r2t_lock);
+ if (list_empty(&cmd->cmd_r2t_list)) {
+ spin_unlock_bh(&cmd->r2t_lock);
+ return -1;
+ }
+
+ list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) {
+
+ if (r2t->r2t_sn >= tmr_req->exp_data_sn)
+ continue;
+ /*
+ * Safely ignore Recovery R2Ts and R2Ts that have completed
+ * DataOUT sequences.
+ */
+ if (r2t->seq_complete)
+ continue;
+
+ if (r2t->recovery_r2t)
+ continue;
+
+ /*
+ * DataSequenceInOrder=Yes:
+ *
+ * Taking into account the iSCSI implementation requirement of
+ * MaxOutstandingR2T=1 while ErrorRecoveryLevel>0 and
+ * DataSequenceInOrder=Yes, we must take into consideration
+ * the following:
+ *
+ * DataSequenceInOrder=No:
+ *
+ * Taking into account that the Initiator controls the (possibly
+ * random) PDU Order in (possibly random) Sequence Order of
+ * DataOUT the target requests with R2Ts, we must take into
+ * consideration the following:
+ *
+ * DataPDUInOrder=Yes for DataSequenceInOrder=[Yes,No]:
+ *
+ * While processing non-complete R2T DataOUT sequence requests
+ * the Target will re-request only the total sequence length
+ * minus current received offset. This is because we must
+ * assume the initiator will continue sending DataOUT from the
+ * last PDU before the connection failed.
+ *
+ * DataPDUInOrder=No for DataSequenceInOrder=[Yes,No]:
+ *
+ * While processing non-complete R2T DataOUT sequence requests
+ * the Target will re-request the entire DataOUT sequence if
+ * any single PDU is missing from the sequence. This is because
+ * we have no logical method to determine the next PDU offset,
+ * and we must assume the Initiator will be sending any random
+ * PDU offset in the current sequence after TASK_REASSIGN
+ * has completed.
+ */
+ if (conn->sess->sess_ops->DataSequenceInOrder) {
+ if (!first_incomplete_r2t) {
+ cmd->r2t_offset -= r2t->xfer_len;
+ goto next;
+ }
+
+ if (conn->sess->sess_ops->DataPDUInOrder) {
+ cmd->data_sn = 0;
+ cmd->r2t_offset -= (r2t->xfer_len -
+ cmd->next_burst_len);
+ first_incomplete_r2t = 0;
+ goto next;
+ }
+
+ cmd->data_sn = 0;
+ cmd->r2t_offset -= r2t->xfer_len;
+
+ for (i = 0; i < cmd->pdu_count; i++) {
+ pdu = &cmd->pdu_list[i];
+
+ if (pdu->status != ISCSI_PDU_RECEIVED_OK)
+ continue;
+
+ if ((pdu->offset >= r2t->offset) &&
+ (pdu->offset < (r2t->offset +
+ r2t->xfer_len))) {
+ cmd->next_burst_len -= pdu->length;
+ cmd->write_data_done -= pdu->length;
+ pdu->status = ISCSI_PDU_NOT_RECEIVED;
+ }
+ }
+
+ first_incomplete_r2t = 0;
+ } else {
+ struct iscsi_seq *seq;
+
+ seq = iscsit_get_seq_holder(cmd, r2t->offset,
+ r2t->xfer_len);
+ if (!seq) {
+ spin_unlock_bh(&cmd->r2t_lock);
+ return -1;
+ }
+
+ cmd->write_data_done -=
+ (seq->offset - seq->orig_offset);
+ seq->data_sn = 0;
+ seq->offset = seq->orig_offset;
+ seq->next_burst_len = 0;
+ seq->status = DATAOUT_SEQUENCE_WITHIN_COMMAND_RECOVERY;
+
+ cmd->seq_send_order--;
+
+ if (conn->sess->sess_ops->DataPDUInOrder)
+ goto next;
+
+ for (i = 0; i < seq->pdu_count; i++) {
+ pdu = &cmd->pdu_list[i+seq->pdu_start];
+
+ if (pdu->status != ISCSI_PDU_RECEIVED_OK)
+ continue;
+
+ pdu->status = ISCSI_PDU_NOT_RECEIVED;
+ }
+ }
+
+next:
+ cmd->outstanding_r2ts--;
+ }
+ spin_unlock_bh(&cmd->r2t_lock);
+
+ /*
+	 * We now drop all unacknowledged R2Ts, i.e. from the TMR
+	 * TASK_REASSIGN's ExpDataSN to the last R2T in the list. We are also careful
+ * to check that the Initiator is not requesting R2Ts for DataOUT
+ * sequences it has already completed.
+ *
+ * Free each R2T in question and adjust values in struct iscsi_cmd
+	 * accordingly so iscsit_build_r2ts_for_cmd() does the rest of
+ * the work after the TMR TASK_REASSIGN Response is sent.
+ */
+drop_unacknowledged_r2ts:
+
+ cmd->cmd_flags &= ~ICF_SENT_LAST_R2T;
+ cmd->r2t_sn = tmr_req->exp_data_sn;
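+	/*
+	 * Resume R2T numbering at the Initiator's ExpDataSN so re-issued
+	 * R2Ts carry the R2TSNs the Initiator is expecting.
+	 */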
+
+ spin_lock_bh(&cmd->r2t_lock);
+ list_for_each_entry_safe(r2t, r2t_tmp, &cmd->cmd_r2t_list, r2t_list) {
+ /*
+ * Skip up to the R2T Sequence number provided by the
+ * iSCSI TASK_REASSIGN TMR
+ */
+ if (r2t->r2t_sn < tmr_req->exp_data_sn)
+ continue;
+
+ if (r2t->seq_complete) {
+ pr_err("Initiator is requesting R2Ts from"
+ " R2TSN: 0x%08x, but R2TSN: 0x%08x, Offset: %u,"
+ " Length: %u is already complete."
+ " BAD INITIATOR ERL=2 IMPLEMENTATION!\n",
+ tmr_req->exp_data_sn, r2t->r2t_sn,
+ r2t->offset, r2t->xfer_len);
+ spin_unlock_bh(&cmd->r2t_lock);
+ return -1;
+ }
+
+ if (r2t->recovery_r2t) {
+ iscsit_free_r2t(r2t, cmd);
+ continue;
+ }
+
+ /* DataSequenceInOrder=Yes:
+ *
+ * Taking into account the iSCSI implementation requirement of
+ * MaxOutstandingR2T=1 while ErrorRecoveryLevel>0 and
+ * DataSequenceInOrder=Yes, it's safe to subtract the R2Ts
+ * entire transfer length from the commands R2T offset marker.
+ *
+ * DataSequenceInOrder=No:
+ *
+		 * We subtract the difference between the current and original
+		 * offsets in struct iscsi_seq from cmd->write_data_done to
+		 * account for DataOUT PDUs already received. Then reset
+ * the current offset to the original and zero out the current
+ * burst length, to make sure we re-request the entire DataOUT
+ * sequence.
+ */
+ if (conn->sess->sess_ops->DataSequenceInOrder)
+ cmd->r2t_offset -= r2t->xfer_len;
+ else
+ cmd->seq_send_order--;
+
+ cmd->outstanding_r2ts--;
+ iscsit_free_r2t(r2t, cmd);
+ }
+ spin_unlock_bh(&cmd->r2t_lock);
+
+ return 0;
+}
+
+/*
+ * Performs sanity checks on a TMR TASK_REASSIGN's ExpDataSN for
+ * a given struct iscsi_cmd.
+ */
+int iscsit_check_task_reassign_expdatasn(
+ struct iscsi_tmr_req *tmr_req,
+ struct iscsi_conn *conn)
+{
+ struct se_tmr_req *se_tmr = tmr_req->se_tmr_req;
+ struct se_cmd *se_cmd = se_tmr->ref_cmd;
+ struct iscsi_cmd *ref_cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+
+ if (ref_cmd->iscsi_opcode != ISCSI_OP_SCSI_CMD)
+ return 0;
+
+ if (se_cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION)
+ return 0;
+
+ if (ref_cmd->data_direction == DMA_NONE)
+ return 0;
+
+ /*
+ * For READs the TMR TASK_REASSIGNs ExpDataSN contains the next DataSN
+ * of DataIN the Initiator is expecting.
+ *
+ * Also check that the Initiator is not re-requesting DataIN that has
+ * already been acknowledged with a DataAck SNACK.
+ */
+ if (ref_cmd->data_direction == DMA_FROM_DEVICE) {
+ if (tmr_req->exp_data_sn > ref_cmd->data_sn) {
+ pr_err("Received ExpDataSN: 0x%08x for READ"
+ " in TMR TASK_REASSIGN greater than command's"
+ " DataSN: 0x%08x.\n", tmr_req->exp_data_sn,
+ ref_cmd->data_sn);
+ return -1;
+ }
+ if ((ref_cmd->cmd_flags & ICF_GOT_DATACK_SNACK) &&
+ (tmr_req->exp_data_sn <= ref_cmd->acked_data_sn)) {
+ pr_err("Received ExpDataSN: 0x%08x for READ"
+ " in TMR TASK_REASSIGN for previously"
+ " acknowledged DataIN: 0x%08x,"
+ " protocol error\n", tmr_req->exp_data_sn,
+ ref_cmd->acked_data_sn);
+ return -1;
+ }
+ return iscsit_task_reassign_prepare_read(tmr_req, conn);
+ }
+
+ /*
+ * For WRITEs the TMR TASK_REASSIGNs ExpDataSN contains the next R2TSN
+ * for R2Ts the Initiator is expecting.
+ *
+ * Do the magic in iscsit_task_reassign_prepare_write().
+ */
+ if (ref_cmd->data_direction == DMA_TO_DEVICE) {
+ if (tmr_req->exp_data_sn > ref_cmd->r2t_sn) {
+ pr_err("Received ExpDataSN: 0x%08x for WRITE"
+ " in TMR TASK_REASSIGN greater than command's"
+ " R2TSN: 0x%08x.\n", tmr_req->exp_data_sn,
+ ref_cmd->r2t_sn);
+ return -1;
+ }
+ return iscsit_task_reassign_prepare_write(tmr_req, conn);
+ }
+
+ pr_err("Unknown iSCSI data_direction: 0x%02x\n",
+ ref_cmd->data_direction);
+
+ return -1;
+}
diff --git a/drivers/target/iscsi/iscsi_target_tmr.h b/drivers/target/iscsi/iscsi_target_tmr.h
new file mode 100644
index 00000000000..142e992cb09
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_tmr.h
@@ -0,0 +1,14 @@
+#ifndef ISCSI_TARGET_TMR_H
+#define ISCSI_TARGET_TMR_H
+
+extern u8 iscsit_tmr_abort_task(struct iscsi_cmd *, unsigned char *);
+extern int iscsit_tmr_task_warm_reset(struct iscsi_conn *, struct iscsi_tmr_req *,
+ unsigned char *);
+extern int iscsit_tmr_task_cold_reset(struct iscsi_conn *, struct iscsi_tmr_req *,
+ unsigned char *);
+extern u8 iscsit_tmr_task_reassign(struct iscsi_cmd *, unsigned char *);
+extern int iscsit_tmr_post_handler(struct iscsi_cmd *, struct iscsi_conn *);
+extern int iscsit_check_task_reassign_expdatasn(struct iscsi_tmr_req *,
+ struct iscsi_conn *);
+
+#endif /* ISCSI_TARGET_TMR_H */
diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
new file mode 100644
index 00000000000..d4cf2cd25c4
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_tpg.c
@@ -0,0 +1,759 @@
+/*******************************************************************************
+ * This file contains iSCSI Target Portal Group related functions.
+ *
+ * © Copyright 2007-2011 RisingTide Systems LLC.
+ *
+ * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ *
+ * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ ******************************************************************************/
+
+#include <target/target_core_base.h>
+#include <target/target_core_transport.h>
+#include <target/target_core_fabric_ops.h>
+#include <target/target_core_configfs.h>
+#include <target/target_core_tpg.h>
+
+#include "iscsi_target_core.h"
+#include "iscsi_target_erl0.h"
+#include "iscsi_target_login.h"
+#include "iscsi_target_nodeattrib.h"
+#include "iscsi_target_tpg.h"
+#include "iscsi_target_util.h"
+#include "iscsi_target.h"
+#include "iscsi_target_parameters.h"
+
+struct iscsi_portal_group *iscsit_alloc_portal_group(struct iscsi_tiqn *tiqn, u16 tpgt)
+{
+ struct iscsi_portal_group *tpg;
+
+ tpg = kzalloc(sizeof(struct iscsi_portal_group), GFP_KERNEL);
+ if (!tpg) {
+ pr_err("Unable to allocate struct iscsi_portal_group\n");
+ return NULL;
+ }
+
+ tpg->tpgt = tpgt;
+ tpg->tpg_state = TPG_STATE_FREE;
+ tpg->tpg_tiqn = tiqn;
+ INIT_LIST_HEAD(&tpg->tpg_gnp_list);
+ INIT_LIST_HEAD(&tpg->tpg_list);
+ mutex_init(&tpg->tpg_access_lock);
+ mutex_init(&tpg->np_login_lock);
+ spin_lock_init(&tpg->tpg_state_lock);
+ spin_lock_init(&tpg->tpg_np_lock);
+
+ return tpg;
+}
+
+static void iscsit_set_default_tpg_attribs(struct iscsi_portal_group *);
+
+int iscsit_load_discovery_tpg(void)
+{
+ struct iscsi_param *param;
+ struct iscsi_portal_group *tpg;
+ int ret;
+
+ tpg = iscsit_alloc_portal_group(NULL, 1);
+ if (!tpg) {
+ pr_err("Unable to allocate struct iscsi_portal_group\n");
+ return -1;
+ }
+
+ ret = core_tpg_register(
+ &lio_target_fabric_configfs->tf_ops,
+ NULL, &tpg->tpg_se_tpg, (void *)tpg,
+ TRANSPORT_TPG_TYPE_DISCOVERY);
+ if (ret < 0) {
+ kfree(tpg);
+ return -1;
+ }
+
+ tpg->sid = 1; /* First Assigned LIO Session ID */
+ iscsit_set_default_tpg_attribs(tpg);
+
+ if (iscsi_create_default_params(&tpg->param_list) < 0)
+ goto out;
+ /*
+ * By default we disable authentication for discovery sessions,
+ * this can be changed with:
+ *
+ * /sys/kernel/config/target/iscsi/discovery_auth/enforce_discovery_auth
+ */
+ param = iscsi_find_param_from_key(AUTHMETHOD, tpg->param_list);
+ if (!param)
+ goto out;
+
+ if (iscsi_update_param_value(param, "CHAP,None") < 0)
+ goto out;
+
+ tpg->tpg_attrib.authentication = 0;
+
+ spin_lock(&tpg->tpg_state_lock);
+ tpg->tpg_state = TPG_STATE_ACTIVE;
+ spin_unlock(&tpg->tpg_state_lock);
+
+ iscsit_global->discovery_tpg = tpg;
+ pr_debug("CORE[0] - Allocated Discovery TPG\n");
+
+ return 0;
+out:
+ if (tpg->sid == 1)
+ core_tpg_deregister(&tpg->tpg_se_tpg);
+ kfree(tpg);
+ return -1;
+}
+
+void iscsit_release_discovery_tpg(void)
+{
+ struct iscsi_portal_group *tpg = iscsit_global->discovery_tpg;
+
+ if (!tpg)
+ return;
+
+ core_tpg_deregister(&tpg->tpg_se_tpg);
+
+ kfree(tpg);
+ iscsit_global->discovery_tpg = NULL;
+}
+
+struct iscsi_portal_group *iscsit_get_tpg_from_np(
+ struct iscsi_tiqn *tiqn,
+ struct iscsi_np *np)
+{
+ struct iscsi_portal_group *tpg = NULL;
+ struct iscsi_tpg_np *tpg_np;
+
+ spin_lock(&tiqn->tiqn_tpg_lock);
+ list_for_each_entry(tpg, &tiqn->tiqn_tpg_list, tpg_list) {
+
+ spin_lock(&tpg->tpg_state_lock);
+ if (tpg->tpg_state == TPG_STATE_FREE) {
+ spin_unlock(&tpg->tpg_state_lock);
+ continue;
+ }
+ spin_unlock(&tpg->tpg_state_lock);
+
+ spin_lock(&tpg->tpg_np_lock);
+ list_for_each_entry(tpg_np, &tpg->tpg_gnp_list, tpg_np_list) {
+ if (tpg_np->tpg_np == np) {
+ spin_unlock(&tpg->tpg_np_lock);
+ spin_unlock(&tiqn->tiqn_tpg_lock);
+ return tpg;
+ }
+ }
+ spin_unlock(&tpg->tpg_np_lock);
+ }
+ spin_unlock(&tiqn->tiqn_tpg_lock);
+
+ return NULL;
+}
+
+int iscsit_get_tpg(
+ struct iscsi_portal_group *tpg)
+{
+ int ret;
+
+ ret = mutex_lock_interruptible(&tpg->tpg_access_lock);
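+	/*
+	 * Treat a pending signal like a failed lock attempt so callers can
+	 * back out of login paths promptly.
+	 */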
+ return ((ret != 0) || signal_pending(current)) ? -1 : 0;
+}
+
+void iscsit_put_tpg(struct iscsi_portal_group *tpg)
+{
+ mutex_unlock(&tpg->tpg_access_lock);
+}
+
+static void iscsit_clear_tpg_np_login_thread(
+ struct iscsi_tpg_np *tpg_np,
+ struct iscsi_portal_group *tpg)
+{
+ if (!tpg_np->tpg_np) {
+ pr_err("struct iscsi_tpg_np->tpg_np is NULL!\n");
+ return;
+ }
+
+ iscsit_reset_np_thread(tpg_np->tpg_np, tpg_np, tpg);
+}
+
+void iscsit_clear_tpg_np_login_threads(
+ struct iscsi_portal_group *tpg)
+{
+ struct iscsi_tpg_np *tpg_np;
+
+ spin_lock(&tpg->tpg_np_lock);
+ list_for_each_entry(tpg_np, &tpg->tpg_gnp_list, tpg_np_list) {
+ if (!tpg_np->tpg_np) {
+ pr_err("struct iscsi_tpg_np->tpg_np is NULL!\n");
+ continue;
+ }
+ spin_unlock(&tpg->tpg_np_lock);
+ iscsit_clear_tpg_np_login_thread(tpg_np, tpg);
+ spin_lock(&tpg->tpg_np_lock);
+ }
+ spin_unlock(&tpg->tpg_np_lock);
+}
+
+void iscsit_tpg_dump_params(struct iscsi_portal_group *tpg)
+{
+ iscsi_print_params(tpg->param_list);
+}
+
+static void iscsit_set_default_tpg_attribs(struct iscsi_portal_group *tpg)
+{
+ struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
+
+ a->authentication = TA_AUTHENTICATION;
+ a->login_timeout = TA_LOGIN_TIMEOUT;
+ a->netif_timeout = TA_NETIF_TIMEOUT;
+ a->default_cmdsn_depth = TA_DEFAULT_CMDSN_DEPTH;
+ a->generate_node_acls = TA_GENERATE_NODE_ACLS;
+ a->cache_dynamic_acls = TA_CACHE_DYNAMIC_ACLS;
+ a->demo_mode_write_protect = TA_DEMO_MODE_WRITE_PROTECT;
+ a->prod_mode_write_protect = TA_PROD_MODE_WRITE_PROTECT;
+}
+
+int iscsit_tpg_add_portal_group(struct iscsi_tiqn *tiqn, struct iscsi_portal_group *tpg)
+{
+ if (tpg->tpg_state != TPG_STATE_FREE) {
+ pr_err("Unable to add iSCSI Target Portal Group: %d"
+ " while not in TPG_STATE_FREE state.\n", tpg->tpgt);
+ return -EEXIST;
+ }
+ iscsit_set_default_tpg_attribs(tpg);
+
+ if (iscsi_create_default_params(&tpg->param_list) < 0)
+ goto err_out;
+
+ ISCSI_TPG_ATTRIB(tpg)->tpg = tpg;
+
+ spin_lock(&tpg->tpg_state_lock);
+ tpg->tpg_state = TPG_STATE_INACTIVE;
+ spin_unlock(&tpg->tpg_state_lock);
+
+ spin_lock(&tiqn->tiqn_tpg_lock);
+ list_add_tail(&tpg->tpg_list, &tiqn->tiqn_tpg_list);
+ tiqn->tiqn_ntpgs++;
+ pr_debug("CORE[%s]_TPG[%hu] - Added iSCSI Target Portal Group\n",
+ tiqn->tiqn, tpg->tpgt);
+ spin_unlock(&tiqn->tiqn_tpg_lock);
+
+ return 0;
+err_out:
+ if (tpg->param_list) {
+ iscsi_release_param_list(tpg->param_list);
+ tpg->param_list = NULL;
+ }
+ kfree(tpg);
+ return -ENOMEM;
+}
+
+int iscsit_tpg_del_portal_group(
+ struct iscsi_tiqn *tiqn,
+ struct iscsi_portal_group *tpg,
+ int force)
+{
+ u8 old_state = tpg->tpg_state;
+
+ spin_lock(&tpg->tpg_state_lock);
+ tpg->tpg_state = TPG_STATE_INACTIVE;
+ spin_unlock(&tpg->tpg_state_lock);
+
+ if (iscsit_release_sessions_for_tpg(tpg, force) < 0) {
+ pr_err("Unable to delete iSCSI Target Portal Group:"
+ " %hu while active sessions exist, and force=0\n",
+ tpg->tpgt);
+ tpg->tpg_state = old_state;
+ return -EPERM;
+ }
+
+ core_tpg_clear_object_luns(&tpg->tpg_se_tpg);
+
+ if (tpg->param_list) {
+ iscsi_release_param_list(tpg->param_list);
+ tpg->param_list = NULL;
+ }
+
+ core_tpg_deregister(&tpg->tpg_se_tpg);
+
+ spin_lock(&tpg->tpg_state_lock);
+ tpg->tpg_state = TPG_STATE_FREE;
+ spin_unlock(&tpg->tpg_state_lock);
+
+ spin_lock(&tiqn->tiqn_tpg_lock);
+ tiqn->tiqn_ntpgs--;
+ list_del(&tpg->tpg_list);
+ spin_unlock(&tiqn->tiqn_tpg_lock);
+
+ pr_debug("CORE[%s]_TPG[%hu] - Deleted iSCSI Target Portal Group\n",
+ tiqn->tiqn, tpg->tpgt);
+
+ kfree(tpg);
+ return 0;
+}
+
+int iscsit_tpg_enable_portal_group(struct iscsi_portal_group *tpg)
+{
+ struct iscsi_param *param;
+ struct iscsi_tiqn *tiqn = tpg->tpg_tiqn;
+
+ spin_lock(&tpg->tpg_state_lock);
+ if (tpg->tpg_state == TPG_STATE_ACTIVE) {
+ pr_err("iSCSI target portal group: %hu is already"
+ " active, ignoring request.\n", tpg->tpgt);
+ spin_unlock(&tpg->tpg_state_lock);
+ return -EINVAL;
+ }
+ /*
+ * Make sure that AuthMethod does not contain None as an option
+	 * unless explicitly disabled. Set the default to CHAP if authentication
+ * is enforced (as per default), and remove the NONE option.
+ */
+ param = iscsi_find_param_from_key(AUTHMETHOD, tpg->param_list);
+ if (!param) {
+ spin_unlock(&tpg->tpg_state_lock);
+ return -ENOMEM;
+ }
+
+ if (ISCSI_TPG_ATTRIB(tpg)->authentication) {
+ if (!strcmp(param->value, NONE))
+ if (iscsi_update_param_value(param, CHAP) < 0) {
+ spin_unlock(&tpg->tpg_state_lock);
+ return -ENOMEM;
+ }
+ if (iscsit_ta_authentication(tpg, 1) < 0) {
+ spin_unlock(&tpg->tpg_state_lock);
+ return -ENOMEM;
+ }
+ }
+
+ tpg->tpg_state = TPG_STATE_ACTIVE;
+ spin_unlock(&tpg->tpg_state_lock);
+
+ spin_lock(&tiqn->tiqn_tpg_lock);
+ tiqn->tiqn_active_tpgs++;
+ pr_debug("iSCSI_TPG[%hu] - Enabled iSCSI Target Portal Group\n",
+ tpg->tpgt);
+ spin_unlock(&tiqn->tiqn_tpg_lock);
+
+ return 0;
+}
+
+int iscsit_tpg_disable_portal_group(struct iscsi_portal_group *tpg, int force)
+{
+ struct iscsi_tiqn *tiqn;
+ u8 old_state = tpg->tpg_state;
+
+ spin_lock(&tpg->tpg_state_lock);
+ if (tpg->tpg_state == TPG_STATE_INACTIVE) {
+ pr_err("iSCSI Target Portal Group: %hu is already"
+ " inactive, ignoring request.\n", tpg->tpgt);
+ spin_unlock(&tpg->tpg_state_lock);
+ return -EINVAL;
+ }
+ tpg->tpg_state = TPG_STATE_INACTIVE;
+ spin_unlock(&tpg->tpg_state_lock);
+
+ iscsit_clear_tpg_np_login_threads(tpg);
+
+ if (iscsit_release_sessions_for_tpg(tpg, force) < 0) {
+ spin_lock(&tpg->tpg_state_lock);
+ tpg->tpg_state = old_state;
+ spin_unlock(&tpg->tpg_state_lock);
+ pr_err("Unable to disable iSCSI Target Portal Group:"
+ " %hu while active sessions exist, and force=0\n",
+ tpg->tpgt);
+ return -EPERM;
+ }
+
+ tiqn = tpg->tpg_tiqn;
+ if (!tiqn || (tpg == iscsit_global->discovery_tpg))
+ return 0;
+
+ spin_lock(&tiqn->tiqn_tpg_lock);
+ tiqn->tiqn_active_tpgs--;
+ pr_debug("iSCSI_TPG[%hu] - Disabled iSCSI Target Portal Group\n",
+ tpg->tpgt);
+ spin_unlock(&tiqn->tiqn_tpg_lock);
+
+ return 0;
+}
+
+struct iscsi_node_attrib *iscsit_tpg_get_node_attrib(
+ struct iscsi_session *sess)
+{
+ struct se_session *se_sess = sess->se_sess;
+ struct se_node_acl *se_nacl = se_sess->se_node_acl;
+ struct iscsi_node_acl *acl = container_of(se_nacl, struct iscsi_node_acl,
+ se_node_acl);
+
+ return &acl->node_attrib;
+}
+
+struct iscsi_tpg_np *iscsit_tpg_locate_child_np(
+ struct iscsi_tpg_np *tpg_np,
+ int network_transport)
+{
+ struct iscsi_tpg_np *tpg_np_child, *tpg_np_child_tmp;
+
+ spin_lock(&tpg_np->tpg_np_parent_lock);
+ list_for_each_entry_safe(tpg_np_child, tpg_np_child_tmp,
+ &tpg_np->tpg_np_parent_list, tpg_np_child_list) {
+ if (tpg_np_child->tpg_np->np_network_transport ==
+ network_transport) {
+ spin_unlock(&tpg_np->tpg_np_parent_lock);
+ return tpg_np_child;
+ }
+ }
+ spin_unlock(&tpg_np->tpg_np_parent_lock);
+
+ return NULL;
+}
+
+struct iscsi_tpg_np *iscsit_tpg_add_network_portal(
+ struct iscsi_portal_group *tpg,
+ struct __kernel_sockaddr_storage *sockaddr,
+ char *ip_str,
+ struct iscsi_tpg_np *tpg_np_parent,
+ int network_transport)
+{
+ struct iscsi_np *np;
+ struct iscsi_tpg_np *tpg_np;
+
+ tpg_np = kzalloc(sizeof(struct iscsi_tpg_np), GFP_KERNEL);
+ if (!tpg_np) {
+ pr_err("Unable to allocate memory for"
+ " struct iscsi_tpg_np.\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ np = iscsit_add_np(sockaddr, ip_str, network_transport);
+ if (IS_ERR(np)) {
+ kfree(tpg_np);
+ return ERR_CAST(np);
+ }
+
+ INIT_LIST_HEAD(&tpg_np->tpg_np_list);
+ INIT_LIST_HEAD(&tpg_np->tpg_np_child_list);
+ INIT_LIST_HEAD(&tpg_np->tpg_np_parent_list);
+ spin_lock_init(&tpg_np->tpg_np_parent_lock);
+ tpg_np->tpg_np = np;
+ tpg_np->tpg = tpg;
+
+ spin_lock(&tpg->tpg_np_lock);
+ list_add_tail(&tpg_np->tpg_np_list, &tpg->tpg_gnp_list);
+ tpg->num_tpg_nps++;
+ if (tpg->tpg_tiqn)
+ tpg->tpg_tiqn->tiqn_num_tpg_nps++;
+ spin_unlock(&tpg->tpg_np_lock);
+
+ if (tpg_np_parent) {
+ tpg_np->tpg_np_parent = tpg_np_parent;
+ spin_lock(&tpg_np_parent->tpg_np_parent_lock);
+ list_add_tail(&tpg_np->tpg_np_child_list,
+ &tpg_np_parent->tpg_np_parent_list);
+ spin_unlock(&tpg_np_parent->tpg_np_parent_lock);
+ }
+
+ pr_debug("CORE[%s] - Added Network Portal: %s:%hu,%hu on %s\n",
+ tpg->tpg_tiqn->tiqn, np->np_ip, np->np_port, tpg->tpgt,
+ (np->np_network_transport == ISCSI_TCP) ? "TCP" : "SCTP");
+
+ return tpg_np;
+}
+
+static int iscsit_tpg_release_np(
+ struct iscsi_tpg_np *tpg_np,
+ struct iscsi_portal_group *tpg,
+ struct iscsi_np *np)
+{
+ iscsit_clear_tpg_np_login_thread(tpg_np, tpg);
+
+ pr_debug("CORE[%s] - Removed Network Portal: %s:%hu,%hu on %s\n",
+ tpg->tpg_tiqn->tiqn, np->np_ip, np->np_port, tpg->tpgt,
+ (np->np_network_transport == ISCSI_TCP) ? "TCP" : "SCTP");
+
+ tpg_np->tpg_np = NULL;
+ tpg_np->tpg = NULL;
+ kfree(tpg_np);
+ /*
+ * iscsit_del_np() will shutdown struct iscsi_np when last TPG reference is released.
+ */
+ return iscsit_del_np(np);
+}
+
+int iscsit_tpg_del_network_portal(
+ struct iscsi_portal_group *tpg,
+ struct iscsi_tpg_np *tpg_np)
+{
+ struct iscsi_np *np;
+ struct iscsi_tpg_np *tpg_np_child, *tpg_np_child_tmp;
+ int ret = 0;
+
+ np = tpg_np->tpg_np;
+ if (!np) {
+ pr_err("Unable to locate struct iscsi_np from"
+ " struct iscsi_tpg_np\n");
+ return -EINVAL;
+ }
+
+ if (!tpg_np->tpg_np_parent) {
+ /*
+ * We are the parent tpg network portal. Release all of the
+		 * child tpg_np's (e.g. the non-ISCSI_TCP ones) on our parent
+ * list first.
+ */
+ list_for_each_entry_safe(tpg_np_child, tpg_np_child_tmp,
+ &tpg_np->tpg_np_parent_list,
+ tpg_np_child_list) {
+ ret = iscsit_tpg_del_network_portal(tpg, tpg_np_child);
+ if (ret < 0)
+ pr_err("iscsit_tpg_del_network_portal()"
+ " failed: %d\n", ret);
+ }
+ } else {
+ /*
+ * We are not the parent ISCSI_TCP tpg network portal. Release
+ * our own network portals from the child list.
+ */
+ spin_lock(&tpg_np->tpg_np_parent->tpg_np_parent_lock);
+ list_del(&tpg_np->tpg_np_child_list);
+ spin_unlock(&tpg_np->tpg_np_parent->tpg_np_parent_lock);
+ }
+
+ spin_lock(&tpg->tpg_np_lock);
+ list_del(&tpg_np->tpg_np_list);
+ tpg->num_tpg_nps--;
+ if (tpg->tpg_tiqn)
+ tpg->tpg_tiqn->tiqn_num_tpg_nps--;
+ spin_unlock(&tpg->tpg_np_lock);
+
+ return iscsit_tpg_release_np(tpg_np, tpg, np);
+}
+
+int iscsit_tpg_set_initiator_node_queue_depth(
+ struct iscsi_portal_group *tpg,
+ unsigned char *initiatorname,
+ u32 queue_depth,
+ int force)
+{
+ return core_tpg_set_initiator_node_queue_depth(&tpg->tpg_se_tpg,
+ initiatorname, queue_depth, force);
+}
+
+int iscsit_ta_authentication(struct iscsi_portal_group *tpg, u32 authentication)
+{
+ unsigned char buf1[256], buf2[256], *none = NULL;
+ int len;
+ struct iscsi_param *param;
+ struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
+
+ if ((authentication != 1) && (authentication != 0)) {
+ pr_err("Illegal value for authentication parameter:"
+ " %u, ignoring request.\n", authentication);
+ return -1;
+ }
+
+ memset(buf1, 0, sizeof(buf1));
+ memset(buf2, 0, sizeof(buf2));
+
+ param = iscsi_find_param_from_key(AUTHMETHOD, tpg->param_list);
+ if (!param)
+ return -EINVAL;
+
+ if (authentication) {
+ snprintf(buf1, sizeof(buf1), "%s", param->value);
+ none = strstr(buf1, NONE);
+ if (!none)
+ goto out;
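+		/*
+		 * Strip "None" (4 characters) and its adjacent comma from the
+		 * comma-separated AuthMethod list, e.g. "CHAP,None" -> "CHAP"
+		 * and "None,CHAP" -> "CHAP".
+		 */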
+ if (!strncmp(none + 4, ",", 1)) {
+ if (!strcmp(buf1, none))
+ sprintf(buf2, "%s", none+5);
+ else {
+ none--;
+ *none = '\0';
+ len = sprintf(buf2, "%s", buf1);
+ none += 5;
+ sprintf(buf2 + len, "%s", none);
+ }
+ } else {
+ none--;
+ *none = '\0';
+ sprintf(buf2, "%s", buf1);
+ }
+ if (iscsi_update_param_value(param, buf2) < 0)
+ return -EINVAL;
+ } else {
+ snprintf(buf1, sizeof(buf1), "%s", param->value);
+ none = strstr(buf1, NONE);
+ if ((none))
+ goto out;
+ strncat(buf1, ",", strlen(","));
+ strncat(buf1, NONE, strlen(NONE));
+ if (iscsi_update_param_value(param, buf1) < 0)
+ return -EINVAL;
+ }
+
+out:
+ a->authentication = authentication;
+ pr_debug("%s iSCSI Authentication Methods for TPG: %hu.\n",
+ a->authentication ? "Enforcing" : "Disabling", tpg->tpgt);
+
+ return 0;
+}
+
+int iscsit_ta_login_timeout(
+ struct iscsi_portal_group *tpg,
+ u32 login_timeout)
+{
+ struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
+
+ if (login_timeout > TA_LOGIN_TIMEOUT_MAX) {
+ pr_err("Requested Login Timeout %u larger than maximum"
+ " %u\n", login_timeout, TA_LOGIN_TIMEOUT_MAX);
+ return -EINVAL;
+ } else if (login_timeout < TA_LOGIN_TIMEOUT_MIN) {
+ pr_err("Requested Logout Timeout %u smaller than"
+ " minimum %u\n", login_timeout, TA_LOGIN_TIMEOUT_MIN);
+ return -EINVAL;
+ }
+
+ a->login_timeout = login_timeout;
+ pr_debug("Set Logout Timeout to %u for Target Portal Group"
+ " %hu\n", a->login_timeout, tpg->tpgt);
+
+ return 0;
+}
+
+int iscsit_ta_netif_timeout(
+ struct iscsi_portal_group *tpg,
+ u32 netif_timeout)
+{
+ struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
+
+ if (netif_timeout > TA_NETIF_TIMEOUT_MAX) {
+ pr_err("Requested Network Interface Timeout %u larger"
+ " than maximum %u\n", netif_timeout,
+ TA_NETIF_TIMEOUT_MAX);
+ return -EINVAL;
+ } else if (netif_timeout < TA_NETIF_TIMEOUT_MIN) {
+ pr_err("Requested Network Interface Timeout %u smaller"
+ " than minimum %u\n", netif_timeout,
+ TA_NETIF_TIMEOUT_MIN);
+ return -EINVAL;
+ }
+
+ a->netif_timeout = netif_timeout;
+ pr_debug("Set Network Interface Timeout to %u for"
+ " Target Portal Group %hu\n", a->netif_timeout, tpg->tpgt);
+
+ return 0;
+}
+
+int iscsit_ta_generate_node_acls(
+ struct iscsi_portal_group *tpg,
+ u32 flag)
+{
+ struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
+
+ if ((flag != 0) && (flag != 1)) {
+ pr_err("Illegal value %d\n", flag);
+ return -EINVAL;
+ }
+
+ a->generate_node_acls = flag;
+ pr_debug("iSCSI_TPG[%hu] - Generate Initiator Portal Group ACLs: %s\n",
+ tpg->tpgt, (a->generate_node_acls) ? "Enabled" : "Disabled");
+
+ return 0;
+}
+
+int iscsit_ta_default_cmdsn_depth(
+ struct iscsi_portal_group *tpg,
+ u32 tcq_depth)
+{
+ struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
+
+ if (tcq_depth > TA_DEFAULT_CMDSN_DEPTH_MAX) {
+ pr_err("Requested Default Queue Depth: %u larger"
+ " than maximum %u\n", tcq_depth,
+ TA_DEFAULT_CMDSN_DEPTH_MAX);
+ return -EINVAL;
+ } else if (tcq_depth < TA_DEFAULT_CMDSN_DEPTH_MIN) {
+ pr_err("Requested Default Queue Depth: %u smaller"
+ " than minimum %u\n", tcq_depth,
+ TA_DEFAULT_CMDSN_DEPTH_MIN);
+ return -EINVAL;
+ }
+
+ a->default_cmdsn_depth = tcq_depth;
+ pr_debug("iSCSI_TPG[%hu] - Set Default CmdSN TCQ Depth to %u\n",
+ tpg->tpgt, a->default_cmdsn_depth);
+
+ return 0;
+}
+
+int iscsit_ta_cache_dynamic_acls(
+ struct iscsi_portal_group *tpg,
+ u32 flag)
+{
+ struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
+
+ if ((flag != 0) && (flag != 1)) {
+ pr_err("Illegal value %d\n", flag);
+ return -EINVAL;
+ }
+
+ a->cache_dynamic_acls = flag;
+ pr_debug("iSCSI_TPG[%hu] - Cache Dynamic Initiator Portal Group"
+ " ACLs %s\n", tpg->tpgt, (a->cache_dynamic_acls) ?
+ "Enabled" : "Disabled");
+
+ return 0;
+}
+
+int iscsit_ta_demo_mode_write_protect(
+ struct iscsi_portal_group *tpg,
+ u32 flag)
+{
+ struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
+
+ if ((flag != 0) && (flag != 1)) {
+ pr_err("Illegal value %d\n", flag);
+ return -EINVAL;
+ }
+
+ a->demo_mode_write_protect = flag;
+ pr_debug("iSCSI_TPG[%hu] - Demo Mode Write Protect bit: %s\n",
+ tpg->tpgt, (a->demo_mode_write_protect) ? "ON" : "OFF");
+
+ return 0;
+}
+
+int iscsit_ta_prod_mode_write_protect(
+ struct iscsi_portal_group *tpg,
+ u32 flag)
+{
+ struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
+
+ if ((flag != 0) && (flag != 1)) {
+ pr_err("Illegal value %d\n", flag);
+ return -EINVAL;
+ }
+
+ a->prod_mode_write_protect = flag;
+ pr_debug("iSCSI_TPG[%hu] - Production Mode Write Protect bit:"
+ " %s\n", tpg->tpgt, (a->prod_mode_write_protect) ?
+ "ON" : "OFF");
+
+ return 0;
+}
diff --git a/drivers/target/iscsi/iscsi_target_tpg.h b/drivers/target/iscsi/iscsi_target_tpg.h
new file mode 100644
index 00000000000..dda48c141a8
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_tpg.h
@@ -0,0 +1,41 @@
+#ifndef ISCSI_TARGET_TPG_H
+#define ISCSI_TARGET_TPG_H
+
+extern struct iscsi_portal_group *iscsit_alloc_portal_group(struct iscsi_tiqn *, u16);
+extern int iscsit_load_discovery_tpg(void);
+extern void iscsit_release_discovery_tpg(void);
+extern struct iscsi_portal_group *iscsit_get_tpg_from_np(struct iscsi_tiqn *,
+ struct iscsi_np *);
+extern int iscsit_get_tpg(struct iscsi_portal_group *);
+extern void iscsit_put_tpg(struct iscsi_portal_group *);
+extern void iscsit_clear_tpg_np_login_threads(struct iscsi_portal_group *);
+extern void iscsit_tpg_dump_params(struct iscsi_portal_group *);
+extern int iscsit_tpg_add_portal_group(struct iscsi_tiqn *, struct iscsi_portal_group *);
+extern int iscsit_tpg_del_portal_group(struct iscsi_tiqn *, struct iscsi_portal_group *,
+ int);
+extern int iscsit_tpg_enable_portal_group(struct iscsi_portal_group *);
+extern int iscsit_tpg_disable_portal_group(struct iscsi_portal_group *, int);
+extern struct iscsi_node_acl *iscsit_tpg_add_initiator_node_acl(
+ struct iscsi_portal_group *, const char *, u32);
+extern void iscsit_tpg_del_initiator_node_acl(struct iscsi_portal_group *,
+ struct se_node_acl *);
+extern struct iscsi_node_attrib *iscsit_tpg_get_node_attrib(struct iscsi_session *);
+extern void iscsit_tpg_del_external_nps(struct iscsi_tpg_np *);
+extern struct iscsi_tpg_np *iscsit_tpg_locate_child_np(struct iscsi_tpg_np *, int);
+extern struct iscsi_tpg_np *iscsit_tpg_add_network_portal(struct iscsi_portal_group *,
+ struct __kernel_sockaddr_storage *, char *, struct iscsi_tpg_np *,
+ int);
+extern int iscsit_tpg_del_network_portal(struct iscsi_portal_group *,
+ struct iscsi_tpg_np *);
+extern int iscsit_tpg_set_initiator_node_queue_depth(struct iscsi_portal_group *,
+ unsigned char *, u32, int);
+extern int iscsit_ta_authentication(struct iscsi_portal_group *, u32);
+extern int iscsit_ta_login_timeout(struct iscsi_portal_group *, u32);
+extern int iscsit_ta_netif_timeout(struct iscsi_portal_group *, u32);
+extern int iscsit_ta_generate_node_acls(struct iscsi_portal_group *, u32);
+extern int iscsit_ta_default_cmdsn_depth(struct iscsi_portal_group *, u32);
+extern int iscsit_ta_cache_dynamic_acls(struct iscsi_portal_group *, u32);
+extern int iscsit_ta_demo_mode_write_protect(struct iscsi_portal_group *, u32);
+extern int iscsit_ta_prod_mode_write_protect(struct iscsi_portal_group *, u32);
+
+#endif /* ISCSI_TARGET_TPG_H */
diff --git a/drivers/target/iscsi/iscsi_target_tq.c b/drivers/target/iscsi/iscsi_target_tq.c
new file mode 100644
index 00000000000..0baac5bcebd
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_tq.c
@@ -0,0 +1,551 @@
+/*******************************************************************************
+ * This file contains the iSCSI Login Thread and Thread Queue functions.
+ *
+ * © Copyright 2007-2011 RisingTide Systems LLC.
+ *
+ * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ *
+ * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ ******************************************************************************/
+
+#include <linux/kthread.h>
+#include <linux/list.h>
+#include <linux/bitmap.h>
+
+#include "iscsi_target_core.h"
+#include "iscsi_target_tq.h"
+#include "iscsi_target.h"
+
+static LIST_HEAD(active_ts_list);
+static LIST_HEAD(inactive_ts_list);
+static DEFINE_SPINLOCK(active_ts_lock);
+static DEFINE_SPINLOCK(inactive_ts_lock);
+static DEFINE_SPINLOCK(ts_bitmap_lock);
+
+static void iscsi_add_ts_to_active_list(struct iscsi_thread_set *ts)
+{
+ spin_lock(&active_ts_lock);
+ list_add_tail(&ts->ts_list, &active_ts_list);
+ iscsit_global->active_ts++;
+ spin_unlock(&active_ts_lock);
+}
+
+void iscsi_add_ts_to_inactive_list(struct iscsi_thread_set *ts)
+{
+ spin_lock(&inactive_ts_lock);
+ list_add_tail(&ts->ts_list, &inactive_ts_list);
+ iscsit_global->inactive_ts++;
+ spin_unlock(&inactive_ts_lock);
+}
+
+static void iscsi_del_ts_from_active_list(struct iscsi_thread_set *ts)
+{
+ spin_lock(&active_ts_lock);
+ list_del(&ts->ts_list);
+ iscsit_global->active_ts--;
+ spin_unlock(&active_ts_lock);
+}
+
+static struct iscsi_thread_set *iscsi_get_ts_from_inactive_list(void)
+{
+ struct iscsi_thread_set *ts;
+
+ spin_lock(&inactive_ts_lock);
+ if (list_empty(&inactive_ts_list)) {
+ spin_unlock(&inactive_ts_lock);
+ return NULL;
+ }
+
+	ts = list_first_entry(&inactive_ts_list, struct iscsi_thread_set,
+			ts_list);
+
+ list_del(&ts->ts_list);
+ iscsit_global->inactive_ts--;
+ spin_unlock(&inactive_ts_lock);
+
+ return ts;
+}
+
+int iscsi_allocate_thread_sets(u32 thread_pair_count)
+{
+ int allocated_thread_pair_count = 0, i, thread_id;
+ struct iscsi_thread_set *ts = NULL;
+
+ for (i = 0; i < thread_pair_count; i++) {
+ ts = kzalloc(sizeof(struct iscsi_thread_set), GFP_KERNEL);
+ if (!ts) {
+ pr_err("Unable to allocate memory for"
+ " thread set.\n");
+ return allocated_thread_pair_count;
+ }
+ /*
+		 * Locate the next available region in the thread_set_bitmap
+ */
+ spin_lock(&ts_bitmap_lock);
+ thread_id = bitmap_find_free_region(iscsit_global->ts_bitmap,
+ iscsit_global->ts_bitmap_count, get_order(1));
+ spin_unlock(&ts_bitmap_lock);
+ if (thread_id < 0) {
+ pr_err("bitmap_find_free_region() failed for"
+ " thread_set_bitmap\n");
+ kfree(ts);
+ return allocated_thread_pair_count;
+ }
+
+ ts->thread_id = thread_id;
+ ts->status = ISCSI_THREAD_SET_FREE;
+ INIT_LIST_HEAD(&ts->ts_list);
+ spin_lock_init(&ts->ts_state_lock);
+ init_completion(&ts->rx_post_start_comp);
+ init_completion(&ts->tx_post_start_comp);
+ init_completion(&ts->rx_restart_comp);
+ init_completion(&ts->tx_restart_comp);
+ init_completion(&ts->rx_start_comp);
+ init_completion(&ts->tx_start_comp);
+
+ ts->create_threads = 1;
+ ts->tx_thread = kthread_run(iscsi_target_tx_thread, ts, "%s",
+ ISCSI_TX_THREAD_NAME);
+ if (IS_ERR(ts->tx_thread)) {
+ dump_stack();
+ pr_err("Unable to start iscsi_target_tx_thread\n");
+ break;
+ }
+
+ ts->rx_thread = kthread_run(iscsi_target_rx_thread, ts, "%s",
+ ISCSI_RX_THREAD_NAME);
+ if (IS_ERR(ts->rx_thread)) {
+ kthread_stop(ts->tx_thread);
+ pr_err("Unable to start iscsi_target_rx_thread\n");
+ break;
+ }
+ ts->create_threads = 0;
+
+ iscsi_add_ts_to_inactive_list(ts);
+ allocated_thread_pair_count++;
+ }
+
+ pr_debug("Spawned %d thread set(s) (%d total threads).\n",
+ allocated_thread_pair_count, allocated_thread_pair_count * 2);
+ return allocated_thread_pair_count;
+}
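+
+/*
+ * Usage sketch (illustrative; not part of the original patch): callers that
+ * pre-populate the inactive pool can check the return value, which is the
+ * number of RX/TX thread pairs actually spawned and may be lower than
+ * requested after an allocation or kthread_run() failure:
+ *
+ *	if (iscsi_allocate_thread_sets(TARGET_THREAD_SET_COUNT) !=
+ *	    TARGET_THREAD_SET_COUNT)
+ *		pr_warn("iSCSI thread set pool only partially populated\n");
+ */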
+
+void iscsi_deallocate_thread_sets(void)
+{
+ u32 released_count = 0;
+ struct iscsi_thread_set *ts = NULL;
+
+ while ((ts = iscsi_get_ts_from_inactive_list())) {
+
+ spin_lock_bh(&ts->ts_state_lock);
+ ts->status = ISCSI_THREAD_SET_DIE;
+ spin_unlock_bh(&ts->ts_state_lock);
+
+ if (ts->rx_thread) {
+ send_sig(SIGINT, ts->rx_thread, 1);
+ kthread_stop(ts->rx_thread);
+ }
+ if (ts->tx_thread) {
+ send_sig(SIGINT, ts->tx_thread, 1);
+ kthread_stop(ts->tx_thread);
+ }
+ /*
+ * Release this thread_id in the thread_set_bitmap
+ */
+ spin_lock(&ts_bitmap_lock);
+ bitmap_release_region(iscsit_global->ts_bitmap,
+ ts->thread_id, get_order(1));
+ spin_unlock(&ts_bitmap_lock);
+
+ released_count++;
+ kfree(ts);
+ }
+
+ if (released_count)
+ pr_debug("Stopped %d thread set(s) (%d total threads)."
+ "\n", released_count, released_count * 2);
+}
+
+static void iscsi_deallocate_extra_thread_sets(void)
+{
+ u32 orig_count, released_count = 0;
+ struct iscsi_thread_set *ts = NULL;
+
+ orig_count = TARGET_THREAD_SET_COUNT;
+
+ while ((iscsit_global->inactive_ts + 1) > orig_count) {
+ ts = iscsi_get_ts_from_inactive_list();
+ if (!ts)
+ break;
+
+ spin_lock_bh(&ts->ts_state_lock);
+ ts->status = ISCSI_THREAD_SET_DIE;
+ spin_unlock_bh(&ts->ts_state_lock);
+
+ if (ts->rx_thread) {
+ send_sig(SIGINT, ts->rx_thread, 1);
+ kthread_stop(ts->rx_thread);
+ }
+ if (ts->tx_thread) {
+ send_sig(SIGINT, ts->tx_thread, 1);
+ kthread_stop(ts->tx_thread);
+ }
+ /*
+ * Release this thread_id in the thread_set_bitmap
+ */
+ spin_lock(&ts_bitmap_lock);
+ bitmap_release_region(iscsit_global->ts_bitmap,
+ ts->thread_id, get_order(1));
+ spin_unlock(&ts_bitmap_lock);
+
+ released_count++;
+ kfree(ts);
+ }
+
+ if (released_count) {
+ pr_debug("Stopped %d thread set(s) (%d total threads)."
+ "\n", released_count, released_count * 2);
+ }
+}
+
+void iscsi_activate_thread_set(struct iscsi_conn *conn, struct iscsi_thread_set *ts)
+{
+ iscsi_add_ts_to_active_list(ts);
+
+ spin_lock_bh(&ts->ts_state_lock);
+ conn->thread_set = ts;
+ ts->conn = conn;
+ spin_unlock_bh(&ts->ts_state_lock);
+ /*
+ * Start up the RX thread and wait on rx_post_start_comp. The RX
+ * Thread will then do the same for the TX Thread in
+ * iscsi_rx_thread_pre_handler().
+ */
+ complete(&ts->rx_start_comp);
+ wait_for_completion(&ts->rx_post_start_comp);
+}
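+
+/*
+ * Startup handshake summary (descriptive comment; not part of the original
+ * patch), matching iscsi_activate_thread_set() above and the pre-handlers
+ * below:
+ *
+ *   1. iscsi_activate_thread_set() completes rx_start_comp and waits on
+ *      rx_post_start_comp.
+ *   2. The RX thread wakes in iscsi_rx_thread_pre_handler(), completes
+ *      tx_start_comp and waits on tx_post_start_comp.
+ *   3. The TX thread wakes in iscsi_tx_thread_pre_handler(), completes
+ *      tx_post_start_comp and rx_post_start_comp, and marks the set
+ *      ISCSI_THREAD_SET_ACTIVE.
+ */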
+
+struct iscsi_thread_set *iscsi_get_thread_set(void)
+{
+ int allocate_ts = 0;
+ struct completion comp;
+ struct iscsi_thread_set *ts = NULL;
+ /*
+ * If no inactive thread set is available on the first call to
+ * iscsi_get_ts_from_inactive_list(), sleep for a second and
+ * try again. If still none are available after two attempts,
+ * allocate a set ourselves.
+ */
+get_set:
+ ts = iscsi_get_ts_from_inactive_list();
+ if (!ts) {
+ if (allocate_ts == 2)
+ iscsi_allocate_thread_sets(1);
+
+ init_completion(&comp);
+ wait_for_completion_timeout(&comp, 1 * HZ);
+
+ allocate_ts++;
+ goto get_set;
+ }
+
+ ts->delay_inactive = 1;
+ ts->signal_sent = 0;
+ ts->thread_count = 2;
+ init_completion(&ts->rx_restart_comp);
+ init_completion(&ts->tx_restart_comp);
+
+ return ts;
+}
+
+void iscsi_set_thread_clear(struct iscsi_conn *conn, u8 thread_clear)
+{
+ struct iscsi_thread_set *ts = NULL;
+
+ if (!conn->thread_set) {
+ pr_err("struct iscsi_conn->thread_set is NULL\n");
+ return;
+ }
+ ts = conn->thread_set;
+
+ spin_lock_bh(&ts->ts_state_lock);
+ ts->thread_clear &= ~thread_clear;
+
+ if ((thread_clear & ISCSI_CLEAR_RX_THREAD) &&
+ (ts->blocked_threads & ISCSI_BLOCK_RX_THREAD))
+ complete(&ts->rx_restart_comp);
+ else if ((thread_clear & ISCSI_CLEAR_TX_THREAD) &&
+ (ts->blocked_threads & ISCSI_BLOCK_TX_THREAD))
+ complete(&ts->tx_restart_comp);
+ spin_unlock_bh(&ts->ts_state_lock);
+}
+
+void iscsi_set_thread_set_signal(struct iscsi_conn *conn, u8 signal_sent)
+{
+ struct iscsi_thread_set *ts = NULL;
+
+ if (!conn->thread_set) {
+ pr_err("struct iscsi_conn->thread_set is NULL\n");
+ return;
+ }
+ ts = conn->thread_set;
+
+ spin_lock_bh(&ts->ts_state_lock);
+ ts->signal_sent |= signal_sent;
+ spin_unlock_bh(&ts->ts_state_lock);
+}
+
+int iscsi_release_thread_set(struct iscsi_conn *conn)
+{
+ int thread_called = 0;
+ struct iscsi_thread_set *ts = NULL;
+
+ if (!conn || !conn->thread_set) {
+ pr_err("connection or thread set pointer is NULL\n");
+ BUG();
+ }
+ ts = conn->thread_set;
+
+ spin_lock_bh(&ts->ts_state_lock);
+ ts->status = ISCSI_THREAD_SET_RESET;
+
+ if (!strncmp(current->comm, ISCSI_RX_THREAD_NAME,
+ strlen(ISCSI_RX_THREAD_NAME)))
+ thread_called = ISCSI_RX_THREAD;
+ else if (!strncmp(current->comm, ISCSI_TX_THREAD_NAME,
+ strlen(ISCSI_TX_THREAD_NAME)))
+ thread_called = ISCSI_TX_THREAD;
+
+ if (ts->rx_thread && (thread_called == ISCSI_TX_THREAD) &&
+ (ts->thread_clear & ISCSI_CLEAR_RX_THREAD)) {
+
+ if (!(ts->signal_sent & ISCSI_SIGNAL_RX_THREAD)) {
+ send_sig(SIGINT, ts->rx_thread, 1);
+ ts->signal_sent |= ISCSI_SIGNAL_RX_THREAD;
+ }
+ ts->blocked_threads |= ISCSI_BLOCK_RX_THREAD;
+ spin_unlock_bh(&ts->ts_state_lock);
+ wait_for_completion(&ts->rx_restart_comp);
+ spin_lock_bh(&ts->ts_state_lock);
+ ts->blocked_threads &= ~ISCSI_BLOCK_RX_THREAD;
+ }
+ if (ts->tx_thread && (thread_called == ISCSI_RX_THREAD) &&
+ (ts->thread_clear & ISCSI_CLEAR_TX_THREAD)) {
+
+ if (!(ts->signal_sent & ISCSI_SIGNAL_TX_THREAD)) {
+ send_sig(SIGINT, ts->tx_thread, 1);
+ ts->signal_sent |= ISCSI_SIGNAL_TX_THREAD;
+ }
+ ts->blocked_threads |= ISCSI_BLOCK_TX_THREAD;
+ spin_unlock_bh(&ts->ts_state_lock);
+ wait_for_completion(&ts->tx_restart_comp);
+ spin_lock_bh(&ts->ts_state_lock);
+ ts->blocked_threads &= ~ISCSI_BLOCK_TX_THREAD;
+ }
+
+ ts->conn = NULL;
+ ts->status = ISCSI_THREAD_SET_FREE;
+ spin_unlock_bh(&ts->ts_state_lock);
+
+ return 0;
+}
+
+int iscsi_thread_set_force_reinstatement(struct iscsi_conn *conn)
+{
+ struct iscsi_thread_set *ts;
+
+ if (!conn->thread_set)
+ return -1;
+ ts = conn->thread_set;
+
+ spin_lock_bh(&ts->ts_state_lock);
+ if (ts->status != ISCSI_THREAD_SET_ACTIVE) {
+ spin_unlock_bh(&ts->ts_state_lock);
+ return -1;
+ }
+
+ if (ts->tx_thread && (!(ts->signal_sent & ISCSI_SIGNAL_TX_THREAD))) {
+ send_sig(SIGINT, ts->tx_thread, 1);
+ ts->signal_sent |= ISCSI_SIGNAL_TX_THREAD;
+ }
+ if (ts->rx_thread && (!(ts->signal_sent & ISCSI_SIGNAL_RX_THREAD))) {
+ send_sig(SIGINT, ts->rx_thread, 1);
+ ts->signal_sent |= ISCSI_SIGNAL_RX_THREAD;
+ }
+ spin_unlock_bh(&ts->ts_state_lock);
+
+ return 0;
+}
+
+static void iscsi_check_to_add_additional_sets(void)
+{
+ int thread_sets_add;
+
+ spin_lock(&inactive_ts_lock);
+ thread_sets_add = iscsit_global->inactive_ts;
+ spin_unlock(&inactive_ts_lock);
+ if (thread_sets_add == 1)
+ iscsi_allocate_thread_sets(1);
+}
+
+static int iscsi_signal_thread_pre_handler(struct iscsi_thread_set *ts)
+{
+ spin_lock_bh(&ts->ts_state_lock);
+ if ((ts->status == ISCSI_THREAD_SET_DIE) || signal_pending(current)) {
+ spin_unlock_bh(&ts->ts_state_lock);
+ return -1;
+ }
+ spin_unlock_bh(&ts->ts_state_lock);
+
+ return 0;
+}
+
+struct iscsi_conn *iscsi_rx_thread_pre_handler(struct iscsi_thread_set *ts)
+{
+ int ret;
+
+ spin_lock_bh(&ts->ts_state_lock);
+ if (ts->create_threads) {
+ spin_unlock_bh(&ts->ts_state_lock);
+ goto sleep;
+ }
+
+ flush_signals(current);
+
+ if (ts->delay_inactive && (--ts->thread_count == 0)) {
+ spin_unlock_bh(&ts->ts_state_lock);
+ iscsi_del_ts_from_active_list(ts);
+
+ if (!iscsit_global->in_shutdown)
+ iscsi_deallocate_extra_thread_sets();
+
+ iscsi_add_ts_to_inactive_list(ts);
+ spin_lock_bh(&ts->ts_state_lock);
+ }
+
+ if ((ts->status == ISCSI_THREAD_SET_RESET) &&
+ (ts->thread_clear & ISCSI_CLEAR_RX_THREAD))
+ complete(&ts->rx_restart_comp);
+
+ ts->thread_clear &= ~ISCSI_CLEAR_RX_THREAD;
+ spin_unlock_bh(&ts->ts_state_lock);
+sleep:
+ ret = wait_for_completion_interruptible(&ts->rx_start_comp);
+ if (ret != 0)
+ return NULL;
+
+ if (iscsi_signal_thread_pre_handler(ts) < 0)
+ return NULL;
+
+ if (!ts->conn) {
+ pr_err("struct iscsi_thread_set->conn is NULL for"
+ " thread_id: %d, going back to sleep\n", ts->thread_id);
+ goto sleep;
+ }
+ iscsi_check_to_add_additional_sets();
+ /*
+ * The RX Thread starts up the TX Thread and sleeps.
+ */
+ ts->thread_clear |= ISCSI_CLEAR_RX_THREAD;
+ complete(&ts->tx_start_comp);
+ wait_for_completion(&ts->tx_post_start_comp);
+
+ return ts->conn;
+}
+
+struct iscsi_conn *iscsi_tx_thread_pre_handler(struct iscsi_thread_set *ts)
+{
+ int ret;
+
+ spin_lock_bh(&ts->ts_state_lock);
+ if (ts->create_threads) {
+ spin_unlock_bh(&ts->ts_state_lock);
+ goto sleep;
+ }
+
+ flush_signals(current);
+
+ if (ts->delay_inactive && (--ts->thread_count == 0)) {
+ spin_unlock_bh(&ts->ts_state_lock);
+ iscsi_del_ts_from_active_list(ts);
+
+ if (!iscsit_global->in_shutdown)
+ iscsi_deallocate_extra_thread_sets();
+
+ iscsi_add_ts_to_inactive_list(ts);
+ spin_lock_bh(&ts->ts_state_lock);
+ }
+ if ((ts->status == ISCSI_THREAD_SET_RESET) &&
+ (ts->thread_clear & ISCSI_CLEAR_TX_THREAD))
+ complete(&ts->tx_restart_comp);
+
+ ts->thread_clear &= ~ISCSI_CLEAR_TX_THREAD;
+ spin_unlock_bh(&ts->ts_state_lock);
+sleep:
+ ret = wait_for_completion_interruptible(&ts->tx_start_comp);
+ if (ret != 0)
+ return NULL;
+
+ if (iscsi_signal_thread_pre_handler(ts) < 0)
+ return NULL;
+
+ if (!ts->conn) {
+ pr_err("struct iscsi_thread_set->conn is NULL for "
+ " thread_id: %d, going back to sleep\n",
+ ts->thread_id);
+ goto sleep;
+ }
+
+ iscsi_check_to_add_additional_sets();
+ /*
+ * From the TX thread, up the tx_post_start_comp that the RX Thread is
+ * sleeping on in iscsi_rx_thread_pre_handler(), then up the
+ * rx_post_start_comp that iscsi_activate_thread_set() is sleeping on.
+ */
+ ts->thread_clear |= ISCSI_CLEAR_TX_THREAD;
+ complete(&ts->tx_post_start_comp);
+ complete(&ts->rx_post_start_comp);
+
+ spin_lock_bh(&ts->ts_state_lock);
+ ts->status = ISCSI_THREAD_SET_ACTIVE;
+ spin_unlock_bh(&ts->ts_state_lock);
+
+ return ts->conn;
+}
+
+int iscsi_thread_set_init(void)
+{
+ int size;
+
+ iscsit_global->ts_bitmap_count = ISCSI_TS_BITMAP_BITS;
+
+ size = BITS_TO_LONGS(iscsit_global->ts_bitmap_count) * sizeof(long);
+ iscsit_global->ts_bitmap = kzalloc(size, GFP_KERNEL);
+ if (!iscsit_global->ts_bitmap) {
+ pr_err("Unable to allocate iscsit_global->ts_bitmap\n");
+ return -ENOMEM;
+ }
+
+ spin_lock_init(&active_ts_lock);
+ spin_lock_init(&inactive_ts_lock);
+ spin_lock_init(&ts_bitmap_lock);
+ INIT_LIST_HEAD(&active_ts_list);
+ INIT_LIST_HEAD(&inactive_ts_list);
+
+ return 0;
+}
+
+void iscsi_thread_set_free(void)
+{
+ kfree(iscsit_global->ts_bitmap);
+}
diff --git a/drivers/target/iscsi/iscsi_target_tq.h b/drivers/target/iscsi/iscsi_target_tq.h
new file mode 100644
index 00000000000..26e6a95ec20
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_tq.h
@@ -0,0 +1,88 @@
+#ifndef ISCSI_THREAD_QUEUE_H
+#define ISCSI_THREAD_QUEUE_H
+
+/*
+ * Defines for thread sets.
+ */
+extern int iscsi_thread_set_force_reinstatement(struct iscsi_conn *);
+extern void iscsi_add_ts_to_inactive_list(struct iscsi_thread_set *);
+extern int iscsi_allocate_thread_sets(u32);
+extern void iscsi_deallocate_thread_sets(void);
+extern void iscsi_activate_thread_set(struct iscsi_conn *, struct iscsi_thread_set *);
+extern struct iscsi_thread_set *iscsi_get_thread_set(void);
+extern void iscsi_set_thread_clear(struct iscsi_conn *, u8);
+extern void iscsi_set_thread_set_signal(struct iscsi_conn *, u8);
+extern int iscsi_release_thread_set(struct iscsi_conn *);
+extern struct iscsi_conn *iscsi_rx_thread_pre_handler(struct iscsi_thread_set *);
+extern struct iscsi_conn *iscsi_tx_thread_pre_handler(struct iscsi_thread_set *);
+extern int iscsi_thread_set_init(void);
+extern void iscsi_thread_set_free(void);
+
+extern int iscsi_target_tx_thread(void *);
+extern int iscsi_target_rx_thread(void *);
+
+#define TARGET_THREAD_SET_COUNT 4
+
+#define ISCSI_RX_THREAD 1
+#define ISCSI_TX_THREAD 2
+#define ISCSI_RX_THREAD_NAME "iscsi_trx"
+#define ISCSI_TX_THREAD_NAME "iscsi_ttx"
+#define ISCSI_BLOCK_RX_THREAD 0x1
+#define ISCSI_BLOCK_TX_THREAD 0x2
+#define ISCSI_CLEAR_RX_THREAD 0x1
+#define ISCSI_CLEAR_TX_THREAD 0x2
+#define ISCSI_SIGNAL_RX_THREAD 0x1
+#define ISCSI_SIGNAL_TX_THREAD 0x2
+
+/* struct iscsi_thread_set->status */
+#define ISCSI_THREAD_SET_FREE 1
+#define ISCSI_THREAD_SET_ACTIVE 2
+#define ISCSI_THREAD_SET_DIE 3
+#define ISCSI_THREAD_SET_RESET 4
+#define ISCSI_THREAD_SET_DEALLOCATE_THREADS 5
+
+/* By default allow a maximum of 32K iSCSI connections */
+#define ISCSI_TS_BITMAP_BITS 32768
+
+struct iscsi_thread_set {
+ /* flags used for blocking and restarting sets */
+ int blocked_threads;
+ /* flag for creating threads */
+ int create_threads;
+	/* flag for delaying re-adding to the inactive list */
+ int delay_inactive;
+ /* status for thread set */
+ int status;
+ /* which threads have had signals sent */
+ int signal_sent;
+ /* flag for which threads exited first */
+ int thread_clear;
+ /* Active threads in the thread set */
+ int thread_count;
+ /* Unique thread ID */
+ u32 thread_id;
+ /* pointer to connection if set is active */
+ struct iscsi_conn *conn;
+ /* used for controlling ts state accesses */
+ spinlock_t ts_state_lock;
+ /* Used for rx side post startup */
+ struct completion rx_post_start_comp;
+ /* Used for tx side post startup */
+ struct completion tx_post_start_comp;
+ /* used for restarting thread queue */
+ struct completion rx_restart_comp;
+ /* used for restarting thread queue */
+ struct completion tx_restart_comp;
+	/* used for blocking while the set sits idle on the inactive list */
+	struct completion rx_start_comp;
+	/* used for blocking while the set sits idle on the inactive list */
+	struct completion tx_start_comp;
+ /* OS descriptor for rx thread */
+ struct task_struct *rx_thread;
+ /* OS descriptor for tx thread */
+ struct task_struct *tx_thread;
+	/* list head for the active/inactive thread set lists */
+ struct list_head ts_list;
+};
+
+#endif /* ISCSI_THREAD_QUEUE_H */
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
new file mode 100644
index 00000000000..a1acb016790
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -0,0 +1,1819 @@
+/*******************************************************************************
+ * This file contains the iSCSI Target specific utility functions.
+ *
+ * © Copyright 2007-2011 RisingTide Systems LLC.
+ *
+ * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ *
+ * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ ******************************************************************************/
+
+#include <linux/list.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/iscsi_proto.h>
+#include <target/target_core_base.h>
+#include <target/target_core_transport.h>
+#include <target/target_core_tmr.h>
+#include <target/target_core_fabric_ops.h>
+#include <target/target_core_configfs.h>
+
+#include "iscsi_target_core.h"
+#include "iscsi_target_parameters.h"
+#include "iscsi_target_seq_pdu_list.h"
+#include "iscsi_target_datain_values.h"
+#include "iscsi_target_erl0.h"
+#include "iscsi_target_erl1.h"
+#include "iscsi_target_erl2.h"
+#include "iscsi_target_tpg.h"
+#include "iscsi_target_tq.h"
+#include "iscsi_target_util.h"
+#include "iscsi_target.h"
+
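+/* Debug helper: hex-dump @len bytes of @buff via pr_debug(), 16 per line. */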
+#define PRINT_BUFF(buff, len) \
+{ \
+ int zzz; \
+ \
+ pr_debug("%d:\n", __LINE__); \
+ for (zzz = 0; zzz < len; zzz++) { \
+ if (zzz % 16 == 0) { \
+ if (zzz) \
+ pr_debug("\n"); \
+ pr_debug("%4i: ", zzz); \
+ } \
+ pr_debug("%02x ", (unsigned char) (buff)[zzz]); \
+ } \
+ if ((len + 1) % 16) \
+ pr_debug("\n"); \
+}
+
+extern struct list_head g_tiqn_list;
+extern spinlock_t tiqn_lock;
+
+/*
+ * Called with cmd->r2t_lock held.
+ */
+int iscsit_add_r2t_to_list(
+ struct iscsi_cmd *cmd,
+ u32 offset,
+ u32 xfer_len,
+ int recovery,
+ u32 r2t_sn)
+{
+ struct iscsi_r2t *r2t;
+
+ r2t = kmem_cache_zalloc(lio_r2t_cache, GFP_ATOMIC);
+ if (!r2t) {
+ pr_err("Unable to allocate memory for struct iscsi_r2t.\n");
+ return -1;
+ }
+ INIT_LIST_HEAD(&r2t->r2t_list);
+
+ r2t->recovery_r2t = recovery;
+ r2t->r2t_sn = (!r2t_sn) ? cmd->r2t_sn++ : r2t_sn;
+ r2t->offset = offset;
+ r2t->xfer_len = xfer_len;
+ list_add_tail(&r2t->r2t_list, &cmd->cmd_r2t_list);
+ spin_unlock_bh(&cmd->r2t_lock);
+
+ iscsit_add_cmd_to_immediate_queue(cmd, cmd->conn, ISTATE_SEND_R2T);
+
+ spin_lock_bh(&cmd->r2t_lock);
+ return 0;
+}
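+
+/*
+ * Locking note (descriptive comment; not part of the original patch):
+ * iscsit_add_r2t_to_list() temporarily drops cmd->r2t_lock around
+ * iscsit_add_cmd_to_immediate_queue(), since queueing takes
+ * conn->immed_queue_lock and wakes the TX thread, and no longer needs
+ * r2t_lock once the R2T is on cmd->cmd_r2t_list; the lock is re-taken
+ * before returning to preserve the caller's locking contract.
+ */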
+
+struct iscsi_r2t *iscsit_get_r2t_for_eos(
+ struct iscsi_cmd *cmd,
+ u32 offset,
+ u32 length)
+{
+ struct iscsi_r2t *r2t;
+
+ spin_lock_bh(&cmd->r2t_lock);
+ list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) {
+ if ((r2t->offset <= offset) &&
+ (r2t->offset + r2t->xfer_len) >= (offset + length)) {
+ spin_unlock_bh(&cmd->r2t_lock);
+ return r2t;
+ }
+ }
+ spin_unlock_bh(&cmd->r2t_lock);
+
+ pr_err("Unable to locate R2T for Offset: %u, Length:"
+ " %u\n", offset, length);
+ return NULL;
+}
+
+struct iscsi_r2t *iscsit_get_r2t_from_list(struct iscsi_cmd *cmd)
+{
+ struct iscsi_r2t *r2t;
+
+ spin_lock_bh(&cmd->r2t_lock);
+ list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) {
+ if (!r2t->sent_r2t) {
+ spin_unlock_bh(&cmd->r2t_lock);
+ return r2t;
+ }
+ }
+ spin_unlock_bh(&cmd->r2t_lock);
+
+ pr_err("Unable to locate next R2T to send for ITT:"
+ " 0x%08x.\n", cmd->init_task_tag);
+ return NULL;
+}
+
+/*
+ * Called with cmd->r2t_lock held.
+ */
+void iscsit_free_r2t(struct iscsi_r2t *r2t, struct iscsi_cmd *cmd)
+{
+ list_del(&r2t->r2t_list);
+ kmem_cache_free(lio_r2t_cache, r2t);
+}
+
+void iscsit_free_r2ts_from_list(struct iscsi_cmd *cmd)
+{
+ struct iscsi_r2t *r2t, *r2t_tmp;
+
+ spin_lock_bh(&cmd->r2t_lock);
+ list_for_each_entry_safe(r2t, r2t_tmp, &cmd->cmd_r2t_list, r2t_list)
+ iscsit_free_r2t(r2t, cmd);
+ spin_unlock_bh(&cmd->r2t_lock);
+}
+
+/*
+ * May be called from software interrupt (timer) context for allocating
+ * iSCSI NopINs.
+ */
+struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *conn, gfp_t gfp_mask)
+{
+ struct iscsi_cmd *cmd;
+
+ cmd = kmem_cache_zalloc(lio_cmd_cache, gfp_mask);
+ if (!cmd) {
+ pr_err("Unable to allocate memory for struct iscsi_cmd.\n");
+ return NULL;
+ }
+
+ cmd->conn = conn;
+ INIT_LIST_HEAD(&cmd->i_list);
+ INIT_LIST_HEAD(&cmd->datain_list);
+ INIT_LIST_HEAD(&cmd->cmd_r2t_list);
+ init_completion(&cmd->reject_comp);
+ spin_lock_init(&cmd->datain_lock);
+ spin_lock_init(&cmd->dataout_timeout_lock);
+ spin_lock_init(&cmd->istate_lock);
+ spin_lock_init(&cmd->error_lock);
+ spin_lock_init(&cmd->r2t_lock);
+
+ return cmd;
+}
+
+/*
+ * Called from iscsi_handle_scsi_cmd()
+ */
+struct iscsi_cmd *iscsit_allocate_se_cmd(
+ struct iscsi_conn *conn,
+ u32 data_length,
+ int data_direction,
+ int iscsi_task_attr)
+{
+ struct iscsi_cmd *cmd;
+ struct se_cmd *se_cmd;
+ int sam_task_attr;
+
+ cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
+ if (!cmd)
+ return NULL;
+
+ cmd->data_direction = data_direction;
+ cmd->data_length = data_length;
+ /*
+ * Figure out the SAM Task Attribute for the incoming SCSI CDB
+ */
+ if ((iscsi_task_attr == ISCSI_ATTR_UNTAGGED) ||
+ (iscsi_task_attr == ISCSI_ATTR_SIMPLE))
+ sam_task_attr = MSG_SIMPLE_TAG;
+ else if (iscsi_task_attr == ISCSI_ATTR_ORDERED)
+ sam_task_attr = MSG_ORDERED_TAG;
+ else if (iscsi_task_attr == ISCSI_ATTR_HEAD_OF_QUEUE)
+ sam_task_attr = MSG_HEAD_TAG;
+ else if (iscsi_task_attr == ISCSI_ATTR_ACA)
+ sam_task_attr = MSG_ACA_TAG;
+ else {
+ pr_debug("Unknown iSCSI Task Attribute: 0x%02x, using"
+ " MSG_SIMPLE_TAG\n", iscsi_task_attr);
+ sam_task_attr = MSG_SIMPLE_TAG;
+ }
+
+ se_cmd = &cmd->se_cmd;
+ /*
+ * Initialize struct se_cmd descriptor from target_core_mod infrastructure
+ */
+ transport_init_se_cmd(se_cmd, &lio_target_fabric_configfs->tf_ops,
+ conn->sess->se_sess, data_length, data_direction,
+ sam_task_attr, &cmd->sense_buffer[0]);
+ return cmd;
+}
+
+struct iscsi_cmd *iscsit_allocate_se_cmd_for_tmr(
+ struct iscsi_conn *conn,
+ u8 function)
+{
+ struct iscsi_cmd *cmd;
+ struct se_cmd *se_cmd;
+ u8 tcm_function;
+
+ cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
+ if (!cmd)
+ return NULL;
+
+ cmd->data_direction = DMA_NONE;
+
+ cmd->tmr_req = kzalloc(sizeof(struct iscsi_tmr_req), GFP_KERNEL);
+ if (!cmd->tmr_req) {
+ pr_err("Unable to allocate memory for"
+ " Task Management command!\n");
+ return NULL;
+ }
+ /*
+ * TASK_REASSIGN for ERL=2 / connection stays inside of
+ * LIO-Target $FABRIC_MOD
+ */
+ if (function == ISCSI_TM_FUNC_TASK_REASSIGN)
+ return cmd;
+
+ se_cmd = &cmd->se_cmd;
+ /*
+ * Initialize struct se_cmd descriptor from target_core_mod infrastructure
+ */
+ transport_init_se_cmd(se_cmd, &lio_target_fabric_configfs->tf_ops,
+ conn->sess->se_sess, 0, DMA_NONE,
+ MSG_SIMPLE_TAG, &cmd->sense_buffer[0]);
+
+ switch (function) {
+ case ISCSI_TM_FUNC_ABORT_TASK:
+ tcm_function = TMR_ABORT_TASK;
+ break;
+ case ISCSI_TM_FUNC_ABORT_TASK_SET:
+ tcm_function = TMR_ABORT_TASK_SET;
+ break;
+ case ISCSI_TM_FUNC_CLEAR_ACA:
+ tcm_function = TMR_CLEAR_ACA;
+ break;
+ case ISCSI_TM_FUNC_CLEAR_TASK_SET:
+ tcm_function = TMR_CLEAR_TASK_SET;
+ break;
+ case ISCSI_TM_FUNC_LOGICAL_UNIT_RESET:
+ tcm_function = TMR_LUN_RESET;
+ break;
+ case ISCSI_TM_FUNC_TARGET_WARM_RESET:
+ tcm_function = TMR_TARGET_WARM_RESET;
+ break;
+ case ISCSI_TM_FUNC_TARGET_COLD_RESET:
+ tcm_function = TMR_TARGET_COLD_RESET;
+ break;
+ default:
+ pr_err("Unknown iSCSI TMR Function:"
+ " 0x%02x\n", function);
+ goto out;
+ }
+
+ se_cmd->se_tmr_req = core_tmr_alloc_req(se_cmd,
+ (void *)cmd->tmr_req, tcm_function);
+ if (!se_cmd->se_tmr_req)
+ goto out;
+
+ cmd->tmr_req->se_tmr_req = se_cmd->se_tmr_req;
+
+ return cmd;
+out:
+	if (se_cmd)
+		transport_free_se_cmd(se_cmd);
+	iscsit_release_cmd(cmd);
+	return NULL;
+}
+
+int iscsit_decide_list_to_build(
+ struct iscsi_cmd *cmd,
+ u32 immediate_data_length)
+{
+ struct iscsi_build_list bl;
+ struct iscsi_conn *conn = cmd->conn;
+ struct iscsi_session *sess = conn->sess;
+ struct iscsi_node_attrib *na;
+
+ if (sess->sess_ops->DataSequenceInOrder &&
+ sess->sess_ops->DataPDUInOrder)
+ return 0;
+
+ if (cmd->data_direction == DMA_NONE)
+ return 0;
+
+ na = iscsit_tpg_get_node_attrib(sess);
+ memset(&bl, 0, sizeof(struct iscsi_build_list));
+
+ if (cmd->data_direction == DMA_FROM_DEVICE) {
+ bl.data_direction = ISCSI_PDU_READ;
+ bl.type = PDULIST_NORMAL;
+ if (na->random_datain_pdu_offsets)
+ bl.randomize |= RANDOM_DATAIN_PDU_OFFSETS;
+ if (na->random_datain_seq_offsets)
+ bl.randomize |= RANDOM_DATAIN_SEQ_OFFSETS;
+ } else {
+ bl.data_direction = ISCSI_PDU_WRITE;
+ bl.immediate_data_length = immediate_data_length;
+ if (na->random_r2t_offsets)
+ bl.randomize |= RANDOM_R2T_OFFSETS;
+
+ if (!cmd->immediate_data && !cmd->unsolicited_data)
+ bl.type = PDULIST_NORMAL;
+ else if (cmd->immediate_data && !cmd->unsolicited_data)
+ bl.type = PDULIST_IMMEDIATE;
+ else if (!cmd->immediate_data && cmd->unsolicited_data)
+ bl.type = PDULIST_UNSOLICITED;
+ else if (cmd->immediate_data && cmd->unsolicited_data)
+ bl.type = PDULIST_IMMEDIATE_AND_UNSOLICITED;
+ }
+
+ return iscsit_do_build_list(cmd, &bl);
+}
+
+struct iscsi_seq *iscsit_get_seq_holder_for_datain(
+ struct iscsi_cmd *cmd,
+ u32 seq_send_order)
+{
+ u32 i;
+
+ for (i = 0; i < cmd->seq_count; i++)
+ if (cmd->seq_list[i].seq_send_order == seq_send_order)
+ return &cmd->seq_list[i];
+
+ return NULL;
+}
+
+struct iscsi_seq *iscsit_get_seq_holder_for_r2t(struct iscsi_cmd *cmd)
+{
+ u32 i;
+
+ if (!cmd->seq_list) {
+ pr_err("struct iscsi_cmd->seq_list is NULL!\n");
+ return NULL;
+ }
+
+ for (i = 0; i < cmd->seq_count; i++) {
+ if (cmd->seq_list[i].type != SEQTYPE_NORMAL)
+ continue;
+ if (cmd->seq_list[i].seq_send_order == cmd->seq_send_order) {
+ cmd->seq_send_order++;
+ return &cmd->seq_list[i];
+ }
+ }
+
+ return NULL;
+}
+
+struct iscsi_r2t *iscsit_get_holder_for_r2tsn(
+ struct iscsi_cmd *cmd,
+ u32 r2t_sn)
+{
+ struct iscsi_r2t *r2t;
+
+ spin_lock_bh(&cmd->r2t_lock);
+ list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) {
+ if (r2t->r2t_sn == r2t_sn) {
+ spin_unlock_bh(&cmd->r2t_lock);
+ return r2t;
+ }
+ }
+ spin_unlock_bh(&cmd->r2t_lock);
+
+ return NULL;
+}
+
+static inline int iscsit_check_received_cmdsn(struct iscsi_session *sess, u32 cmdsn)
+{
+ int ret;
+
+ /*
+ * This is the proper method of checking received CmdSN against
+ * ExpCmdSN and MaxCmdSN values, as well as accounting for out
+	 * of order CmdSNs due to multiple connection sessions and/or
+ * CRC failures.
+ */
+ if (iscsi_sna_gt(cmdsn, sess->max_cmd_sn)) {
+ pr_err("Received CmdSN: 0x%08x is greater than"
+ " MaxCmdSN: 0x%08x, protocol error.\n", cmdsn,
+ sess->max_cmd_sn);
+ ret = CMDSN_ERROR_CANNOT_RECOVER;
+
+ } else if (cmdsn == sess->exp_cmd_sn) {
+ sess->exp_cmd_sn++;
+ pr_debug("Received CmdSN matches ExpCmdSN,"
+ " incremented ExpCmdSN to: 0x%08x\n",
+ sess->exp_cmd_sn);
+ ret = CMDSN_NORMAL_OPERATION;
+
+ } else if (iscsi_sna_gt(cmdsn, sess->exp_cmd_sn)) {
+ pr_debug("Received CmdSN: 0x%08x is greater"
+ " than ExpCmdSN: 0x%08x, not acknowledging.\n",
+ cmdsn, sess->exp_cmd_sn);
+ ret = CMDSN_HIGHER_THAN_EXP;
+
+ } else {
+ pr_err("Received CmdSN: 0x%08x is less than"
+ " ExpCmdSN: 0x%08x, ignoring.\n", cmdsn,
+ sess->exp_cmd_sn);
+ ret = CMDSN_LOWER_THAN_EXP;
+ }
+
+ return ret;
+}
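+
+/*
+ * iscsi_sna_gt() implements RFC 1982 serial number arithmetic, so the
+ * window checks above stay correct across 32-bit CmdSN wrap. Illustrative
+ * example: with exp_cmd_sn = 0xFFFFFFFE and max_cmd_sn = 0x0000000C, a
+ * received CmdSN of 0x00000002 is CMDSN_HIGHER_THAN_EXP rather than
+ * CMDSN_LOWER_THAN_EXP, because 0x00000002 > 0xFFFFFFFE in serial
+ * arithmetic while still being within the MaxCmdSN window.
+ */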
+
+/*
+ * Commands may be received out of order if MC/S is in use.
+ * Ensure they are executed in CmdSN order.
+ */
+int iscsit_sequence_cmd(
+ struct iscsi_conn *conn,
+ struct iscsi_cmd *cmd,
+ u32 cmdsn)
+{
+ int ret;
+ int cmdsn_ret;
+
+ mutex_lock(&conn->sess->cmdsn_mutex);
+
+ cmdsn_ret = iscsit_check_received_cmdsn(conn->sess, cmdsn);
+ switch (cmdsn_ret) {
+ case CMDSN_NORMAL_OPERATION:
+ ret = iscsit_execute_cmd(cmd, 0);
+ if ((ret >= 0) && !list_empty(&conn->sess->sess_ooo_cmdsn_list))
+ iscsit_execute_ooo_cmdsns(conn->sess);
+ break;
+ case CMDSN_HIGHER_THAN_EXP:
+ ret = iscsit_handle_ooo_cmdsn(conn->sess, cmd, cmdsn);
+ break;
+ case CMDSN_LOWER_THAN_EXP:
+ cmd->i_state = ISTATE_REMOVE;
+ iscsit_add_cmd_to_immediate_queue(cmd, conn, cmd->i_state);
+ ret = cmdsn_ret;
+ break;
+ default:
+ ret = cmdsn_ret;
+ break;
+ }
+ mutex_unlock(&conn->sess->cmdsn_mutex);
+
+ return ret;
+}
+
+int iscsit_check_unsolicited_dataout(struct iscsi_cmd *cmd, unsigned char *buf)
+{
+ struct iscsi_conn *conn = cmd->conn;
+ struct se_cmd *se_cmd = &cmd->se_cmd;
+ struct iscsi_data *hdr = (struct iscsi_data *) buf;
+ u32 payload_length = ntoh24(hdr->dlength);
+
+ if (conn->sess->sess_ops->InitialR2T) {
+ pr_err("Received unexpected unsolicited data"
+ " while InitialR2T=Yes, protocol error.\n");
+ transport_send_check_condition_and_sense(se_cmd,
+ TCM_UNEXPECTED_UNSOLICITED_DATA, 0);
+ return -1;
+ }
+
+ if ((cmd->first_burst_len + payload_length) >
+ conn->sess->sess_ops->FirstBurstLength) {
+ pr_err("Total %u bytes exceeds FirstBurstLength: %u"
+ " for this Unsolicited DataOut Burst.\n",
+ (cmd->first_burst_len + payload_length),
+ conn->sess->sess_ops->FirstBurstLength);
+ transport_send_check_condition_and_sense(se_cmd,
+ TCM_INCORRECT_AMOUNT_OF_DATA, 0);
+ return -1;
+ }
+
+ if (!(hdr->flags & ISCSI_FLAG_CMD_FINAL))
+ return 0;
+
+ if (((cmd->first_burst_len + payload_length) != cmd->data_length) &&
+ ((cmd->first_burst_len + payload_length) !=
+ conn->sess->sess_ops->FirstBurstLength)) {
+ pr_err("Unsolicited non-immediate data received %u"
+ " does not equal FirstBurstLength: %u, and does"
+ " not equal ExpXferLen %u.\n",
+ (cmd->first_burst_len + payload_length),
+ conn->sess->sess_ops->FirstBurstLength, cmd->data_length);
+ transport_send_check_condition_and_sense(se_cmd,
+ TCM_INCORRECT_AMOUNT_OF_DATA, 0);
+ return -1;
+ }
+ return 0;
+}
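+
+/*
+ * Worked example (illustrative; not part of the original patch): with
+ * FirstBurstLength=65536 and an ExpXferLen (cmd->data_length) of 131072,
+ * the final unsolicited DataOut PDU (F bit set) must land at exactly
+ * first_burst_len + payload == 65536; for a command shorter than
+ * FirstBurstLength, e.g. 4096 bytes, it must land at exactly 4096.
+ * Anything else is answered above with a CHECK_CONDITION carrying
+ * TCM_INCORRECT_AMOUNT_OF_DATA.
+ */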
+
+struct iscsi_cmd *iscsit_find_cmd_from_itt(
+ struct iscsi_conn *conn,
+ u32 init_task_tag)
+{
+ struct iscsi_cmd *cmd;
+
+ spin_lock_bh(&conn->cmd_lock);
+ list_for_each_entry(cmd, &conn->conn_cmd_list, i_list) {
+ if (cmd->init_task_tag == init_task_tag) {
+ spin_unlock_bh(&conn->cmd_lock);
+ return cmd;
+ }
+ }
+ spin_unlock_bh(&conn->cmd_lock);
+
+ pr_err("Unable to locate ITT: 0x%08x on CID: %hu",
+ init_task_tag, conn->cid);
+ return NULL;
+}
+
+struct iscsi_cmd *iscsit_find_cmd_from_itt_or_dump(
+ struct iscsi_conn *conn,
+ u32 init_task_tag,
+ u32 length)
+{
+ struct iscsi_cmd *cmd;
+
+ spin_lock_bh(&conn->cmd_lock);
+ list_for_each_entry(cmd, &conn->conn_cmd_list, i_list) {
+ if (cmd->init_task_tag == init_task_tag) {
+ spin_unlock_bh(&conn->cmd_lock);
+ return cmd;
+ }
+ }
+ spin_unlock_bh(&conn->cmd_lock);
+
+ pr_err("Unable to locate ITT: 0x%08x on CID: %hu,"
+ " dumping payload\n", init_task_tag, conn->cid);
+ if (length)
+ iscsit_dump_data_payload(conn, length, 1);
+
+ return NULL;
+}
+
+struct iscsi_cmd *iscsit_find_cmd_from_ttt(
+ struct iscsi_conn *conn,
+ u32 targ_xfer_tag)
+{
+ struct iscsi_cmd *cmd = NULL;
+
+ spin_lock_bh(&conn->cmd_lock);
+ list_for_each_entry(cmd, &conn->conn_cmd_list, i_list) {
+ if (cmd->targ_xfer_tag == targ_xfer_tag) {
+ spin_unlock_bh(&conn->cmd_lock);
+ return cmd;
+ }
+ }
+ spin_unlock_bh(&conn->cmd_lock);
+
+ pr_err("Unable to locate TTT: 0x%08x on CID: %hu\n",
+ targ_xfer_tag, conn->cid);
+ return NULL;
+}
+
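+/*
+ * Return convention: 0 if the ITT was found on an active connection
+ * recovery entry (ready for task reassignment), -2 if it was found on an
+ * inactive entry, and -1 if it was not found at all. On 0 and -2,
+ * *cr_ptr and *cmd_ptr are set.
+ */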
+int iscsit_find_cmd_for_recovery(
+ struct iscsi_session *sess,
+ struct iscsi_cmd **cmd_ptr,
+ struct iscsi_conn_recovery **cr_ptr,
+ u32 init_task_tag)
+{
+ struct iscsi_cmd *cmd = NULL;
+ struct iscsi_conn_recovery *cr;
+ /*
+ * Scan through the inactive connection recovery list's command list.
+	 * If the init_task_tag matches, the command is still tied to an
+	 * inactive connection recovery entry and is not yet ready for
+	 * reassignment.
+ */
+ spin_lock(&sess->cr_i_lock);
+ list_for_each_entry(cr, &sess->cr_inactive_list, cr_list) {
+ spin_lock(&cr->conn_recovery_cmd_lock);
+ list_for_each_entry(cmd, &cr->conn_recovery_cmd_list, i_list) {
+ if (cmd->init_task_tag == init_task_tag) {
+ spin_unlock(&cr->conn_recovery_cmd_lock);
+ spin_unlock(&sess->cr_i_lock);
+
+ *cr_ptr = cr;
+ *cmd_ptr = cmd;
+ return -2;
+ }
+ }
+ spin_unlock(&cr->conn_recovery_cmd_lock);
+ }
+ spin_unlock(&sess->cr_i_lock);
+ /*
+ * Scan through the active connection recovery list's command list.
+	 * If the init_task_tag matches, the command is ready to be reassigned.
+ */
+ spin_lock(&sess->cr_a_lock);
+ list_for_each_entry(cr, &sess->cr_active_list, cr_list) {
+ spin_lock(&cr->conn_recovery_cmd_lock);
+ list_for_each_entry(cmd, &cr->conn_recovery_cmd_list, i_list) {
+ if (cmd->init_task_tag == init_task_tag) {
+ spin_unlock(&cr->conn_recovery_cmd_lock);
+ spin_unlock(&sess->cr_a_lock);
+
+ *cr_ptr = cr;
+ *cmd_ptr = cmd;
+ return 0;
+ }
+ }
+ spin_unlock(&cr->conn_recovery_cmd_lock);
+ }
+ spin_unlock(&sess->cr_a_lock);
+
+ return -1;
+}
+
+void iscsit_add_cmd_to_immediate_queue(
+ struct iscsi_cmd *cmd,
+ struct iscsi_conn *conn,
+ u8 state)
+{
+ struct iscsi_queue_req *qr;
+
+ qr = kmem_cache_zalloc(lio_qr_cache, GFP_ATOMIC);
+ if (!qr) {
+ pr_err("Unable to allocate memory for"
+ " struct iscsi_queue_req\n");
+ return;
+ }
+ INIT_LIST_HEAD(&qr->qr_list);
+ qr->cmd = cmd;
+ qr->state = state;
+
+ spin_lock_bh(&conn->immed_queue_lock);
+ list_add_tail(&qr->qr_list, &conn->immed_queue_list);
+ atomic_inc(&cmd->immed_queue_count);
+ atomic_set(&conn->check_immediate_queue, 1);
+ spin_unlock_bh(&conn->immed_queue_lock);
+
+ wake_up_process(conn->thread_set->tx_thread);
+}
+
+struct iscsi_queue_req *iscsit_get_cmd_from_immediate_queue(struct iscsi_conn *conn)
+{
+ struct iscsi_queue_req *qr;
+
+ spin_lock_bh(&conn->immed_queue_lock);
+ if (list_empty(&conn->immed_queue_list)) {
+ spin_unlock_bh(&conn->immed_queue_lock);
+ return NULL;
+ }
+ list_for_each_entry(qr, &conn->immed_queue_list, qr_list)
+ break;
+
+ list_del(&qr->qr_list);
+ if (qr->cmd)
+ atomic_dec(&qr->cmd->immed_queue_count);
+ spin_unlock_bh(&conn->immed_queue_lock);
+
+ return qr;
+}
+
+static void iscsit_remove_cmd_from_immediate_queue(
+ struct iscsi_cmd *cmd,
+ struct iscsi_conn *conn)
+{
+ struct iscsi_queue_req *qr, *qr_tmp;
+
+ spin_lock_bh(&conn->immed_queue_lock);
+ if (!atomic_read(&cmd->immed_queue_count)) {
+ spin_unlock_bh(&conn->immed_queue_lock);
+ return;
+ }
+
+ list_for_each_entry_safe(qr, qr_tmp, &conn->immed_queue_list, qr_list) {
+ if (qr->cmd != cmd)
+ continue;
+
+ atomic_dec(&qr->cmd->immed_queue_count);
+ list_del(&qr->qr_list);
+ kmem_cache_free(lio_qr_cache, qr);
+ }
+ spin_unlock_bh(&conn->immed_queue_lock);
+
+ if (atomic_read(&cmd->immed_queue_count)) {
+ pr_err("ITT: 0x%08x immed_queue_count: %d\n",
+ cmd->init_task_tag,
+ atomic_read(&cmd->immed_queue_count));
+ }
+}
+
+void iscsit_add_cmd_to_response_queue(
+ struct iscsi_cmd *cmd,
+ struct iscsi_conn *conn,
+ u8 state)
+{
+ struct iscsi_queue_req *qr;
+
+ qr = kmem_cache_zalloc(lio_qr_cache, GFP_ATOMIC);
+ if (!qr) {
+ pr_err("Unable to allocate memory for"
+ " struct iscsi_queue_req\n");
+ return;
+ }
+ INIT_LIST_HEAD(&qr->qr_list);
+ qr->cmd = cmd;
+ qr->state = state;
+
+ spin_lock_bh(&conn->response_queue_lock);
+ list_add_tail(&qr->qr_list, &conn->response_queue_list);
+ atomic_inc(&cmd->response_queue_count);
+ spin_unlock_bh(&conn->response_queue_lock);
+
+ wake_up_process(conn->thread_set->tx_thread);
+}
+
+struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsi_conn *conn)
+{
+ struct iscsi_queue_req *qr;
+
+ spin_lock_bh(&conn->response_queue_lock);
+ if (list_empty(&conn->response_queue_list)) {
+ spin_unlock_bh(&conn->response_queue_lock);
+ return NULL;
+ }
+
+ list_for_each_entry(qr, &conn->response_queue_list, qr_list)
+ break;
+
+ list_del(&qr->qr_list);
+ if (qr->cmd)
+ atomic_dec(&qr->cmd->response_queue_count);
+ spin_unlock_bh(&conn->response_queue_lock);
+
+ return qr;
+}
+
+static void iscsit_remove_cmd_from_response_queue(
+ struct iscsi_cmd *cmd,
+ struct iscsi_conn *conn)
+{
+ struct iscsi_queue_req *qr, *qr_tmp;
+
+ spin_lock_bh(&conn->response_queue_lock);
+ if (!atomic_read(&cmd->response_queue_count)) {
+ spin_unlock_bh(&conn->response_queue_lock);
+ return;
+ }
+
+ list_for_each_entry_safe(qr, qr_tmp, &conn->response_queue_list,
+ qr_list) {
+ if (qr->cmd != cmd)
+ continue;
+
+ atomic_dec(&qr->cmd->response_queue_count);
+ list_del(&qr->qr_list);
+ kmem_cache_free(lio_qr_cache, qr);
+ }
+ spin_unlock_bh(&conn->response_queue_lock);
+
+ if (atomic_read(&cmd->response_queue_count)) {
+ pr_err("ITT: 0x%08x response_queue_count: %d\n",
+ cmd->init_task_tag,
+ atomic_read(&cmd->response_queue_count));
+ }
+}
+
+void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *conn)
+{
+ struct iscsi_queue_req *qr, *qr_tmp;
+
+ spin_lock_bh(&conn->immed_queue_lock);
+ list_for_each_entry_safe(qr, qr_tmp, &conn->immed_queue_list, qr_list) {
+ list_del(&qr->qr_list);
+ if (qr->cmd)
+ atomic_dec(&qr->cmd->immed_queue_count);
+
+ kmem_cache_free(lio_qr_cache, qr);
+ }
+ spin_unlock_bh(&conn->immed_queue_lock);
+
+ spin_lock_bh(&conn->response_queue_lock);
+ list_for_each_entry_safe(qr, qr_tmp, &conn->response_queue_list,
+ qr_list) {
+ list_del(&qr->qr_list);
+ if (qr->cmd)
+ atomic_dec(&qr->cmd->response_queue_count);
+
+ kmem_cache_free(lio_qr_cache, qr);
+ }
+ spin_unlock_bh(&conn->response_queue_lock);
+}
+
+void iscsit_release_cmd(struct iscsi_cmd *cmd)
+{
+ struct iscsi_conn *conn = cmd->conn;
+ int i;
+
+ iscsit_free_r2ts_from_list(cmd);
+ iscsit_free_all_datain_reqs(cmd);
+
+ kfree(cmd->buf_ptr);
+ kfree(cmd->pdu_list);
+ kfree(cmd->seq_list);
+ kfree(cmd->tmr_req);
+ kfree(cmd->iov_data);
+
+ for (i = 0; i < cmd->t_mem_sg_nents; i++)
+ __free_page(sg_page(&cmd->t_mem_sg[i]));
+
+ kfree(cmd->t_mem_sg);
+
+ if (conn) {
+ iscsit_remove_cmd_from_immediate_queue(cmd, conn);
+ iscsit_remove_cmd_from_response_queue(cmd, conn);
+ }
+
+ kmem_cache_free(lio_cmd_cache, cmd);
+}
+
+int iscsit_check_session_usage_count(struct iscsi_session *sess)
+{
+ spin_lock_bh(&sess->session_usage_lock);
+ if (sess->session_usage_count != 0) {
+ sess->session_waiting_on_uc = 1;
+ spin_unlock_bh(&sess->session_usage_lock);
+ if (in_interrupt())
+ return 2;
+
+ wait_for_completion(&sess->session_waiting_on_uc_comp);
+ return 1;
+ }
+ spin_unlock_bh(&sess->session_usage_lock);
+
+ return 0;
+}
+
+void iscsit_dec_session_usage_count(struct iscsi_session *sess)
+{
+ spin_lock_bh(&sess->session_usage_lock);
+ sess->session_usage_count--;
+
+ if (!sess->session_usage_count && sess->session_waiting_on_uc)
+ complete(&sess->session_waiting_on_uc_comp);
+
+ spin_unlock_bh(&sess->session_usage_lock);
+}
+
+void iscsit_inc_session_usage_count(struct iscsi_session *sess)
+{
+ spin_lock_bh(&sess->session_usage_lock);
+ sess->session_usage_count++;
+ spin_unlock_bh(&sess->session_usage_lock);
+}
+
+/*
+ * Used before iscsit_do_[rx,tx]_data() to determine iov and [rx,tx]_marker
+ * array counts needed for sync and steering.
+ */
+static int iscsit_determine_sync_and_steering_counts(
+ struct iscsi_conn *conn,
+ struct iscsi_data_count *count)
+{
+ u32 length = count->data_length;
+ u32 marker, markint;
+
+ count->sync_and_steering = 1;
+
+ marker = (count->type == ISCSI_RX_DATA) ?
+ conn->of_marker : conn->if_marker;
+ markint = (count->type == ISCSI_RX_DATA) ?
+ (conn->conn_ops->OFMarkInt * 4) :
+ (conn->conn_ops->IFMarkInt * 4);
+ count->ss_iov_count = count->iov_count;
+
+ while (length > 0) {
+ if (length >= marker) {
+ count->ss_iov_count += 3;
+ count->ss_marker_count += 2;
+
+ length -= marker;
+ marker = markint;
+ } else
+ length = 0;
+ }
+
+ return 0;
+}
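+
+/*
+ * Each complete marker interval inside the payload costs three extra iovec
+ * slots (the split data segment plus the two 4-byte halves of the 8-byte
+ * marker) and two marker words, which is exactly what the += 3 / += 2
+ * accounting above reserves.
+ */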
+
+/*
+ * Set up conn->if_marker and conn->of_marker values based upon
+ * the initial marker-less interval (see iSCSI draft v19, Appendix A.2).
+ */
+int iscsit_set_sync_and_steering_values(struct iscsi_conn *conn)
+{
+ int login_ifmarker_count = 0, login_ofmarker_count = 0, next_marker = 0;
+ /*
+ * IFMarkInt and OFMarkInt are negotiated as 32-bit words.
+ */
+ u32 IFMarkInt = (conn->conn_ops->IFMarkInt * 4);
+ u32 OFMarkInt = (conn->conn_ops->OFMarkInt * 4);
+
+ if (conn->conn_ops->OFMarker) {
+ /*
+		 * Account for the first Login Command, which is not
+		 * received via iscsi_recv_msg().
+ */
+ conn->of_marker += ISCSI_HDR_LEN;
+ if (conn->of_marker <= OFMarkInt) {
+ conn->of_marker = (OFMarkInt - conn->of_marker);
+ } else {
+ login_ofmarker_count = (conn->of_marker / OFMarkInt);
+ next_marker = (OFMarkInt * (login_ofmarker_count + 1)) +
+ (login_ofmarker_count * MARKER_SIZE);
+ conn->of_marker = (next_marker - conn->of_marker);
+ }
+ conn->of_marker_offset = 0;
+ pr_debug("Setting OFMarker value to %u based on Initial"
+ " Markerless Interval.\n", conn->of_marker);
+ }
+
+ if (conn->conn_ops->IFMarker) {
+ if (conn->if_marker <= IFMarkInt) {
+ conn->if_marker = (IFMarkInt - conn->if_marker);
+ } else {
+ login_ifmarker_count = (conn->if_marker / IFMarkInt);
+ next_marker = (IFMarkInt * (login_ifmarker_count + 1)) +
+ (login_ifmarker_count * MARKER_SIZE);
+ conn->if_marker = (next_marker - conn->if_marker);
+ }
+ pr_debug("Setting IFMarker value to %u based on Initial"
+ " Markerless Interval.\n", conn->if_marker);
+ }
+
+ return 0;
+}
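+
+/*
+ * Worked example (illustrative): if OFMarkInt was negotiated as 2048 words
+ * (8192 bytes), the 48-byte Login PDU accounted for above leaves
+ * conn->of_marker = 8192 - 48 = 8144 bytes until the first marker of the
+ * Full Feature Phase.
+ */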
+
+struct iscsi_conn *iscsit_get_conn_from_cid(struct iscsi_session *sess, u16 cid)
+{
+ struct iscsi_conn *conn;
+
+ spin_lock_bh(&sess->conn_lock);
+ list_for_each_entry(conn, &sess->sess_conn_list, conn_list) {
+ if ((conn->cid == cid) &&
+ (conn->conn_state == TARG_CONN_STATE_LOGGED_IN)) {
+ iscsit_inc_conn_usage_count(conn);
+ spin_unlock_bh(&sess->conn_lock);
+ return conn;
+ }
+ }
+ spin_unlock_bh(&sess->conn_lock);
+
+ return NULL;
+}
+
+struct iscsi_conn *iscsit_get_conn_from_cid_rcfr(struct iscsi_session *sess, u16 cid)
+{
+ struct iscsi_conn *conn;
+
+ spin_lock_bh(&sess->conn_lock);
+ list_for_each_entry(conn, &sess->sess_conn_list, conn_list) {
+ if (conn->cid == cid) {
+ iscsit_inc_conn_usage_count(conn);
+ spin_lock(&conn->state_lock);
+ atomic_set(&conn->connection_wait_rcfr, 1);
+ spin_unlock(&conn->state_lock);
+ spin_unlock_bh(&sess->conn_lock);
+ return conn;
+ }
+ }
+ spin_unlock_bh(&sess->conn_lock);
+
+ return NULL;
+}
+
+void iscsit_check_conn_usage_count(struct iscsi_conn *conn)
+{
+ spin_lock_bh(&conn->conn_usage_lock);
+ if (conn->conn_usage_count != 0) {
+ conn->conn_waiting_on_uc = 1;
+ spin_unlock_bh(&conn->conn_usage_lock);
+
+ wait_for_completion(&conn->conn_waiting_on_uc_comp);
+ return;
+ }
+ spin_unlock_bh(&conn->conn_usage_lock);
+}
+
+void iscsit_dec_conn_usage_count(struct iscsi_conn *conn)
+{
+ spin_lock_bh(&conn->conn_usage_lock);
+ conn->conn_usage_count--;
+
+ if (!conn->conn_usage_count && conn->conn_waiting_on_uc)
+ complete(&conn->conn_waiting_on_uc_comp);
+
+ spin_unlock_bh(&conn->conn_usage_lock);
+}
+
+void iscsit_inc_conn_usage_count(struct iscsi_conn *conn)
+{
+ spin_lock_bh(&conn->conn_usage_lock);
+ conn->conn_usage_count++;
+ spin_unlock_bh(&conn->conn_usage_lock);
+}
+
+static int iscsit_add_nopin(struct iscsi_conn *conn, int want_response)
+{
+ u8 state;
+ struct iscsi_cmd *cmd;
+
+ cmd = iscsit_allocate_cmd(conn, GFP_ATOMIC);
+ if (!cmd)
+ return -1;
+
+ cmd->iscsi_opcode = ISCSI_OP_NOOP_IN;
+ state = (want_response) ? ISTATE_SEND_NOPIN_WANT_RESPONSE :
+ ISTATE_SEND_NOPIN_NO_RESPONSE;
+ cmd->init_task_tag = 0xFFFFFFFF;
+ spin_lock_bh(&conn->sess->ttt_lock);
+ cmd->targ_xfer_tag = (want_response) ? conn->sess->targ_xfer_tag++ :
+ 0xFFFFFFFF;
+ if (want_response && (cmd->targ_xfer_tag == 0xFFFFFFFF))
+ cmd->targ_xfer_tag = conn->sess->targ_xfer_tag++;
+ spin_unlock_bh(&conn->sess->ttt_lock);
+
+ spin_lock_bh(&conn->cmd_lock);
+ list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
+ spin_unlock_bh(&conn->cmd_lock);
+
+ if (want_response)
+ iscsit_start_nopin_response_timer(conn);
+ iscsit_add_cmd_to_immediate_queue(cmd, conn, state);
+
+ return 0;
+}
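+
+/*
+ * RFC 3720 reserves a Target Transfer Tag of 0xFFFFFFFF for NopINs that do
+ * not solicit a NopOut reply, which is why iscsit_add_nopin() assigns it
+ * directly in the no-response case and skips over it when want_response
+ * is set.
+ */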
+
+static void iscsit_handle_nopin_response_timeout(unsigned long data)
+{
+ struct iscsi_conn *conn = (struct iscsi_conn *) data;
+
+ iscsit_inc_conn_usage_count(conn);
+
+ spin_lock_bh(&conn->nopin_timer_lock);
+ if (conn->nopin_response_timer_flags & ISCSI_TF_STOP) {
+ spin_unlock_bh(&conn->nopin_timer_lock);
+ iscsit_dec_conn_usage_count(conn);
+ return;
+ }
+
+ pr_debug("Did not receive response to NOPIN on CID: %hu on"
+ " SID: %u, failing connection.\n", conn->cid,
+ conn->sess->sid);
+ conn->nopin_response_timer_flags &= ~ISCSI_TF_RUNNING;
+ spin_unlock_bh(&conn->nopin_timer_lock);
+
+ {
+ struct iscsi_portal_group *tpg = conn->sess->tpg;
+ struct iscsi_tiqn *tiqn = tpg->tpg_tiqn;
+
+ if (tiqn) {
+ spin_lock_bh(&tiqn->sess_err_stats.lock);
+ strcpy(tiqn->sess_err_stats.last_sess_fail_rem_name,
+ (void *)conn->sess->sess_ops->InitiatorName);
+ tiqn->sess_err_stats.last_sess_failure_type =
+ ISCSI_SESS_ERR_CXN_TIMEOUT;
+ tiqn->sess_err_stats.cxn_timeout_errors++;
+ conn->sess->conn_timeout_errors++;
+ spin_unlock_bh(&tiqn->sess_err_stats.lock);
+ }
+ }
+
+ iscsit_cause_connection_reinstatement(conn, 0);
+ iscsit_dec_conn_usage_count(conn);
+}
+
+void iscsit_mod_nopin_response_timer(struct iscsi_conn *conn)
+{
+ struct iscsi_session *sess = conn->sess;
+ struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
+
+ spin_lock_bh(&conn->nopin_timer_lock);
+ if (!(conn->nopin_response_timer_flags & ISCSI_TF_RUNNING)) {
+ spin_unlock_bh(&conn->nopin_timer_lock);
+ return;
+ }
+
+ mod_timer(&conn->nopin_response_timer,
+ (get_jiffies_64() + na->nopin_response_timeout * HZ));
+ spin_unlock_bh(&conn->nopin_timer_lock);
+}
+
+/*
+ * Called with conn->nopin_timer_lock held.
+ */
+void iscsit_start_nopin_response_timer(struct iscsi_conn *conn)
+{
+ struct iscsi_session *sess = conn->sess;
+ struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
+
+ spin_lock_bh(&conn->nopin_timer_lock);
+ if (conn->nopin_response_timer_flags & ISCSI_TF_RUNNING) {
+ spin_unlock_bh(&conn->nopin_timer_lock);
+ return;
+ }
+
+ init_timer(&conn->nopin_response_timer);
+ conn->nopin_response_timer.expires =
+ (get_jiffies_64() + na->nopin_response_timeout * HZ);
+ conn->nopin_response_timer.data = (unsigned long)conn;
+ conn->nopin_response_timer.function = iscsit_handle_nopin_response_timeout;
+ conn->nopin_response_timer_flags &= ~ISCSI_TF_STOP;
+ conn->nopin_response_timer_flags |= ISCSI_TF_RUNNING;
+ add_timer(&conn->nopin_response_timer);
+
+ pr_debug("Started NOPIN Response Timer on CID: %d to %u"
+ " seconds\n", conn->cid, na->nopin_response_timeout);
+ spin_unlock_bh(&conn->nopin_timer_lock);
+}
+
+void iscsit_stop_nopin_response_timer(struct iscsi_conn *conn)
+{
+ spin_lock_bh(&conn->nopin_timer_lock);
+ if (!(conn->nopin_response_timer_flags & ISCSI_TF_RUNNING)) {
+ spin_unlock_bh(&conn->nopin_timer_lock);
+ return;
+ }
+ conn->nopin_response_timer_flags |= ISCSI_TF_STOP;
+ spin_unlock_bh(&conn->nopin_timer_lock);
+
+ del_timer_sync(&conn->nopin_response_timer);
+
+ spin_lock_bh(&conn->nopin_timer_lock);
+ conn->nopin_response_timer_flags &= ~ISCSI_TF_RUNNING;
+ spin_unlock_bh(&conn->nopin_timer_lock);
+}
+
+static void iscsit_handle_nopin_timeout(unsigned long data)
+{
+ struct iscsi_conn *conn = (struct iscsi_conn *) data;
+
+ iscsit_inc_conn_usage_count(conn);
+
+ spin_lock_bh(&conn->nopin_timer_lock);
+ if (conn->nopin_timer_flags & ISCSI_TF_STOP) {
+ spin_unlock_bh(&conn->nopin_timer_lock);
+ iscsit_dec_conn_usage_count(conn);
+ return;
+ }
+ conn->nopin_timer_flags &= ~ISCSI_TF_RUNNING;
+ spin_unlock_bh(&conn->nopin_timer_lock);
+
+ iscsit_add_nopin(conn, 1);
+ iscsit_dec_conn_usage_count(conn);
+}
+
+/*
+ * Called with conn->nopin_timer_lock held.
+ */
+void __iscsit_start_nopin_timer(struct iscsi_conn *conn)
+{
+ struct iscsi_session *sess = conn->sess;
+ struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
+ /*
+ * NOPIN timeout is disabled.
+ */
+ if (!na->nopin_timeout)
+ return;
+
+ if (conn->nopin_timer_flags & ISCSI_TF_RUNNING)
+ return;
+
+ init_timer(&conn->nopin_timer);
+ conn->nopin_timer.expires = (get_jiffies_64() + na->nopin_timeout * HZ);
+ conn->nopin_timer.data = (unsigned long)conn;
+ conn->nopin_timer.function = iscsit_handle_nopin_timeout;
+ conn->nopin_timer_flags &= ~ISCSI_TF_STOP;
+ conn->nopin_timer_flags |= ISCSI_TF_RUNNING;
+ add_timer(&conn->nopin_timer);
+
+ pr_debug("Started NOPIN Timer on CID: %d at %u second"
+ " interval\n", conn->cid, na->nopin_timeout);
+}
+
+void iscsit_start_nopin_timer(struct iscsi_conn *conn)
+{
+ struct iscsi_session *sess = conn->sess;
+ struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
+ /*
+	 * NOPIN timeout is disabled.
+ */
+ if (!na->nopin_timeout)
+ return;
+
+ spin_lock_bh(&conn->nopin_timer_lock);
+ if (conn->nopin_timer_flags & ISCSI_TF_RUNNING) {
+ spin_unlock_bh(&conn->nopin_timer_lock);
+ return;
+ }
+
+ init_timer(&conn->nopin_timer);
+ conn->nopin_timer.expires = (get_jiffies_64() + na->nopin_timeout * HZ);
+ conn->nopin_timer.data = (unsigned long)conn;
+ conn->nopin_timer.function = iscsit_handle_nopin_timeout;
+ conn->nopin_timer_flags &= ~ISCSI_TF_STOP;
+ conn->nopin_timer_flags |= ISCSI_TF_RUNNING;
+ add_timer(&conn->nopin_timer);
+
+ pr_debug("Started NOPIN Timer on CID: %d at %u second"
+ " interval\n", conn->cid, na->nopin_timeout);
+ spin_unlock_bh(&conn->nopin_timer_lock);
+}
+
+void iscsit_stop_nopin_timer(struct iscsi_conn *conn)
+{
+ spin_lock_bh(&conn->nopin_timer_lock);
+ if (!(conn->nopin_timer_flags & ISCSI_TF_RUNNING)) {
+ spin_unlock_bh(&conn->nopin_timer_lock);
+ return;
+ }
+ conn->nopin_timer_flags |= ISCSI_TF_STOP;
+ spin_unlock_bh(&conn->nopin_timer_lock);
+
+ del_timer_sync(&conn->nopin_timer);
+
+ spin_lock_bh(&conn->nopin_timer_lock);
+ conn->nopin_timer_flags &= ~ISCSI_TF_RUNNING;
+ spin_unlock_bh(&conn->nopin_timer_lock);
+}
+
+int iscsit_send_tx_data(
+ struct iscsi_cmd *cmd,
+ struct iscsi_conn *conn,
+ int use_misc)
+{
+ int tx_sent, tx_size;
+ u32 iov_count;
+ struct kvec *iov;
+
+send_data:
+ tx_size = cmd->tx_size;
+
+ if (!use_misc) {
+ iov = &cmd->iov_data[0];
+ iov_count = cmd->iov_data_count;
+ } else {
+ iov = &cmd->iov_misc[0];
+ iov_count = cmd->iov_misc_count;
+ }
+
+ tx_sent = tx_data(conn, &iov[0], iov_count, tx_size);
+ if (tx_size != tx_sent) {
+ if (tx_sent == -EAGAIN) {
+ pr_err("tx_data() returned -EAGAIN\n");
+ goto send_data;
+ } else
+ return -1;
+ }
+ cmd->tx_size = 0;
+
+ return 0;
+}
+
+int iscsit_fe_sendpage_sg(
+ struct iscsi_cmd *cmd,
+ struct iscsi_conn *conn)
+{
+ struct scatterlist *sg = cmd->first_data_sg;
+ struct kvec iov;
+ u32 tx_hdr_size, data_len;
+ u32 offset = cmd->first_data_sg_off;
+ int tx_sent;
+
+send_hdr:
+ tx_hdr_size = ISCSI_HDR_LEN;
+ if (conn->conn_ops->HeaderDigest)
+ tx_hdr_size += ISCSI_CRC_LEN;
+
+ iov.iov_base = cmd->pdu;
+ iov.iov_len = tx_hdr_size;
+
+ tx_sent = tx_data(conn, &iov, 1, tx_hdr_size);
+ if (tx_hdr_size != tx_sent) {
+ if (tx_sent == -EAGAIN) {
+ pr_err("tx_data() returned -EAGAIN\n");
+ goto send_hdr;
+ }
+ return -1;
+ }
+
+ data_len = cmd->tx_size - tx_hdr_size - cmd->padding;
+ if (conn->conn_ops->DataDigest)
+ data_len -= ISCSI_CRC_LEN;
+
+ /*
+ * Perform sendpage() for each page in the scatterlist
+ */
+ while (data_len) {
+ u32 space = (sg->length - offset);
+ u32 sub_len = min_t(u32, data_len, space);
+send_pg:
+ tx_sent = conn->sock->ops->sendpage(conn->sock,
+ sg_page(sg), sg->offset + offset, sub_len, 0);
+ if (tx_sent != sub_len) {
+ if (tx_sent == -EAGAIN) {
+ pr_err("tcp_sendpage() returned"
+ " -EAGAIN\n");
+ goto send_pg;
+ }
+
+ pr_err("tcp_sendpage() failure: %d\n",
+ tx_sent);
+ return -1;
+ }
+
+ data_len -= sub_len;
+ offset = 0;
+ sg = sg_next(sg);
+ }
+
+send_padding:
+ if (cmd->padding) {
+ struct kvec *iov_p =
+ &cmd->iov_data[cmd->iov_data_count-1];
+
+ tx_sent = tx_data(conn, iov_p, 1, cmd->padding);
+ if (cmd->padding != tx_sent) {
+ if (tx_sent == -EAGAIN) {
+ pr_err("tx_data() returned -EAGAIN\n");
+ goto send_padding;
+ }
+ return -1;
+ }
+ }
+
+send_datacrc:
+ if (conn->conn_ops->DataDigest) {
+ struct kvec *iov_d =
+ &cmd->iov_data[cmd->iov_data_count];
+
+ tx_sent = tx_data(conn, iov_d, 1, ISCSI_CRC_LEN);
+ if (ISCSI_CRC_LEN != tx_sent) {
+ if (tx_sent == -EAGAIN) {
+ pr_err("tx_data() returned -EAGAIN\n");
+ goto send_datacrc;
+ }
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * This function is mainly used for sending an ISCSI_OP_LOGIN_RSP PDU
+ * back to the Initiator when an exception condition occurs, with the
+ * errors set in status_class and status_detail.
+ *
+ * Parameters: iSCSI Connection, Status Class, Status Detail.
+ * Returns: 0 on success, -1 on error.
+ */
+int iscsit_tx_login_rsp(struct iscsi_conn *conn, u8 status_class, u8 status_detail)
+{
+ u8 iscsi_hdr[ISCSI_HDR_LEN];
+ int err;
+ struct kvec iov;
+ struct iscsi_login_rsp *hdr;
+
+ iscsit_collect_login_stats(conn, status_class, status_detail);
+
+ memset(&iov, 0, sizeof(struct kvec));
+ memset(&iscsi_hdr, 0x0, ISCSI_HDR_LEN);
+
+ hdr = (struct iscsi_login_rsp *)&iscsi_hdr;
+ hdr->opcode = ISCSI_OP_LOGIN_RSP;
+ hdr->status_class = status_class;
+ hdr->status_detail = status_detail;
+ hdr->itt = cpu_to_be32(conn->login_itt);
+
+ iov.iov_base = &iscsi_hdr;
+ iov.iov_len = ISCSI_HDR_LEN;
+
+ PRINT_BUFF(iscsi_hdr, ISCSI_HDR_LEN);
+
+ err = tx_data(conn, &iov, 1, ISCSI_HDR_LEN);
+ if (err != ISCSI_HDR_LEN) {
+ pr_err("tx_data returned less than expected\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+void iscsit_print_session_params(struct iscsi_session *sess)
+{
+ struct iscsi_conn *conn;
+
+ pr_debug("-----------------------------[Session Params for"
+ " SID: %u]-----------------------------\n", sess->sid);
+ spin_lock_bh(&sess->conn_lock);
+ list_for_each_entry(conn, &sess->sess_conn_list, conn_list)
+ iscsi_dump_conn_ops(conn->conn_ops);
+ spin_unlock_bh(&sess->conn_lock);
+
+ iscsi_dump_sess_ops(sess->sess_ops);
+}
+
+static int iscsit_do_rx_data(
+ struct iscsi_conn *conn,
+ struct iscsi_data_count *count)
+{
+ int data = count->data_length, rx_loop = 0, total_rx = 0, iov_len;
+ u32 rx_marker_val[count->ss_marker_count], rx_marker_iov = 0;
+ struct kvec iov[count->ss_iov_count], *iov_p;
+ struct msghdr msg;
+
+ if (!conn || !conn->sock || !conn->conn_ops)
+ return -1;
+
+ memset(&msg, 0, sizeof(struct msghdr));
+
+ if (count->sync_and_steering) {
+ int size = 0;
+ u32 i, orig_iov_count = 0;
+ u32 orig_iov_len = 0, orig_iov_loc = 0;
+ u32 iov_count = 0, per_iov_bytes = 0;
+ u32 *rx_marker, old_rx_marker = 0;
+ struct kvec *iov_record;
+
+ memset(&rx_marker_val, 0,
+ count->ss_marker_count * sizeof(u32));
+ memset(&iov, 0, count->ss_iov_count * sizeof(struct kvec));
+
+ iov_record = count->iov;
+ orig_iov_count = count->iov_count;
+ rx_marker = &conn->of_marker;
+
+ i = 0;
+ size = data;
+ orig_iov_len = iov_record[orig_iov_loc].iov_len;
+ while (size > 0) {
+ pr_debug("rx_data: #1 orig_iov_len %u,"
+ " orig_iov_loc %u\n", orig_iov_len, orig_iov_loc);
+ pr_debug("rx_data: #2 rx_marker %u, size"
+ " %u\n", *rx_marker, size);
+
+ if (orig_iov_len >= *rx_marker) {
+ iov[iov_count].iov_len = *rx_marker;
+ iov[iov_count++].iov_base =
+ (iov_record[orig_iov_loc].iov_base +
+ per_iov_bytes);
+
+ iov[iov_count].iov_len = (MARKER_SIZE / 2);
+ iov[iov_count++].iov_base =
+ &rx_marker_val[rx_marker_iov++];
+ iov[iov_count].iov_len = (MARKER_SIZE / 2);
+ iov[iov_count++].iov_base =
+ &rx_marker_val[rx_marker_iov++];
+ old_rx_marker = *rx_marker;
+
+ /*
+ * OFMarkInt is in 32-bit words.
+ */
+ *rx_marker = (conn->conn_ops->OFMarkInt * 4);
+ size -= old_rx_marker;
+ orig_iov_len -= old_rx_marker;
+ per_iov_bytes += old_rx_marker;
+
+ pr_debug("rx_data: #3 new_rx_marker"
+ " %u, size %u\n", *rx_marker, size);
+ } else {
+ iov[iov_count].iov_len = orig_iov_len;
+ iov[iov_count++].iov_base =
+ (iov_record[orig_iov_loc].iov_base +
+ per_iov_bytes);
+
+ per_iov_bytes = 0;
+ *rx_marker -= orig_iov_len;
+ size -= orig_iov_len;
+
+ if (size)
+ orig_iov_len =
+ iov_record[++orig_iov_loc].iov_len;
+
+ pr_debug("rx_data: #4 new_rx_marker"
+ " %u, size %u\n", *rx_marker, size);
+ }
+ }
+ data += (rx_marker_iov * (MARKER_SIZE / 2));
+
+ iov_p = &iov[0];
+ iov_len = iov_count;
+
+ if (iov_count > count->ss_iov_count) {
+ pr_err("iov_count: %d, count->ss_iov_count:"
+ " %d\n", iov_count, count->ss_iov_count);
+ return -1;
+ }
+ if (rx_marker_iov > count->ss_marker_count) {
+ pr_err("rx_marker_iov: %d, count->ss_marker"
+ "_count: %d\n", rx_marker_iov,
+ count->ss_marker_count);
+ return -1;
+ }
+ } else {
+ iov_p = count->iov;
+ iov_len = count->iov_count;
+ }
+
+ while (total_rx < data) {
+ rx_loop = kernel_recvmsg(conn->sock, &msg, iov_p, iov_len,
+ (data - total_rx), MSG_WAITALL);
+ if (rx_loop <= 0) {
+ pr_debug("rx_loop: %d total_rx: %d\n",
+ rx_loop, total_rx);
+ return rx_loop;
+ }
+ total_rx += rx_loop;
+ pr_debug("rx_loop: %d, total_rx: %d, data: %d\n",
+ rx_loop, total_rx, data);
+ }
+
+ if (count->sync_and_steering) {
+ int j;
+ for (j = 0; j < rx_marker_iov; j++) {
+ pr_debug("rx_data: #5 j: %d, offset: %d\n",
+ j, rx_marker_val[j]);
+ conn->of_marker_offset = rx_marker_val[j];
+ }
+ total_rx -= (rx_marker_iov * (MARKER_SIZE / 2));
+ }
+
+ return total_rx;
+}
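+
+/*
+ * On the sync-and-steering path above, the receive loop pulls each 8-byte
+ * marker into rx_marker_val[] alongside the real payload (hence the
+ * earlier adjustment of 'data' by rx_marker_iov * (MARKER_SIZE / 2)),
+ * and the final subtraction reports only payload bytes back to the caller.
+ */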
+
+static int iscsit_do_tx_data(
+ struct iscsi_conn *conn,
+ struct iscsi_data_count *count)
+{
+ int data = count->data_length, total_tx = 0, tx_loop = 0, iov_len;
+ u32 tx_marker_val[count->ss_marker_count], tx_marker_iov = 0;
+ struct kvec iov[count->ss_iov_count], *iov_p;
+ struct msghdr msg;
+
+ if (!conn || !conn->sock || !conn->conn_ops)
+ return -1;
+
+ if (data <= 0) {
+ pr_err("Data length is: %d\n", data);
+ return -1;
+ }
+
+ memset(&msg, 0, sizeof(struct msghdr));
+
+ if (count->sync_and_steering) {
+ int size = 0;
+ u32 i, orig_iov_count = 0;
+ u32 orig_iov_len = 0, orig_iov_loc = 0;
+ u32 iov_count = 0, per_iov_bytes = 0;
+ u32 *tx_marker, old_tx_marker = 0;
+ struct kvec *iov_record;
+
+ memset(&tx_marker_val, 0,
+ count->ss_marker_count * sizeof(u32));
+ memset(&iov, 0, count->ss_iov_count * sizeof(struct kvec));
+
+ iov_record = count->iov;
+ orig_iov_count = count->iov_count;
+ tx_marker = &conn->if_marker;
+
+ i = 0;
+ size = data;
+ orig_iov_len = iov_record[orig_iov_loc].iov_len;
+ while (size > 0) {
+ pr_debug("tx_data: #1 orig_iov_len %u,"
+ " orig_iov_loc %u\n", orig_iov_len, orig_iov_loc);
+ pr_debug("tx_data: #2 tx_marker %u, size"
+ " %u\n", *tx_marker, size);
+
+ if (orig_iov_len >= *tx_marker) {
+ iov[iov_count].iov_len = *tx_marker;
+ iov[iov_count++].iov_base =
+ (iov_record[orig_iov_loc].iov_base +
+ per_iov_bytes);
+
+ tx_marker_val[tx_marker_iov] =
+ (size - *tx_marker);
+ iov[iov_count].iov_len = (MARKER_SIZE / 2);
+ iov[iov_count++].iov_base =
+ &tx_marker_val[tx_marker_iov++];
+ iov[iov_count].iov_len = (MARKER_SIZE / 2);
+ iov[iov_count++].iov_base =
+ &tx_marker_val[tx_marker_iov++];
+ old_tx_marker = *tx_marker;
+
+ /*
+ * IFMarkInt is in 32-bit words.
+ */
+ *tx_marker = (conn->conn_ops->IFMarkInt * 4);
+ size -= old_tx_marker;
+ orig_iov_len -= old_tx_marker;
+ per_iov_bytes += old_tx_marker;
+
+ pr_debug("tx_data: #3 new_tx_marker"
+ " %u, size %u\n", *tx_marker, size);
+ pr_debug("tx_data: #4 offset %u\n",
+ tx_marker_val[tx_marker_iov-1]);
+ } else {
+ iov[iov_count].iov_len = orig_iov_len;
+ iov[iov_count++].iov_base
+ = (iov_record[orig_iov_loc].iov_base +
+ per_iov_bytes);
+
+ per_iov_bytes = 0;
+ *tx_marker -= orig_iov_len;
+ size -= orig_iov_len;
+
+ if (size)
+ orig_iov_len =
+ iov_record[++orig_iov_loc].iov_len;
+
+ pr_debug("tx_data: #5 new_tx_marker"
+ " %u, size %u\n", *tx_marker, size);
+ }
+ }
+
+ data += (tx_marker_iov * (MARKER_SIZE / 2));
+
+ iov_p = &iov[0];
+ iov_len = iov_count;
+
+ if (iov_count > count->ss_iov_count) {
+ pr_err("iov_count: %d, count->ss_iov_count:"
+ " %d\n", iov_count, count->ss_iov_count);
+ return -1;
+ }
+ if (tx_marker_iov > count->ss_marker_count) {
+ pr_err("tx_marker_iov: %d, count->ss_marker"
+ "_count: %d\n", tx_marker_iov,
+ count->ss_marker_count);
+ return -1;
+ }
+ } else {
+ iov_p = count->iov;
+ iov_len = count->iov_count;
+ }
+
+ while (total_tx < data) {
+ tx_loop = kernel_sendmsg(conn->sock, &msg, iov_p, iov_len,
+ (data - total_tx));
+ if (tx_loop <= 0) {
+ pr_debug("tx_loop: %d total_tx %d\n",
+ tx_loop, total_tx);
+ return tx_loop;
+ }
+ total_tx += tx_loop;
+ pr_debug("tx_loop: %d, total_tx: %d, data: %d\n",
+ tx_loop, total_tx, data);
+ }
+
+ if (count->sync_and_steering)
+ total_tx -= (tx_marker_iov * (MARKER_SIZE / 2));
+
+ return total_tx;
+}
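
One caveat worth illustrating: kernel_sendmsg() can return a short count, and the loop above retries with the unmodified kvec array, so a short write would retransmit from the start of the iovec. A hedged sketch of a loop that advances the kvec instead (send_all and its parameters are hypothetical names, not part of this patch):

	static int send_all(struct socket *sock, struct kvec *iov,
			    size_t niov, size_t len)
	{
		struct msghdr msg = { };
		int ret;

		while (len) {
			ret = kernel_sendmsg(sock, &msg, iov, niov, len);
			if (ret <= 0)
				return ret;
			len -= ret;
			/* Step past fully sent entries ... */
			while (ret && (size_t)ret >= iov->iov_len) {
				ret -= iov->iov_len;
				iov++;
				niov--;
			}
			/* ... and trim a partially sent one. */
			if (ret) {
				iov->iov_base += ret;
				iov->iov_len -= ret;
			}
		}
		return 0;
	}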
+
+int rx_data(
+ struct iscsi_conn *conn,
+ struct kvec *iov,
+ int iov_count,
+ int data)
+{
+ struct iscsi_data_count c;
+
+ if (!conn || !conn->sock || !conn->conn_ops)
+ return -1;
+
+ memset(&c, 0, sizeof(struct iscsi_data_count));
+ c.iov = iov;
+ c.iov_count = iov_count;
+ c.data_length = data;
+ c.type = ISCSI_RX_DATA;
+
+ if (conn->conn_ops->OFMarker &&
+ (conn->conn_state >= TARG_CONN_STATE_LOGGED_IN)) {
+ if (iscsit_determine_sync_and_steering_counts(conn, &c) < 0)
+ return -1;
+ }
+
+ return iscsit_do_rx_data(conn, &c);
+}
+
+int tx_data(
+ struct iscsi_conn *conn,
+ struct kvec *iov,
+ int iov_count,
+ int data)
+{
+ struct iscsi_data_count c;
+
+ if (!conn || !conn->sock || !conn->conn_ops)
+ return -1;
+
+ memset(&c, 0, sizeof(struct iscsi_data_count));
+ c.iov = iov;
+ c.iov_count = iov_count;
+ c.data_length = data;
+ c.type = ISCSI_TX_DATA;
+
+ if (conn->conn_ops->IFMarker &&
+ (conn->conn_state >= TARG_CONN_STATE_LOGGED_IN)) {
+ if (iscsit_determine_sync_and_steering_counts(conn, &c) < 0)
+ return -1;
+ }
+
+ return iscsit_do_tx_data(conn, &c);
+}
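
rx_data() and tx_data() hide the marker bookkeeping behind a plain kvec interface. A minimal usage sketch, assuming a caller-owned buffer and the 48-byte ISCSI_HDR_LEN from include/scsi/iscsi_proto.h:

	unsigned char hdr[ISCSI_HDR_LEN];
	struct kvec iov = {
		.iov_base = hdr,
		.iov_len  = ISCSI_HDR_LEN,
	};

	/* Returns bytes received, or <= 0 on connection failure. */
	if (rx_data(conn, &iov, 1, ISCSI_HDR_LEN) != ISCSI_HDR_LEN)
		; /* treat as a dropped connection */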
+
+void iscsit_collect_login_stats(
+ struct iscsi_conn *conn,
+ u8 status_class,
+ u8 status_detail)
+{
+ struct iscsi_param *intrname = NULL;
+ struct iscsi_tiqn *tiqn;
+ struct iscsi_login_stats *ls;
+
+ tiqn = iscsit_snmp_get_tiqn(conn);
+ if (!tiqn)
+ return;
+
+ ls = &tiqn->login_stats;
+
+ spin_lock(&ls->lock);
+ if (!strcmp(conn->login_ip, ls->last_intr_fail_ip_addr) &&
+ ((get_jiffies_64() - ls->last_fail_time) < 10)) {
+ /* We already have the failure info for this login */
+ spin_unlock(&ls->lock);
+ return;
+ }
+
+ if (status_class == ISCSI_STATUS_CLS_SUCCESS)
+ ls->accepts++;
+ else if (status_class == ISCSI_STATUS_CLS_REDIRECT) {
+ ls->redirects++;
+ ls->last_fail_type = ISCSI_LOGIN_FAIL_REDIRECT;
+ } else if ((status_class == ISCSI_STATUS_CLS_INITIATOR_ERR) &&
+ (status_detail == ISCSI_LOGIN_STATUS_AUTH_FAILED)) {
+ ls->authenticate_fails++;
+ ls->last_fail_type = ISCSI_LOGIN_FAIL_AUTHENTICATE;
+ } else if ((status_class == ISCSI_STATUS_CLS_INITIATOR_ERR) &&
+ (status_detail == ISCSI_LOGIN_STATUS_TGT_FORBIDDEN)) {
+ ls->authorize_fails++;
+ ls->last_fail_type = ISCSI_LOGIN_FAIL_AUTHORIZE;
+ } else if ((status_class == ISCSI_STATUS_CLS_INITIATOR_ERR) &&
+ (status_detail == ISCSI_LOGIN_STATUS_INIT_ERR)) {
+ ls->negotiate_fails++;
+ ls->last_fail_type = ISCSI_LOGIN_FAIL_NEGOTIATE;
+ } else {
+ ls->other_fails++;
+ ls->last_fail_type = ISCSI_LOGIN_FAIL_OTHER;
+ }
+
+	/* Save initiator name, IP address and time if it is a failed login */
+ if (status_class != ISCSI_STATUS_CLS_SUCCESS) {
+ if (conn->param_list)
+ intrname = iscsi_find_param_from_key(INITIATORNAME,
+ conn->param_list);
+ strcpy(ls->last_intr_fail_name,
+ (intrname ? intrname->value : "Unknown"));
+
+ ls->last_intr_fail_ip_family = conn->sock->sk->sk_family;
+ snprintf(ls->last_intr_fail_ip_addr, IPV6_ADDRESS_SPACE,
+ "%s", conn->login_ip);
+ ls->last_fail_time = get_jiffies_64();
+ }
+
+ spin_unlock(&ls->lock);
+}
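
Note that the duplicate-failure check above compares a raw jiffies delta against 10, i.e. ten timer ticks (2.5-10 ms at common HZ values), not ten seconds. If a seconds-scale window were intended, a sketch like the following would express it explicitly:

	/* Hypothetical: suppress duplicate records for 10 seconds. */
	if (!strcmp(conn->login_ip, ls->last_intr_fail_ip_addr) &&
	    time_before64(get_jiffies_64(), ls->last_fail_time + 10 * HZ)) {
		spin_unlock(&ls->lock);
		return;
	}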
+
+struct iscsi_tiqn *iscsit_snmp_get_tiqn(struct iscsi_conn *conn)
+{
+ struct iscsi_portal_group *tpg;
+
+ if (!conn || !conn->sess)
+ return NULL;
+
+ tpg = conn->sess->tpg;
+ if (!tpg)
+ return NULL;
+
+ if (!tpg->tpg_tiqn)
+ return NULL;
+
+ return tpg->tpg_tiqn;
+}
diff --git a/drivers/target/iscsi/iscsi_target_util.h b/drivers/target/iscsi/iscsi_target_util.h
new file mode 100644
index 00000000000..2cd49d607bd
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_util.h
@@ -0,0 +1,60 @@
+#ifndef ISCSI_TARGET_UTIL_H
+#define ISCSI_TARGET_UTIL_H
+
+#define MARKER_SIZE 8
+
+extern int iscsit_add_r2t_to_list(struct iscsi_cmd *, u32, u32, int, u32);
+extern struct iscsi_r2t *iscsit_get_r2t_for_eos(struct iscsi_cmd *, u32, u32);
+extern struct iscsi_r2t *iscsit_get_r2t_from_list(struct iscsi_cmd *);
+extern void iscsit_free_r2t(struct iscsi_r2t *, struct iscsi_cmd *);
+extern void iscsit_free_r2ts_from_list(struct iscsi_cmd *);
+extern struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *, gfp_t);
+extern struct iscsi_cmd *iscsit_allocate_se_cmd(struct iscsi_conn *, u32, int, int);
+extern struct iscsi_cmd *iscsit_allocate_se_cmd_for_tmr(struct iscsi_conn *, u8);
+extern int iscsit_decide_list_to_build(struct iscsi_cmd *, u32);
+extern struct iscsi_seq *iscsit_get_seq_holder_for_datain(struct iscsi_cmd *, u32);
+extern struct iscsi_seq *iscsit_get_seq_holder_for_r2t(struct iscsi_cmd *);
+extern struct iscsi_r2t *iscsit_get_holder_for_r2tsn(struct iscsi_cmd *, u32);
+int iscsit_sequence_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, u32 cmdsn);
+extern int iscsit_check_unsolicited_dataout(struct iscsi_cmd *, unsigned char *);
+extern struct iscsi_cmd *iscsit_find_cmd_from_itt(struct iscsi_conn *, u32);
+extern struct iscsi_cmd *iscsit_find_cmd_from_itt_or_dump(struct iscsi_conn *,
+ u32, u32);
+extern struct iscsi_cmd *iscsit_find_cmd_from_ttt(struct iscsi_conn *, u32);
+extern int iscsit_find_cmd_for_recovery(struct iscsi_session *, struct iscsi_cmd **,
+ struct iscsi_conn_recovery **, u32);
+extern void iscsit_add_cmd_to_immediate_queue(struct iscsi_cmd *, struct iscsi_conn *, u8);
+extern struct iscsi_queue_req *iscsit_get_cmd_from_immediate_queue(struct iscsi_conn *);
+extern void iscsit_add_cmd_to_response_queue(struct iscsi_cmd *, struct iscsi_conn *, u8);
+extern struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsi_conn *);
+extern void iscsit_remove_cmd_from_tx_queues(struct iscsi_cmd *, struct iscsi_conn *);
+extern void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *);
+extern void iscsit_release_cmd(struct iscsi_cmd *);
+extern int iscsit_check_session_usage_count(struct iscsi_session *);
+extern void iscsit_dec_session_usage_count(struct iscsi_session *);
+extern void iscsit_inc_session_usage_count(struct iscsi_session *);
+extern int iscsit_set_sync_and_steering_values(struct iscsi_conn *);
+extern struct iscsi_conn *iscsit_get_conn_from_cid(struct iscsi_session *, u16);
+extern struct iscsi_conn *iscsit_get_conn_from_cid_rcfr(struct iscsi_session *, u16);
+extern void iscsit_check_conn_usage_count(struct iscsi_conn *);
+extern void iscsit_dec_conn_usage_count(struct iscsi_conn *);
+extern void iscsit_inc_conn_usage_count(struct iscsi_conn *);
+extern void iscsit_mod_nopin_response_timer(struct iscsi_conn *);
+extern void iscsit_start_nopin_response_timer(struct iscsi_conn *);
+extern void iscsit_stop_nopin_response_timer(struct iscsi_conn *);
+extern void __iscsit_start_nopin_timer(struct iscsi_conn *);
+extern void iscsit_start_nopin_timer(struct iscsi_conn *);
+extern void iscsit_stop_nopin_timer(struct iscsi_conn *);
+extern int iscsit_send_tx_data(struct iscsi_cmd *, struct iscsi_conn *, int);
+extern int iscsit_fe_sendpage_sg(struct iscsi_cmd *, struct iscsi_conn *);
+extern int iscsit_tx_login_rsp(struct iscsi_conn *, u8, u8);
+extern void iscsit_print_session_params(struct iscsi_session *);
+extern int iscsit_print_dev_to_proc(char *, char **, off_t, int);
+extern int iscsit_print_sessions_to_proc(char *, char **, off_t, int);
+extern int iscsit_print_tpg_to_proc(char *, char **, off_t, int);
+extern int rx_data(struct iscsi_conn *, struct kvec *, int, int);
+extern int tx_data(struct iscsi_conn *, struct kvec *, int, int);
+extern void iscsit_collect_login_stats(struct iscsi_conn *, u8, u8);
+extern struct iscsi_tiqn *iscsit_snmp_get_tiqn(struct iscsi_conn *);
+
+#endif /*** ISCSI_TARGET_UTIL_H ***/
diff --git a/drivers/target/loopback/Kconfig b/drivers/target/loopback/Kconfig
index 57dcbc2d711..abe8ecbcdf0 100644
--- a/drivers/target/loopback/Kconfig
+++ b/drivers/target/loopback/Kconfig
@@ -3,9 +3,3 @@ config LOOPBACK_TARGET
help
Say Y here to enable the TCM Virtual SAS target and Linux/SCSI LLD
fabric loopback module.
-
-config LOOPBACK_TARGET_CDB_DEBUG
- bool "TCM loopback fabric module CDB debug code"
- depends on LOOPBACK_TARGET
- help
- Say Y here to enable the TCM loopback fabric module CDB debug code
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index dee2a2c909f..aa2d6799723 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -31,7 +31,6 @@
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
-#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_transport.h>
@@ -80,7 +79,7 @@ static struct se_cmd *tcm_loop_allocate_core_cmd(
tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_ATOMIC);
if (!tl_cmd) {
- printk(KERN_ERR "Unable to allocate struct tcm_loop_cmd\n");
+ pr_err("Unable to allocate struct tcm_loop_cmd\n");
set_host_byte(sc, DID_ERROR);
return NULL;
}
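
The printk() to pr_err()/pr_debug() conversions that dominate the rest of this patch are mechanical. Modulo an optional pr_fmt() prefix, pr_err() expands to the same printk() call; pr_debug() additionally compiles away unless DEBUG or dynamic debug is enabled:

	/* Paraphrasing include/linux/printk.h: */
	#define pr_err(fmt, ...) \
		printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)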
@@ -118,17 +117,16 @@ static struct se_cmd *tcm_loop_allocate_core_cmd(
* Signal BIDI usage with T_TASK(cmd)->t_tasks_bidi
*/
if (scsi_bidi_cmnd(sc))
- T_TASK(se_cmd)->t_tasks_bidi = 1;
+ se_cmd->t_tasks_bidi = 1;
/*
* Locate the struct se_lun pointer and attach it to struct se_cmd
*/
- if (transport_get_lun_for_cmd(se_cmd, NULL, tl_cmd->sc->device->lun) < 0) {
+ if (transport_lookup_cmd_lun(se_cmd, tl_cmd->sc->device->lun) < 0) {
kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
set_host_byte(sc, DID_NO_CONNECT);
return NULL;
}
- transport_device_setup_cmd(se_cmd);
return se_cmd;
}
@@ -143,17 +141,17 @@ static int tcm_loop_new_cmd_map(struct se_cmd *se_cmd)
struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
struct tcm_loop_cmd, tl_se_cmd);
struct scsi_cmnd *sc = tl_cmd->sc;
- void *mem_ptr, *mem_bidi_ptr = NULL;
- u32 sg_no_bidi = 0;
+ struct scatterlist *sgl_bidi = NULL;
+ u32 sgl_bidi_count = 0;
int ret;
/*
* Allocate the necessary tasks to complete the received CDB+data
*/
- ret = transport_generic_allocate_tasks(se_cmd, tl_cmd->sc->cmnd);
- if (ret == -1) {
+ ret = transport_generic_allocate_tasks(se_cmd, sc->cmnd);
+ if (ret == -ENOMEM) {
/* Out of Resources */
return PYX_TRANSPORT_LU_COMM_FAILURE;
- } else if (ret == -2) {
+ } else if (ret == -EINVAL) {
/*
* Handle case for SAM_STAT_RESERVATION_CONFLICT
*/
@@ -165,35 +163,21 @@ static int tcm_loop_new_cmd_map(struct se_cmd *se_cmd)
*/
return PYX_TRANSPORT_USE_SENSE_REASON;
}
+
/*
- * Setup the struct scatterlist memory from the received
- * struct scsi_cmnd.
+ * For BIDI commands, pass in the extra READ buffer
+	 * to transport_generic_map_mem_to_cmd() below.
*/
- if (scsi_sg_count(sc)) {
- se_cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM;
- mem_ptr = (void *)scsi_sglist(sc);
- /*
- * For BIDI commands, pass in the extra READ buffer
- * to transport_generic_map_mem_to_cmd() below..
- */
- if (T_TASK(se_cmd)->t_tasks_bidi) {
- struct scsi_data_buffer *sdb = scsi_in(sc);
+ if (se_cmd->t_tasks_bidi) {
+ struct scsi_data_buffer *sdb = scsi_in(sc);
- mem_bidi_ptr = (void *)sdb->table.sgl;
- sg_no_bidi = sdb->table.nents;
- }
- } else {
- /*
- * Used for DMA_NONE
- */
- mem_ptr = NULL;
+ sgl_bidi = sdb->table.sgl;
+ sgl_bidi_count = sdb->table.nents;
}
- /*
- * Map the SG memory into struct se_mem->page linked list using the same
- * physical memory at sg->page_link.
- */
- ret = transport_generic_map_mem_to_cmd(se_cmd, mem_ptr,
- scsi_sg_count(sc), mem_bidi_ptr, sg_no_bidi);
+
+ /* Tell the core about our preallocated memory */
+ ret = transport_generic_map_mem_to_cmd(se_cmd, scsi_sglist(sc),
+ scsi_sg_count(sc), sgl_bidi, sgl_bidi_count);
if (ret < 0)
return PYX_TRANSPORT_LU_COMM_FAILURE;
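
For bidirectional commands the WRITE payload and the extra READ buffer live in separate scatterlists; a sketch of where each side comes from, using the scsi_in()/scsi_out() accessors of this era (out_sg/in_sg are hypothetical locals):

	struct scatterlist *out_sg = scsi_out(sc)->table.sgl;	/* WRITE data */
	unsigned int out_cnt       = scsi_out(sc)->table.nents;
	struct scatterlist *in_sg  = scsi_in(sc)->table.sgl;	/* READ data */
	unsigned int in_cnt        = scsi_in(sc)->table.nents;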
@@ -216,13 +200,10 @@ static void tcm_loop_check_stop_free(struct se_cmd *se_cmd)
* Release the struct se_cmd, which will make a callback to release
* struct tcm_loop_cmd * in tcm_loop_deallocate_core_cmd()
*/
- transport_generic_free_cmd(se_cmd, 0, 1, 0);
+ transport_generic_free_cmd(se_cmd, 0, 0);
}
-/*
- * Called from struct target_core_fabric_ops->release_cmd_to_pool()
- */
-static void tcm_loop_deallocate_core_cmd(struct se_cmd *se_cmd)
+static void tcm_loop_release_cmd(struct se_cmd *se_cmd)
{
struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
struct tcm_loop_cmd, tl_se_cmd);
@@ -300,7 +281,7 @@ static int tcm_loop_queuecommand(
struct tcm_loop_hba *tl_hba;
struct tcm_loop_tpg *tl_tpg;
- TL_CDB_DEBUG("tcm_loop_queuecommand() %d:%d:%d:%d got CDB: 0x%02x"
+ pr_debug("tcm_loop_queuecommand() %d:%d:%d:%d got CDB: 0x%02x"
" scsi_buf_len: %u\n", sc->device->host->host_no,
sc->device->id, sc->device->channel, sc->device->lun,
sc->cmnd[0], scsi_bufflen(sc));
@@ -350,7 +331,7 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc)
*/
tl_nexus = tl_hba->tl_nexus;
if (!tl_nexus) {
- printk(KERN_ERR "Unable to perform device reset without"
+ pr_err("Unable to perform device reset without"
" active I_T Nexus\n");
return FAILED;
}
@@ -363,13 +344,13 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc)
tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_KERNEL);
if (!tl_cmd) {
- printk(KERN_ERR "Unable to allocate memory for tl_cmd\n");
+ pr_err("Unable to allocate memory for tl_cmd\n");
return FAILED;
}
tl_tmr = kzalloc(sizeof(struct tcm_loop_tmr), GFP_KERNEL);
if (!tl_tmr) {
- printk(KERN_ERR "Unable to allocate memory for tl_tmr\n");
+ pr_err("Unable to allocate memory for tl_tmr\n");
goto release;
}
init_waitqueue_head(&tl_tmr->tl_tmr_wait);
@@ -384,14 +365,14 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc)
/*
* Allocate the LUN_RESET TMR
*/
- se_cmd->se_tmr_req = core_tmr_alloc_req(se_cmd, (void *)tl_tmr,
+ se_cmd->se_tmr_req = core_tmr_alloc_req(se_cmd, tl_tmr,
TMR_LUN_RESET);
- if (!se_cmd->se_tmr_req)
+ if (IS_ERR(se_cmd->se_tmr_req))
goto release;
/*
* Locate the underlying TCM struct se_lun from sc->device->lun
*/
- if (transport_get_lun_for_tmr(se_cmd, sc->device->lun) < 0)
+ if (transport_lookup_tmr_lun(se_cmd, sc->device->lun) < 0)
goto release;
/*
* Queue the TMR to TCM Core and sleep waiting for tcm_loop_queue_tm_rsp()
@@ -407,7 +388,7 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc)
SUCCESS : FAILED;
release:
if (se_cmd)
- transport_generic_free_cmd(se_cmd, 1, 1, 0);
+ transport_generic_free_cmd(se_cmd, 1, 0);
else
kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
kfree(tl_tmr);
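
core_tmr_alloc_req() now reports failure through ERR_PTR() rather than NULL, so callers test with IS_ERR() and can recover the encoded errno. A sketch of the pattern (ret is a hypothetical local; the function above simply branches to release):

	se_cmd->se_tmr_req = core_tmr_alloc_req(se_cmd, tl_tmr, TMR_LUN_RESET);
	if (IS_ERR(se_cmd->se_tmr_req)) {
		ret = PTR_ERR(se_cmd->se_tmr_req);	/* e.g. -ENOMEM */
		goto release;
	}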
@@ -454,7 +435,7 @@ static int tcm_loop_driver_probe(struct device *dev)
sh = scsi_host_alloc(&tcm_loop_driver_template,
sizeof(struct tcm_loop_hba));
if (!sh) {
- printk(KERN_ERR "Unable to allocate struct scsi_host\n");
+ pr_err("Unable to allocate struct scsi_host\n");
return -ENODEV;
}
tl_hba->sh = sh;
@@ -473,7 +454,7 @@ static int tcm_loop_driver_probe(struct device *dev)
error = scsi_add_host(sh, &tl_hba->dev);
if (error) {
- printk(KERN_ERR "%s: scsi_add_host failed\n", __func__);
+ pr_err("%s: scsi_add_host failed\n", __func__);
scsi_host_put(sh);
return -ENODEV;
}
@@ -514,7 +495,7 @@ static int tcm_loop_setup_hba_bus(struct tcm_loop_hba *tl_hba, int tcm_loop_host
ret = device_register(&tl_hba->dev);
if (ret) {
- printk(KERN_ERR "device_register() failed for"
+ pr_err("device_register() failed for"
" tl_hba->dev: %d\n", ret);
return -ENODEV;
}
@@ -532,24 +513,24 @@ static int tcm_loop_alloc_core_bus(void)
tcm_loop_primary = root_device_register("tcm_loop_0");
if (IS_ERR(tcm_loop_primary)) {
- printk(KERN_ERR "Unable to allocate tcm_loop_primary\n");
+ pr_err("Unable to allocate tcm_loop_primary\n");
return PTR_ERR(tcm_loop_primary);
}
ret = bus_register(&tcm_loop_lld_bus);
if (ret) {
- printk(KERN_ERR "bus_register() failed for tcm_loop_lld_bus\n");
+ pr_err("bus_register() failed for tcm_loop_lld_bus\n");
goto dev_unreg;
}
ret = driver_register(&tcm_loop_driverfs);
if (ret) {
- printk(KERN_ERR "driver_register() failed for"
+		pr_err("driver_register() failed for "
+			"tcm_loop_driverfs\n");
goto bus_unreg;
}
- printk(KERN_INFO "Initialized TCM Loop Core Bus\n");
+ pr_debug("Initialized TCM Loop Core Bus\n");
return ret;
bus_unreg:
@@ -565,7 +546,7 @@ static void tcm_loop_release_core_bus(void)
bus_unregister(&tcm_loop_lld_bus);
root_device_unregister(tcm_loop_primary);
- printk(KERN_INFO "Releasing TCM Loop Core BUS\n");
+ pr_debug("Releasing TCM Loop Core BUS\n");
}
static char *tcm_loop_get_fabric_name(void)
@@ -593,7 +574,7 @@ static u8 tcm_loop_get_fabric_proto_ident(struct se_portal_group *se_tpg)
case SCSI_PROTOCOL_ISCSI:
return iscsi_get_fabric_proto_ident(se_tpg);
default:
- printk(KERN_ERR "Unknown tl_proto_id: 0x%02x, using"
+ pr_err("Unknown tl_proto_id: 0x%02x, using"
" SAS emulation\n", tl_hba->tl_proto_id);
break;
}
@@ -649,7 +630,7 @@ static u32 tcm_loop_get_pr_transport_id(
return iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
format_code, buf);
default:
- printk(KERN_ERR "Unknown tl_proto_id: 0x%02x, using"
+ pr_err("Unknown tl_proto_id: 0x%02x, using"
" SAS emulation\n", tl_hba->tl_proto_id);
break;
}
@@ -679,7 +660,7 @@ static u32 tcm_loop_get_pr_transport_id_len(
return iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
format_code);
default:
- printk(KERN_ERR "Unknown tl_proto_id: 0x%02x, using"
+ pr_err("Unknown tl_proto_id: 0x%02x, using"
" SAS emulation\n", tl_hba->tl_proto_id);
break;
}
@@ -713,7 +694,7 @@ static char *tcm_loop_parse_pr_out_transport_id(
return iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
port_nexus_ptr);
default:
- printk(KERN_ERR "Unknown tl_proto_id: 0x%02x, using"
+ pr_err("Unknown tl_proto_id: 0x%02x, using"
" SAS emulation\n", tl_hba->tl_proto_id);
break;
}
@@ -762,7 +743,7 @@ static struct se_node_acl *tcm_loop_tpg_alloc_fabric_acl(
tl_nacl = kzalloc(sizeof(struct tcm_loop_nacl), GFP_KERNEL);
if (!tl_nacl) {
- printk(KERN_ERR "Unable to allocate struct tcm_loop_nacl\n");
+ pr_err("Unable to allocate struct tcm_loop_nacl\n");
return NULL;
}
@@ -784,16 +765,6 @@ static u32 tcm_loop_get_inst_index(struct se_portal_group *se_tpg)
return 1;
}
-static void tcm_loop_new_cmd_failure(struct se_cmd *se_cmd)
-{
- /*
- * Since TCM_loop is already passing struct scatterlist data from
- * struct scsi_cmnd, no more Linux/SCSI failure dependent state need
- * to be handled here.
- */
- return;
-}
-
static int tcm_loop_is_state_remove(struct se_cmd *se_cmd)
{
/*
@@ -882,7 +853,7 @@ static int tcm_loop_queue_data_in(struct se_cmd *se_cmd)
struct tcm_loop_cmd, tl_se_cmd);
struct scsi_cmnd *sc = tl_cmd->sc;
- TL_CDB_DEBUG("tcm_loop_queue_data_in() called for scsi_cmnd: %p"
+ pr_debug("tcm_loop_queue_data_in() called for scsi_cmnd: %p"
" cdb: 0x%02x\n", sc, sc->cmnd[0]);
sc->result = SAM_STAT_GOOD;
@@ -897,14 +868,14 @@ static int tcm_loop_queue_status(struct se_cmd *se_cmd)
struct tcm_loop_cmd, tl_se_cmd);
struct scsi_cmnd *sc = tl_cmd->sc;
- TL_CDB_DEBUG("tcm_loop_queue_status() called for scsi_cmnd: %p"
+ pr_debug("tcm_loop_queue_status() called for scsi_cmnd: %p"
" cdb: 0x%02x\n", sc, sc->cmnd[0]);
if (se_cmd->sense_buffer &&
((se_cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
(se_cmd->se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
- memcpy((void *)sc->sense_buffer, (void *)se_cmd->sense_buffer,
+ memcpy(sc->sense_buffer, se_cmd->sense_buffer,
SCSI_SENSE_BUFFERSIZE);
sc->result = SAM_STAT_CHECK_CONDITION;
set_driver_byte(sc, DRIVER_SENSE);
@@ -972,7 +943,7 @@ static int tcm_loop_port_link(
*/
scsi_add_device(tl_hba->sh, 0, tl_tpg->tl_tpgt, lun->unpacked_lun);
- printk(KERN_INFO "TCM_Loop_ConfigFS: Port Link Successful\n");
+ pr_debug("TCM_Loop_ConfigFS: Port Link Successful\n");
return 0;
}
@@ -990,7 +961,7 @@ static void tcm_loop_port_unlink(
sd = scsi_device_lookup(tl_hba->sh, 0, tl_tpg->tl_tpgt,
se_lun->unpacked_lun);
if (!sd) {
- printk(KERN_ERR "Unable to locate struct scsi_device for %d:%d:"
+ pr_err("Unable to locate struct scsi_device for %d:%d:"
"%d\n", 0, tl_tpg->tl_tpgt, se_lun->unpacked_lun);
return;
}
@@ -1003,7 +974,7 @@ static void tcm_loop_port_unlink(
atomic_dec(&tl_tpg->tl_tpg_port_count);
smp_mb__after_atomic_dec();
- printk(KERN_INFO "TCM_Loop_ConfigFS: Port Unlink Successful\n");
+ pr_debug("TCM_Loop_ConfigFS: Port Unlink Successful\n");
}
/* End items for tcm_loop_port_cit */
@@ -1017,24 +988,27 @@ static int tcm_loop_make_nexus(
struct se_portal_group *se_tpg;
struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
struct tcm_loop_nexus *tl_nexus;
+ int ret = -ENOMEM;
if (tl_tpg->tl_hba->tl_nexus) {
- printk(KERN_INFO "tl_tpg->tl_hba->tl_nexus already exists\n");
+ pr_debug("tl_tpg->tl_hba->tl_nexus already exists\n");
return -EEXIST;
}
se_tpg = &tl_tpg->tl_se_tpg;
tl_nexus = kzalloc(sizeof(struct tcm_loop_nexus), GFP_KERNEL);
if (!tl_nexus) {
- printk(KERN_ERR "Unable to allocate struct tcm_loop_nexus\n");
+ pr_err("Unable to allocate struct tcm_loop_nexus\n");
return -ENOMEM;
}
/*
* Initialize the struct se_session pointer
*/
tl_nexus->se_sess = transport_init_session();
- if (!tl_nexus->se_sess)
+ if (IS_ERR(tl_nexus->se_sess)) {
+ ret = PTR_ERR(tl_nexus->se_sess);
goto out;
+ }
/*
	 * Since we are running in 'demo mode' this call will generate a
* struct se_node_acl for the tcm_loop struct se_portal_group with the SCSI
@@ -1051,16 +1025,16 @@ static int tcm_loop_make_nexus(
* transport_register_session()
*/
__transport_register_session(se_tpg, tl_nexus->se_sess->se_node_acl,
- tl_nexus->se_sess, (void *)tl_nexus);
+ tl_nexus->se_sess, tl_nexus);
tl_tpg->tl_hba->tl_nexus = tl_nexus;
- printk(KERN_INFO "TCM_Loop_ConfigFS: Established I_T Nexus to emulated"
+ pr_debug("TCM_Loop_ConfigFS: Established I_T Nexus to emulated"
" %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba),
name);
return 0;
out:
kfree(tl_nexus);
- return -ENOMEM;
+ return ret;
}
static int tcm_loop_drop_nexus(
@@ -1079,13 +1053,13 @@ static int tcm_loop_drop_nexus(
return -ENODEV;
if (atomic_read(&tpg->tl_tpg_port_count)) {
- printk(KERN_ERR "Unable to remove TCM_Loop I_T Nexus with"
+ pr_err("Unable to remove TCM_Loop I_T Nexus with"
" active TPG port count: %d\n",
atomic_read(&tpg->tl_tpg_port_count));
return -EPERM;
}
- printk(KERN_INFO "TCM_Loop_ConfigFS: Removing I_T Nexus to emulated"
+ pr_debug("TCM_Loop_ConfigFS: Removing I_T Nexus to emulated"
" %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba),
tl_nexus->se_sess->se_node_acl->initiatorname);
/*
@@ -1140,8 +1114,8 @@ static ssize_t tcm_loop_tpg_store_nexus(
* the fabric protocol_id set in tcm_loop_make_scsi_hba(), and call
* tcm_loop_make_nexus()
*/
- if (strlen(page) > TL_WWN_ADDR_LEN) {
- printk(KERN_ERR "Emulated NAA Sas Address: %s, exceeds"
+ if (strlen(page) >= TL_WWN_ADDR_LEN) {
+		pr_err("Emulated NAA SAS Address: %s, exceeds"
" max: %d\n", page, TL_WWN_ADDR_LEN);
return -EINVAL;
}
@@ -1150,7 +1124,7 @@ static ssize_t tcm_loop_tpg_store_nexus(
ptr = strstr(i_port, "naa.");
if (ptr) {
if (tl_hba->tl_proto_id != SCSI_PROTOCOL_SAS) {
- printk(KERN_ERR "Passed SAS Initiator Port %s does not"
+ pr_err("Passed SAS Initiator Port %s does not"
" match target port protoid: %s\n", i_port,
tcm_loop_dump_proto_id(tl_hba));
return -EINVAL;
@@ -1161,7 +1135,7 @@ static ssize_t tcm_loop_tpg_store_nexus(
ptr = strstr(i_port, "fc.");
if (ptr) {
if (tl_hba->tl_proto_id != SCSI_PROTOCOL_FCP) {
- printk(KERN_ERR "Passed FCP Initiator Port %s does not"
+ pr_err("Passed FCP Initiator Port %s does not"
" match target port protoid: %s\n", i_port,
tcm_loop_dump_proto_id(tl_hba));
return -EINVAL;
@@ -1172,7 +1146,7 @@ static ssize_t tcm_loop_tpg_store_nexus(
ptr = strstr(i_port, "iqn.");
if (ptr) {
if (tl_hba->tl_proto_id != SCSI_PROTOCOL_ISCSI) {
- printk(KERN_ERR "Passed iSCSI Initiator Port %s does not"
+ pr_err("Passed iSCSI Initiator Port %s does not"
" match target port protoid: %s\n", i_port,
tcm_loop_dump_proto_id(tl_hba));
return -EINVAL;
@@ -1180,7 +1154,7 @@ static ssize_t tcm_loop_tpg_store_nexus(
port_ptr = &i_port[0];
goto check_newline;
}
- printk(KERN_ERR "Unable to locate prefix for emulated Initiator Port:"
+ pr_err("Unable to locate prefix for emulated Initiator Port:"
" %s\n", i_port);
return -EINVAL;
/*
@@ -1220,15 +1194,15 @@ struct se_portal_group *tcm_loop_make_naa_tpg(
tpgt_str = strstr(name, "tpgt_");
if (!tpgt_str) {
- printk(KERN_ERR "Unable to locate \"tpgt_#\" directory"
+ pr_err("Unable to locate \"tpgt_#\" directory"
" group\n");
return ERR_PTR(-EINVAL);
}
tpgt_str += 5; /* Skip ahead of "tpgt_" */
tpgt = (unsigned short int) simple_strtoul(tpgt_str, &end_ptr, 0);
- if (tpgt > TL_TPGS_PER_HBA) {
- printk(KERN_ERR "Passed tpgt: %hu exceeds TL_TPGS_PER_HBA:"
+ if (tpgt >= TL_TPGS_PER_HBA) {
+ pr_err("Passed tpgt: %hu exceeds TL_TPGS_PER_HBA:"
" %u\n", tpgt, TL_TPGS_PER_HBA);
return ERR_PTR(-EINVAL);
}
@@ -1239,12 +1213,12 @@ struct se_portal_group *tcm_loop_make_naa_tpg(
	 * Register the tl_tpg as an emulated SAS TCM Target Endpoint
*/
ret = core_tpg_register(&tcm_loop_fabric_configfs->tf_ops,
- wwn, &tl_tpg->tl_se_tpg, (void *)tl_tpg,
+ wwn, &tl_tpg->tl_se_tpg, tl_tpg,
TRANSPORT_TPG_TYPE_NORMAL);
if (ret < 0)
return ERR_PTR(-ENOMEM);
- printk(KERN_INFO "TCM_Loop_ConfigFS: Allocated Emulated %s"
+ pr_debug("TCM_Loop_ConfigFS: Allocated Emulated %s"
" Target Port %s,t,0x%04x\n", tcm_loop_dump_proto_id(tl_hba),
config_item_name(&wwn->wwn_group.cg_item), tpgt);
@@ -1271,7 +1245,7 @@ void tcm_loop_drop_naa_tpg(
*/
core_tpg_deregister(se_tpg);
- printk(KERN_INFO "TCM_Loop_ConfigFS: Deallocated Emulated %s"
+ pr_debug("TCM_Loop_ConfigFS: Deallocated Emulated %s"
" Target Port %s,t,0x%04x\n", tcm_loop_dump_proto_id(tl_hba),
config_item_name(&wwn->wwn_group.cg_item), tpgt);
}
@@ -1292,7 +1266,7 @@ struct se_wwn *tcm_loop_make_scsi_hba(
tl_hba = kzalloc(sizeof(struct tcm_loop_hba), GFP_KERNEL);
if (!tl_hba) {
- printk(KERN_ERR "Unable to allocate struct tcm_loop_hba\n");
+ pr_err("Unable to allocate struct tcm_loop_hba\n");
return ERR_PTR(-ENOMEM);
}
/*
@@ -1311,22 +1285,21 @@ struct se_wwn *tcm_loop_make_scsi_hba(
goto check_len;
}
ptr = strstr(name, "iqn.");
- if (ptr) {
- tl_hba->tl_proto_id = SCSI_PROTOCOL_ISCSI;
- goto check_len;
+ if (!ptr) {
+ pr_err("Unable to locate prefix for emulated Target "
+ "Port: %s\n", name);
+ ret = -EINVAL;
+ goto out;
}
-
- printk(KERN_ERR "Unable to locate prefix for emulated Target Port:"
- " %s\n", name);
- return ERR_PTR(-EINVAL);
+ tl_hba->tl_proto_id = SCSI_PROTOCOL_ISCSI;
check_len:
- if (strlen(name) > TL_WWN_ADDR_LEN) {
- printk(KERN_ERR "Emulated NAA %s Address: %s, exceeds"
+ if (strlen(name) >= TL_WWN_ADDR_LEN) {
+ pr_err("Emulated NAA %s Address: %s, exceeds"
" max: %d\n", name, tcm_loop_dump_proto_id(tl_hba),
TL_WWN_ADDR_LEN);
- kfree(tl_hba);
- return ERR_PTR(-EINVAL);
+ ret = -EINVAL;
+ goto out;
}
snprintf(&tl_hba->tl_wwn_address[0], TL_WWN_ADDR_LEN, "%s", &name[off]);
@@ -1341,7 +1314,7 @@ check_len:
sh = tl_hba->sh;
tcm_loop_hba_no_cnt++;
- printk(KERN_INFO "TCM_Loop_ConfigFS: Allocated emulated Target"
+ pr_debug("TCM_Loop_ConfigFS: Allocated emulated Target"
" %s Address: %s at Linux/SCSI Host ID: %d\n",
tcm_loop_dump_proto_id(tl_hba), name, sh->host_no);
@@ -1364,7 +1337,7 @@ void tcm_loop_drop_scsi_hba(
*/
device_unregister(&tl_hba->dev);
- printk(KERN_INFO "TCM_Loop_ConfigFS: Deallocated emulated Target"
+ pr_debug("TCM_Loop_ConfigFS: Deallocated emulated Target"
" SAS Address: %s at Linux/SCSI Host ID: %d\n",
config_item_name(&wwn->wwn_group.cg_item), host_no);
}
@@ -1399,9 +1372,9 @@ static int tcm_loop_register_configfs(void)
* Register the top level struct config_item_type with TCM core
*/
fabric = target_fabric_configfs_init(THIS_MODULE, "loopback");
- if (!fabric) {
- printk(KERN_ERR "tcm_loop_register_configfs() failed!\n");
- return -1;
+ if (IS_ERR(fabric)) {
+ pr_err("tcm_loop_register_configfs() failed!\n");
+ return PTR_ERR(fabric);
}
/*
* Setup the fabric API of function pointers used by target_core_mod
@@ -1433,19 +1406,11 @@ static int tcm_loop_register_configfs(void)
&tcm_loop_tpg_release_fabric_acl;
fabric->tf_ops.tpg_get_inst_index = &tcm_loop_get_inst_index;
/*
- * Since tcm_loop is mapping physical memory from Linux/SCSI
- * struct scatterlist arrays for each struct scsi_cmnd I/O,
- * we do not need TCM to allocate a iovec array for
- * virtual memory address mappings
- */
- fabric->tf_ops.alloc_cmd_iovecs = NULL;
- /*
* Used for setting up remaining TCM resources in process context
*/
fabric->tf_ops.new_cmd_map = &tcm_loop_new_cmd_map;
fabric->tf_ops.check_stop_free = &tcm_loop_check_stop_free;
- fabric->tf_ops.release_cmd_to_pool = &tcm_loop_deallocate_core_cmd;
- fabric->tf_ops.release_cmd_direct = &tcm_loop_deallocate_core_cmd;
+ fabric->tf_ops.release_cmd = &tcm_loop_release_cmd;
fabric->tf_ops.shutdown_session = &tcm_loop_shutdown_session;
fabric->tf_ops.close_session = &tcm_loop_close_session;
fabric->tf_ops.stop_session = &tcm_loop_stop_session;
@@ -1462,7 +1427,6 @@ static int tcm_loop_register_configfs(void)
&tcm_loop_set_default_node_attributes;
fabric->tf_ops.get_task_tag = &tcm_loop_get_task_tag;
fabric->tf_ops.get_cmd_state = &tcm_loop_get_cmd_state;
- fabric->tf_ops.new_cmd_failure = &tcm_loop_new_cmd_failure;
fabric->tf_ops.queue_data_in = &tcm_loop_queue_data_in;
fabric->tf_ops.queue_status = &tcm_loop_queue_status;
fabric->tf_ops.queue_tm_rsp = &tcm_loop_queue_tm_rsp;
@@ -1500,7 +1464,7 @@ static int tcm_loop_register_configfs(void)
*/
ret = target_fabric_configfs_register(fabric);
if (ret < 0) {
- printk(KERN_ERR "target_fabric_configfs_register() for"
+ pr_err("target_fabric_configfs_register() for"
" TCM_Loop failed!\n");
target_fabric_configfs_free(fabric);
return -1;
@@ -1509,7 +1473,7 @@ static int tcm_loop_register_configfs(void)
* Setup our local pointer to *fabric.
*/
tcm_loop_fabric_configfs = fabric;
- printk(KERN_INFO "TCM_LOOP[0] - Set fabric ->"
+ pr_debug("TCM_LOOP[0] - Set fabric ->"
" tcm_loop_fabric_configfs\n");
return 0;
}
@@ -1521,7 +1485,7 @@ static void tcm_loop_deregister_configfs(void)
target_fabric_configfs_deregister(tcm_loop_fabric_configfs);
tcm_loop_fabric_configfs = NULL;
- printk(KERN_INFO "TCM_LOOP[0] - Cleared"
+ pr_debug("TCM_LOOP[0] - Cleared"
" tcm_loop_fabric_configfs\n");
}
@@ -1534,7 +1498,7 @@ static int __init tcm_loop_fabric_init(void)
__alignof__(struct tcm_loop_cmd),
0, NULL);
if (!tcm_loop_cmd_cache) {
- printk(KERN_ERR "kmem_cache_create() for"
+ pr_debug("kmem_cache_create() for"
" tcm_loop_cmd_cache failed\n");
return -ENOMEM;
}
diff --git a/drivers/target/loopback/tcm_loop.h b/drivers/target/loopback/tcm_loop.h
index 7e9f7ab4554..6b76c7a22bb 100644
--- a/drivers/target/loopback/tcm_loop.h
+++ b/drivers/target/loopback/tcm_loop.h
@@ -16,12 +16,6 @@
*/
#define TL_SCSI_MAX_CMD_LEN 32
-#ifdef CONFIG_LOOPBACK_TARGET_CDB_DEBUG
-# define TL_CDB_DEBUG(x...) printk(KERN_INFO x)
-#else
-# define TL_CDB_DEBUG(x...)
-#endif
-
struct tcm_loop_cmd {
/* State of Linux/SCSI CDB+Data descriptor */
u32 sc_cmd_state;
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index 47abb42d9c3..98c98a3a025 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -46,6 +46,14 @@ static int core_alua_set_tg_pt_secondary_state(
struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
struct se_port *port, int explict, int offline);
+static u16 alua_lu_gps_counter;
+static u32 alua_lu_gps_count;
+
+static DEFINE_SPINLOCK(lu_gps_lock);
+static LIST_HEAD(lu_gps_list);
+
+struct t10_alua_lu_gp *default_lu_gp;
+
/*
* REPORT_TARGET_PORT_GROUPS
*
@@ -53,16 +61,18 @@ static int core_alua_set_tg_pt_secondary_state(
*/
int core_emulate_report_target_port_groups(struct se_cmd *cmd)
{
- struct se_subsystem_dev *su_dev = SE_DEV(cmd)->se_sub_dev;
+ struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev;
struct se_port *port;
struct t10_alua_tg_pt_gp *tg_pt_gp;
struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
- unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf;
+ unsigned char *buf;
u32 rd_len = 0, off = 4; /* Skip over RESERVED area to first
Target port group descriptor */
- spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
- list_for_each_entry(tg_pt_gp, &T10_ALUA(su_dev)->tg_pt_gps_list,
+ buf = transport_kmap_first_data_page(cmd);
+
+ spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
+ list_for_each_entry(tg_pt_gp, &su_dev->t10_alua.tg_pt_gps_list,
tg_pt_gp_list) {
/*
* PREF: Preferred target port bit, determine if this
@@ -124,7 +134,7 @@ int core_emulate_report_target_port_groups(struct se_cmd *cmd)
}
spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
}
- spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+ spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
/*
* Set the RETURN DATA LENGTH set in the header of the DataIN Payload
*/
@@ -133,6 +143,8 @@ int core_emulate_report_target_port_groups(struct se_cmd *cmd)
buf[2] = ((rd_len >> 8) & 0xff);
buf[3] = (rd_len & 0xff);
+ transport_kunmap_first_data_page(cmd);
+
return 0;
}
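
With control CDB payloads now carried in scatterlists instead of a contiguous t_task_buf, emulation handlers map the first data page, fill it, and unmap it again. The four byte-wise stores of rd_len above could equivalently be written with put_unaligned_be32() from asm/unaligned.h (a sketch, not what the patch does):

	buf = transport_kmap_first_data_page(cmd);
	/* ... build the descriptor list ... */
	put_unaligned_be32(rd_len, &buf[0]);	/* bytes 0-3, big-endian */
	transport_kunmap_first_data_page(cmd);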
@@ -143,45 +155,53 @@ int core_emulate_report_target_port_groups(struct se_cmd *cmd)
*/
int core_emulate_set_target_port_groups(struct se_cmd *cmd)
{
- struct se_device *dev = SE_DEV(cmd);
- struct se_subsystem_dev *su_dev = SE_DEV(cmd)->se_sub_dev;
- struct se_port *port, *l_port = SE_LUN(cmd)->lun_sep;
- struct se_node_acl *nacl = SE_SESS(cmd)->se_node_acl;
+ struct se_device *dev = cmd->se_dev;
+ struct se_subsystem_dev *su_dev = dev->se_sub_dev;
+ struct se_port *port, *l_port = cmd->se_lun->lun_sep;
+ struct se_node_acl *nacl = cmd->se_sess->se_node_acl;
struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *l_tg_pt_gp;
struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, *l_tg_pt_gp_mem;
- unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf;
- unsigned char *ptr = &buf[4]; /* Skip over RESERVED area in header */
+ unsigned char *buf;
+ unsigned char *ptr;
u32 len = 4; /* Skip over RESERVED area in header */
int alua_access_state, primary = 0, rc;
u16 tg_pt_id, rtpi;
- if (!(l_port))
+ if (!l_port)
return PYX_TRANSPORT_LU_COMM_FAILURE;
+
+ buf = transport_kmap_first_data_page(cmd);
+
/*
* Determine if explict ALUA via SET_TARGET_PORT_GROUPS is allowed
* for the local tg_pt_gp.
*/
l_tg_pt_gp_mem = l_port->sep_alua_tg_pt_gp_mem;
- if (!(l_tg_pt_gp_mem)) {
- printk(KERN_ERR "Unable to access l_port->sep_alua_tg_pt_gp_mem\n");
- return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+ if (!l_tg_pt_gp_mem) {
+ pr_err("Unable to access l_port->sep_alua_tg_pt_gp_mem\n");
+ rc = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+ goto out;
}
spin_lock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
l_tg_pt_gp = l_tg_pt_gp_mem->tg_pt_gp;
- if (!(l_tg_pt_gp)) {
+ if (!l_tg_pt_gp) {
spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
- printk(KERN_ERR "Unable to access *l_tg_pt_gp_mem->tg_pt_gp\n");
- return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+ pr_err("Unable to access *l_tg_pt_gp_mem->tg_pt_gp\n");
+ rc = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+ goto out;
}
rc = (l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA);
spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
- if (!(rc)) {
- printk(KERN_INFO "Unable to process SET_TARGET_PORT_GROUPS"
+ if (!rc) {
+ pr_debug("Unable to process SET_TARGET_PORT_GROUPS"
" while TPGS_EXPLICT_ALUA is disabled\n");
- return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+ rc = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+ goto out;
}
+ ptr = &buf[4]; /* Skip over RESERVED area in header */
+
while (len < cmd->data_length) {
alua_access_state = (ptr[0] & 0x0f);
/*
@@ -201,7 +221,8 @@ int core_emulate_set_target_port_groups(struct se_cmd *cmd)
* REQUEST, and the additional sense code set to INVALID
* FIELD IN PARAMETER LIST.
*/
- return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ rc = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ goto out;
}
rc = -1;
/*
@@ -224,11 +245,11 @@ int core_emulate_set_target_port_groups(struct se_cmd *cmd)
* Locate the matching target port group ID from
* the global tg_pt_gp list
*/
- spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+ spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
list_for_each_entry(tg_pt_gp,
- &T10_ALUA(su_dev)->tg_pt_gps_list,
+ &su_dev->t10_alua.tg_pt_gps_list,
tg_pt_gp_list) {
- if (!(tg_pt_gp->tg_pt_gp_valid_id))
+ if (!tg_pt_gp->tg_pt_gp_valid_id)
continue;
if (tg_pt_id != tg_pt_gp->tg_pt_gp_id)
@@ -236,24 +257,26 @@ int core_emulate_set_target_port_groups(struct se_cmd *cmd)
atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
smp_mb__after_atomic_inc();
- spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+ spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
rc = core_alua_do_port_transition(tg_pt_gp,
dev, l_port, nacl,
alua_access_state, 1);
- spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+ spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
smp_mb__after_atomic_dec();
break;
}
- spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+ spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
/*
	 * If no matching target port group ID can be located,
	 * throw an exception with ASCQ: INVALID_PARAMETER_LIST
*/
- if (rc != 0)
- return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ if (rc != 0) {
+ rc = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ goto out;
+ }
} else {
/*
	 * Extract the RELATIVE TARGET PORT IDENTIFIER to identify
@@ -287,14 +310,19 @@ int core_emulate_set_target_port_groups(struct se_cmd *cmd)
* be located, throw an exception with ASCQ:
* INVALID_PARAMETER_LIST
*/
- if (rc != 0)
- return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ if (rc != 0) {
+ rc = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ goto out;
+ }
}
ptr += 4;
len += 4;
}
+out:
+ transport_kunmap_first_data_page(cmd);
+
return 0;
}
@@ -464,13 +492,13 @@ static int core_alua_state_check(
unsigned char *cdb,
u8 *alua_ascq)
{
- struct se_lun *lun = SE_LUN(cmd);
+ struct se_lun *lun = cmd->se_lun;
struct se_port *port = lun->lun_sep;
struct t10_alua_tg_pt_gp *tg_pt_gp;
struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
int out_alua_state, nonop_delay_msecs;
- if (!(port))
+ if (!port)
return 0;
/*
* First, check for a struct se_port specific secondary ALUA target port
@@ -478,7 +506,7 @@ static int core_alua_state_check(
*/
if (atomic_read(&port->sep_tg_pt_secondary_offline)) {
*alua_ascq = ASCQ_04H_ALUA_OFFLINE;
- printk(KERN_INFO "ALUA: Got secondary offline status for local"
+ pr_debug("ALUA: Got secondary offline status for local"
" target port\n");
*alua_ascq = ASCQ_04H_ALUA_OFFLINE;
return 1;
@@ -520,9 +548,9 @@ static int core_alua_state_check(
*/
case ALUA_ACCESS_STATE_OFFLINE:
default:
- printk(KERN_ERR "Unknown ALUA access state: 0x%02x\n",
+ pr_err("Unknown ALUA access state: 0x%02x\n",
out_alua_state);
- return -1;
+ return -EINVAL;
}
return 0;
@@ -552,8 +580,8 @@ static int core_alua_check_transition(int state, int *primary)
*primary = 0;
break;
default:
- printk(KERN_ERR "Unknown ALUA access state: 0x%02x\n", state);
- return -1;
+ pr_err("Unknown ALUA access state: 0x%02x\n", state);
+ return -EINVAL;
}
return 0;
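
Replacing bare -1 returns with negative errno values (-EINVAL, -ENOSPC, -ENOMEM) throughout this file lets failures propagate intact instead of collapsing into a generic error. A usage sketch:

	ret = core_alua_check_transition(state, &primary);
	if (ret < 0)
		return ret;	/* caller sees -EINVAL, not just "failed" */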
@@ -610,7 +638,7 @@ int core_alua_check_nonop_delay(
* The ALUA Active/NonOptimized access state delay can be disabled
* in via configfs with a value of zero
*/
- if (!(cmd->alua_nonop_delay))
+ if (!cmd->alua_nonop_delay)
return 0;
/*
* struct se_cmd->alua_nonop_delay gets set by a target port group
@@ -639,7 +667,7 @@ static int core_alua_write_tpg_metadata(
file = filp_open(path, flags, 0600);
if (IS_ERR(file) || !file || !file->f_dentry) {
- printk(KERN_ERR "filp_open(%s) for ALUA metadata failed\n",
+ pr_err("filp_open(%s) for ALUA metadata failed\n",
path);
return -ENODEV;
}
@@ -653,7 +681,7 @@ static int core_alua_write_tpg_metadata(
set_fs(old_fs);
if (ret < 0) {
- printk(KERN_ERR "Error writing ALUA metadata file: %s\n", path);
+ pr_err("Error writing ALUA metadata file: %s\n", path);
filp_close(file, NULL);
return -EIO;
}
@@ -750,7 +778,7 @@ static int core_alua_do_transition_tg_pt(
* se_deve->se_lun_acl pointer may be NULL for a
* entry created without explict Node+MappedLUN ACLs
*/
- if (!(lacl))
+ if (!lacl)
continue;
if (explict &&
@@ -792,7 +820,7 @@ static int core_alua_do_transition_tg_pt(
*/
atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, new_state);
- printk(KERN_INFO "Successful %s ALUA transition TG PT Group: %s ID: %hu"
+ pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
" from primary access state %s to %s\n", (explict) ? "explict" :
"implict", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
tg_pt_gp->tg_pt_gp_id, core_alua_dump_state(old_state),
@@ -823,8 +851,8 @@ int core_alua_do_port_transition(
return -EINVAL;
md_buf = kzalloc(l_tg_pt_gp->tg_pt_gp_md_buf_len, GFP_KERNEL);
- if (!(md_buf)) {
- printk("Unable to allocate buf for ALUA metadata\n");
+ if (!md_buf) {
+ pr_err("Unable to allocate buf for ALUA metadata\n");
return -ENOMEM;
}
@@ -839,7 +867,7 @@ int core_alua_do_port_transition(
	 * we only do transition on the passed *l_tg_pt_gp, and not
	 * on all of the matching target port group IDs in default_lu_gp.
*/
- if (!(lu_gp->lu_gp_id)) {
+ if (!lu_gp->lu_gp_id) {
/*
* core_alua_do_transition_tg_pt() will always return
* success.
@@ -866,12 +894,12 @@ int core_alua_do_port_transition(
smp_mb__after_atomic_inc();
spin_unlock(&lu_gp->lu_gp_lock);
- spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+ spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
list_for_each_entry(tg_pt_gp,
- &T10_ALUA(su_dev)->tg_pt_gps_list,
+ &su_dev->t10_alua.tg_pt_gps_list,
tg_pt_gp_list) {
- if (!(tg_pt_gp->tg_pt_gp_valid_id))
+ if (!tg_pt_gp->tg_pt_gp_valid_id)
continue;
/*
* If the target behavior port asymmetric access state
@@ -893,7 +921,7 @@ int core_alua_do_port_transition(
}
atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
smp_mb__after_atomic_inc();
- spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+ spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
/*
* core_alua_do_transition_tg_pt() will always return
* success.
@@ -901,11 +929,11 @@ int core_alua_do_port_transition(
core_alua_do_transition_tg_pt(tg_pt_gp, port,
nacl, md_buf, new_state, explict);
- spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+ spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
smp_mb__after_atomic_dec();
}
- spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+ spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
spin_lock(&lu_gp->lu_gp_lock);
atomic_dec(&lu_gp_mem->lu_gp_mem_ref_cnt);
@@ -913,7 +941,7 @@ int core_alua_do_port_transition(
}
spin_unlock(&lu_gp->lu_gp_lock);
- printk(KERN_INFO "Successfully processed LU Group: %s all ALUA TG PT"
+ pr_debug("Successfully processed LU Group: %s all ALUA TG PT"
" Group IDs: %hu %s transition to primary state: %s\n",
config_item_name(&lu_gp->lu_gp_group.cg_item),
l_tg_pt_gp->tg_pt_gp_id, (explict) ? "explict" : "implict",
@@ -942,11 +970,11 @@ static int core_alua_update_tpg_secondary_metadata(
memset(wwn, 0, ALUA_SECONDARY_METADATA_WWN_LEN);
len = snprintf(wwn, ALUA_SECONDARY_METADATA_WWN_LEN, "%s",
- TPG_TFO(se_tpg)->tpg_get_wwn(se_tpg));
+ se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg));
- if (TPG_TFO(se_tpg)->tpg_get_tag != NULL)
+ if (se_tpg->se_tpg_tfo->tpg_get_tag != NULL)
snprintf(wwn+len, ALUA_SECONDARY_METADATA_WWN_LEN-len, "+%hu",
- TPG_TFO(se_tpg)->tpg_get_tag(se_tpg));
+ se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));
len = snprintf(md_buf, md_buf_len, "alua_tg_pt_offline=%d\n"
"alua_tg_pt_status=0x%02x\n",
@@ -954,7 +982,7 @@ static int core_alua_update_tpg_secondary_metadata(
port->sep_tg_pt_secondary_stat);
snprintf(path, ALUA_METADATA_PATH_LEN, "/var/target/alua/%s/%s/lun_%u",
- TPG_TFO(se_tpg)->get_fabric_name(), wwn,
+ se_tpg->se_tpg_tfo->get_fabric_name(), wwn,
port->sep_lun->unpacked_lun);
return core_alua_write_tpg_metadata(path, md_buf, len);
@@ -973,11 +1001,11 @@ static int core_alua_set_tg_pt_secondary_state(
spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
- if (!(tg_pt_gp)) {
+ if (!tg_pt_gp) {
spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
- printk(KERN_ERR "Unable to complete secondary state"
+ pr_err("Unable to complete secondary state"
" transition\n");
- return -1;
+ return -EINVAL;
}
trans_delay_msecs = tg_pt_gp->tg_pt_gp_trans_delay_msecs;
/*
@@ -994,7 +1022,7 @@ static int core_alua_set_tg_pt_secondary_state(
ALUA_STATUS_ALTERED_BY_EXPLICT_STPG :
ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA;
- printk(KERN_INFO "Successful %s ALUA transition TG PT Group: %s ID: %hu"
+ pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
" to secondary access state: %s\n", (explict) ? "explict" :
"implict", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
tg_pt_gp->tg_pt_gp_id, (offline) ? "OFFLINE" : "ONLINE");
@@ -1012,10 +1040,10 @@ static int core_alua_set_tg_pt_secondary_state(
*/
if (port->sep_tg_pt_secondary_write_md) {
md_buf = kzalloc(md_buf_len, GFP_KERNEL);
- if (!(md_buf)) {
- printk(KERN_ERR "Unable to allocate md_buf for"
+ if (!md_buf) {
+ pr_err("Unable to allocate md_buf for"
" secondary ALUA access metadata\n");
- return -1;
+ return -ENOMEM;
}
mutex_lock(&port->sep_tg_pt_md_mutex);
core_alua_update_tpg_secondary_metadata(tg_pt_gp_mem, port,
@@ -1034,19 +1062,19 @@ core_alua_allocate_lu_gp(const char *name, int def_group)
struct t10_alua_lu_gp *lu_gp;
lu_gp = kmem_cache_zalloc(t10_alua_lu_gp_cache, GFP_KERNEL);
- if (!(lu_gp)) {
- printk(KERN_ERR "Unable to allocate struct t10_alua_lu_gp\n");
+ if (!lu_gp) {
+ pr_err("Unable to allocate struct t10_alua_lu_gp\n");
return ERR_PTR(-ENOMEM);
}
- INIT_LIST_HEAD(&lu_gp->lu_gp_list);
+ INIT_LIST_HEAD(&lu_gp->lu_gp_node);
INIT_LIST_HEAD(&lu_gp->lu_gp_mem_list);
spin_lock_init(&lu_gp->lu_gp_lock);
atomic_set(&lu_gp->lu_gp_ref_cnt, 0);
if (def_group) {
- lu_gp->lu_gp_id = se_global->alua_lu_gps_counter++;
+ lu_gp->lu_gp_id = alua_lu_gps_counter++;
lu_gp->lu_gp_valid_id = 1;
- se_global->alua_lu_gps_count++;
+ alua_lu_gps_count++;
}
return lu_gp;
@@ -1060,41 +1088,41 @@ int core_alua_set_lu_gp_id(struct t10_alua_lu_gp *lu_gp, u16 lu_gp_id)
* The lu_gp->lu_gp_id may only be set once..
*/
if (lu_gp->lu_gp_valid_id) {
- printk(KERN_WARNING "ALUA LU Group already has a valid ID,"
+ pr_warn("ALUA LU Group already has a valid ID,"
" ignoring request\n");
- return -1;
+ return -EINVAL;
}
- spin_lock(&se_global->lu_gps_lock);
- if (se_global->alua_lu_gps_count == 0x0000ffff) {
- printk(KERN_ERR "Maximum ALUA se_global->alua_lu_gps_count:"
+ spin_lock(&lu_gps_lock);
+ if (alua_lu_gps_count == 0x0000ffff) {
+ pr_err("Maximum ALUA alua_lu_gps_count:"
" 0x0000ffff reached\n");
- spin_unlock(&se_global->lu_gps_lock);
+ spin_unlock(&lu_gps_lock);
kmem_cache_free(t10_alua_lu_gp_cache, lu_gp);
- return -1;
+ return -ENOSPC;
}
again:
lu_gp_id_tmp = (lu_gp_id != 0) ? lu_gp_id :
- se_global->alua_lu_gps_counter++;
+ alua_lu_gps_counter++;
- list_for_each_entry(lu_gp_tmp, &se_global->g_lu_gps_list, lu_gp_list) {
+ list_for_each_entry(lu_gp_tmp, &lu_gps_list, lu_gp_node) {
if (lu_gp_tmp->lu_gp_id == lu_gp_id_tmp) {
- if (!(lu_gp_id))
+ if (!lu_gp_id)
goto again;
- printk(KERN_WARNING "ALUA Logical Unit Group ID: %hu"
+ pr_warn("ALUA Logical Unit Group ID: %hu"
" already exists, ignoring request\n",
lu_gp_id);
- spin_unlock(&se_global->lu_gps_lock);
- return -1;
+ spin_unlock(&lu_gps_lock);
+ return -EINVAL;
}
}
lu_gp->lu_gp_id = lu_gp_id_tmp;
lu_gp->lu_gp_valid_id = 1;
- list_add_tail(&lu_gp->lu_gp_list, &se_global->g_lu_gps_list);
- se_global->alua_lu_gps_count++;
- spin_unlock(&se_global->lu_gps_lock);
+ list_add_tail(&lu_gp->lu_gp_node, &lu_gps_list);
+ alua_lu_gps_count++;
+ spin_unlock(&lu_gps_lock);
return 0;
}
@@ -1105,8 +1133,8 @@ core_alua_allocate_lu_gp_mem(struct se_device *dev)
struct t10_alua_lu_gp_member *lu_gp_mem;
lu_gp_mem = kmem_cache_zalloc(t10_alua_lu_gp_mem_cache, GFP_KERNEL);
- if (!(lu_gp_mem)) {
- printk(KERN_ERR "Unable to allocate struct t10_alua_lu_gp_member\n");
+ if (!lu_gp_mem) {
+ pr_err("Unable to allocate struct t10_alua_lu_gp_member\n");
return ERR_PTR(-ENOMEM);
}
INIT_LIST_HEAD(&lu_gp_mem->lu_gp_mem_list);
@@ -1130,11 +1158,11 @@ void core_alua_free_lu_gp(struct t10_alua_lu_gp *lu_gp)
* no associations can be made while we are releasing
* struct t10_alua_lu_gp.
*/
- spin_lock(&se_global->lu_gps_lock);
+ spin_lock(&lu_gps_lock);
atomic_set(&lu_gp->lu_gp_shutdown, 1);
- list_del(&lu_gp->lu_gp_list);
- se_global->alua_lu_gps_count--;
- spin_unlock(&se_global->lu_gps_lock);
+ list_del(&lu_gp->lu_gp_node);
+ alua_lu_gps_count--;
+ spin_unlock(&lu_gps_lock);
/*
* Allow struct t10_alua_lu_gp * referenced by core_alua_get_lu_gp_by_name()
* in target_core_configfs.c:target_core_store_alua_lu_gp() to be
@@ -1165,9 +1193,9 @@ void core_alua_free_lu_gp(struct t10_alua_lu_gp *lu_gp)
	 * we want to re-associate a given lu_gp_mem with default_lu_gp.
*/
spin_lock(&lu_gp_mem->lu_gp_mem_lock);
- if (lu_gp != se_global->default_lu_gp)
+ if (lu_gp != default_lu_gp)
__core_alua_attach_lu_gp_mem(lu_gp_mem,
- se_global->default_lu_gp);
+ default_lu_gp);
else
lu_gp_mem->lu_gp = NULL;
spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
@@ -1182,7 +1210,7 @@ void core_alua_free_lu_gp(struct t10_alua_lu_gp *lu_gp)
void core_alua_free_lu_gp_mem(struct se_device *dev)
{
struct se_subsystem_dev *su_dev = dev->se_sub_dev;
- struct t10_alua *alua = T10_ALUA(su_dev);
+ struct t10_alua *alua = &su_dev->t10_alua;
struct t10_alua_lu_gp *lu_gp;
struct t10_alua_lu_gp_member *lu_gp_mem;
@@ -1190,7 +1218,7 @@ void core_alua_free_lu_gp_mem(struct se_device *dev)
return;
lu_gp_mem = dev->dev_alua_lu_gp_mem;
- if (!(lu_gp_mem))
+ if (!lu_gp_mem)
return;
while (atomic_read(&lu_gp_mem->lu_gp_mem_ref_cnt))
@@ -1198,7 +1226,7 @@ void core_alua_free_lu_gp_mem(struct se_device *dev)
spin_lock(&lu_gp_mem->lu_gp_mem_lock);
lu_gp = lu_gp_mem->lu_gp;
- if ((lu_gp)) {
+ if (lu_gp) {
spin_lock(&lu_gp->lu_gp_lock);
if (lu_gp_mem->lu_gp_assoc) {
list_del(&lu_gp_mem->lu_gp_mem_list);
@@ -1218,27 +1246,27 @@ struct t10_alua_lu_gp *core_alua_get_lu_gp_by_name(const char *name)
struct t10_alua_lu_gp *lu_gp;
struct config_item *ci;
- spin_lock(&se_global->lu_gps_lock);
- list_for_each_entry(lu_gp, &se_global->g_lu_gps_list, lu_gp_list) {
- if (!(lu_gp->lu_gp_valid_id))
+ spin_lock(&lu_gps_lock);
+ list_for_each_entry(lu_gp, &lu_gps_list, lu_gp_node) {
+ if (!lu_gp->lu_gp_valid_id)
continue;
ci = &lu_gp->lu_gp_group.cg_item;
- if (!(strcmp(config_item_name(ci), name))) {
+ if (!strcmp(config_item_name(ci), name)) {
atomic_inc(&lu_gp->lu_gp_ref_cnt);
- spin_unlock(&se_global->lu_gps_lock);
+ spin_unlock(&lu_gps_lock);
return lu_gp;
}
}
- spin_unlock(&se_global->lu_gps_lock);
+ spin_unlock(&lu_gps_lock);
return NULL;
}
void core_alua_put_lu_gp_from_name(struct t10_alua_lu_gp *lu_gp)
{
- spin_lock(&se_global->lu_gps_lock);
+ spin_lock(&lu_gps_lock);
atomic_dec(&lu_gp->lu_gp_ref_cnt);
- spin_unlock(&se_global->lu_gps_lock);
+ spin_unlock(&lu_gps_lock);
}
/*
@@ -1279,8 +1307,8 @@ struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(
struct t10_alua_tg_pt_gp *tg_pt_gp;
tg_pt_gp = kmem_cache_zalloc(t10_alua_tg_pt_gp_cache, GFP_KERNEL);
- if (!(tg_pt_gp)) {
- printk(KERN_ERR "Unable to allocate struct t10_alua_tg_pt_gp\n");
+ if (!tg_pt_gp) {
+ pr_err("Unable to allocate struct t10_alua_tg_pt_gp\n");
return NULL;
}
INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_list);
@@ -1304,14 +1332,14 @@ struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(
tg_pt_gp->tg_pt_gp_trans_delay_msecs = ALUA_DEFAULT_TRANS_DELAY_MSECS;
if (def_group) {
- spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+ spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
tg_pt_gp->tg_pt_gp_id =
- T10_ALUA(su_dev)->alua_tg_pt_gps_counter++;
+ su_dev->t10_alua.alua_tg_pt_gps_counter++;
tg_pt_gp->tg_pt_gp_valid_id = 1;
- T10_ALUA(su_dev)->alua_tg_pt_gps_count++;
+ su_dev->t10_alua.alua_tg_pt_gps_count++;
list_add_tail(&tg_pt_gp->tg_pt_gp_list,
- &T10_ALUA(su_dev)->tg_pt_gps_list);
- spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+ &su_dev->t10_alua.tg_pt_gps_list);
+ spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
}
return tg_pt_gp;
@@ -1328,42 +1356,42 @@ int core_alua_set_tg_pt_gp_id(
* The tg_pt_gp->tg_pt_gp_id may only be set once..
*/
if (tg_pt_gp->tg_pt_gp_valid_id) {
- printk(KERN_WARNING "ALUA TG PT Group already has a valid ID,"
+ pr_warn("ALUA TG PT Group already has a valid ID,"
" ignoring request\n");
- return -1;
+ return -EINVAL;
}
- spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
- if (T10_ALUA(su_dev)->alua_tg_pt_gps_count == 0x0000ffff) {
- printk(KERN_ERR "Maximum ALUA alua_tg_pt_gps_count:"
+ spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
+ if (su_dev->t10_alua.alua_tg_pt_gps_count == 0x0000ffff) {
+ pr_err("Maximum ALUA alua_tg_pt_gps_count:"
" 0x0000ffff reached\n");
- spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+ spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp);
- return -1;
+ return -ENOSPC;
}
again:
tg_pt_gp_id_tmp = (tg_pt_gp_id != 0) ? tg_pt_gp_id :
- T10_ALUA(su_dev)->alua_tg_pt_gps_counter++;
+ su_dev->t10_alua.alua_tg_pt_gps_counter++;
- list_for_each_entry(tg_pt_gp_tmp, &T10_ALUA(su_dev)->tg_pt_gps_list,
+ list_for_each_entry(tg_pt_gp_tmp, &su_dev->t10_alua.tg_pt_gps_list,
tg_pt_gp_list) {
if (tg_pt_gp_tmp->tg_pt_gp_id == tg_pt_gp_id_tmp) {
- if (!(tg_pt_gp_id))
+ if (!tg_pt_gp_id)
goto again;
- printk(KERN_ERR "ALUA Target Port Group ID: %hu already"
+ pr_err("ALUA Target Port Group ID: %hu already"
" exists, ignoring request\n", tg_pt_gp_id);
- spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
- return -1;
+ spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
+ return -EINVAL;
}
}
tg_pt_gp->tg_pt_gp_id = tg_pt_gp_id_tmp;
tg_pt_gp->tg_pt_gp_valid_id = 1;
list_add_tail(&tg_pt_gp->tg_pt_gp_list,
- &T10_ALUA(su_dev)->tg_pt_gps_list);
- T10_ALUA(su_dev)->alua_tg_pt_gps_count++;
- spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+ &su_dev->t10_alua.tg_pt_gps_list);
+ su_dev->t10_alua.alua_tg_pt_gps_count++;
+ spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
return 0;
}
@@ -1375,8 +1403,8 @@ struct t10_alua_tg_pt_gp_member *core_alua_allocate_tg_pt_gp_mem(
tg_pt_gp_mem = kmem_cache_zalloc(t10_alua_tg_pt_gp_mem_cache,
GFP_KERNEL);
- if (!(tg_pt_gp_mem)) {
- printk(KERN_ERR "Unable to allocate struct t10_alua_tg_pt_gp_member\n");
+ if (!tg_pt_gp_mem) {
+ pr_err("Unable to allocate struct t10_alua_tg_pt_gp_member\n");
return ERR_PTR(-ENOMEM);
}
INIT_LIST_HEAD(&tg_pt_gp_mem->tg_pt_gp_mem_list);
@@ -1403,10 +1431,10 @@ void core_alua_free_tg_pt_gp(
	 * no associations *OR* explict ALUA via SET_TARGET_PORT_GROUPS
* can be made while we are releasing struct t10_alua_tg_pt_gp.
*/
- spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+ spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
list_del(&tg_pt_gp->tg_pt_gp_list);
- T10_ALUA(su_dev)->alua_tg_pt_gps_counter--;
- spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+ su_dev->t10_alua.alua_tg_pt_gps_counter--;
+ spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
/*
* Allow a struct t10_alua_tg_pt_gp_member * referenced by
* core_alua_get_tg_pt_gp_by_name() in
@@ -1438,9 +1466,9 @@ void core_alua_free_tg_pt_gp(
* default_tg_pt_gp.
*/
spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
- if (tg_pt_gp != T10_ALUA(su_dev)->default_tg_pt_gp) {
+ if (tg_pt_gp != su_dev->t10_alua.default_tg_pt_gp) {
__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
- T10_ALUA(su_dev)->default_tg_pt_gp);
+ su_dev->t10_alua.default_tg_pt_gp);
} else
tg_pt_gp_mem->tg_pt_gp = NULL;
spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
@@ -1455,7 +1483,7 @@ void core_alua_free_tg_pt_gp(
void core_alua_free_tg_pt_gp_mem(struct se_port *port)
{
struct se_subsystem_dev *su_dev = port->sep_lun->lun_se_dev->se_sub_dev;
- struct t10_alua *alua = T10_ALUA(su_dev);
+ struct t10_alua *alua = &su_dev->t10_alua;
struct t10_alua_tg_pt_gp *tg_pt_gp;
struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
@@ -1463,7 +1491,7 @@ void core_alua_free_tg_pt_gp_mem(struct se_port *port)
return;
tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
- if (!(tg_pt_gp_mem))
+ if (!tg_pt_gp_mem)
return;
while (atomic_read(&tg_pt_gp_mem->tg_pt_gp_mem_ref_cnt))
@@ -1471,7 +1499,7 @@ void core_alua_free_tg_pt_gp_mem(struct se_port *port)
spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
- if ((tg_pt_gp)) {
+ if (tg_pt_gp) {
spin_lock(&tg_pt_gp->tg_pt_gp_lock);
if (tg_pt_gp_mem->tg_pt_gp_assoc) {
list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list);
@@ -1493,19 +1521,19 @@ static struct t10_alua_tg_pt_gp *core_alua_get_tg_pt_gp_by_name(
struct t10_alua_tg_pt_gp *tg_pt_gp;
struct config_item *ci;
- spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
- list_for_each_entry(tg_pt_gp, &T10_ALUA(su_dev)->tg_pt_gps_list,
+ spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
+ list_for_each_entry(tg_pt_gp, &su_dev->t10_alua.tg_pt_gps_list,
tg_pt_gp_list) {
- if (!(tg_pt_gp->tg_pt_gp_valid_id))
+ if (!tg_pt_gp->tg_pt_gp_valid_id)
continue;
ci = &tg_pt_gp->tg_pt_gp_group.cg_item;
- if (!(strcmp(config_item_name(ci), name))) {
+ if (!strcmp(config_item_name(ci), name)) {
atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
- spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+ spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
return tg_pt_gp;
}
}
- spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+ spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
return NULL;
}
@@ -1515,9 +1543,9 @@ static void core_alua_put_tg_pt_gp_from_name(
{
struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
- spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+ spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
- spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+ spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
}
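The get/put pair above follows a common pattern: take the reference while the list lock is still held, so the group cannot be freed between lookup and use. A minimal single-threaded userspace sketch of that pattern follows (locking shown as comments, all names illustrative stand-ins for the tg_pt_gp fields).

#include <stdio.h>
#include <string.h>

struct group {
    const char *name;
    int refcnt;
};

static struct group groups[] = { { "alua/tg0", 0 }, { "alua/tg1", 0 } };

/* Lookup takes the reference while still "holding the lock", so a
 * concurrent free cannot win the race between find and use. */
static struct group *get_by_name(const char *name)
{
    /* spin_lock(&tg_pt_gps_lock); */
    for (size_t i = 0; i < sizeof(groups) / sizeof(groups[0]); i++) {
        if (!strcmp(groups[i].name, name)) {
            groups[i].refcnt++;     /* atomic_inc in the kernel code */
            /* spin_unlock(&tg_pt_gps_lock); */
            return &groups[i];
        }
    }
    /* spin_unlock(&tg_pt_gps_lock); */
    return NULL;
}

static void put_group(struct group *g)
{
    /* spin_lock(&tg_pt_gps_lock); */
    g->refcnt--;                    /* atomic_dec in the kernel code */
    /* spin_unlock(&tg_pt_gps_lock); */
}

int main(void)
{
    struct group *g = get_by_name("alua/tg1");

    if (g) {
        printf("%s refcnt=%d\n", g->name, g->refcnt);
        put_group(g);
    }
    return 0;
}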
/*
@@ -1555,7 +1583,7 @@ ssize_t core_alua_show_tg_pt_gp_info(struct se_port *port, char *page)
{
struct se_subsystem_dev *su_dev = port->sep_lun->lun_se_dev->se_sub_dev;
struct config_item *tg_pt_ci;
- struct t10_alua *alua = T10_ALUA(su_dev);
+ struct t10_alua *alua = &su_dev->t10_alua;
struct t10_alua_tg_pt_gp *tg_pt_gp;
struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
ssize_t len = 0;
@@ -1564,12 +1592,12 @@ ssize_t core_alua_show_tg_pt_gp_info(struct se_port *port, char *page)
return len;
tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
- if (!(tg_pt_gp_mem))
+ if (!tg_pt_gp_mem)
return len;
spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
- if ((tg_pt_gp)) {
+ if (tg_pt_gp) {
tg_pt_ci = &tg_pt_gp->tg_pt_gp_group.cg_item;
len += sprintf(page, "TG Port Alias: %s\nTG Port Group ID:"
" %hu\nTG Port Primary Access State: %s\nTG Port "
@@ -1605,16 +1633,16 @@ ssize_t core_alua_store_tg_pt_gp_info(
tpg = port->sep_tpg;
lun = port->sep_lun;
- if (T10_ALUA(su_dev)->alua_type != SPC3_ALUA_EMULATED) {
- printk(KERN_WARNING "SPC3_ALUA_EMULATED not enabled for"
- " %s/tpgt_%hu/%s\n", TPG_TFO(tpg)->tpg_get_wwn(tpg),
- TPG_TFO(tpg)->tpg_get_tag(tpg),
+ if (su_dev->t10_alua.alua_type != SPC3_ALUA_EMULATED) {
+ pr_warn("SPC3_ALUA_EMULATED not enabled for"
+ " %s/tpgt_%hu/%s\n", tpg->se_tpg_tfo->tpg_get_wwn(tpg),
+ tpg->se_tpg_tfo->tpg_get_tag(tpg),
config_item_name(&lun->lun_group.cg_item));
return -EINVAL;
}
if (count > TG_PT_GROUP_NAME_BUF) {
- printk(KERN_ERR "ALUA Target Port Group alias too large!\n");
+ pr_err("ALUA Target Port Group alias too large!\n");
return -EINVAL;
}
memset(buf, 0, TG_PT_GROUP_NAME_BUF);
@@ -1631,31 +1659,31 @@ ssize_t core_alua_store_tg_pt_gp_info(
*/
tg_pt_gp_new = core_alua_get_tg_pt_gp_by_name(su_dev,
strstrip(buf));
- if (!(tg_pt_gp_new))
+ if (!tg_pt_gp_new)
return -ENODEV;
}
tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
- if (!(tg_pt_gp_mem)) {
+ if (!tg_pt_gp_mem) {
if (tg_pt_gp_new)
core_alua_put_tg_pt_gp_from_name(tg_pt_gp_new);
- printk(KERN_ERR "NULL struct se_port->sep_alua_tg_pt_gp_mem pointer\n");
+ pr_err("NULL struct se_port->sep_alua_tg_pt_gp_mem pointer\n");
return -EINVAL;
}
spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
- if ((tg_pt_gp)) {
+ if (tg_pt_gp) {
/*
* Clearing an existing tg_pt_gp association, and replacing
* with the default_tg_pt_gp.
*/
- if (!(tg_pt_gp_new)) {
- printk(KERN_INFO "Target_Core_ConfigFS: Moving"
+ if (!tg_pt_gp_new) {
+ pr_debug("Target_Core_ConfigFS: Moving"
" %s/tpgt_%hu/%s from ALUA Target Port Group:"
" alua/%s, ID: %hu back to"
" default_tg_pt_gp\n",
- TPG_TFO(tpg)->tpg_get_wwn(tpg),
- TPG_TFO(tpg)->tpg_get_tag(tpg),
+ tpg->se_tpg_tfo->tpg_get_wwn(tpg),
+ tpg->se_tpg_tfo->tpg_get_tag(tpg),
config_item_name(&lun->lun_group.cg_item),
config_item_name(
&tg_pt_gp->tg_pt_gp_group.cg_item),
@@ -1663,7 +1691,7 @@ ssize_t core_alua_store_tg_pt_gp_info(
__core_alua_drop_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp);
__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
- T10_ALUA(su_dev)->default_tg_pt_gp);
+ su_dev->t10_alua.default_tg_pt_gp);
spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
return count;
@@ -1679,10 +1707,10 @@ ssize_t core_alua_store_tg_pt_gp_info(
*/
__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp_new);
spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
- printk(KERN_INFO "Target_Core_ConfigFS: %s %s/tpgt_%hu/%s to ALUA"
+ pr_debug("Target_Core_ConfigFS: %s %s/tpgt_%hu/%s to ALUA"
" Target Port Group: alua/%s, ID: %hu\n", (move) ?
- "Moving" : "Adding", TPG_TFO(tpg)->tpg_get_wwn(tpg),
- TPG_TFO(tpg)->tpg_get_tag(tpg),
+ "Moving" : "Adding", tpg->se_tpg_tfo->tpg_get_wwn(tpg),
+ tpg->se_tpg_tfo->tpg_get_tag(tpg),
config_item_name(&lun->lun_group.cg_item),
config_item_name(&tg_pt_gp_new->tg_pt_gp_group.cg_item),
tg_pt_gp_new->tg_pt_gp_id);
@@ -1716,11 +1744,11 @@ ssize_t core_alua_store_access_type(
ret = strict_strtoul(page, 0, &tmp);
if (ret < 0) {
- printk(KERN_ERR "Unable to extract alua_access_type\n");
+ pr_err("Unable to extract alua_access_type\n");
return -EINVAL;
}
if ((tmp != 0) && (tmp != 1) && (tmp != 2) && (tmp != 3)) {
- printk(KERN_ERR "Illegal value for alua_access_type:"
+ pr_err("Illegal value for alua_access_type:"
" %lu\n", tmp);
return -EINVAL;
}
@@ -1754,11 +1782,11 @@ ssize_t core_alua_store_nonop_delay_msecs(
ret = strict_strtoul(page, 0, &tmp);
if (ret < 0) {
- printk(KERN_ERR "Unable to extract nonop_delay_msecs\n");
+ pr_err("Unable to extract nonop_delay_msecs\n");
return -EINVAL;
}
if (tmp > ALUA_MAX_NONOP_DELAY_MSECS) {
- printk(KERN_ERR "Passed nonop_delay_msecs: %lu, exceeds"
+ pr_err("Passed nonop_delay_msecs: %lu, exceeds"
" ALUA_MAX_NONOP_DELAY_MSECS: %d\n", tmp,
ALUA_MAX_NONOP_DELAY_MSECS);
return -EINVAL;
@@ -1785,11 +1813,11 @@ ssize_t core_alua_store_trans_delay_msecs(
ret = strict_strtoul(page, 0, &tmp);
if (ret < 0) {
- printk(KERN_ERR "Unable to extract trans_delay_msecs\n");
+ pr_err("Unable to extract trans_delay_msecs\n");
return -EINVAL;
}
if (tmp > ALUA_MAX_TRANS_DELAY_MSECS) {
- printk(KERN_ERR "Passed trans_delay_msecs: %lu, exceeds"
+ pr_err("Passed trans_delay_msecs: %lu, exceeds"
" ALUA_MAX_TRANS_DELAY_MSECS: %d\n", tmp,
ALUA_MAX_TRANS_DELAY_MSECS);
return -EINVAL;
@@ -1816,11 +1844,11 @@ ssize_t core_alua_store_preferred_bit(
ret = strict_strtoul(page, 0, &tmp);
if (ret < 0) {
- printk(KERN_ERR "Unable to extract preferred ALUA value\n");
+ pr_err("Unable to extract preferred ALUA value\n");
return -EINVAL;
}
if ((tmp != 0) && (tmp != 1)) {
- printk(KERN_ERR "Illegal value for preferred ALUA: %lu\n", tmp);
+ pr_err("Illegal value for preferred ALUA: %lu\n", tmp);
return -EINVAL;
}
tg_pt_gp->tg_pt_gp_pref = (int)tmp;
@@ -1830,7 +1858,7 @@ ssize_t core_alua_store_preferred_bit(
ssize_t core_alua_show_offline_bit(struct se_lun *lun, char *page)
{
- if (!(lun->lun_sep))
+ if (!lun->lun_sep)
return -ENODEV;
return sprintf(page, "%d\n",
@@ -1846,22 +1874,22 @@ ssize_t core_alua_store_offline_bit(
unsigned long tmp;
int ret;
- if (!(lun->lun_sep))
+ if (!lun->lun_sep)
return -ENODEV;
ret = strict_strtoul(page, 0, &tmp);
if (ret < 0) {
- printk(KERN_ERR "Unable to extract alua_tg_pt_offline value\n");
+ pr_err("Unable to extract alua_tg_pt_offline value\n");
return -EINVAL;
}
if ((tmp != 0) && (tmp != 1)) {
- printk(KERN_ERR "Illegal value for alua_tg_pt_offline: %lu\n",
+ pr_err("Illegal value for alua_tg_pt_offline: %lu\n",
tmp);
return -EINVAL;
}
tg_pt_gp_mem = lun->lun_sep->sep_alua_tg_pt_gp_mem;
- if (!(tg_pt_gp_mem)) {
- printk(KERN_ERR "Unable to locate *tg_pt_gp_mem\n");
+ if (!tg_pt_gp_mem) {
+ pr_err("Unable to locate *tg_pt_gp_mem\n");
return -EINVAL;
}
@@ -1890,13 +1918,13 @@ ssize_t core_alua_store_secondary_status(
ret = strict_strtoul(page, 0, &tmp);
if (ret < 0) {
- printk(KERN_ERR "Unable to extract alua_tg_pt_status\n");
+ pr_err("Unable to extract alua_tg_pt_status\n");
return -EINVAL;
}
if ((tmp != ALUA_STATUS_NONE) &&
(tmp != ALUA_STATUS_ALTERED_BY_EXPLICT_STPG) &&
(tmp != ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA)) {
- printk(KERN_ERR "Illegal value for alua_tg_pt_status: %lu\n",
+ pr_err("Illegal value for alua_tg_pt_status: %lu\n",
tmp);
return -EINVAL;
}
@@ -1923,11 +1951,11 @@ ssize_t core_alua_store_secondary_write_metadata(
ret = strict_strtoul(page, 0, &tmp);
if (ret < 0) {
- printk(KERN_ERR "Unable to extract alua_tg_pt_write_md\n");
+ pr_err("Unable to extract alua_tg_pt_write_md\n");
return -EINVAL;
}
if ((tmp != 0) && (tmp != 1)) {
- printk(KERN_ERR "Illegal value for alua_tg_pt_write_md:"
+ pr_err("Illegal value for alua_tg_pt_write_md:"
" %lu\n", tmp);
return -EINVAL;
}
@@ -1939,7 +1967,7 @@ ssize_t core_alua_store_secondary_write_metadata(
int core_setup_alua(struct se_device *dev, int force_pt)
{
struct se_subsystem_dev *su_dev = dev->se_sub_dev;
- struct t10_alua *alua = T10_ALUA(su_dev);
+ struct t10_alua *alua = &su_dev->t10_alua;
struct t10_alua_lu_gp_member *lu_gp_mem;
/*
* If this device is from Target_Core_Mod/pSCSI, use the ALUA logic
@@ -1947,44 +1975,44 @@ int core_setup_alua(struct se_device *dev, int force_pt)
* cause a problem because libata and some SATA RAID HBAs appear
* under Linux/SCSI, but emulate SCSI logic themselves.
*/
- if (((TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) &&
- !(DEV_ATTRIB(dev)->emulate_alua)) || force_pt) {
+ if (((dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) &&
+ !(dev->se_sub_dev->se_dev_attrib.emulate_alua)) || force_pt) {
alua->alua_type = SPC_ALUA_PASSTHROUGH;
alua->alua_state_check = &core_alua_state_check_nop;
- printk(KERN_INFO "%s: Using SPC_ALUA_PASSTHROUGH, no ALUA"
- " emulation\n", TRANSPORT(dev)->name);
+ pr_debug("%s: Using SPC_ALUA_PASSTHROUGH, no ALUA"
+ " emulation\n", dev->transport->name);
return 0;
}
/*
* If SPC-3 or above is reported by real or emulated struct se_device,
* use emulated ALUA.
*/
- if (TRANSPORT(dev)->get_device_rev(dev) >= SCSI_3) {
- printk(KERN_INFO "%s: Enabling ALUA Emulation for SPC-3"
- " device\n", TRANSPORT(dev)->name);
+ if (dev->transport->get_device_rev(dev) >= SCSI_3) {
+ pr_debug("%s: Enabling ALUA Emulation for SPC-3"
+ " device\n", dev->transport->name);
/*
* Associate this struct se_device with the default ALUA
* LUN Group.
*/
lu_gp_mem = core_alua_allocate_lu_gp_mem(dev);
- if (IS_ERR(lu_gp_mem) || !lu_gp_mem)
- return -1;
+ if (IS_ERR(lu_gp_mem))
+ return PTR_ERR(lu_gp_mem);
alua->alua_type = SPC3_ALUA_EMULATED;
alua->alua_state_check = &core_alua_state_check;
spin_lock(&lu_gp_mem->lu_gp_mem_lock);
__core_alua_attach_lu_gp_mem(lu_gp_mem,
- se_global->default_lu_gp);
+ default_lu_gp);
spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
- printk(KERN_INFO "%s: Adding to default ALUA LU Group:"
+ pr_debug("%s: Adding to default ALUA LU Group:"
" core/alua/lu_gps/default_lu_gp\n",
- TRANSPORT(dev)->name);
+ dev->transport->name);
} else {
alua->alua_type = SPC2_ALUA_DISABLED;
alua->alua_state_check = &core_alua_state_check_nop;
- printk(KERN_INFO "%s: Disabling ALUA Emulation for SPC-2"
- " device\n", TRANSPORT(dev)->name);
+ pr_debug("%s: Disabling ALUA Emulation for SPC-2"
+ " device\n", dev->transport->name);
}
return 0;
diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c
index 7f19c8b7b84..8ae09a1bdf7 100644
--- a/drivers/target/target_core_cdb.c
+++ b/drivers/target/target_core_cdb.c
@@ -23,6 +23,7 @@
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
+#include <linux/kernel.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
@@ -64,20 +65,22 @@ target_fill_alua_data(struct se_port *port, unsigned char *buf)
static int
target_emulate_inquiry_std(struct se_cmd *cmd)
{
- struct se_lun *lun = SE_LUN(cmd);
- struct se_device *dev = SE_DEV(cmd);
- unsigned char *buf = cmd->t_task->t_task_buf;
+ struct se_lun *lun = cmd->se_lun;
+ struct se_device *dev = cmd->se_dev;
+ unsigned char *buf;
/*
* Make sure we at least have 6 bytes of INQUIRY response
* payload going back for EVPD=0
*/
if (cmd->data_length < 6) {
- printk(KERN_ERR "SCSI Inquiry payload length: %u"
+ pr_err("SCSI Inquiry payload length: %u"
" too small for EVPD=0\n", cmd->data_length);
- return -1;
+ return -EINVAL;
}
+ buf = transport_kmap_first_data_page(cmd);
+
buf[0] = dev->transport->get_device_type(dev);
if (buf[0] == TYPE_TAPE)
buf[1] = 0x80;
@@ -86,12 +89,12 @@ target_emulate_inquiry_std(struct se_cmd *cmd)
/*
* Enable SCCS and TPGS fields for Emulated ALUA
*/
- if (T10_ALUA(dev->se_sub_dev)->alua_type == SPC3_ALUA_EMULATED)
+ if (dev->se_sub_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED)
target_fill_alua_data(lun->lun_sep, buf);
if (cmd->data_length < 8) {
buf[4] = 1; /* Set additional length to 1 */
- return 0;
+ goto out;
}
buf[7] = 0x32; /* Sync=1 and CmdQue=1 */
@@ -102,40 +105,18 @@ target_emulate_inquiry_std(struct se_cmd *cmd)
*/
if (cmd->data_length < 36) {
buf[4] = 3; /* Set additional length to 3 */
- return 0;
+ goto out;
}
snprintf((unsigned char *)&buf[8], 8, "LIO-ORG");
snprintf((unsigned char *)&buf[16], 16, "%s",
- &DEV_T10_WWN(dev)->model[0]);
+ &dev->se_sub_dev->t10_wwn.model[0]);
snprintf((unsigned char *)&buf[32], 4, "%s",
- &DEV_T10_WWN(dev)->revision[0]);
+ &dev->se_sub_dev->t10_wwn.revision[0]);
buf[4] = 31; /* Set additional length to 31 */
- return 0;
-}
-
-/* supported vital product data pages */
-static int
-target_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf)
-{
- buf[1] = 0x00;
- if (cmd->data_length < 8)
- return 0;
-
- buf[4] = 0x0;
- /*
- * Only report the INQUIRY EVPD=1 pages after a valid NAA
- * Registered Extended LUN WWN has been set via ConfigFS
- * during device creation/restart.
- */
- if (SE_DEV(cmd)->se_sub_dev->su_dev_flags &
- SDF_EMULATED_VPD_UNIT_SERIAL) {
- buf[3] = 3;
- buf[5] = 0x80;
- buf[6] = 0x83;
- buf[7] = 0x86;
- }
+out:
+ transport_kunmap_first_data_page(cmd);
return 0;
}
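Note the structural change running through this patch: the response buffer is no longer a pre-mapped t_task_buf but is mapped with transport_kmap_first_data_page() and must be unmapped on every exit path, which is why the early returns above became goto out. A minimal userspace sketch of that single-exit map/unmap bracketing, with calloc()/free() standing in for the kernel mapping helpers:

#include <stdio.h>
#include <stdlib.h>

/* Stand-ins for transport_kmap/kunmap_first_data_page(). */
static unsigned char *map_buf(size_t len) { return calloc(1, len); }
static void unmap_buf(unsigned char *b)   { free(b); }

/* Single-exit pattern: every early return between map and unmap
 * becomes a "goto out" so the mapping is always released. */
static int fill_response(size_t data_length)
{
    unsigned char *buf = map_buf(256);
    int ret = 0;

    if (!buf)
        return -1;
    buf[0] = 0x00;              /* device type */
    if (data_length < 8) {
        buf[4] = 1;             /* truncated additional length */
        goto out;
    }
    buf[4] = 31;                /* full standard INQUIRY length */
out:
    unmap_buf(buf);             /* released on every path */
    return ret;
}

int main(void)
{
    return fill_response(6);
}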
@@ -143,16 +124,15 @@ target_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf)
static int
target_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
{
- struct se_device *dev = SE_DEV(cmd);
+ struct se_device *dev = cmd->se_dev;
u16 len = 0;
- buf[1] = 0x80;
if (dev->se_sub_dev->su_dev_flags &
SDF_EMULATED_VPD_UNIT_SERIAL) {
u32 unit_serial_len;
unit_serial_len =
- strlen(&DEV_T10_WWN(dev)->unit_serial[0]);
+ strlen(&dev->se_sub_dev->t10_wwn.unit_serial[0]);
unit_serial_len++; /* For NULL Terminator */
if (((len + 4) + unit_serial_len) > cmd->data_length) {
@@ -162,7 +142,7 @@ target_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
return 0;
}
len += sprintf((unsigned char *)&buf[4], "%s",
- &DEV_T10_WWN(dev)->unit_serial[0]);
+ &dev->se_sub_dev->t10_wwn.unit_serial[0]);
len++; /* Extra Byte for NULL Terminator */
buf[3] = len;
}
@@ -176,21 +156,18 @@ target_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
static int
target_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
{
- struct se_device *dev = SE_DEV(cmd);
- struct se_lun *lun = SE_LUN(cmd);
+ struct se_device *dev = cmd->se_dev;
+ struct se_lun *lun = cmd->se_lun;
struct se_port *port = NULL;
struct se_portal_group *tpg = NULL;
struct t10_alua_lu_gp_member *lu_gp_mem;
struct t10_alua_tg_pt_gp *tg_pt_gp;
struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
- unsigned char binary, binary_new;
- unsigned char *prod = &DEV_T10_WWN(dev)->model[0];
+ unsigned char *prod = &dev->se_sub_dev->t10_wwn.model[0];
u32 prod_len;
u32 unit_serial_len, off = 0;
- int i;
u16 len = 0, id_len;
- buf[1] = 0x83;
off = 4;
/*
@@ -210,11 +187,11 @@ target_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
/* CODE SET == Binary */
buf[off++] = 0x1;
- /* Set ASSOICATION == addressed logical unit: 0)b */
+ /* Set ASSOCIATION == addressed logical unit: 0)b */
buf[off] = 0x00;
/* Identifier/Designator type == NAA identifier */
- buf[off++] = 0x3;
+ buf[off++] |= 0x3;
off++;
/* Identifier/Designator length */
@@ -237,16 +214,9 @@ target_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
* VENDOR_SPECIFIC_IDENTIFIER and
* VENDOR_SPECIFIC_IDENTIFIER_EXTENSION
*/
- binary = transport_asciihex_to_binaryhex(
- &DEV_T10_WWN(dev)->unit_serial[0]);
- buf[off++] |= (binary & 0xf0) >> 4;
- for (i = 0; i < 24; i += 2) {
- binary_new = transport_asciihex_to_binaryhex(
- &DEV_T10_WWN(dev)->unit_serial[i+2]);
- buf[off] = (binary & 0x0f) << 4;
- buf[off++] |= (binary_new & 0xf0) >> 4;
- binary = binary_new;
- }
+ buf[off++] |= hex_to_bin(dev->se_sub_dev->t10_wwn.unit_serial[0]);
+ hex2bin(&buf[off], &dev->se_sub_dev->t10_wwn.unit_serial[1], 12);
+
len = 20;
off = (len + 4);
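The replaced open-coded nibble loop and the new hex_to_bin()/hex2bin() calls produce the same NAA designator layout: the first hex digit of the unit serial fills the low nibble of the NAA byte, and the next 24 hex digits pack into 12 binary bytes. A userspace sketch, assuming locally defined stand-ins for the kernel's hex helpers (which live in <linux/kernel.h>):

#include <stdio.h>

/* Local stand-ins for the kernel's hex_to_bin()/hex2bin(). */
static int hex_to_bin(char ch)
{
    if (ch >= '0' && ch <= '9') return ch - '0';
    if (ch >= 'a' && ch <= 'f') return ch - 'a' + 10;
    if (ch >= 'A' && ch <= 'F') return ch - 'A' + 10;
    return -1;
}

static void hex2bin(unsigned char *dst, const char *src, size_t count)
{
    while (count--) {
        *dst = hex_to_bin(*src++) << 4;
        *dst++ |= hex_to_bin(*src++);
    }
}

int main(void)
{
    /* Illustrative 25-digit serial: the first nibble lands in the low
     * half of the NAA byte, the next 24 digits pack into 12 bytes. */
    const char *serial = "0123456789abcdef012345678";
    unsigned char naa[13] = { 0x60 };   /* NAA IEEE Registered Extended */

    naa[0] |= hex_to_bin(serial[0]);
    hex2bin(&naa[1], &serial[1], 12);

    for (int i = 0; i < 13; i++)
        printf("%02x", naa[i]);
    printf("\n");
    return 0;
}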
@@ -263,7 +233,7 @@ check_t10_vend_desc:
if (dev->se_sub_dev->su_dev_flags &
SDF_EMULATED_VPD_UNIT_SERIAL) {
unit_serial_len =
- strlen(&DEV_T10_WWN(dev)->unit_serial[0]);
+ strlen(&dev->se_sub_dev->t10_wwn.unit_serial[0]);
unit_serial_len++; /* For NULL Terminator */
if ((len + (id_len + 4) +
@@ -274,7 +244,7 @@ check_t10_vend_desc:
}
id_len += sprintf((unsigned char *)&buf[off+12],
"%s:%s", prod,
- &DEV_T10_WWN(dev)->unit_serial[0]);
+ &dev->se_sub_dev->t10_wwn.unit_serial[0]);
}
buf[off] = 0x2; /* ASCII */
buf[off+1] = 0x1; /* T10 Vendor ID */
@@ -312,10 +282,10 @@ check_port:
goto check_tpgi;
}
buf[off] =
- (TPG_TFO(tpg)->get_fabric_proto_ident(tpg) << 4);
+ (tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4);
buf[off++] |= 0x1; /* CODE SET == Binary */
buf[off] = 0x80; /* Set PIV=1 */
- /* Set ASSOICATION == target port: 01b */
+ /* Set ASSOCIATION == target port: 01b */
buf[off] |= 0x10;
/* DESIGNATOR TYPE == Relative target port identifier */
buf[off++] |= 0x4;
@@ -335,7 +305,7 @@ check_port:
* section 7.5.1 Table 362
*/
check_tpgi:
- if (T10_ALUA(dev->se_sub_dev)->alua_type !=
+ if (dev->se_sub_dev->t10_alua.alua_type !=
SPC3_ALUA_EMULATED)
goto check_scsi_name;
@@ -349,7 +319,7 @@ check_tpgi:
spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
- if (!(tg_pt_gp)) {
+ if (!tg_pt_gp) {
spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
goto check_lu_gp;
}
@@ -357,10 +327,10 @@ check_tpgi:
spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
buf[off] =
- (TPG_TFO(tpg)->get_fabric_proto_ident(tpg) << 4);
+ (tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4);
buf[off++] |= 0x1; /* CODE SET == Binary */
buf[off] = 0x80; /* Set PIV=1 */
- /* Set ASSOICATION == target port: 01b */
+ /* Set ASSOCIATION == target port: 01b */
buf[off] |= 0x10;
/* DESIGNATOR TYPE == Target port group identifier */
buf[off++] |= 0x5;
@@ -380,12 +350,12 @@ check_lu_gp:
goto check_scsi_name;
}
lu_gp_mem = dev->dev_alua_lu_gp_mem;
- if (!(lu_gp_mem))
+ if (!lu_gp_mem)
goto check_scsi_name;
spin_lock(&lu_gp_mem->lu_gp_mem_lock);
lu_gp = lu_gp_mem->lu_gp;
- if (!(lu_gp)) {
+ if (!lu_gp) {
spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
goto check_scsi_name;
}
@@ -409,7 +379,7 @@ check_lu_gp:
* section 7.5.1 Table 362
*/
check_scsi_name:
- scsi_name_len = strlen(TPG_TFO(tpg)->tpg_get_wwn(tpg));
+ scsi_name_len = strlen(tpg->se_tpg_tfo->tpg_get_wwn(tpg));
/* UTF-8 ",t,0x<16-bit TPGT>" + NULL Terminator */
scsi_name_len += 10;
/* Check for 4-byte padding */
@@ -424,10 +394,10 @@ check_scsi_name:
goto set_len;
}
buf[off] =
- (TPG_TFO(tpg)->get_fabric_proto_ident(tpg) << 4);
+ (tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4);
buf[off++] |= 0x3; /* CODE SET == UTF-8 */
buf[off] = 0x80; /* Set PIV=1 */
- /* Set ASSOICATION == target port: 01b */
+ /* Set ASSOCIATION == target port: 01b */
buf[off] |= 0x10;
/* DESIGNATOR TYPE == SCSI name string */
buf[off++] |= 0x8;
@@ -438,9 +408,9 @@ check_scsi_name:
* Target Port, this means "<iSCSI name>,t,0x<TPGT>" in
* UTF-8 encoding.
*/
- tpgt = TPG_TFO(tpg)->tpg_get_tag(tpg);
+ tpgt = tpg->se_tpg_tfo->tpg_get_tag(tpg);
scsi_name_len = sprintf(&buf[off], "%s,t,0x%04x",
- TPG_TFO(tpg)->tpg_get_wwn(tpg), tpgt);
+ tpg->se_tpg_tfo->tpg_get_wwn(tpg), tpgt);
scsi_name_len += 1 /* Include NULL terminator */;
/*
* The null-terminated, null-padded (see 4.4.2) SCSI
@@ -471,13 +441,12 @@ target_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
if (cmd->data_length < 60)
return 0;
- buf[1] = 0x86;
buf[2] = 0x3c;
/* Set HEADSUP, ORDSUP, SIMPSUP */
buf[5] = 0x07;
/* If WriteCache emulation is enabled, set V_SUP */
- if (DEV_ATTRIB(SE_DEV(cmd))->emulate_write_cache > 0)
+ if (cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0)
buf[6] = 0x01;
return 0;
}
@@ -486,7 +455,7 @@ target_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
static int
target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
{
- struct se_device *dev = SE_DEV(cmd);
+ struct se_device *dev = cmd->se_dev;
int have_tp = 0;
/*
@@ -494,27 +463,29 @@ target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
* emulate_tpu=1 or emulate_tpws=1 we will expect a
* different page length for Thin Provisioning.
*/
- if (DEV_ATTRIB(dev)->emulate_tpu || DEV_ATTRIB(dev)->emulate_tpws)
+ if (dev->se_sub_dev->se_dev_attrib.emulate_tpu || dev->se_sub_dev->se_dev_attrib.emulate_tpws)
have_tp = 1;
if (cmd->data_length < (0x10 + 4)) {
- printk(KERN_INFO "Received data_length: %u"
+ pr_debug("Received data_length: %u"
" too small for EVPD 0xb0\n",
cmd->data_length);
- return -1;
+ return -EINVAL;
}
if (have_tp && cmd->data_length < (0x3c + 4)) {
- printk(KERN_INFO "Received data_length: %u"
+ pr_debug("Received data_length: %u"
" too small for TPE=1 EVPD 0xb0\n",
cmd->data_length);
have_tp = 0;
}
buf[0] = dev->transport->get_device_type(dev);
- buf[1] = 0xb0;
buf[3] = have_tp ? 0x3c : 0x10;
+ /* Set WSNZ to 1 */
+ buf[4] = 0x01;
+
/*
* Set OPTIMAL TRANSFER LENGTH GRANULARITY
*/
@@ -523,12 +494,12 @@ target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
/*
* Set MAXIMUM TRANSFER LENGTH
*/
- put_unaligned_be32(DEV_ATTRIB(dev)->max_sectors, &buf[8]);
+ put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.max_sectors, &buf[8]);
/*
* Set OPTIMAL TRANSFER LENGTH
*/
- put_unaligned_be32(DEV_ATTRIB(dev)->optimal_sectors, &buf[12]);
+ put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.optimal_sectors, &buf[12]);
/*
* Exit now if we don't support TP or the initiator sent a too
@@ -540,35 +511,51 @@ target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
/*
* Set MAXIMUM UNMAP LBA COUNT
*/
- put_unaligned_be32(DEV_ATTRIB(dev)->max_unmap_lba_count, &buf[20]);
+ put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count, &buf[20]);
/*
* Set MAXIMUM UNMAP BLOCK DESCRIPTOR COUNT
*/
- put_unaligned_be32(DEV_ATTRIB(dev)->max_unmap_block_desc_count,
+ put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count,
&buf[24]);
/*
* Set OPTIMAL UNMAP GRANULARITY
*/
- put_unaligned_be32(DEV_ATTRIB(dev)->unmap_granularity, &buf[28]);
+ put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.unmap_granularity, &buf[28]);
/*
* UNMAP GRANULARITY ALIGNMENT
*/
- put_unaligned_be32(DEV_ATTRIB(dev)->unmap_granularity_alignment,
+ put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment,
&buf[32]);
- if (DEV_ATTRIB(dev)->unmap_granularity_alignment != 0)
+ if (dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment != 0)
buf[32] |= 0x80; /* Set the UGAVALID bit */
return 0;
}
+/* Block Device Characteristics VPD page */
+static int
+target_emulate_evpd_b1(struct se_cmd *cmd, unsigned char *buf)
+{
+ struct se_device *dev = cmd->se_dev;
+
+ buf[0] = dev->transport->get_device_type(dev);
+ buf[3] = 0x3c;
+
+ if (cmd->data_length >= 5 &&
+ dev->se_sub_dev->se_dev_attrib.is_nonrot)
+ buf[5] = 1;
+
+ return 0;
+}
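For reference on the new 0xb1 handler: in SBC-3 the Block Device Characteristics page carries MEDIUM ROTATION RATE in bytes 4-5, and the value 0001h denotes a non-rotational medium, which is why buf[5] is set to 1 when is_nonrot is set. A tiny sketch of the encoding (the attribute value is illustrative):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    unsigned char buf[64] = { 0 };
    int is_nonrot = 1;                  /* illustrative device attribute */

    buf[3] = 0x3c;                      /* page length */
    /* MEDIUM ROTATION RATE lives in bytes 4-5; the value 0x0001
     * means "non-rotational medium" (SBC-3). */
    if (is_nonrot)
        buf[5] = 1;

    uint16_t rate = (buf[4] << 8) | buf[5];
    printf("rotation rate field = %u\n", rate);
    return 0;
}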
+
/* Thin Provisioning VPD */
static int
target_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
{
- struct se_device *dev = SE_DEV(cmd);
+ struct se_device *dev = cmd->se_dev;
/*
* From sbc3r22 section 6.5.4 Thin Provisioning VPD page:
@@ -579,7 +566,6 @@ target_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
* defined in table 162.
*/
buf[0] = dev->transport->get_device_type(dev);
- buf[1] = 0xb2;
/*
* Set Hardcoded length mentioned above for DP=0
@@ -602,7 +588,7 @@ target_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
* the UNMAP command (see 5.25). A TPU bit set to zero indicates
* that the device server does not support the UNMAP command.
*/
- if (DEV_ATTRIB(dev)->emulate_tpu != 0)
+ if (dev->se_sub_dev->se_dev_attrib.emulate_tpu != 0)
buf[5] = 0x80;
/*
@@ -611,18 +597,59 @@ target_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
* A TPWS bit set to zero indicates that the device server does not
* support the use of the WRITE SAME (16) command to unmap LBAs.
*/
- if (DEV_ATTRIB(dev)->emulate_tpws != 0)
+ if (dev->se_sub_dev->se_dev_attrib.emulate_tpws != 0)
buf[5] |= 0x40;
return 0;
}
static int
+target_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf);
+
+static struct {
+ uint8_t page;
+ int (*emulate)(struct se_cmd *, unsigned char *);
+} evpd_handlers[] = {
+ { .page = 0x00, .emulate = target_emulate_evpd_00 },
+ { .page = 0x80, .emulate = target_emulate_evpd_80 },
+ { .page = 0x83, .emulate = target_emulate_evpd_83 },
+ { .page = 0x86, .emulate = target_emulate_evpd_86 },
+ { .page = 0xb0, .emulate = target_emulate_evpd_b0 },
+ { .page = 0xb1, .emulate = target_emulate_evpd_b1 },
+ { .page = 0xb2, .emulate = target_emulate_evpd_b2 },
+};
+
+/* supported vital product data pages */
+static int
+target_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf)
+{
+ int p;
+
+ if (cmd->data_length < 8)
+ return 0;
+ /*
+ * Only report the INQUIRY EVPD=1 pages after a valid NAA
+ * Registered Extended LUN WWN has been set via ConfigFS
+ * during device creation/restart.
+ */
+ if (cmd->se_dev->se_sub_dev->su_dev_flags &
+ SDF_EMULATED_VPD_UNIT_SERIAL) {
+ buf[3] = ARRAY_SIZE(evpd_handlers);
+ for (p = 0; p < min_t(int, ARRAY_SIZE(evpd_handlers),
+ cmd->data_length - 4); ++p)
+ buf[p + 4] = evpd_handlers[p].page;
+ }
+
+ return 0;
+}
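The new evpd_handlers[] table replaces the old switch and does double duty: page 0x00 walks it to report the supported-pages list, and target_emulate_inquiry() walks it again to dispatch a specific page. A minimal userspace sketch of that table-driven pattern, with two illustrative handlers:

#include <stdio.h>
#include <stdint.h>

/* Illustrative handlers; the pages mirror the evpd_handlers[] table. */
static int emu_80(unsigned char *b) { b[3] = 4;  return 0; }
static int emu_83(unsigned char *b) { b[3] = 20; return 0; }

static struct {
    uint8_t page;
    int (*emulate)(unsigned char *);
} handlers[] = {
    { 0x80, emu_80 },
    { 0x83, emu_83 },
};

#define N (sizeof(handlers) / sizeof(handlers[0]))

int main(void)
{
    unsigned char buf[64] = { 0 };
    uint8_t requested = 0x83;

    /* One table serves both: page 0x00 lists every supported page... */
    for (size_t p = 0; p < N; p++)
        printf("supported page 0x%02x\n", handlers[p].page);

    /* ...and the same table dispatches a specific page request. */
    for (size_t p = 0; p < N; p++)
        if (handlers[p].page == requested) {
            buf[1] = requested;
            return handlers[p].emulate(buf);
        }
    fprintf(stderr, "Unknown VPD code 0x%02x\n", requested);
    return 1;
}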
+
+static int
target_emulate_inquiry(struct se_cmd *cmd)
{
- struct se_device *dev = SE_DEV(cmd);
- unsigned char *buf = cmd->t_task->t_task_buf;
- unsigned char *cdb = cmd->t_task->t_task_cdb;
+ struct se_device *dev = cmd->se_dev;
+ unsigned char *buf;
+ unsigned char *cdb = cmd->t_task_cdb;
+ int p, ret;
if (!(cdb[1] & 0x1))
return target_emulate_inquiry_std(cmd);
@@ -635,38 +662,33 @@ target_emulate_inquiry(struct se_cmd *cmd)
* payload length left for the next outgoing EVPD metadata
*/
if (cmd->data_length < 4) {
- printk(KERN_ERR "SCSI Inquiry payload length: %u"
+ pr_err("SCSI Inquiry payload length: %u"
" too small for EVPD=1\n", cmd->data_length);
- return -1;
+ return -EINVAL;
}
+
+ buf = transport_kmap_first_data_page(cmd);
+
buf[0] = dev->transport->get_device_type(dev);
- switch (cdb[2]) {
- case 0x00:
- return target_emulate_evpd_00(cmd, buf);
- case 0x80:
- return target_emulate_evpd_80(cmd, buf);
- case 0x83:
- return target_emulate_evpd_83(cmd, buf);
- case 0x86:
- return target_emulate_evpd_86(cmd, buf);
- case 0xb0:
- return target_emulate_evpd_b0(cmd, buf);
- case 0xb2:
- return target_emulate_evpd_b2(cmd, buf);
- default:
- printk(KERN_ERR "Unknown VPD Code: 0x%02x\n", cdb[2]);
- return -1;
- }
+ for (p = 0; p < ARRAY_SIZE(evpd_handlers); ++p)
+ if (cdb[2] == evpd_handlers[p].page) {
+ buf[1] = cdb[2];
+ ret = evpd_handlers[p].emulate(cmd, buf);
+ transport_kunmap_first_data_page(cmd);
+ return ret;
+ }
- return 0;
+ transport_kunmap_first_data_page(cmd);
+ pr_err("Unknown VPD Code: 0x%02x\n", cdb[2]);
+ return -EINVAL;
}
static int
target_emulate_readcapacity(struct se_cmd *cmd)
{
- struct se_device *dev = SE_DEV(cmd);
- unsigned char *buf = cmd->t_task->t_task_buf;
+ struct se_device *dev = cmd->se_dev;
+ unsigned char *buf;
unsigned long long blocks_long = dev->transport->get_blocks(dev);
u32 blocks;
@@ -675,30 +697,36 @@ target_emulate_readcapacity(struct se_cmd *cmd)
else
blocks = (u32)blocks_long;
+ buf = transport_kmap_first_data_page(cmd);
+
buf[0] = (blocks >> 24) & 0xff;
buf[1] = (blocks >> 16) & 0xff;
buf[2] = (blocks >> 8) & 0xff;
buf[3] = blocks & 0xff;
- buf[4] = (DEV_ATTRIB(dev)->block_size >> 24) & 0xff;
- buf[5] = (DEV_ATTRIB(dev)->block_size >> 16) & 0xff;
- buf[6] = (DEV_ATTRIB(dev)->block_size >> 8) & 0xff;
- buf[7] = DEV_ATTRIB(dev)->block_size & 0xff;
+ buf[4] = (dev->se_sub_dev->se_dev_attrib.block_size >> 24) & 0xff;
+ buf[5] = (dev->se_sub_dev->se_dev_attrib.block_size >> 16) & 0xff;
+ buf[6] = (dev->se_sub_dev->se_dev_attrib.block_size >> 8) & 0xff;
+ buf[7] = dev->se_sub_dev->se_dev_attrib.block_size & 0xff;
/*
* Set max 32-bit blocks to signal SERVICE ACTION READ_CAPACITY_16
*/
- if (DEV_ATTRIB(dev)->emulate_tpu || DEV_ATTRIB(dev)->emulate_tpws)
+ if (dev->se_sub_dev->se_dev_attrib.emulate_tpu || dev->se_sub_dev->se_dev_attrib.emulate_tpws)
put_unaligned_be32(0xFFFFFFFF, &buf[0]);
+ transport_kunmap_first_data_page(cmd);
+
return 0;
}
static int
target_emulate_readcapacity_16(struct se_cmd *cmd)
{
- struct se_device *dev = SE_DEV(cmd);
- unsigned char *buf = cmd->t_task->t_task_buf;
+ struct se_device *dev = cmd->se_dev;
+ unsigned char *buf;
unsigned long long blocks = dev->transport->get_blocks(dev);
+ buf = transport_kmap_first_data_page(cmd);
+
buf[0] = (blocks >> 56) & 0xff;
buf[1] = (blocks >> 48) & 0xff;
buf[2] = (blocks >> 40) & 0xff;
@@ -707,17 +735,19 @@ target_emulate_readcapacity_16(struct se_cmd *cmd)
buf[5] = (blocks >> 16) & 0xff;
buf[6] = (blocks >> 8) & 0xff;
buf[7] = blocks & 0xff;
- buf[8] = (DEV_ATTRIB(dev)->block_size >> 24) & 0xff;
- buf[9] = (DEV_ATTRIB(dev)->block_size >> 16) & 0xff;
- buf[10] = (DEV_ATTRIB(dev)->block_size >> 8) & 0xff;
- buf[11] = DEV_ATTRIB(dev)->block_size & 0xff;
+ buf[8] = (dev->se_sub_dev->se_dev_attrib.block_size >> 24) & 0xff;
+ buf[9] = (dev->se_sub_dev->se_dev_attrib.block_size >> 16) & 0xff;
+ buf[10] = (dev->se_sub_dev->se_dev_attrib.block_size >> 8) & 0xff;
+ buf[11] = dev->se_sub_dev->se_dev_attrib.block_size & 0xff;
/*
* Set Thin Provisioning Enable bit following sbc3r22 in section
* READ CAPACITY (16) byte 14 if emulate_tpu or emulate_tpws is enabled.
*/
- if (DEV_ATTRIB(dev)->emulate_tpu || DEV_ATTRIB(dev)->emulate_tpws)
+ if (dev->se_sub_dev->se_dev_attrib.emulate_tpu || dev->se_sub_dev->se_dev_attrib.emulate_tpws)
buf[14] = 0x80;
+ transport_kunmap_first_data_page(cmd);
+
return 0;
}
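Both READ CAPACITY emulations above pack fields big-endian, byte by byte or via put_unaligned_be32(), and the 10-byte variant clamps the block count to 0xFFFFFFFF to steer the initiator toward READ CAPACITY (16). A userspace sketch with a local stand-in for put_unaligned_be32() (values illustrative):

#include <stdio.h>
#include <stdint.h>

/* Userspace stand-in for the kernel's put_unaligned_be32(). */
static void put_be32(uint32_t v, unsigned char *p)
{
    p[0] = v >> 24; p[1] = v >> 16; p[2] = v >> 8; p[3] = v;
}

int main(void)
{
    unsigned char buf[8];
    uint64_t blocks_long = 0x100000000ULL;      /* capacity needs > 32 bits */
    uint32_t blocks = (blocks_long >= 0xffffffffULL)
                        ? 0xffffffff            /* signal READ CAPACITY (16) */
                        : (uint32_t)blocks_long;

    put_be32(blocks, &buf[0]);                  /* returned LBA count */
    put_be32(512, &buf[4]);                     /* logical block size */

    for (int i = 0; i < 8; i++)
        printf("%02x", buf[i]);
    printf("\n");
    return 0;
}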
@@ -737,6 +767,35 @@ target_modesense_control(struct se_device *dev, unsigned char *p)
p[1] = 0x0a;
p[2] = 2;
/*
+ * From spc4r23, 7.4.7 Control mode page
+ *
+ * The QUEUE ALGORITHM MODIFIER field (see table 368) specifies
+ * restrictions on the algorithm used for reordering commands
+ * having the SIMPLE task attribute (see SAM-4).
+ *
+ * Table 368 -- QUEUE ALGORITHM MODIFIER field
+ * Code Description
+ * 0h Restricted reordering
+ * 1h Unrestricted reordering allowed
+ * 2h to 7h Reserved
+ * 8h to Fh Vendor specific
+ *
+ * A value of zero in the QUEUE ALGORITHM MODIFIER field specifies that
+ * the device server shall order the processing sequence of commands
+ * having the SIMPLE task attribute such that data integrity is maintained
+ * for that I_T nexus (i.e., if the transmission of new SCSI transport protocol
+ * requests is halted at any time, the final value of all data observable
+ * on the medium shall be the same as if all the commands had been processed
+ * with the ORDERED task attribute).
+ *
+ * A value of one in the QUEUE ALGORITHM MODIFIER field specifies that the
+ * device server may reorder the processing sequence of commands having the
+ * SIMPLE task attribute in any manner. Any data integrity exposures related to
+ * command sequence order shall be explicitly handled by the application client
+ * through the selection of appropriate commands and task attributes.
+ */
+ p[3] = (dev->se_sub_dev->se_dev_attrib.emulate_rest_reord == 1) ? 0x00 : 0x10;
+ /*
* From spc4r17, section 7.4.6 Control mode Page
*
* Unit Attention interlocks control (UN_INTLCK_CTRL) to code 00b
@@ -765,8 +824,8 @@ target_modesense_control(struct se_device *dev, unsigned char *p)
* for a BUSY, TASK SET FULL, or RESERVATION CONFLICT status regardless
* to the number of commands completed with one of those status codes.
*/
- p[4] = (DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl == 2) ? 0x30 :
- (DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl == 1) ? 0x20 : 0x00;
+ p[4] = (dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2) ? 0x30 :
+ (dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 1) ? 0x20 : 0x00;
/*
* From spc4r17, section 7.4.6 Control mode Page
*
@@ -779,7 +838,7 @@ target_modesense_control(struct se_device *dev, unsigned char *p)
* which the command was received shall be completed with TASK ABORTED
* status (see SAM-4).
*/
- p[5] = (DEV_ATTRIB(dev)->emulate_tas) ? 0x40 : 0x00;
+ p[5] = (dev->se_sub_dev->se_dev_attrib.emulate_tas) ? 0x40 : 0x00;
p[8] = 0xff;
p[9] = 0xff;
p[11] = 30;
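To make the QUEUE ALGORITHM MODIFIER comment above concrete: the modifier occupies bits 7..4 of Control mode page byte 3, so "unrestricted reordering allowed" (1h) is encoded as 0x10 and "restricted reordering" (0h) as 0x00. A one-line check, with an illustrative attribute value:

#include <stdio.h>

int main(void)
{
    /* Control mode page byte 3: QUEUE ALGORITHM MODIFIER in bits 7..4. */
    int emulate_rest_reord = 0;                 /* illustrative attribute */
    unsigned char p3 = emulate_rest_reord ? 0x00   /* 0h: restricted */
                                          : 0x10;  /* 1h: unrestricted */

    printf("byte 3 = 0x%02x (modifier %d)\n", p3, p3 >> 4);
    return 0;
}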
@@ -792,7 +851,7 @@ target_modesense_caching(struct se_device *dev, unsigned char *p)
{
p[0] = 0x08;
p[1] = 0x12;
- if (DEV_ATTRIB(dev)->emulate_write_cache > 0)
+ if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0)
p[2] = 0x04; /* Write Cache Enable */
p[12] = 0x20; /* Disabled Read Ahead */
@@ -830,9 +889,9 @@ target_modesense_dpofua(unsigned char *buf, int type)
static int
target_emulate_modesense(struct se_cmd *cmd, int ten)
{
- struct se_device *dev = SE_DEV(cmd);
- char *cdb = cmd->t_task->t_task_cdb;
- unsigned char *rbuf = cmd->t_task->t_task_buf;
+ struct se_device *dev = cmd->se_dev;
+ char *cdb = cmd->t_task_cdb;
+ unsigned char *rbuf;
int type = dev->transport->get_device_type(dev);
int offset = (ten) ? 8 : 4;
int length = 0;
@@ -856,7 +915,7 @@ target_emulate_modesense(struct se_cmd *cmd, int ten)
length += target_modesense_control(dev, &buf[offset+length]);
break;
default:
- printk(KERN_ERR "Got Unknown Mode Page: 0x%02x\n",
+ pr_err("Got Unknown Mode Page: 0x%02x\n",
cdb[2] & 0x3f);
return PYX_TRANSPORT_UNKNOWN_MODE_PAGE;
}
@@ -867,13 +926,13 @@ target_emulate_modesense(struct se_cmd *cmd, int ten)
buf[0] = (offset >> 8) & 0xff;
buf[1] = offset & 0xff;
- if ((SE_LUN(cmd)->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) ||
+ if ((cmd->se_lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) ||
(cmd->se_deve &&
(cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)))
target_modesense_write_protect(&buf[3], type);
- if ((DEV_ATTRIB(dev)->emulate_write_cache > 0) &&
- (DEV_ATTRIB(dev)->emulate_fua_write > 0))
+ if ((dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0) &&
+ (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0))
target_modesense_dpofua(&buf[3], type);
if ((offset + 2) > cmd->data_length)
@@ -883,19 +942,22 @@ target_emulate_modesense(struct se_cmd *cmd, int ten)
offset -= 1;
buf[0] = offset & 0xff;
- if ((SE_LUN(cmd)->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) ||
+ if ((cmd->se_lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) ||
(cmd->se_deve &&
(cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)))
target_modesense_write_protect(&buf[2], type);
- if ((DEV_ATTRIB(dev)->emulate_write_cache > 0) &&
- (DEV_ATTRIB(dev)->emulate_fua_write > 0))
+ if ((dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0) &&
+ (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0))
target_modesense_dpofua(&buf[2], type);
if ((offset + 1) > cmd->data_length)
offset = cmd->data_length;
}
+
+ rbuf = transport_kmap_first_data_page(cmd);
memcpy(rbuf, buf, offset);
+ transport_kunmap_first_data_page(cmd);
return 0;
}
@@ -903,16 +965,20 @@ target_emulate_modesense(struct se_cmd *cmd, int ten)
static int
target_emulate_request_sense(struct se_cmd *cmd)
{
- unsigned char *cdb = cmd->t_task->t_task_cdb;
- unsigned char *buf = cmd->t_task->t_task_buf;
+ unsigned char *cdb = cmd->t_task_cdb;
+ unsigned char *buf;
u8 ua_asc = 0, ua_ascq = 0;
+ int err = 0;
if (cdb[1] & 0x01) {
- printk(KERN_ERR "REQUEST_SENSE description emulation not"
+ pr_err("REQUEST_SENSE description emulation not"
" supported\n");
return PYX_TRANSPORT_INVALID_CDB_FIELD;
}
- if (!(core_scsi3_ua_clear_for_request_sense(cmd, &ua_asc, &ua_ascq))) {
+
+ buf = transport_kmap_first_data_page(cmd);
+
+ if (!core_scsi3_ua_clear_for_request_sense(cmd, &ua_asc, &ua_ascq)) {
/*
* CURRENT ERROR, UNIT ATTENTION
*/
@@ -924,7 +990,8 @@ target_emulate_request_sense(struct se_cmd *cmd)
*/
if (cmd->data_length <= 18) {
buf[7] = 0x00;
- return 0;
+ err = -EINVAL;
+ goto end;
}
/*
* The Additional Sense Code (ASC) from the UNIT ATTENTION
@@ -944,7 +1011,8 @@ target_emulate_request_sense(struct se_cmd *cmd)
*/
if (cmd->data_length <= 18) {
buf[7] = 0x00;
- return 0;
+ err = -EINVAL;
+ goto end;
}
/*
* NO ADDITIONAL SENSE INFORMATION
@@ -953,6 +1021,9 @@ target_emulate_request_sense(struct se_cmd *cmd)
buf[7] = 0x0A;
}
+end:
+ transport_kunmap_first_data_page(cmd);
+
return 0;
}
@@ -963,13 +1034,13 @@ target_emulate_request_sense(struct se_cmd *cmd)
static int
target_emulate_unmap(struct se_task *task)
{
- struct se_cmd *cmd = TASK_CMD(task);
- struct se_device *dev = SE_DEV(cmd);
- unsigned char *buf = cmd->t_task->t_task_buf, *ptr = NULL;
- unsigned char *cdb = &cmd->t_task->t_task_cdb[0];
+ struct se_cmd *cmd = task->task_se_cmd;
+ struct se_device *dev = cmd->se_dev;
+ unsigned char *buf, *ptr = NULL;
+ unsigned char *cdb = &cmd->t_task_cdb[0];
sector_t lba;
unsigned int size = cmd->data_length, range;
- int ret, offset;
+ int ret = 0, offset;
unsigned short dl, bd_dl;
/* First UNMAP block descriptor starts at 8 byte offset */
@@ -977,21 +1048,24 @@ target_emulate_unmap(struct se_task *task)
size -= 8;
dl = get_unaligned_be16(&cdb[0]);
bd_dl = get_unaligned_be16(&cdb[2]);
+
+ buf = transport_kmap_first_data_page(cmd);
+
ptr = &buf[offset];
- printk(KERN_INFO "UNMAP: Sub: %s Using dl: %hu bd_dl: %hu size: %hu"
+ pr_debug("UNMAP: Sub: %s Using dl: %hu bd_dl: %hu size: %hu"
" ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr);
while (size) {
lba = get_unaligned_be64(&ptr[0]);
range = get_unaligned_be32(&ptr[8]);
- printk(KERN_INFO "UNMAP: Using lba: %llu and range: %u\n",
+ pr_debug("UNMAP: Using lba: %llu and range: %u\n",
(unsigned long long)lba, range);
ret = dev->transport->do_discard(dev, lba, range);
if (ret < 0) {
- printk(KERN_ERR "blkdev_issue_discard() failed: %d\n",
+ pr_err("blkdev_issue_discard() failed: %d\n",
ret);
- return -1;
+ goto err;
}
ptr += 16;
@@ -1000,7 +1074,10 @@ target_emulate_unmap(struct se_task *task)
task->task_scsi_status = GOOD;
transport_complete_task(task, 1);
- return 0;
+err:
+ transport_kunmap_first_data_page(cmd);
+
+ return ret;
}
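The UNMAP walk above consumes an 8-byte parameter-list header followed by 16-byte block descriptors, each holding an 8-byte big-endian LBA and a 4-byte range. A self-contained userspace sketch of the descriptor parsing, with local big-endian readers standing in for get_unaligned_be64()/get_unaligned_be32() and an illustrative one-descriptor payload:

#include <stdio.h>
#include <stdint.h>

static uint64_t get_be64(const unsigned char *p)
{
    uint64_t v = 0;
    for (int i = 0; i < 8; i++)
        v = (v << 8) | p[i];
    return v;
}

static uint32_t get_be32(const unsigned char *p)
{
    return ((uint32_t)p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3];
}

int main(void)
{
    /* An UNMAP parameter list: 8-byte header, then 16-byte block
     * descriptors of (8-byte LBA, 4-byte range, 4 reserved bytes). */
    unsigned char param[8 + 16] = { 0 };
    param[8 + 7]  = 0x10;   /* LBA 16 */
    param[8 + 11] = 0x08;   /* range: 8 blocks */

    const unsigned char *ptr = &param[8];
    size_t size = sizeof(param) - 8;

    while (size >= 16) {
        uint64_t lba   = get_be64(&ptr[0]);
        uint32_t range = get_be32(&ptr[8]);

        printf("discard lba=%llu range=%u\n",
               (unsigned long long)lba, range);
        ptr  += 16;
        size -= 16;
    }
    return 0;
}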
/*
@@ -1008,23 +1085,36 @@ target_emulate_unmap(struct se_task *task)
* Note this is not used for TCM/pSCSI passthrough
*/
static int
-target_emulate_write_same(struct se_task *task)
+target_emulate_write_same(struct se_task *task, int write_same32)
{
- struct se_cmd *cmd = TASK_CMD(task);
- struct se_device *dev = SE_DEV(cmd);
- sector_t lba = cmd->t_task->t_task_lba;
- unsigned int range;
+ struct se_cmd *cmd = task->task_se_cmd;
+ struct se_device *dev = cmd->se_dev;
+ sector_t range;
+ sector_t lba = cmd->t_task_lba;
+ unsigned int num_blocks;
int ret;
+ /*
+ * Extract num_blocks from the WRITE_SAME_* CDB. Then use the explicit
+ * range when a non-zero value is supplied, otherwise calculate the remaining
+ * range based on ->get_blocks() - starting LBA.
+ */
+ if (write_same32)
+ num_blocks = get_unaligned_be32(&cmd->t_task_cdb[28]);
+ else
+ num_blocks = get_unaligned_be32(&cmd->t_task_cdb[10]);
- range = (cmd->data_length / DEV_ATTRIB(dev)->block_size);
+ if (num_blocks != 0)
+ range = num_blocks;
+ else
+ range = (dev->transport->get_blocks(dev) - lba);
- printk(KERN_INFO "WRITE_SAME UNMAP: LBA: %llu Range: %u\n",
- (unsigned long long)lba, range);
+ pr_debug("WRITE_SAME UNMAP: LBA: %llu Range: %llu\n",
+ (unsigned long long)lba, (unsigned long long)range);
ret = dev->transport->do_discard(dev, lba, range);
if (ret < 0) {
- printk(KERN_INFO "blkdev_issue_discard() failed for WRITE_SAME\n");
- return -1;
+ pr_debug("blkdev_issue_discard() failed for WRITE_SAME\n");
+ return ret;
}
task->task_scsi_status = GOOD;
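The new NUMBER OF LOGICAL BLOCKS handling above can be sketched in isolation: WRITE SAME (16) carries it at CDB byte 10, the WRITE SAME (32) service action at byte 28, and zero means "to the end of the device". A userspace sketch assuming an illustrative device size and starting LBA:

#include <stdio.h>
#include <stdint.h>

static uint32_t get_be32(const unsigned char *p)
{
    return ((uint32_t)p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3];
}

int main(void)
{
    unsigned char cdb[32] = { 0 };      /* zeroed: num_blocks == 0 */
    uint64_t dev_blocks = 1 << 20;      /* illustrative device size */
    uint64_t lba = 1024;                /* illustrative starting LBA */
    int write_same32 = 0;

    /* NUMBER OF LOGICAL BLOCKS: byte 10 for WRITE SAME (16),
     * byte 28 for the WRITE SAME (32) service action. */
    uint32_t num_blocks = write_same32 ? get_be32(&cdb[28])
                                       : get_be32(&cdb[10]);

    /* Zero means "discard from the LBA to the end of the device". */
    uint64_t range = num_blocks ? num_blocks : dev_blocks - lba;

    printf("discard lba=%llu range=%llu\n",
           (unsigned long long)lba, (unsigned long long)range);
    return 0;
}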
@@ -1035,12 +1125,12 @@ target_emulate_write_same(struct se_task *task)
int
transport_emulate_control_cdb(struct se_task *task)
{
- struct se_cmd *cmd = TASK_CMD(task);
- struct se_device *dev = SE_DEV(cmd);
+ struct se_cmd *cmd = task->task_se_cmd;
+ struct se_device *dev = cmd->se_dev;
unsigned short service_action;
int ret = 0;
- switch (cmd->t_task->t_task_cdb[0]) {
+ switch (cmd->t_task_cdb[0]) {
case INQUIRY:
ret = target_emulate_inquiry(cmd);
break;
@@ -1054,13 +1144,13 @@ transport_emulate_control_cdb(struct se_task *task)
ret = target_emulate_modesense(cmd, 1);
break;
case SERVICE_ACTION_IN:
- switch (cmd->t_task->t_task_cdb[1] & 0x1f) {
+ switch (cmd->t_task_cdb[1] & 0x1f) {
case SAI_READ_CAPACITY_16:
ret = target_emulate_readcapacity_16(cmd);
break;
default:
- printk(KERN_ERR "Unsupported SA: 0x%02x\n",
- cmd->t_task->t_task_cdb[1] & 0x1f);
+ pr_err("Unsupported SA: 0x%02x\n",
+ cmd->t_task_cdb[1] & 0x1f);
return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
}
break;
@@ -1069,7 +1159,7 @@ transport_emulate_control_cdb(struct se_task *task)
break;
case UNMAP:
if (!dev->transport->do_discard) {
- printk(KERN_ERR "UNMAP emulation not supported for: %s\n",
+ pr_err("UNMAP emulation not supported for: %s\n",
dev->transport->name);
return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
}
@@ -1077,27 +1167,27 @@ transport_emulate_control_cdb(struct se_task *task)
break;
case WRITE_SAME_16:
if (!dev->transport->do_discard) {
- printk(KERN_ERR "WRITE_SAME_16 emulation not supported"
+ pr_err("WRITE_SAME_16 emulation not supported"
" for: %s\n", dev->transport->name);
return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
}
- ret = target_emulate_write_same(task);
+ ret = target_emulate_write_same(task, 0);
break;
case VARIABLE_LENGTH_CMD:
service_action =
- get_unaligned_be16(&cmd->t_task->t_task_cdb[8]);
+ get_unaligned_be16(&cmd->t_task_cdb[8]);
switch (service_action) {
case WRITE_SAME_32:
if (!dev->transport->do_discard) {
- printk(KERN_ERR "WRITE_SAME_32 SA emulation not"
+ pr_err("WRITE_SAME_32 SA emulation not"
" supported for: %s\n",
dev->transport->name);
return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
}
- ret = target_emulate_write_same(task);
+ ret = target_emulate_write_same(task, 1);
break;
default:
- printk(KERN_ERR "Unsupported VARIABLE_LENGTH_CMD SA:"
+ pr_err("Unsupported VARIABLE_LENGTH_CMD SA:"
" 0x%02x\n", service_action);
break;
}
@@ -1105,8 +1195,7 @@ transport_emulate_control_cdb(struct se_task *task)
case SYNCHRONIZE_CACHE:
case 0x91: /* SYNCHRONIZE_CACHE_16: */
if (!dev->transport->do_sync_cache) {
- printk(KERN_ERR
- "SYNCHRONIZE_CACHE emulation not supported"
+ pr_err("SYNCHRONIZE_CACHE emulation not supported"
" for: %s\n", dev->transport->name);
return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
}
@@ -1123,8 +1212,8 @@ transport_emulate_control_cdb(struct se_task *task)
case WRITE_FILEMARKS:
break;
default:
- printk(KERN_ERR "Unsupported SCSI Opcode: 0x%02x for %s\n",
- cmd->t_task->t_task_cdb[0], dev->transport->name);
+ pr_err("Unsupported SCSI Opcode: 0x%02x for %s\n",
+ cmd->t_task_cdb[0], dev->transport->name);
return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
}
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index ee6fad979b5..b2575d8568c 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -37,6 +37,7 @@
#include <linux/parser.h>
#include <linux/syscalls.h>
#include <linux/configfs.h>
+#include <linux/spinlock.h>
#include <target/target_core_base.h>
#include <target/target_core_device.h>
@@ -52,6 +53,8 @@
#include "target_core_rd.h"
#include "target_core_stat.h"
+extern struct t10_alua_lu_gp *default_lu_gp;
+
static struct list_head g_tf_list;
static struct mutex g_tf_lock;
@@ -61,6 +64,13 @@ struct target_core_configfs_attribute {
ssize_t (*store)(void *, const char *, size_t);
};
+static struct config_group target_core_hbagroup;
+static struct config_group alua_group;
+static struct config_group alua_lu_gps_group;
+
+static DEFINE_SPINLOCK(se_device_lock);
+static LIST_HEAD(se_dev_list);
+
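The se_device_lock/se_dev_list pair added here is the usual idiom of one file-static lock guarding one file-static list. A single-threaded userspace analogue (the spinlock calls appear only as comments since they have no equivalent in this sketch; all names illustrative):

#include <stdio.h>

/* Userspace analogue of the file-static DEFINE_SPINLOCK()/LIST_HEAD()
 * pair: one lock guards one list, both private to the file. */
struct se_dev {
    const char *name;
    struct se_dev *next;
};

static struct se_dev *se_dev_list;      /* like LIST_HEAD(se_dev_list) */

static void dev_add(struct se_dev *d)
{
    /* spin_lock(&se_device_lock); */
    d->next = se_dev_list;
    se_dev_list = d;
    /* spin_unlock(&se_device_lock); */
}

int main(void)
{
    struct se_dev a = { "dev0", NULL };

    dev_add(&a);
    printf("%s registered\n", se_dev_list->name);
    return 0;
}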
static inline struct se_hba *
item_to_hba(struct config_item *item)
{
@@ -94,12 +104,12 @@ static struct target_fabric_configfs *target_core_get_fabric(
{
struct target_fabric_configfs *tf;
- if (!(name))
+ if (!name)
return NULL;
mutex_lock(&g_tf_lock);
list_for_each_entry(tf, &g_tf_list, tf_list) {
- if (!(strcmp(tf->tf_name, name))) {
+ if (!strcmp(tf->tf_name, name)) {
atomic_inc(&tf->tf_access_cnt);
mutex_unlock(&g_tf_lock);
return tf;
@@ -120,7 +130,7 @@ static struct config_group *target_core_register_fabric(
struct target_fabric_configfs *tf;
int ret;
- printk(KERN_INFO "Target_Core_ConfigFS: REGISTER -> group: %p name:"
+ pr_debug("Target_Core_ConfigFS: REGISTER -> group: %p name:"
" %s\n", group, name);
/*
* Ensure that TCM subsystem plugins are loaded at this point for
@@ -140,7 +150,7 @@ static struct config_group *target_core_register_fabric(
* registered, but simply provides auto loading logic for modules with
* mkdir(2) system calls with known TCM fabric modules.
*/
- if (!(strncmp(name, "iscsi", 5))) {
+ if (!strncmp(name, "iscsi", 5)) {
/*
* Automatically load the LIO Target fabric module when the
* following is called:
@@ -149,11 +159,11 @@ static struct config_group *target_core_register_fabric(
*/
ret = request_module("iscsi_target_mod");
if (ret < 0) {
- printk(KERN_ERR "request_module() failed for"
+ pr_err("request_module() failed for"
" iscsi_target_mod.ko: %d\n", ret);
return ERR_PTR(-EINVAL);
}
- } else if (!(strncmp(name, "loopback", 8))) {
+ } else if (!strncmp(name, "loopback", 8)) {
/*
* Automatically load the tcm_loop fabric module when the
* following is called:
@@ -162,25 +172,25 @@ static struct config_group *target_core_register_fabric(
*/
ret = request_module("tcm_loop");
if (ret < 0) {
- printk(KERN_ERR "request_module() failed for"
+ pr_err("request_module() failed for"
" tcm_loop.ko: %d\n", ret);
return ERR_PTR(-EINVAL);
}
}
tf = target_core_get_fabric(name);
- if (!(tf)) {
- printk(KERN_ERR "target_core_get_fabric() failed for %s\n",
+ if (!tf) {
+ pr_err("target_core_get_fabric() failed for %s\n",
name);
return ERR_PTR(-EINVAL);
}
- printk(KERN_INFO "Target_Core_ConfigFS: REGISTER -> Located fabric:"
+ pr_debug("Target_Core_ConfigFS: REGISTER -> Located fabric:"
" %s\n", tf->tf_name);
/*
* On a successful target_core_get_fabric() lookup, the returned
* struct target_fabric_configfs *tf will contain a usage reference.
*/
- printk(KERN_INFO "Target_Core_ConfigFS: REGISTER tfc_wwn_cit -> %p\n",
+ pr_debug("Target_Core_ConfigFS: REGISTER tfc_wwn_cit -> %p\n",
&TF_CIT_TMPL(tf)->tfc_wwn_cit);
tf->tf_group.default_groups = tf->tf_default_groups;
@@ -192,14 +202,14 @@ static struct config_group *target_core_register_fabric(
config_group_init_type_name(&tf->tf_disc_group, "discovery_auth",
&TF_CIT_TMPL(tf)->tfc_discovery_cit);
- printk(KERN_INFO "Target_Core_ConfigFS: REGISTER -> Allocated Fabric:"
+ pr_debug("Target_Core_ConfigFS: REGISTER -> Allocated Fabric:"
" %s\n", tf->tf_group.cg_item.ci_name);
/*
* Setup tf_ops.tf_subsys pointer for usage with configfs_depend_item()
*/
tf->tf_ops.tf_subsys = tf->tf_subsys;
tf->tf_fabric = &tf->tf_group.cg_item;
- printk(KERN_INFO "Target_Core_ConfigFS: REGISTER -> Set tf->tf_fabric"
+ pr_debug("Target_Core_ConfigFS: REGISTER -> Set tf->tf_fabric"
" for %s\n", name);
return &tf->tf_group;
@@ -218,18 +228,18 @@ static void target_core_deregister_fabric(
struct config_item *df_item;
int i;
- printk(KERN_INFO "Target_Core_ConfigFS: DEREGISTER -> Looking up %s in"
+ pr_debug("Target_Core_ConfigFS: DEREGISTER -> Looking up %s in"
" tf list\n", config_item_name(item));
- printk(KERN_INFO "Target_Core_ConfigFS: DEREGISTER -> located fabric:"
+ pr_debug("Target_Core_ConfigFS: DEREGISTER -> located fabric:"
" %s\n", tf->tf_name);
atomic_dec(&tf->tf_access_cnt);
- printk(KERN_INFO "Target_Core_ConfigFS: DEREGISTER -> Releasing"
+ pr_debug("Target_Core_ConfigFS: DEREGISTER -> Releasing"
" tf->tf_fabric for %s\n", tf->tf_name);
tf->tf_fabric = NULL;
- printk(KERN_INFO "Target_Core_ConfigFS: DEREGISTER -> Releasing ci"
+ pr_debug("Target_Core_ConfigFS: DEREGISTER -> Releasing ci"
" %s\n", config_item_name(item));
tf_group = &tf->tf_group;
@@ -296,22 +306,18 @@ struct target_fabric_configfs *target_fabric_configfs_init(
{
struct target_fabric_configfs *tf;
- if (!(fabric_mod)) {
- printk(KERN_ERR "Missing struct module *fabric_mod pointer\n");
- return NULL;
- }
if (!(name)) {
- printk(KERN_ERR "Unable to locate passed fabric name\n");
- return NULL;
+ pr_err("Unable to locate passed fabric name\n");
+ return ERR_PTR(-EINVAL);
}
- if (strlen(name) > TARGET_FABRIC_NAME_SIZE) {
- printk(KERN_ERR "Passed name: %s exceeds TARGET_FABRIC"
+ if (strlen(name) >= TARGET_FABRIC_NAME_SIZE) {
+ pr_err("Passed name: %s exceeds TARGET_FABRIC"
"_NAME_SIZE\n", name);
- return NULL;
+ return ERR_PTR(-EINVAL);
}
tf = kzalloc(sizeof(struct target_fabric_configfs), GFP_KERNEL);
- if (!(tf))
+ if (!tf)
return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&tf->tf_list);
@@ -330,9 +336,9 @@ struct target_fabric_configfs *target_fabric_configfs_init(
list_add_tail(&tf->tf_list, &g_tf_list);
mutex_unlock(&g_tf_lock);
- printk(KERN_INFO "<<<<<<<<<<<<<<<<<<<<<< BEGIN FABRIC API >>>>>>>>"
+ pr_debug("<<<<<<<<<<<<<<<<<<<<<< BEGIN FABRIC API >>>>>>>>"
">>>>>>>>>>>>>>\n");
- printk(KERN_INFO "Initialized struct target_fabric_configfs: %p for"
+ pr_debug("Initialized struct target_fabric_configfs: %p for"
" %s\n", tf, tf->tf_name);
return tf;
}
@@ -361,140 +367,132 @@ static int target_fabric_tf_ops_check(
{
struct target_core_fabric_ops *tfo = &tf->tf_ops;
- if (!(tfo->get_fabric_name)) {
- printk(KERN_ERR "Missing tfo->get_fabric_name()\n");
- return -EINVAL;
- }
- if (!(tfo->get_fabric_proto_ident)) {
- printk(KERN_ERR "Missing tfo->get_fabric_proto_ident()\n");
+ if (!tfo->get_fabric_name) {
+ pr_err("Missing tfo->get_fabric_name()\n");
return -EINVAL;
}
- if (!(tfo->tpg_get_wwn)) {
- printk(KERN_ERR "Missing tfo->tpg_get_wwn()\n");
+ if (!tfo->get_fabric_proto_ident) {
+ pr_err("Missing tfo->get_fabric_proto_ident()\n");
return -EINVAL;
}
- if (!(tfo->tpg_get_tag)) {
- printk(KERN_ERR "Missing tfo->tpg_get_tag()\n");
+ if (!tfo->tpg_get_wwn) {
+ pr_err("Missing tfo->tpg_get_wwn()\n");
return -EINVAL;
}
- if (!(tfo->tpg_get_default_depth)) {
- printk(KERN_ERR "Missing tfo->tpg_get_default_depth()\n");
+ if (!tfo->tpg_get_tag) {
+ pr_err("Missing tfo->tpg_get_tag()\n");
return -EINVAL;
}
- if (!(tfo->tpg_get_pr_transport_id)) {
- printk(KERN_ERR "Missing tfo->tpg_get_pr_transport_id()\n");
+ if (!tfo->tpg_get_default_depth) {
+ pr_err("Missing tfo->tpg_get_default_depth()\n");
return -EINVAL;
}
- if (!(tfo->tpg_get_pr_transport_id_len)) {
- printk(KERN_ERR "Missing tfo->tpg_get_pr_transport_id_len()\n");
+ if (!tfo->tpg_get_pr_transport_id) {
+ pr_err("Missing tfo->tpg_get_pr_transport_id()\n");
return -EINVAL;
}
- if (!(tfo->tpg_check_demo_mode)) {
- printk(KERN_ERR "Missing tfo->tpg_check_demo_mode()\n");
+ if (!tfo->tpg_get_pr_transport_id_len) {
+ pr_err("Missing tfo->tpg_get_pr_transport_id_len()\n");
return -EINVAL;
}
- if (!(tfo->tpg_check_demo_mode_cache)) {
- printk(KERN_ERR "Missing tfo->tpg_check_demo_mode_cache()\n");
+ if (!tfo->tpg_check_demo_mode) {
+ pr_err("Missing tfo->tpg_check_demo_mode()\n");
return -EINVAL;
}
- if (!(tfo->tpg_check_demo_mode_write_protect)) {
- printk(KERN_ERR "Missing tfo->tpg_check_demo_mode_write_protect()\n");
+ if (!tfo->tpg_check_demo_mode_cache) {
+ pr_err("Missing tfo->tpg_check_demo_mode_cache()\n");
return -EINVAL;
}
- if (!(tfo->tpg_check_prod_mode_write_protect)) {
- printk(KERN_ERR "Missing tfo->tpg_check_prod_mode_write_protect()\n");
+ if (!tfo->tpg_check_demo_mode_write_protect) {
+ pr_err("Missing tfo->tpg_check_demo_mode_write_protect()\n");
return -EINVAL;
}
- if (!(tfo->tpg_alloc_fabric_acl)) {
- printk(KERN_ERR "Missing tfo->tpg_alloc_fabric_acl()\n");
+ if (!tfo->tpg_check_prod_mode_write_protect) {
+ pr_err("Missing tfo->tpg_check_prod_mode_write_protect()\n");
return -EINVAL;
}
- if (!(tfo->tpg_release_fabric_acl)) {
- printk(KERN_ERR "Missing tfo->tpg_release_fabric_acl()\n");
+ if (!tfo->tpg_alloc_fabric_acl) {
+ pr_err("Missing tfo->tpg_alloc_fabric_acl()\n");
return -EINVAL;
}
- if (!(tfo->tpg_get_inst_index)) {
- printk(KERN_ERR "Missing tfo->tpg_get_inst_index()\n");
+ if (!tfo->tpg_release_fabric_acl) {
+ pr_err("Missing tfo->tpg_release_fabric_acl()\n");
return -EINVAL;
}
- if (!(tfo->release_cmd_to_pool)) {
- printk(KERN_ERR "Missing tfo->release_cmd_to_pool()\n");
+ if (!tfo->tpg_get_inst_index) {
+ pr_err("Missing tfo->tpg_get_inst_index()\n");
return -EINVAL;
}
- if (!(tfo->release_cmd_direct)) {
- printk(KERN_ERR "Missing tfo->release_cmd_direct()\n");
+ if (!tfo->release_cmd) {
+ pr_err("Missing tfo->release_cmd()\n");
return -EINVAL;
}
- if (!(tfo->shutdown_session)) {
- printk(KERN_ERR "Missing tfo->shutdown_session()\n");
+ if (!tfo->shutdown_session) {
+ pr_err("Missing tfo->shutdown_session()\n");
return -EINVAL;
}
- if (!(tfo->close_session)) {
- printk(KERN_ERR "Missing tfo->close_session()\n");
+ if (!tfo->close_session) {
+ pr_err("Missing tfo->close_session()\n");
return -EINVAL;
}
- if (!(tfo->stop_session)) {
- printk(KERN_ERR "Missing tfo->stop_session()\n");
+ if (!tfo->stop_session) {
+ pr_err("Missing tfo->stop_session()\n");
return -EINVAL;
}
- if (!(tfo->fall_back_to_erl0)) {
- printk(KERN_ERR "Missing tfo->fall_back_to_erl0()\n");
+ if (!tfo->fall_back_to_erl0) {
+ pr_err("Missing tfo->fall_back_to_erl0()\n");
return -EINVAL;
}
- if (!(tfo->sess_logged_in)) {
- printk(KERN_ERR "Missing tfo->sess_logged_in()\n");
+ if (!tfo->sess_logged_in) {
+ pr_err("Missing tfo->sess_logged_in()\n");
return -EINVAL;
}
- if (!(tfo->sess_get_index)) {
- printk(KERN_ERR "Missing tfo->sess_get_index()\n");
+ if (!tfo->sess_get_index) {
+ pr_err("Missing tfo->sess_get_index()\n");
return -EINVAL;
}
- if (!(tfo->write_pending)) {
- printk(KERN_ERR "Missing tfo->write_pending()\n");
+ if (!tfo->write_pending) {
+ pr_err("Missing tfo->write_pending()\n");
return -EINVAL;
}
- if (!(tfo->write_pending_status)) {
- printk(KERN_ERR "Missing tfo->write_pending_status()\n");
+ if (!tfo->write_pending_status) {
+ pr_err("Missing tfo->write_pending_status()\n");
return -EINVAL;
}
- if (!(tfo->set_default_node_attributes)) {
- printk(KERN_ERR "Missing tfo->set_default_node_attributes()\n");
+ if (!tfo->set_default_node_attributes) {
+ pr_err("Missing tfo->set_default_node_attributes()\n");
return -EINVAL;
}
- if (!(tfo->get_task_tag)) {
- printk(KERN_ERR "Missing tfo->get_task_tag()\n");
+ if (!tfo->get_task_tag) {
+ pr_err("Missing tfo->get_task_tag()\n");
return -EINVAL;
}
- if (!(tfo->get_cmd_state)) {
- printk(KERN_ERR "Missing tfo->get_cmd_state()\n");
+ if (!tfo->get_cmd_state) {
+ pr_err("Missing tfo->get_cmd_state()\n");
return -EINVAL;
}
- if (!(tfo->new_cmd_failure)) {
- printk(KERN_ERR "Missing tfo->new_cmd_failure()\n");
+ if (!tfo->queue_data_in) {
+ pr_err("Missing tfo->queue_data_in()\n");
return -EINVAL;
}
- if (!(tfo->queue_data_in)) {
- printk(KERN_ERR "Missing tfo->queue_data_in()\n");
+ if (!tfo->queue_status) {
+ pr_err("Missing tfo->queue_status()\n");
return -EINVAL;
}
- if (!(tfo->queue_status)) {
- printk(KERN_ERR "Missing tfo->queue_status()\n");
+ if (!tfo->queue_tm_rsp) {
+ pr_err("Missing tfo->queue_tm_rsp()\n");
return -EINVAL;
}
- if (!(tfo->queue_tm_rsp)) {
- printk(KERN_ERR "Missing tfo->queue_tm_rsp()\n");
+ if (!tfo->set_fabric_sense_len) {
+ pr_err("Missing tfo->set_fabric_sense_len()\n");
return -EINVAL;
}
- if (!(tfo->set_fabric_sense_len)) {
- printk(KERN_ERR "Missing tfo->set_fabric_sense_len()\n");
+ if (!tfo->get_fabric_sense_len) {
+ pr_err("Missing tfo->get_fabric_sense_len()\n");
return -EINVAL;
}
- if (!(tfo->get_fabric_sense_len)) {
- printk(KERN_ERR "Missing tfo->get_fabric_sense_len()\n");
- return -EINVAL;
- }
- if (!(tfo->is_state_remove)) {
- printk(KERN_ERR "Missing tfo->is_state_remove()\n");
+ if (!tfo->is_state_remove) {
+ pr_err("Missing tfo->is_state_remove()\n");
return -EINVAL;
}
/*
@@ -502,20 +500,20 @@ static int target_fabric_tf_ops_check(
* tfo->fabric_make_tpg() and tfo->fabric_drop_tpg() in
* target_core_fabric_configfs.c WWN+TPG group context code.
*/
- if (!(tfo->fabric_make_wwn)) {
- printk(KERN_ERR "Missing tfo->fabric_make_wwn()\n");
+ if (!tfo->fabric_make_wwn) {
+ pr_err("Missing tfo->fabric_make_wwn()\n");
return -EINVAL;
}
- if (!(tfo->fabric_drop_wwn)) {
- printk(KERN_ERR "Missing tfo->fabric_drop_wwn()\n");
+ if (!tfo->fabric_drop_wwn) {
+ pr_err("Missing tfo->fabric_drop_wwn()\n");
return -EINVAL;
}
- if (!(tfo->fabric_make_tpg)) {
- printk(KERN_ERR "Missing tfo->fabric_make_tpg()\n");
+ if (!tfo->fabric_make_tpg) {
+ pr_err("Missing tfo->fabric_make_tpg()\n");
return -EINVAL;
}
- if (!(tfo->fabric_drop_tpg)) {
- printk(KERN_ERR "Missing tfo->fabric_drop_tpg()\n");
+ if (!tfo->fabric_drop_tpg) {
+ pr_err("Missing tfo->fabric_drop_tpg()\n");
return -EINVAL;
}
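
The long if-chain above is the cost of validating each mandatory fabric callback individually. A minimal userspace sketch of the same pattern, using a helper macro to cut the repetition (CHECK_OP(), struct example_ops, and example_ops_check() are illustrative names, not kernel API):

#include <stdio.h>
#include <errno.h>

struct example_ops {
	int (*make_wwn)(void);
	int (*drop_wwn)(void);
};

/* Reject registration when a required callback is NULL. */
#define CHECK_OP(ops, name)						\
	do {								\
		if (!(ops)->name) {					\
			fprintf(stderr, "Missing ops->%s()\n", #name);	\
			return -EINVAL;					\
		}							\
	} while (0)

static int example_ops_check(const struct example_ops *ops)
{
	CHECK_OP(ops, make_wwn);
	CHECK_OP(ops, drop_wwn);
	return 0;
}

int main(void)
{
	struct example_ops ops = { 0 };	/* all callbacks unset */
	return example_ops_check(&ops) ? 1 : 0;
}
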
@@ -533,22 +531,15 @@ static int target_fabric_tf_ops_check(
int target_fabric_configfs_register(
struct target_fabric_configfs *tf)
{
- struct config_group *su_group;
int ret;
- if (!(tf)) {
- printk(KERN_ERR "Unable to locate target_fabric_configfs"
+ if (!tf) {
+ pr_err("Unable to locate target_fabric_configfs"
" pointer\n");
return -EINVAL;
}
- if (!(tf->tf_subsys)) {
- printk(KERN_ERR "Unable to target struct config_subsystem"
- " pointer\n");
- return -EINVAL;
- }
- su_group = &tf->tf_subsys->su_group;
- if (!(su_group)) {
- printk(KERN_ERR "Unable to locate target struct config_group"
+ if (!tf->tf_subsys) {
+ pr_err("Unable to target struct config_subsystem"
" pointer\n");
return -EINVAL;
}
@@ -556,7 +547,7 @@ int target_fabric_configfs_register(
if (ret < 0)
return ret;
- printk(KERN_INFO "<<<<<<<<<<<<<<<<<<<<<< END FABRIC API >>>>>>>>>>>>"
+ pr_debug("<<<<<<<<<<<<<<<<<<<<<< END FABRIC API >>>>>>>>>>>>"
">>>>>>>>>>\n");
return 0;
}
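
Note the su_group check deleted in this hunk: su_group was assigned from &tf->tf_subsys->su_group, and the address of a struct member is never NULL for a valid containing pointer, so the test could never fire. A tiny standalone illustration (struct names invented):

#include <assert.h>

struct inner { int x; };
struct outer { int pad; struct inner in; };

int main(void)
{
	struct outer o;
	struct inner *p = &o.in;	/* like &tf->tf_subsys->su_group */

	assert(p != NULL);		/* always true: the check was dead code */
	return 0;
}
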
@@ -565,48 +556,39 @@ EXPORT_SYMBOL(target_fabric_configfs_register);
void target_fabric_configfs_deregister(
struct target_fabric_configfs *tf)
{
- struct config_group *su_group;
struct configfs_subsystem *su;
- if (!(tf)) {
- printk(KERN_ERR "Unable to locate passed target_fabric_"
+ if (!tf) {
+ pr_err("Unable to locate passed target_fabric_"
"configfs\n");
return;
}
su = tf->tf_subsys;
- if (!(su)) {
- printk(KERN_ERR "Unable to locate passed tf->tf_subsys"
+ if (!su) {
+ pr_err("Unable to locate passed tf->tf_subsys"
" pointer\n");
return;
}
- su_group = &tf->tf_subsys->su_group;
- if (!(su_group)) {
- printk(KERN_ERR "Unable to locate target struct config_group"
- " pointer\n");
- return;
- }
-
- printk(KERN_INFO "<<<<<<<<<<<<<<<<<<<<<< BEGIN FABRIC API >>>>>>>>>>"
+ pr_debug("<<<<<<<<<<<<<<<<<<<<<< BEGIN FABRIC API >>>>>>>>>>"
">>>>>>>>>>>>\n");
mutex_lock(&g_tf_lock);
if (atomic_read(&tf->tf_access_cnt)) {
mutex_unlock(&g_tf_lock);
- printk(KERN_ERR "Non zero tf->tf_access_cnt for fabric %s\n",
+ pr_err("Non zero tf->tf_access_cnt for fabric %s\n",
tf->tf_name);
BUG();
}
list_del(&tf->tf_list);
mutex_unlock(&g_tf_lock);
- printk(KERN_INFO "Target_Core_ConfigFS: DEREGISTER -> Releasing tf:"
+ pr_debug("Target_Core_ConfigFS: DEREGISTER -> Releasing tf:"
" %s\n", tf->tf_name);
tf->tf_module = NULL;
tf->tf_subsys = NULL;
kfree(tf);
- printk("<<<<<<<<<<<<<<<<<<<<<< END FABRIC API >>>>>>>>>>>>>>>>>"
+ pr_debug("<<<<<<<<<<<<<<<<<<<<<< END FABRIC API >>>>>>>>>>>>>>>>>"
">>>>>\n");
- return;
}
EXPORT_SYMBOL(target_fabric_configfs_deregister);
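
Most of this patch is the mechanical printk(KERN_*) -> pr_*() conversion seen throughout these hunks. For reference, a simplified sketch of what those helpers expand to (the real definitions in include/linux/printk.h also honor a per-file pr_fmt() prefix, and pr_debug() can route through dynamic debug):

#ifndef pr_fmt
#define pr_fmt(fmt) fmt
#endif

#define pr_err(fmt, ...) \
	printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
#define pr_warn(fmt, ...) \
	printk(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)

#ifdef DEBUG
#define pr_debug(fmt, ...) \
	printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
#else
/* compiled out unless DEBUG or CONFIG_DYNAMIC_DEBUG is set */
#define pr_debug(fmt, ...) \
	no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
#endif
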
@@ -627,11 +609,12 @@ static ssize_t target_core_dev_show_attr_##_name( \
\
spin_lock(&se_dev->se_dev_lock); \
dev = se_dev->se_dev_ptr; \
- if (!(dev)) { \
+ if (!dev) { \
spin_unlock(&se_dev->se_dev_lock); \
return -ENODEV; \
} \
- rb = snprintf(page, PAGE_SIZE, "%u\n", (u32)DEV_ATTRIB(dev)->_name); \
+ rb = snprintf(page, PAGE_SIZE, "%u\n", \
+ (u32)dev->se_sub_dev->se_dev_attrib._name); \
spin_unlock(&se_dev->se_dev_lock); \
\
return rb; \
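
Hand-expanding one instance makes the change easier to read; this is roughly what the show side of DEF_DEV_ATTRIB(block_size) becomes once the old DEV_ATTRIB() accessor is replaced by the explicit se_sub_dev->se_dev_attrib path (the function signature is inferred from the macro body shown, since the macro header sits outside this hunk):

static ssize_t target_core_dev_show_attr_block_size(
	struct se_dev_attrib *da,
	char *page)
{
	struct se_subsystem_dev *se_dev = da->da_sub_dev;
	struct se_device *dev;
	ssize_t rb;

	spin_lock(&se_dev->se_dev_lock);
	dev = se_dev->se_dev_ptr;
	if (!dev) {
		spin_unlock(&se_dev->se_dev_lock);
		return -ENODEV;
	}
	rb = snprintf(page, PAGE_SIZE, "%u\n",
		(u32)dev->se_sub_dev->se_dev_attrib.block_size);
	spin_unlock(&se_dev->se_dev_lock);

	return rb;
}
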
@@ -650,14 +633,14 @@ static ssize_t target_core_dev_store_attr_##_name( \
\
spin_lock(&se_dev->se_dev_lock); \
dev = se_dev->se_dev_ptr; \
- if (!(dev)) { \
+ if (!dev) { \
spin_unlock(&se_dev->se_dev_lock); \
return -ENODEV; \
} \
ret = strict_strtoul(page, 0, &val); \
if (ret < 0) { \
spin_unlock(&se_dev->se_dev_lock); \
- printk(KERN_ERR "strict_strtoul() failed with" \
+ pr_err("strict_strtoul() failed with" \
" ret: %d\n", ret); \
return -EINVAL; \
} \
@@ -715,6 +698,12 @@ SE_DEV_ATTR(emulate_tpws, S_IRUGO | S_IWUSR);
DEF_DEV_ATTRIB(enforce_pr_isids);
SE_DEV_ATTR(enforce_pr_isids, S_IRUGO | S_IWUSR);
+DEF_DEV_ATTRIB(is_nonrot);
+SE_DEV_ATTR(is_nonrot, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB(emulate_rest_reord);
+SE_DEV_ATTR(emulate_rest_reord, S_IRUGO | S_IWUSR);
+
DEF_DEV_ATTRIB_RO(hw_block_size);
SE_DEV_ATTR_RO(hw_block_size);
@@ -763,6 +752,8 @@ static struct configfs_attribute *target_core_dev_attrib_attrs[] = {
&target_core_dev_attrib_emulate_tpu.attr,
&target_core_dev_attrib_emulate_tpws.attr,
&target_core_dev_attrib_enforce_pr_isids.attr,
+ &target_core_dev_attrib_is_nonrot.attr,
+ &target_core_dev_attrib_emulate_rest_reord.attr,
&target_core_dev_attrib_hw_block_size.attr,
&target_core_dev_attrib_block_size.attr,
&target_core_dev_attrib_hw_max_sectors.attr,
@@ -819,7 +810,7 @@ static ssize_t target_core_dev_wwn_show_attr_vpd_unit_serial(
struct se_device *dev;
dev = se_dev->se_dev_ptr;
- if (!(dev))
+ if (!dev)
return -ENODEV;
return sprintf(page, "T10 VPD Unit Serial Number: %s\n",
@@ -846,13 +837,13 @@ static ssize_t target_core_dev_wwn_store_attr_vpd_unit_serial(
* VPD Unit Serial Number that OS dependent multipath can depend on.
*/
if (su_dev->su_dev_flags & SDF_FIRMWARE_VPD_UNIT_SERIAL) {
- printk(KERN_ERR "Underlying SCSI device firmware provided VPD"
+ pr_err("Underlying SCSI device firmware provided VPD"
" Unit Serial, ignoring request\n");
return -EOPNOTSUPP;
}
- if ((strlen(page) + 1) > INQUIRY_VPD_SERIAL_LEN) {
- printk(KERN_ERR "Emulated VPD Unit Serial exceeds"
+ if (strlen(page) >= INQUIRY_VPD_SERIAL_LEN) {
+ pr_err("Emulated VPD Unit Serial exceeds"
" INQUIRY_VPD_SERIAL_LEN: %d\n", INQUIRY_VPD_SERIAL_LEN);
return -EOVERFLOW;
}
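
The buffer-length checks in this patch converge on the strlen(s) >= LEN form. For this particular check the old (strlen + 1) > LEN test was already arithmetically equivalent; in the APTPL checks further down, switching > to >= closes a genuine off-by-one, since a string of exactly LEN characters plus its NUL terminator needs LEN + 1 bytes. A compilable userspace demo (SERIAL_LEN is a stand-in for INQUIRY_VPD_SERIAL_LEN):

#include <stdio.h>
#include <string.h>

#define SERIAL_LEN 8	/* buffer size, including the NUL byte */

static int check(const char *s)
{
	if (strlen(s) >= SERIAL_LEN) {
		fprintf(stderr, "\"%s\" too long for %d-byte buffer\n",
			s, SERIAL_LEN);
		return -1;
	}
	return 0;
}

int main(void)
{
	check("1234567");	/* 7 chars + NUL == 8 bytes: fits */
	check("12345678");	/* 8 chars + NUL == 9 bytes: rejected */
	return 0;
}
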
@@ -863,9 +854,9 @@ static ssize_t target_core_dev_wwn_store_attr_vpd_unit_serial(
* could cause negative effects.
*/
dev = su_dev->se_dev_ptr;
- if ((dev)) {
+ if (dev) {
if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
- printk(KERN_ERR "Unable to set VPD Unit Serial while"
+ pr_err("Unable to set VPD Unit Serial while"
" active %d $FABRIC_MOD exports exist\n",
atomic_read(&dev->dev_export_obj.obj_access_count));
return -EINVAL;
@@ -883,7 +874,7 @@ static ssize_t target_core_dev_wwn_store_attr_vpd_unit_serial(
"%s", strstrip(buf));
su_dev->su_dev_flags |= SDF_EMULATED_VPD_UNIT_SERIAL;
- printk(KERN_INFO "Target_Core_ConfigFS: Set emulated VPD Unit Serial:"
+ pr_debug("Target_Core_ConfigFS: Set emulated VPD Unit Serial:"
" %s\n", su_dev->t10_wwn.unit_serial);
return count;
@@ -905,19 +896,19 @@ static ssize_t target_core_dev_wwn_show_attr_vpd_protocol_identifier(
ssize_t len = 0;
dev = se_dev->se_dev_ptr;
- if (!(dev))
+ if (!dev)
return -ENODEV;
memset(buf, 0, VPD_TMP_BUF_SIZE);
spin_lock(&t10_wwn->t10_vpd_lock);
list_for_each_entry(vpd, &t10_wwn->t10_vpd_list, vpd_list) {
- if (!(vpd->protocol_identifier_set))
+ if (!vpd->protocol_identifier_set)
continue;
transport_dump_vpd_proto_id(vpd, buf, VPD_TMP_BUF_SIZE);
- if ((len + strlen(buf) > PAGE_SIZE))
+ if (len + strlen(buf) >= PAGE_SIZE)
break;
len += sprintf(page+len, "%s", buf);
@@ -952,7 +943,7 @@ static ssize_t target_core_dev_wwn_show_attr_##_name( \
ssize_t len = 0; \
\
dev = se_dev->se_dev_ptr; \
- if (!(dev)) \
+ if (!dev) \
return -ENODEV; \
\
spin_lock(&t10_wwn->t10_vpd_lock); \
@@ -962,19 +953,19 @@ static ssize_t target_core_dev_wwn_show_attr_##_name( \
\
memset(buf, 0, VPD_TMP_BUF_SIZE); \
transport_dump_vpd_assoc(vpd, buf, VPD_TMP_BUF_SIZE); \
- if ((len + strlen(buf) > PAGE_SIZE)) \
+ if (len + strlen(buf) >= PAGE_SIZE) \
break; \
len += sprintf(page+len, "%s", buf); \
\
memset(buf, 0, VPD_TMP_BUF_SIZE); \
transport_dump_vpd_ident_type(vpd, buf, VPD_TMP_BUF_SIZE); \
- if ((len + strlen(buf) > PAGE_SIZE)) \
+ if (len + strlen(buf) >= PAGE_SIZE) \
break; \
len += sprintf(page+len, "%s", buf); \
\
memset(buf, 0, VPD_TMP_BUF_SIZE); \
transport_dump_vpd_ident(vpd, buf, VPD_TMP_BUF_SIZE); \
- if ((len + strlen(buf) > PAGE_SIZE)) \
+ if (len + strlen(buf) >= PAGE_SIZE) \
break; \
len += sprintf(page+len, "%s", buf); \
} \
@@ -984,7 +975,7 @@ static ssize_t target_core_dev_wwn_show_attr_##_name( \
}
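
The same >= reasoning applies to the page-accumulation guards just above: the trailing sprintf() writes strlen(buf) characters plus a NUL, so the loop must bail while len + strlen(buf) is still strictly below PAGE_SIZE. A self-contained sketch of the pattern with illustrative sizes:

#include <stdio.h>
#include <string.h>

#define DEMO_PAGE_SIZE 64
#define SCRATCH 32

int main(void)
{
	char page[DEMO_PAGE_SIZE], buf[SCRATCH];
	size_t len = 0;
	int i;

	for (i = 0; i < 16; i++) {
		snprintf(buf, sizeof(buf), "chunk %d\n", i);
		if (len + strlen(buf) >= DEMO_PAGE_SIZE)
			break;	/* chunk plus NUL would overflow the page */
		len += sprintf(page + len, "%s", buf);
	}
	printf("wrote %zu of %d bytes\n", len, DEMO_PAGE_SIZE);
	return 0;
}
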
/*
- * VPD page 0x83 Assoication: Logical Unit
+ * VPD page 0x83 Association: Logical Unit
*/
DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_logical_unit, 0x00);
@@ -1083,7 +1074,7 @@ static ssize_t target_core_dev_pr_show_spc3_res(
spin_lock(&dev->dev_reservation_lock);
pr_reg = dev->dev_pr_res_holder;
- if (!(pr_reg)) {
+ if (!pr_reg) {
*len += sprintf(page + *len, "No SPC-3 Reservation holder\n");
spin_unlock(&dev->dev_reservation_lock);
return *len;
@@ -1093,7 +1084,7 @@ static ssize_t target_core_dev_pr_show_spc3_res(
PR_REG_ISID_ID_LEN);
*len += sprintf(page + *len, "SPC-3 Reservation: %s Initiator: %s%s\n",
- TPG_TFO(se_nacl->se_tpg)->get_fabric_name(),
+ se_nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
se_nacl->initiatorname, (prf_isid) ? &i_buf[0] : "");
spin_unlock(&dev->dev_reservation_lock);
@@ -1109,13 +1100,13 @@ static ssize_t target_core_dev_pr_show_spc2_res(
spin_lock(&dev->dev_reservation_lock);
se_nacl = dev->dev_reserved_node_acl;
- if (!(se_nacl)) {
+ if (!se_nacl) {
*len += sprintf(page + *len, "No SPC-2 Reservation holder\n");
spin_unlock(&dev->dev_reservation_lock);
return *len;
}
*len += sprintf(page + *len, "SPC-2 Reservation: %s Initiator: %s\n",
- TPG_TFO(se_nacl->se_tpg)->get_fabric_name(),
+ se_nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
se_nacl->initiatorname);
spin_unlock(&dev->dev_reservation_lock);
@@ -1128,10 +1119,10 @@ static ssize_t target_core_dev_pr_show_attr_res_holder(
{
ssize_t len = 0;
- if (!(su_dev->se_dev_ptr))
+ if (!su_dev->se_dev_ptr)
return -ENODEV;
- switch (T10_RES(su_dev)->res_type) {
+ switch (su_dev->t10_pr.res_type) {
case SPC3_PERSISTENT_RESERVATIONS:
target_core_dev_pr_show_spc3_res(su_dev->se_dev_ptr,
page, &len);
@@ -1165,15 +1156,15 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_all_tgt_pts(
ssize_t len = 0;
dev = su_dev->se_dev_ptr;
- if (!(dev))
+ if (!dev)
return -ENODEV;
- if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
+ if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
return len;
spin_lock(&dev->dev_reservation_lock);
pr_reg = dev->dev_pr_res_holder;
- if (!(pr_reg)) {
+ if (!pr_reg) {
len = sprintf(page, "No SPC-3 Reservation holder\n");
spin_unlock(&dev->dev_reservation_lock);
return len;
@@ -1202,13 +1193,13 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_generation(
struct se_subsystem_dev *su_dev,
char *page)
{
- if (!(su_dev->se_dev_ptr))
+ if (!su_dev->se_dev_ptr)
return -ENODEV;
- if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
+ if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
return 0;
- return sprintf(page, "0x%08x\n", T10_RES(su_dev)->pr_generation);
+ return sprintf(page, "0x%08x\n", su_dev->t10_pr.pr_generation);
}
SE_DEV_PR_ATTR_RO(res_pr_generation);
@@ -1229,15 +1220,15 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_holder_tg_port(
ssize_t len = 0;
dev = su_dev->se_dev_ptr;
- if (!(dev))
+ if (!dev)
return -ENODEV;
- if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
+ if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
return len;
spin_lock(&dev->dev_reservation_lock);
pr_reg = dev->dev_pr_res_holder;
- if (!(pr_reg)) {
+ if (!pr_reg) {
len = sprintf(page, "No SPC-3 Reservation holder\n");
spin_unlock(&dev->dev_reservation_lock);
return len;
@@ -1245,7 +1236,7 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_holder_tg_port(
se_nacl = pr_reg->pr_reg_nacl;
se_tpg = se_nacl->se_tpg;
lun = pr_reg->pr_reg_tg_pt_lun;
- tfo = TPG_TFO(se_tpg);
+ tfo = se_tpg->se_tpg_tfo;
len += sprintf(page+len, "SPC-3 Reservation: %s"
" Target Node Endpoint: %s\n", tfo->get_fabric_name(),
@@ -1276,16 +1267,16 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_registered_i_pts(
ssize_t len = 0;
int reg_count = 0, prf_isid;
- if (!(su_dev->se_dev_ptr))
+ if (!su_dev->se_dev_ptr)
return -ENODEV;
- if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
+ if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
return len;
len += sprintf(page+len, "SPC-3 PR Registrations:\n");
- spin_lock(&T10_RES(su_dev)->registration_lock);
- list_for_each_entry(pr_reg, &T10_RES(su_dev)->registration_list,
+ spin_lock(&su_dev->t10_pr.registration_lock);
+ list_for_each_entry(pr_reg, &su_dev->t10_pr.registration_list,
pr_reg_list) {
memset(buf, 0, 384);
@@ -1299,15 +1290,15 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_registered_i_pts(
&i_buf[0] : "", pr_reg->pr_res_key,
pr_reg->pr_res_generation);
- if ((len + strlen(buf) > PAGE_SIZE))
+ if (len + strlen(buf) >= PAGE_SIZE)
break;
len += sprintf(page+len, "%s", buf);
reg_count++;
}
- spin_unlock(&T10_RES(su_dev)->registration_lock);
+ spin_unlock(&su_dev->t10_pr.registration_lock);
- if (!(reg_count))
+ if (!reg_count)
len += sprintf(page+len, "None\n");
return len;
@@ -1327,15 +1318,15 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_type(
ssize_t len = 0;
dev = su_dev->se_dev_ptr;
- if (!(dev))
+ if (!dev)
return -ENODEV;
- if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
+ if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
return len;
spin_lock(&dev->dev_reservation_lock);
pr_reg = dev->dev_pr_res_holder;
- if (!(pr_reg)) {
+ if (!pr_reg) {
len = sprintf(page, "No SPC-3 Reservation holder\n");
spin_unlock(&dev->dev_reservation_lock);
return len;
@@ -1358,10 +1349,10 @@ static ssize_t target_core_dev_pr_show_attr_res_type(
{
ssize_t len = 0;
- if (!(su_dev->se_dev_ptr))
+ if (!su_dev->se_dev_ptr)
return -ENODEV;
- switch (T10_RES(su_dev)->res_type) {
+ switch (su_dev->t10_pr.res_type) {
case SPC3_PERSISTENT_RESERVATIONS:
len = sprintf(page, "SPC3_PERSISTENT_RESERVATIONS\n");
break;
@@ -1389,14 +1380,14 @@ static ssize_t target_core_dev_pr_show_attr_res_aptpl_active(
struct se_subsystem_dev *su_dev,
char *page)
{
- if (!(su_dev->se_dev_ptr))
+ if (!su_dev->se_dev_ptr)
return -ENODEV;
- if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
+ if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
return 0;
return sprintf(page, "APTPL Bit Status: %s\n",
- (T10_RES(su_dev)->pr_aptpl_active) ? "Activated" : "Disabled");
+ (su_dev->t10_pr.pr_aptpl_active) ? "Activated" : "Disabled");
}
SE_DEV_PR_ATTR_RO(res_aptpl_active);
@@ -1408,10 +1399,10 @@ static ssize_t target_core_dev_pr_show_attr_res_aptpl_metadata(
struct se_subsystem_dev *su_dev,
char *page)
{
- if (!(su_dev->se_dev_ptr))
+ if (!su_dev->se_dev_ptr)
return -ENODEV;
- if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
+ if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
return 0;
return sprintf(page, "Ready to process PR APTPL metadata..\n");
@@ -1460,14 +1451,14 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
u8 type = 0, scope;
dev = su_dev->se_dev_ptr;
- if (!(dev))
+ if (!dev)
return -ENODEV;
- if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
+ if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
return 0;
if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
- printk(KERN_INFO "Unable to process APTPL metadata while"
+ pr_debug("Unable to process APTPL metadata while"
" active fabric exports exist\n");
return -EINVAL;
}
@@ -1496,8 +1487,8 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
ret = -ENOMEM;
goto out;
}
- if (strlen(i_port) > PR_APTPL_MAX_IPORT_LEN) {
- printk(KERN_ERR "APTPL metadata initiator_node="
+ if (strlen(i_port) >= PR_APTPL_MAX_IPORT_LEN) {
+ pr_err("APTPL metadata initiator_node="
" exceeds PR_APTPL_MAX_IPORT_LEN: %d\n",
PR_APTPL_MAX_IPORT_LEN);
ret = -EINVAL;
@@ -1510,8 +1501,8 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
ret = -ENOMEM;
goto out;
}
- if (strlen(isid) > PR_REG_ISID_LEN) {
- printk(KERN_ERR "APTPL metadata initiator_isid"
+ if (strlen(isid) >= PR_REG_ISID_LEN) {
+ pr_err("APTPL metadata initiator_isid"
"= exceeds PR_REG_ISID_LEN: %d\n",
PR_REG_ISID_LEN);
ret = -EINVAL;
@@ -1526,7 +1517,7 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
}
ret = strict_strtoull(arg_p, 0, &tmp_ll);
if (ret < 0) {
- printk(KERN_ERR "strict_strtoull() failed for"
+ pr_err("strict_strtoull() failed for"
" sa_res_key=\n");
goto out;
}
@@ -1571,8 +1562,8 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
ret = -ENOMEM;
goto out;
}
- if (strlen(t_port) > PR_APTPL_MAX_TPORT_LEN) {
- printk(KERN_ERR "APTPL metadata target_node="
+ if (strlen(t_port) >= PR_APTPL_MAX_TPORT_LEN) {
+ pr_err("APTPL metadata target_node="
" exceeds PR_APTPL_MAX_TPORT_LEN: %d\n",
PR_APTPL_MAX_TPORT_LEN);
ret = -EINVAL;
@@ -1596,20 +1587,20 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
}
}
- if (!(i_port) || !(t_port) || !(sa_res_key)) {
- printk(KERN_ERR "Illegal parameters for APTPL registration\n");
+ if (!i_port || !t_port || !sa_res_key) {
+ pr_err("Illegal parameters for APTPL registration\n");
ret = -EINVAL;
goto out;
}
if (res_holder && !(type)) {
- printk(KERN_ERR "Illegal PR type: 0x%02x for reservation"
+ pr_err("Illegal PR type: 0x%02x for reservation"
" holder\n", type);
ret = -EINVAL;
goto out;
}
- ret = core_scsi3_alloc_aptpl_registration(T10_RES(su_dev), sa_res_key,
+ ret = core_scsi3_alloc_aptpl_registration(&su_dev->t10_pr, sa_res_key,
i_port, isid, mapped_lun, t_port, tpgt, target_lun,
res_holder, all_tg_pt, type);
out:
@@ -1662,7 +1653,7 @@ static ssize_t target_core_show_dev_info(void *p, char *page)
int bl = 0;
ssize_t read_bytes = 0;
- if (!(se_dev->se_dev_ptr))
+ if (!se_dev->se_dev_ptr)
return -ENODEV;
transport_dump_dev_state(se_dev->se_dev_ptr, page, &bl);
@@ -1688,8 +1679,8 @@ static ssize_t target_core_store_dev_control(
struct se_hba *hba = se_dev->se_dev_hba;
struct se_subsystem_api *t = hba->transport;
- if (!(se_dev->se_dev_su_ptr)) {
- printk(KERN_ERR "Unable to locate struct se_subsystem_dev>se"
+ if (!se_dev->se_dev_su_ptr) {
+ pr_err("Unable to locate struct se_subsystem_dev>se"
"_dev_su_ptr\n");
return -EINVAL;
}
@@ -1725,7 +1716,7 @@ static ssize_t target_core_store_dev_alias(
ssize_t read_bytes;
if (count > (SE_DEV_ALIAS_LEN-1)) {
- printk(KERN_ERR "alias count: %d exceeds"
+ pr_err("alias count: %d exceeds"
" SE_DEV_ALIAS_LEN-1: %u\n", (int)count,
SE_DEV_ALIAS_LEN-1);
return -EINVAL;
@@ -1735,7 +1726,7 @@ static ssize_t target_core_store_dev_alias(
read_bytes = snprintf(&se_dev->se_dev_alias[0], SE_DEV_ALIAS_LEN,
"%s", page);
- printk(KERN_INFO "Target_Core_ConfigFS: %s/%s set alias: %s\n",
+ pr_debug("Target_Core_ConfigFS: %s/%s set alias: %s\n",
config_item_name(&hba->hba_group.cg_item),
config_item_name(&se_dev->se_dev_group.cg_item),
se_dev->se_dev_alias);
@@ -1771,7 +1762,7 @@ static ssize_t target_core_store_dev_udev_path(
ssize_t read_bytes;
if (count > (SE_UDEV_PATH_LEN-1)) {
- printk(KERN_ERR "udev_path count: %d exceeds"
+ pr_err("udev_path count: %d exceeds"
" SE_UDEV_PATH_LEN-1: %u\n", (int)count,
SE_UDEV_PATH_LEN-1);
return -EINVAL;
@@ -1781,7 +1772,7 @@ static ssize_t target_core_store_dev_udev_path(
read_bytes = snprintf(&se_dev->se_dev_udev_path[0], SE_UDEV_PATH_LEN,
"%s", page);
- printk(KERN_INFO "Target_Core_ConfigFS: %s/%s set udev_path: %s\n",
+ pr_debug("Target_Core_ConfigFS: %s/%s set udev_path: %s\n",
config_item_name(&hba->hba_group.cg_item),
config_item_name(&se_dev->se_dev_group.cg_item),
se_dev->se_dev_udev_path);
@@ -1809,13 +1800,13 @@ static ssize_t target_core_store_dev_enable(
char *ptr;
ptr = strstr(page, "1");
- if (!(ptr)) {
- printk(KERN_ERR "For dev_enable ops, only valid value"
+ if (!ptr) {
+ pr_err("For dev_enable ops, only valid value"
" is \"1\"\n");
return -EINVAL;
}
- if ((se_dev->se_dev_ptr)) {
- printk(KERN_ERR "se_dev->se_dev_ptr already set for storage"
+ if (se_dev->se_dev_ptr) {
+ pr_err("se_dev->se_dev_ptr already set for storage"
" object\n");
return -EEXIST;
}
@@ -1830,7 +1821,7 @@ static ssize_t target_core_store_dev_enable(
return -EINVAL;
se_dev->se_dev_ptr = dev;
- printk(KERN_INFO "Target_Core_ConfigFS: Registered se_dev->se_dev_ptr:"
+ pr_debug("Target_Core_ConfigFS: Registered se_dev->se_dev_ptr:"
" %p\n", se_dev->se_dev_ptr);
return count;
@@ -1854,22 +1845,22 @@ static ssize_t target_core_show_alua_lu_gp(void *p, char *page)
ssize_t len = 0;
dev = su_dev->se_dev_ptr;
- if (!(dev))
+ if (!dev)
return -ENODEV;
- if (T10_ALUA(su_dev)->alua_type != SPC3_ALUA_EMULATED)
+ if (su_dev->t10_alua.alua_type != SPC3_ALUA_EMULATED)
return len;
lu_gp_mem = dev->dev_alua_lu_gp_mem;
- if (!(lu_gp_mem)) {
- printk(KERN_ERR "NULL struct se_device->dev_alua_lu_gp_mem"
+ if (!lu_gp_mem) {
+ pr_err("NULL struct se_device->dev_alua_lu_gp_mem"
" pointer\n");
return -EINVAL;
}
spin_lock(&lu_gp_mem->lu_gp_mem_lock);
lu_gp = lu_gp_mem->lu_gp;
- if ((lu_gp)) {
+ if (lu_gp) {
lu_ci = &lu_gp->lu_gp_group.cg_item;
len += sprintf(page, "LU Group Alias: %s\nLU Group ID: %hu\n",
config_item_name(lu_ci), lu_gp->lu_gp_id);
@@ -1893,17 +1884,17 @@ static ssize_t target_core_store_alua_lu_gp(
int move = 0;
dev = su_dev->se_dev_ptr;
- if (!(dev))
+ if (!dev)
return -ENODEV;
- if (T10_ALUA(su_dev)->alua_type != SPC3_ALUA_EMULATED) {
- printk(KERN_WARNING "SPC3_ALUA_EMULATED not enabled for %s/%s\n",
+ if (su_dev->t10_alua.alua_type != SPC3_ALUA_EMULATED) {
+ pr_warn("SPC3_ALUA_EMULATED not enabled for %s/%s\n",
config_item_name(&hba->hba_group.cg_item),
config_item_name(&su_dev->se_dev_group.cg_item));
return -EINVAL;
}
if (count > LU_GROUP_NAME_BUF) {
- printk(KERN_ERR "ALUA LU Group Alias too large!\n");
+ pr_err("ALUA LU Group Alias too large!\n");
return -EINVAL;
}
memset(buf, 0, LU_GROUP_NAME_BUF);
@@ -1919,27 +1910,27 @@ static ssize_t target_core_store_alua_lu_gp(
* core_alua_get_lu_gp_by_name() below.
*/
lu_gp_new = core_alua_get_lu_gp_by_name(strstrip(buf));
- if (!(lu_gp_new))
+ if (!lu_gp_new)
return -ENODEV;
}
lu_gp_mem = dev->dev_alua_lu_gp_mem;
- if (!(lu_gp_mem)) {
+ if (!lu_gp_mem) {
if (lu_gp_new)
core_alua_put_lu_gp_from_name(lu_gp_new);
- printk(KERN_ERR "NULL struct se_device->dev_alua_lu_gp_mem"
+ pr_err("NULL struct se_device->dev_alua_lu_gp_mem"
" pointer\n");
return -EINVAL;
}
spin_lock(&lu_gp_mem->lu_gp_mem_lock);
lu_gp = lu_gp_mem->lu_gp;
- if ((lu_gp)) {
+ if (lu_gp) {
/*
* Clearing an existing lu_gp association, and replacing
* with NULL
*/
- if (!(lu_gp_new)) {
- printk(KERN_INFO "Target_Core_ConfigFS: Releasing %s/%s"
+ if (!lu_gp_new) {
+ pr_debug("Target_Core_ConfigFS: Releasing %s/%s"
" from ALUA LU Group: core/alua/lu_gps/%s, ID:"
" %hu\n",
config_item_name(&hba->hba_group.cg_item),
@@ -1964,7 +1955,7 @@ static ssize_t target_core_store_alua_lu_gp(
__core_alua_attach_lu_gp_mem(lu_gp_mem, lu_gp_new);
spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
- printk(KERN_INFO "Target_Core_ConfigFS: %s %s/%s to ALUA LU Group:"
+ pr_debug("Target_Core_ConfigFS: %s %s/%s to ALUA LU Group:"
" core/alua/lu_gps/%s, ID: %hu\n",
(move) ? "Moving" : "Adding",
config_item_name(&hba->hba_group.cg_item),
@@ -2008,7 +1999,7 @@ static void target_core_dev_release(struct config_item *item)
*`echo 1 > $CONFIGFS/core/$HBA/$DEV/dev_enable`
*/
if (se_dev->se_dev_ptr) {
- printk(KERN_INFO "Target_Core_ConfigFS: Calling se_free_"
+ pr_debug("Target_Core_ConfigFS: Calling se_free_"
"virtual_device() for se_dev_ptr: %p\n",
se_dev->se_dev_ptr);
@@ -2017,14 +2008,14 @@ static void target_core_dev_release(struct config_item *item)
/*
* Release struct se_subsystem_dev->se_dev_su_ptr..
*/
- printk(KERN_INFO "Target_Core_ConfigFS: Calling t->free_"
+ pr_debug("Target_Core_ConfigFS: Calling t->free_"
"device() for se_dev_su_ptr: %p\n",
se_dev->se_dev_su_ptr);
t->free_device(se_dev->se_dev_su_ptr);
}
- printk(KERN_INFO "Target_Core_ConfigFS: Deallocating se_subsystem"
+ pr_debug("Target_Core_ConfigFS: Deallocating se_subsystem"
"_dev_t: %p\n", se_dev);
kfree(se_dev);
}
@@ -2039,10 +2030,10 @@ static ssize_t target_core_dev_show(struct config_item *item,
struct target_core_configfs_attribute *tc_attr = container_of(
attr, struct target_core_configfs_attribute, attr);
- if (!(tc_attr->show))
+ if (!tc_attr->show)
return -EINVAL;
- return tc_attr->show((void *)se_dev, page);
+ return tc_attr->show(se_dev, page);
}
static ssize_t target_core_dev_store(struct config_item *item,
@@ -2055,10 +2046,10 @@ static ssize_t target_core_dev_store(struct config_item *item,
struct target_core_configfs_attribute *tc_attr = container_of(
attr, struct target_core_configfs_attribute, attr);
- if (!(tc_attr->store))
+ if (!tc_attr->store)
return -EINVAL;
- return tc_attr->store((void *)se_dev, page, count);
+ return tc_attr->store(se_dev, page, count);
}
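
The (void *) casts dropped in the two dispatchers above become unnecessary because the wrapper attribute carries typed callbacks. A self-contained sketch of the container_of() dispatch (the container_of() definition is the standard one, repeated here so the sketch compiles in userspace; struct layouts are abbreviated):

#include <stddef.h>
#include <errno.h>
#include <sys/types.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct configfs_attribute { const char *ca_name; };
struct se_subsystem_dev;	/* opaque for this sketch */

struct target_core_configfs_attribute {
	struct configfs_attribute attr;
	ssize_t (*show)(struct se_subsystem_dev *, char *);
	ssize_t (*store)(struct se_subsystem_dev *, const char *, size_t);
};

static ssize_t dispatch_show(struct se_subsystem_dev *se_dev,
			     struct configfs_attribute *attr, char *page)
{
	/* Recover the typed wrapper from the embedded generic attribute. */
	struct target_core_configfs_attribute *tc_attr =
		container_of(attr, struct target_core_configfs_attribute, attr);

	if (!tc_attr->show)
		return -EINVAL;
	return tc_attr->show(se_dev, page);
}
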
static struct configfs_item_operations target_core_dev_item_ops = {
@@ -2098,7 +2089,7 @@ static ssize_t target_core_alua_lu_gp_show_attr_lu_gp_id(
struct t10_alua_lu_gp *lu_gp,
char *page)
{
- if (!(lu_gp->lu_gp_valid_id))
+ if (!lu_gp->lu_gp_valid_id)
return 0;
return sprintf(page, "%hu\n", lu_gp->lu_gp_id);
@@ -2115,12 +2106,12 @@ static ssize_t target_core_alua_lu_gp_store_attr_lu_gp_id(
ret = strict_strtoul(page, 0, &lu_gp_id);
if (ret < 0) {
- printk(KERN_ERR "strict_strtoul() returned %d for"
+ pr_err("strict_strtoul() returned %d for"
" lu_gp_id\n", ret);
return -EINVAL;
}
if (lu_gp_id > 0x0000ffff) {
- printk(KERN_ERR "ALUA lu_gp_id: %lu exceeds maximum:"
+ pr_err("ALUA lu_gp_id: %lu exceeds maximum:"
" 0x0000ffff\n", lu_gp_id);
return -EINVAL;
}
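
A userspace analogue of the parse-and-range-check done for lu_gp_id above; strict_strtoul() was the kernel string-to-integer API of this era (later replaced by kstrtoul()), approximated here with strtoul():

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

static int parse_gp_id(const char *page, unsigned short *out)
{
	char *end;
	unsigned long id;

	errno = 0;
	id = strtoul(page, &end, 0);
	if (errno || end == page) {
		fprintf(stderr, "cannot parse \"%s\"\n", page);
		return -EINVAL;
	}
	if (id > 0x0000ffff) {	/* same 16-bit ceiling as the kernel check */
		fprintf(stderr, "id %lu exceeds 0x0000ffff\n", id);
		return -EINVAL;
	}
	*out = (unsigned short)id;
	return 0;
}

int main(void)
{
	unsigned short id;
	return parse_gp_id("0x1234", &id) ? 1 : 0;
}
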
@@ -2129,7 +2120,7 @@ static ssize_t target_core_alua_lu_gp_store_attr_lu_gp_id(
if (ret < 0)
return -EINVAL;
- printk(KERN_INFO "Target_Core_ConfigFS: Set ALUA Logical Unit"
+ pr_debug("Target_Core_ConfigFS: Set ALUA Logical Unit"
" Group: core/alua/lu_gps/%s to ID: %hu\n",
config_item_name(&alua_lu_gp_cg->cg_item),
lu_gp->lu_gp_id);
@@ -2167,7 +2158,7 @@ static ssize_t target_core_alua_lu_gp_show_attr_members(
cur_len++; /* Extra byte for NULL terminator */
if ((cur_len + len) > PAGE_SIZE) {
- printk(KERN_WARNING "Ran out of lu_gp_show_attr"
+ pr_warn("Ran out of lu_gp_show_attr"
"_members buffer\n");
break;
}
@@ -2231,7 +2222,7 @@ static struct config_group *target_core_alua_create_lu_gp(
config_group_init_type_name(alua_lu_gp_cg, name,
&target_core_alua_lu_gp_cit);
- printk(KERN_INFO "Target_Core_ConfigFS: Allocated ALUA Logical Unit"
+ pr_debug("Target_Core_ConfigFS: Allocated ALUA Logical Unit"
" Group: core/alua/lu_gps/%s\n",
config_item_name(alua_lu_gp_ci));
@@ -2246,7 +2237,7 @@ static void target_core_alua_drop_lu_gp(
struct t10_alua_lu_gp *lu_gp = container_of(to_config_group(item),
struct t10_alua_lu_gp, lu_gp_group);
- printk(KERN_INFO "Target_Core_ConfigFS: Releasing ALUA Logical Unit"
+ pr_debug("Target_Core_ConfigFS: Releasing ALUA Logical Unit"
" Group: core/alua/lu_gps/%s, ID: %hu\n",
config_item_name(item), lu_gp->lu_gp_id);
/*
@@ -2305,22 +2296,22 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_state(
unsigned long tmp;
int new_state, ret;
- if (!(tg_pt_gp->tg_pt_gp_valid_id)) {
- printk(KERN_ERR "Unable to do implict ALUA on non valid"
+ if (!tg_pt_gp->tg_pt_gp_valid_id) {
+ pr_err("Unable to do implict ALUA on non valid"
" tg_pt_gp ID: %hu\n", tg_pt_gp->tg_pt_gp_valid_id);
return -EINVAL;
}
ret = strict_strtoul(page, 0, &tmp);
if (ret < 0) {
- printk("Unable to extract new ALUA access state from"
+ pr_err("Unable to extract new ALUA access state from"
" %s\n", page);
return -EINVAL;
}
new_state = (int)tmp;
if (!(tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA)) {
- printk(KERN_ERR "Unable to process implict configfs ALUA"
+ pr_err("Unable to process implict configfs ALUA"
" transition while TPGS_IMPLICT_ALUA is diabled\n");
return -EINVAL;
}
@@ -2351,8 +2342,8 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_status(
unsigned long tmp;
int new_status, ret;
- if (!(tg_pt_gp->tg_pt_gp_valid_id)) {
- printk(KERN_ERR "Unable to do set ALUA access status on non"
+ if (!tg_pt_gp->tg_pt_gp_valid_id) {
+ pr_err("Unable to do set ALUA access status on non"
" valid tg_pt_gp ID: %hu\n",
tg_pt_gp->tg_pt_gp_valid_id);
return -EINVAL;
@@ -2360,7 +2351,7 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_status(
ret = strict_strtoul(page, 0, &tmp);
if (ret < 0) {
- printk(KERN_ERR "Unable to extract new ALUA access status"
+ pr_err("Unable to extract new ALUA access status"
" from %s\n", page);
return -EINVAL;
}
@@ -2369,7 +2360,7 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_status(
if ((new_status != ALUA_STATUS_NONE) &&
(new_status != ALUA_STATUS_ALTERED_BY_EXPLICT_STPG) &&
(new_status != ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA)) {
- printk(KERN_ERR "Illegal ALUA access status: 0x%02x\n",
+ pr_err("Illegal ALUA access status: 0x%02x\n",
new_status);
return -EINVAL;
}
@@ -2420,12 +2411,12 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_write_metadata(
ret = strict_strtoul(page, 0, &tmp);
if (ret < 0) {
- printk(KERN_ERR "Unable to extract alua_write_metadata\n");
+ pr_err("Unable to extract alua_write_metadata\n");
return -EINVAL;
}
if ((tmp != 0) && (tmp != 1)) {
- printk(KERN_ERR "Illegal value for alua_write_metadata:"
+ pr_err("Illegal value for alua_write_metadata:"
" %lu\n", tmp);
return -EINVAL;
}
@@ -2507,7 +2498,7 @@ static ssize_t target_core_alua_tg_pt_gp_show_attr_tg_pt_gp_id(
struct t10_alua_tg_pt_gp *tg_pt_gp,
char *page)
{
- if (!(tg_pt_gp->tg_pt_gp_valid_id))
+ if (!tg_pt_gp->tg_pt_gp_valid_id)
return 0;
return sprintf(page, "%hu\n", tg_pt_gp->tg_pt_gp_id);
@@ -2524,12 +2515,12 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_tg_pt_gp_id(
ret = strict_strtoul(page, 0, &tg_pt_gp_id);
if (ret < 0) {
- printk(KERN_ERR "strict_strtoul() returned %d for"
+ pr_err("strict_strtoul() returned %d for"
" tg_pt_gp_id\n", ret);
return -EINVAL;
}
if (tg_pt_gp_id > 0x0000ffff) {
- printk(KERN_ERR "ALUA tg_pt_gp_id: %lu exceeds maximum:"
+ pr_err("ALUA tg_pt_gp_id: %lu exceeds maximum:"
" 0x0000ffff\n", tg_pt_gp_id);
return -EINVAL;
}
@@ -2538,7 +2529,7 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_tg_pt_gp_id(
if (ret < 0)
return -EINVAL;
- printk(KERN_INFO "Target_Core_ConfigFS: Set ALUA Target Port Group: "
+ pr_debug("Target_Core_ConfigFS: Set ALUA Target Port Group: "
"core/alua/tg_pt_gps/%s to ID: %hu\n",
config_item_name(&alua_tg_pt_gp_cg->cg_item),
tg_pt_gp->tg_pt_gp_id);
@@ -2572,14 +2563,14 @@ static ssize_t target_core_alua_tg_pt_gp_show_attr_members(
lun = port->sep_lun;
cur_len = snprintf(buf, TG_PT_GROUP_NAME_BUF, "%s/%s/tpgt_%hu"
- "/%s\n", TPG_TFO(tpg)->get_fabric_name(),
- TPG_TFO(tpg)->tpg_get_wwn(tpg),
- TPG_TFO(tpg)->tpg_get_tag(tpg),
+ "/%s\n", tpg->se_tpg_tfo->get_fabric_name(),
+ tpg->se_tpg_tfo->tpg_get_wwn(tpg),
+ tpg->se_tpg_tfo->tpg_get_tag(tpg),
config_item_name(&lun->lun_group.cg_item));
cur_len++; /* Extra byte for NULL terminator */
if ((cur_len + len) > PAGE_SIZE) {
- printk(KERN_WARNING "Ran out of lu_gp_show_attr"
+ pr_warn("Ran out of lu_gp_show_attr"
"_members buffer\n");
break;
}
@@ -2645,7 +2636,7 @@ static struct config_group *target_core_alua_create_tg_pt_gp(
struct config_item *alua_tg_pt_gp_ci = NULL;
tg_pt_gp = core_alua_allocate_tg_pt_gp(su_dev, name, 0);
- if (!(tg_pt_gp))
+ if (!tg_pt_gp)
return NULL;
alua_tg_pt_gp_cg = &tg_pt_gp->tg_pt_gp_group;
@@ -2654,7 +2645,7 @@ static struct config_group *target_core_alua_create_tg_pt_gp(
config_group_init_type_name(alua_tg_pt_gp_cg, name,
&target_core_alua_tg_pt_gp_cit);
- printk(KERN_INFO "Target_Core_ConfigFS: Allocated ALUA Target Port"
+ pr_debug("Target_Core_ConfigFS: Allocated ALUA Target Port"
" Group: alua/tg_pt_gps/%s\n",
config_item_name(alua_tg_pt_gp_ci));
@@ -2668,7 +2659,7 @@ static void target_core_alua_drop_tg_pt_gp(
struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(to_config_group(item),
struct t10_alua_tg_pt_gp, tg_pt_gp_group);
- printk(KERN_INFO "Target_Core_ConfigFS: Releasing ALUA Target Port"
+ pr_debug("Target_Core_ConfigFS: Releasing ALUA Target Port"
" Group: alua/tg_pt_gps/%s, ID: %hu\n",
config_item_name(item), tg_pt_gp->tg_pt_gp_id);
/*
@@ -2759,21 +2750,21 @@ static struct config_group *target_core_make_subdev(
se_dev = kzalloc(sizeof(struct se_subsystem_dev), GFP_KERNEL);
if (!se_dev) {
- printk(KERN_ERR "Unable to allocate memory for"
+ pr_err("Unable to allocate memory for"
" struct se_subsystem_dev\n");
goto unlock;
}
- INIT_LIST_HEAD(&se_dev->g_se_dev_list);
+ INIT_LIST_HEAD(&se_dev->se_dev_node);
INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list);
spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock);
- INIT_LIST_HEAD(&se_dev->t10_reservation.registration_list);
- INIT_LIST_HEAD(&se_dev->t10_reservation.aptpl_reg_list);
- spin_lock_init(&se_dev->t10_reservation.registration_lock);
- spin_lock_init(&se_dev->t10_reservation.aptpl_reg_lock);
+ INIT_LIST_HEAD(&se_dev->t10_pr.registration_list);
+ INIT_LIST_HEAD(&se_dev->t10_pr.aptpl_reg_list);
+ spin_lock_init(&se_dev->t10_pr.registration_lock);
+ spin_lock_init(&se_dev->t10_pr.aptpl_reg_lock);
INIT_LIST_HEAD(&se_dev->t10_alua.tg_pt_gps_list);
spin_lock_init(&se_dev->t10_alua.tg_pt_gps_lock);
spin_lock_init(&se_dev->se_dev_lock);
- se_dev->t10_reservation.pr_aptpl_buf_len = PR_APTPL_BUF_LEN;
+ se_dev->t10_pr.pr_aptpl_buf_len = PR_APTPL_BUF_LEN;
se_dev->t10_wwn.t10_sub_dev = se_dev;
se_dev->t10_alua.t10_sub_dev = se_dev;
se_dev->se_dev_attrib.da_sub_dev = se_dev;
@@ -2783,7 +2774,7 @@ static struct config_group *target_core_make_subdev(
dev_cg->default_groups = kzalloc(sizeof(struct config_group) * 7,
GFP_KERNEL);
- if (!(dev_cg->default_groups))
+ if (!dev_cg->default_groups)
goto out;
/*
* Set se_dev_su_ptr from struct se_subsystem_api returned void ptr
@@ -2794,14 +2785,14 @@ static struct config_group *target_core_make_subdev(
* configfs tree for device object's struct config_group.
*/
se_dev->se_dev_su_ptr = t->allocate_virtdevice(hba, name);
- if (!(se_dev->se_dev_su_ptr)) {
- printk(KERN_ERR "Unable to locate subsystem dependent pointer"
+ if (!se_dev->se_dev_su_ptr) {
+ pr_err("Unable to locate subsystem dependent pointer"
" from allocate_virtdevice()\n");
goto out;
}
- spin_lock(&se_global->g_device_lock);
- list_add_tail(&se_dev->g_se_dev_list, &se_global->g_se_dev_list);
- spin_unlock(&se_global->g_device_lock);
+ spin_lock(&se_device_lock);
+ list_add_tail(&se_dev->se_dev_node, &se_dev_list);
+ spin_unlock(&se_device_lock);
config_group_init_type_name(&se_dev->se_dev_group, name,
&target_core_dev_cit);
@@ -2826,14 +2817,14 @@ static struct config_group *target_core_make_subdev(
* Add core/$HBA/$DEV/alua/default_tg_pt_gp
*/
tg_pt_gp = core_alua_allocate_tg_pt_gp(se_dev, "default_tg_pt_gp", 1);
- if (!(tg_pt_gp))
+ if (!tg_pt_gp)
goto out;
- tg_pt_gp_cg = &T10_ALUA(se_dev)->alua_tg_pt_gps_group;
+ tg_pt_gp_cg = &se_dev->t10_alua.alua_tg_pt_gps_group;
tg_pt_gp_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
GFP_KERNEL);
- if (!(tg_pt_gp_cg->default_groups)) {
- printk(KERN_ERR "Unable to allocate tg_pt_gp_cg->"
+ if (!tg_pt_gp_cg->default_groups) {
+ pr_err("Unable to allocate tg_pt_gp_cg->"
"default_groups\n");
goto out;
}
@@ -2842,28 +2833,28 @@ static struct config_group *target_core_make_subdev(
"default_tg_pt_gp", &target_core_alua_tg_pt_gp_cit);
tg_pt_gp_cg->default_groups[0] = &tg_pt_gp->tg_pt_gp_group;
tg_pt_gp_cg->default_groups[1] = NULL;
- T10_ALUA(se_dev)->default_tg_pt_gp = tg_pt_gp;
+ se_dev->t10_alua.default_tg_pt_gp = tg_pt_gp;
/*
* Add core/$HBA/$DEV/statistics/ default groups
*/
- dev_stat_grp = &DEV_STAT_GRP(se_dev)->stat_group;
+ dev_stat_grp = &se_dev->dev_stat_grps.stat_group;
dev_stat_grp->default_groups = kzalloc(sizeof(struct config_group) * 4,
GFP_KERNEL);
if (!dev_stat_grp->default_groups) {
- printk(KERN_ERR "Unable to allocate dev_stat_grp->default_groups\n");
+ pr_err("Unable to allocate dev_stat_grp->default_groups\n");
goto out;
}
target_stat_setup_dev_default_groups(se_dev);
- printk(KERN_INFO "Target_Core_ConfigFS: Allocated struct se_subsystem_dev:"
+ pr_debug("Target_Core_ConfigFS: Allocated struct se_subsystem_dev:"
" %p se_dev_su_ptr: %p\n", se_dev, se_dev->se_dev_su_ptr);
mutex_unlock(&hba->hba_access_mutex);
return &se_dev->se_dev_group;
out:
- if (T10_ALUA(se_dev)->default_tg_pt_gp) {
- core_alua_free_tg_pt_gp(T10_ALUA(se_dev)->default_tg_pt_gp);
- T10_ALUA(se_dev)->default_tg_pt_gp = NULL;
+ if (se_dev->t10_alua.default_tg_pt_gp) {
+ core_alua_free_tg_pt_gp(se_dev->t10_alua.default_tg_pt_gp);
+ se_dev->t10_alua.default_tg_pt_gp = NULL;
}
if (dev_stat_grp)
kfree(dev_stat_grp->default_groups);
@@ -2896,11 +2887,11 @@ static void target_core_drop_subdev(
mutex_lock(&hba->hba_access_mutex);
t = hba->transport;
- spin_lock(&se_global->g_device_lock);
- list_del(&se_dev->g_se_dev_list);
- spin_unlock(&se_global->g_device_lock);
+ spin_lock(&se_device_lock);
+ list_del(&se_dev->se_dev_node);
+ spin_unlock(&se_device_lock);
- dev_stat_grp = &DEV_STAT_GRP(se_dev)->stat_group;
+ dev_stat_grp = &se_dev->dev_stat_grps.stat_group;
for (i = 0; dev_stat_grp->default_groups[i]; i++) {
df_item = &dev_stat_grp->default_groups[i]->cg_item;
dev_stat_grp->default_groups[i] = NULL;
@@ -2908,7 +2899,7 @@ static void target_core_drop_subdev(
}
kfree(dev_stat_grp->default_groups);
- tg_pt_gp_cg = &T10_ALUA(se_dev)->alua_tg_pt_gps_group;
+ tg_pt_gp_cg = &se_dev->t10_alua.alua_tg_pt_gps_group;
for (i = 0; tg_pt_gp_cg->default_groups[i]; i++) {
df_item = &tg_pt_gp_cg->default_groups[i]->cg_item;
tg_pt_gp_cg->default_groups[i] = NULL;
@@ -2919,7 +2910,7 @@ static void target_core_drop_subdev(
* core_alua_free_tg_pt_gp() is called from ->default_tg_pt_gp
* directly from target_core_alua_tg_pt_gp_release().
*/
- T10_ALUA(se_dev)->default_tg_pt_gp = NULL;
+ se_dev->t10_alua.default_tg_pt_gp = NULL;
dev_cg = &se_dev->se_dev_group;
for (i = 0; dev_cg->default_groups[i]; i++) {
@@ -2988,13 +2979,13 @@ static ssize_t target_core_hba_store_attr_hba_mode(struct se_hba *hba,
ret = strict_strtoul(page, 0, &mode_flag);
if (ret < 0) {
- printk(KERN_ERR "Unable to extract hba mode flag: %d\n", ret);
+ pr_err("Unable to extract hba mode flag: %d\n", ret);
return -EINVAL;
}
spin_lock(&hba->device_lock);
- if (!(list_empty(&hba->hba_dev_list))) {
- printk(KERN_ERR "Unable to set hba_mode with active devices\n");
+ if (!list_empty(&hba->hba_dev_list)) {
+ pr_err("Unable to set hba_mode with active devices\n");
spin_unlock(&hba->device_lock);
return -EINVAL;
}
@@ -3052,8 +3043,8 @@ static struct config_group *target_core_call_addhbatotarget(
int ret;
memset(buf, 0, TARGET_CORE_NAME_MAX_LEN);
- if (strlen(name) > TARGET_CORE_NAME_MAX_LEN) {
- printk(KERN_ERR "Passed *name strlen(): %d exceeds"
+ if (strlen(name) >= TARGET_CORE_NAME_MAX_LEN) {
+ pr_err("Passed *name strlen(): %d exceeds"
" TARGET_CORE_NAME_MAX_LEN: %d\n", (int)strlen(name),
TARGET_CORE_NAME_MAX_LEN);
return ERR_PTR(-ENAMETOOLONG);
@@ -3061,8 +3052,8 @@ static struct config_group *target_core_call_addhbatotarget(
snprintf(buf, TARGET_CORE_NAME_MAX_LEN, "%s", name);
str = strstr(buf, "_");
- if (!(str)) {
- printk(KERN_ERR "Unable to locate \"_\" for $SUBSYSTEM_PLUGIN_$HOST_ID\n");
+ if (!str) {
+ pr_err("Unable to locate \"_\" for $SUBSYSTEM_PLUGIN_$HOST_ID\n");
return ERR_PTR(-EINVAL);
}
se_plugin_str = buf;
@@ -3071,7 +3062,7 @@ static struct config_group *target_core_call_addhbatotarget(
* Namely rd_direct and rd_mcp..
*/
str2 = strstr(str+1, "_");
- if ((str2)) {
+ if (str2) {
*str2 = '\0'; /* Terminate for *se_plugin_str */
str2++; /* Skip to start of plugin dependent ID */
str = str2;
@@ -3082,7 +3073,7 @@ static struct config_group *target_core_call_addhbatotarget(
ret = strict_strtoul(str, 0, &plugin_dep_id);
if (ret < 0) {
- printk(KERN_ERR "strict_strtoul() returned %d for"
+ pr_err("strict_strtoul() returned %d for"
" plugin_dep_id\n", ret);
return ERR_PTR(-EINVAL);
}
@@ -3135,7 +3126,7 @@ static int __init target_core_init_configfs(void)
struct t10_alua_lu_gp *lu_gp;
int ret;
- printk(KERN_INFO "TARGET_CORE[0]: Loading Generic Kernel Storage"
+ pr_debug("TARGET_CORE[0]: Loading Generic Kernel Storage"
" Engine: %s on %s/%s on "UTS_RELEASE"\n",
TARGET_CORE_VERSION, utsname()->sysname, utsname()->machine);
@@ -3145,10 +3136,9 @@ static int __init target_core_init_configfs(void)
INIT_LIST_HEAD(&g_tf_list);
mutex_init(&g_tf_lock);
- init_scsi_index_table();
- ret = init_se_global();
+ ret = init_se_kmem_caches();
if (ret < 0)
- return -1;
+ return ret;
/*
* Create $CONFIGFS/target/core default group for HBA <-> Storage Object
* and ALUA Logical Unit Group and Target Port Group infrastructure.
@@ -3156,44 +3146,44 @@ static int __init target_core_init_configfs(void)
target_cg = &subsys->su_group;
target_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
GFP_KERNEL);
- if (!(target_cg->default_groups)) {
- printk(KERN_ERR "Unable to allocate target_cg->default_groups\n");
+ if (!target_cg->default_groups) {
+ pr_err("Unable to allocate target_cg->default_groups\n");
goto out_global;
}
- config_group_init_type_name(&se_global->target_core_hbagroup,
+ config_group_init_type_name(&target_core_hbagroup,
"core", &target_core_cit);
- target_cg->default_groups[0] = &se_global->target_core_hbagroup;
+ target_cg->default_groups[0] = &target_core_hbagroup;
target_cg->default_groups[1] = NULL;
/*
* Create ALUA infrastructure under /sys/kernel/config/target/core/alua/
*/
- hba_cg = &se_global->target_core_hbagroup;
+ hba_cg = &target_core_hbagroup;
hba_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
GFP_KERNEL);
- if (!(hba_cg->default_groups)) {
- printk(KERN_ERR "Unable to allocate hba_cg->default_groups\n");
+ if (!hba_cg->default_groups) {
+ pr_err("Unable to allocate hba_cg->default_groups\n");
goto out_global;
}
- config_group_init_type_name(&se_global->alua_group,
+ config_group_init_type_name(&alua_group,
"alua", &target_core_alua_cit);
- hba_cg->default_groups[0] = &se_global->alua_group;
+ hba_cg->default_groups[0] = &alua_group;
hba_cg->default_groups[1] = NULL;
/*
* Add ALUA Logical Unit Group and Target Port Group ConfigFS
* groups under /sys/kernel/config/target/core/alua/
*/
- alua_cg = &se_global->alua_group;
+ alua_cg = &alua_group;
alua_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
GFP_KERNEL);
- if (!(alua_cg->default_groups)) {
- printk(KERN_ERR "Unable to allocate alua_cg->default_groups\n");
+ if (!alua_cg->default_groups) {
+ pr_err("Unable to allocate alua_cg->default_groups\n");
goto out_global;
}
- config_group_init_type_name(&se_global->alua_lu_gps_group,
+ config_group_init_type_name(&alua_lu_gps_group,
"lu_gps", &target_core_alua_lu_gps_cit);
- alua_cg->default_groups[0] = &se_global->alua_lu_gps_group;
+ alua_cg->default_groups[0] = &alua_lu_gps_group;
alua_cg->default_groups[1] = NULL;
/*
* Add core/alua/lu_gps/default_lu_gp
@@ -3202,11 +3192,11 @@ static int __init target_core_init_configfs(void)
if (IS_ERR(lu_gp))
goto out_global;
- lu_gp_cg = &se_global->alua_lu_gps_group;
+ lu_gp_cg = &alua_lu_gps_group;
lu_gp_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
GFP_KERNEL);
- if (!(lu_gp_cg->default_groups)) {
- printk(KERN_ERR "Unable to allocate lu_gp_cg->default_groups\n");
+ if (!lu_gp_cg->default_groups) {
+ pr_err("Unable to allocate lu_gp_cg->default_groups\n");
goto out_global;
}
@@ -3214,17 +3204,17 @@ static int __init target_core_init_configfs(void)
&target_core_alua_lu_gp_cit);
lu_gp_cg->default_groups[0] = &lu_gp->lu_gp_group;
lu_gp_cg->default_groups[1] = NULL;
- se_global->default_lu_gp = lu_gp;
+ default_lu_gp = lu_gp;
/*
* Register the target_core_mod subsystem with configfs.
*/
ret = configfs_register_subsystem(subsys);
if (ret < 0) {
- printk(KERN_ERR "Error %d while registering subsystem %s\n",
+ pr_err("Error %d while registering subsystem %s\n",
ret, subsys->su_group.cg_item.ci_namebuf);
goto out_global;
}
- printk(KERN_INFO "TARGET_CORE[0]: Initialized ConfigFS Fabric"
+ pr_debug("TARGET_CORE[0]: Initialized ConfigFS Fabric"
" Infrastructure: "TARGET_CORE_CONFIGFS_VERSION" on %s/%s"
" on "UTS_RELEASE"\n", utsname()->sysname, utsname()->machine);
/*
@@ -3244,9 +3234,9 @@ out:
core_dev_release_virtual_lun0();
rd_module_exit();
out_global:
- if (se_global->default_lu_gp) {
- core_alua_free_lu_gp(se_global->default_lu_gp);
- se_global->default_lu_gp = NULL;
+ if (default_lu_gp) {
+ core_alua_free_lu_gp(default_lu_gp);
+ default_lu_gp = NULL;
}
if (lu_gp_cg)
kfree(lu_gp_cg->default_groups);
@@ -3255,8 +3245,8 @@ out_global:
if (hba_cg)
kfree(hba_cg->default_groups);
kfree(target_cg->default_groups);
- release_se_global();
- return -1;
+ release_se_kmem_caches();
+ return ret;
}
static void __exit target_core_exit_configfs(void)
@@ -3266,10 +3256,9 @@ static void __exit target_core_exit_configfs(void)
struct config_item *item;
int i;
- se_global->in_shutdown = 1;
subsys = target_core_subsystem[0];
- lu_gp_cg = &se_global->alua_lu_gps_group;
+ lu_gp_cg = &alua_lu_gps_group;
for (i = 0; lu_gp_cg->default_groups[i]; i++) {
item = &lu_gp_cg->default_groups[i]->cg_item;
lu_gp_cg->default_groups[i] = NULL;
@@ -3278,7 +3267,7 @@ static void __exit target_core_exit_configfs(void)
kfree(lu_gp_cg->default_groups);
lu_gp_cg->default_groups = NULL;
- alua_cg = &se_global->alua_group;
+ alua_cg = &alua_group;
for (i = 0; alua_cg->default_groups[i]; i++) {
item = &alua_cg->default_groups[i]->cg_item;
alua_cg->default_groups[i] = NULL;
@@ -3287,7 +3276,7 @@ static void __exit target_core_exit_configfs(void)
kfree(alua_cg->default_groups);
alua_cg->default_groups = NULL;
- hba_cg = &se_global->target_core_hbagroup;
+ hba_cg = &target_core_hbagroup;
for (i = 0; hba_cg->default_groups[i]; i++) {
item = &hba_cg->default_groups[i]->cg_item;
hba_cg->default_groups[i] = NULL;
@@ -3302,17 +3291,15 @@ static void __exit target_core_exit_configfs(void)
configfs_unregister_subsystem(subsys);
kfree(subsys->su_group.default_groups);
- core_alua_free_lu_gp(se_global->default_lu_gp);
- se_global->default_lu_gp = NULL;
+ core_alua_free_lu_gp(default_lu_gp);
+ default_lu_gp = NULL;
- printk(KERN_INFO "TARGET_CORE[0]: Released ConfigFS Fabric"
+ pr_debug("TARGET_CORE[0]: Released ConfigFS Fabric"
" Infrastructure\n");
core_dev_release_virtual_lun0();
rd_module_exit();
- release_se_global();
-
- return;
+ release_se_kmem_caches();
}
MODULE_DESCRIPTION("Target_Core_Mod/ConfigFS");
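
For orientation, the overall shape of target_core_init_configfs()/target_core_exit_configfs() is the standard configfs subsystem lifecycle: register on load, unregister and release on unload. A minimal sketch under the usual assumptions (the "example" names and the near-empty item type are illustrative; a real subsystem would also populate group operations):

#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/configfs.h>

static struct config_item_type example_type = {
	.ct_owner = THIS_MODULE,
};

static struct configfs_subsystem example_subsys = {
	.su_group = {
		.cg_item = {
			.ci_namebuf = "example",
			.ci_type = &example_type,
		},
	},
};

static int __init example_init(void)
{
	config_group_init(&example_subsys.su_group);
	mutex_init(&example_subsys.su_mutex);
	return configfs_register_subsystem(&example_subsys);
}

static void __exit example_exit(void)
{
	configfs_unregister_subsystem(&example_subsys);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
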
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 8407f9ca2b3..b38b6c993e6 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -1,7 +1,7 @@
/*******************************************************************************
* Filename: target_core_device.c (based on iscsi_target_device.c)
*
- * This file contains the iSCSI Virtual Device and Disk Transport
+ * This file contains the TCM Virtual Device and Disk Transport
* agnostic related functions.
*
* Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
@@ -54,176 +54,183 @@
static void se_dev_start(struct se_device *dev);
static void se_dev_stop(struct se_device *dev);
-int transport_get_lun_for_cmd(
- struct se_cmd *se_cmd,
- unsigned char *cdb,
- u32 unpacked_lun)
+static struct se_hba *lun0_hba;
+static struct se_subsystem_dev *lun0_su_dev;
+/* not static, needed by tpg.c */
+struct se_device *g_lun0_dev;
+
+int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
{
- struct se_dev_entry *deve;
struct se_lun *se_lun = NULL;
- struct se_session *se_sess = SE_SESS(se_cmd);
+ struct se_session *se_sess = se_cmd->se_sess;
+ struct se_device *dev;
unsigned long flags;
- int read_only = 0;
- spin_lock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);
- deve = se_cmd->se_deve =
- &SE_NODE_ACL(se_sess)->device_list[unpacked_lun];
- if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
- if (se_cmd) {
- deve->total_cmds++;
- deve->total_bytes += se_cmd->data_length;
-
- if (se_cmd->data_direction == DMA_TO_DEVICE) {
- if (deve->lun_flags &
- TRANSPORT_LUNFLAGS_READ_ONLY) {
- read_only = 1;
- goto out;
- }
- deve->write_bytes += se_cmd->data_length;
- } else if (se_cmd->data_direction ==
- DMA_FROM_DEVICE) {
- deve->read_bytes += se_cmd->data_length;
- }
+ if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG) {
+ se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
+ se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ return -ENODEV;
+ }
+
+ spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags);
+ se_cmd->se_deve = &se_sess->se_node_acl->device_list[unpacked_lun];
+ if (se_cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
+ struct se_dev_entry *deve = se_cmd->se_deve;
+
+ deve->total_cmds++;
+ deve->total_bytes += se_cmd->data_length;
+
+ if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
+ (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) {
+ se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
+ se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
+ " Access for 0x%08x\n",
+ se_cmd->se_tfo->get_fabric_name(),
+ unpacked_lun);
+ spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
+ return -EACCES;
}
+
+ if (se_cmd->data_direction == DMA_TO_DEVICE)
+ deve->write_bytes += se_cmd->data_length;
+ else if (se_cmd->data_direction == DMA_FROM_DEVICE)
+ deve->read_bytes += se_cmd->data_length;
+
deve->deve_cmds++;
- se_lun = se_cmd->se_lun = deve->se_lun;
+ se_lun = deve->se_lun;
+ se_cmd->se_lun = deve->se_lun;
se_cmd->pr_res_key = deve->pr_res_key;
se_cmd->orig_fe_lun = unpacked_lun;
- se_cmd->se_orig_obj_ptr = SE_LUN(se_cmd)->lun_se_dev;
+ se_cmd->se_orig_obj_ptr = se_cmd->se_lun->lun_se_dev;
se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
}
-out:
- spin_unlock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);
+ spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
if (!se_lun) {
- if (read_only) {
- se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
+ /*
+ * Use the se_portal_group->tpg_virt_lun0 to allow for
+ * REPORT_LUNS, et al to be returned when no active
+ * MappedLUN=0 exists for this Initiator Port.
+ */
+ if (unpacked_lun != 0) {
+ se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- printk("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
+ pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
" Access for 0x%08x\n",
- CMD_TFO(se_cmd)->get_fabric_name(),
+ se_cmd->se_tfo->get_fabric_name(),
unpacked_lun);
- return -1;
- } else {
- /*
- * Use the se_portal_group->tpg_virt_lun0 to allow for
- * REPORT_LUNS, et al to be returned when no active
- * MappedLUN=0 exists for this Initiator Port.
- */
- if (unpacked_lun != 0) {
- se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
- se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- printk("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
- " Access for 0x%08x\n",
- CMD_TFO(se_cmd)->get_fabric_name(),
- unpacked_lun);
- return -1;
- }
- /*
- * Force WRITE PROTECT for virtual LUN 0
- */
- if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
- (se_cmd->data_direction != DMA_NONE)) {
- se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
- se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- return -1;
- }
-#if 0
- printk("TARGET_CORE[%s]: Using virtual LUN0! :-)\n",
- CMD_TFO(se_cmd)->get_fabric_name());
-#endif
- se_lun = se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0;
- se_cmd->orig_fe_lun = 0;
- se_cmd->se_orig_obj_ptr = SE_LUN(se_cmd)->lun_se_dev;
- se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
+ return -ENODEV;
}
+ /*
+ * Force WRITE PROTECT for virtual LUN 0
+ */
+ if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
+ (se_cmd->data_direction != DMA_NONE)) {
+ se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
+ se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ return -EACCES;
+ }
+
+ se_lun = &se_sess->se_tpg->tpg_virt_lun0;
+ se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0;
+ se_cmd->orig_fe_lun = 0;
+ se_cmd->se_orig_obj_ptr = se_cmd->se_lun->lun_se_dev;
+ se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
}
/*
* Determine if the struct se_lun is online.
+ * FIXME: Check for LUN_RESET + UNIT Attention
*/
-/* #warning FIXME: Check for LUN_RESET + UNIT Attention */
if (se_dev_check_online(se_lun->lun_se_dev) != 0) {
se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- return -1;
+ return -ENODEV;
}
- {
- struct se_device *dev = se_lun->lun_se_dev;
- spin_lock_irq(&dev->stats_lock);
+ /* Directly associate cmd with se_dev */
+ se_cmd->se_dev = se_lun->lun_se_dev;
+
+ /* TODO: get rid of this and use atomics for stats */
+ dev = se_lun->lun_se_dev;
+ spin_lock_irqsave(&dev->stats_lock, flags);
dev->num_cmds++;
if (se_cmd->data_direction == DMA_TO_DEVICE)
dev->write_bytes += se_cmd->data_length;
else if (se_cmd->data_direction == DMA_FROM_DEVICE)
dev->read_bytes += se_cmd->data_length;
- spin_unlock_irq(&dev->stats_lock);
- }
+ spin_unlock_irqrestore(&dev->stats_lock, flags);
/*
* Add the se_cmd to the struct se_lun's cmd list. This list is used
* for tracking state of struct se_cmds during LUN shutdown events.
*/
spin_lock_irqsave(&se_lun->lun_cmd_lock, flags);
- list_add_tail(&se_cmd->se_lun_list, &se_lun->lun_cmd_list);
- atomic_set(&T_TASK(se_cmd)->transport_lun_active, 1);
-#if 0
- printk(KERN_INFO "Adding ITT: 0x%08x to LUN LIST[%d]\n",
- CMD_TFO(se_cmd)->get_task_tag(se_cmd), se_lun->unpacked_lun);
-#endif
+ list_add_tail(&se_cmd->se_lun_node, &se_lun->lun_cmd_list);
+ atomic_set(&se_cmd->transport_lun_active, 1);
spin_unlock_irqrestore(&se_lun->lun_cmd_lock, flags);
return 0;
}
-EXPORT_SYMBOL(transport_get_lun_for_cmd);
+EXPORT_SYMBOL(transport_lookup_cmd_lun);
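
Beyond the rename, the rewrite above replaces the bare -1 returns with distinct errnos: -ENODEV for an out-of-range or nonexistent LUN and -EACCES for a write-protected one, so callers can finally tell the cases apart. A hypothetical, userspace-compilable caller sketch (the stub, struct se_cmd, and the messages are all illustrative, not from this patch):

#include <errno.h>
#include <stdio.h>

struct se_cmd { int dummy; };

static int transport_lookup_cmd_lun_stub(struct se_cmd *cmd, unsigned int lun)
{
	return lun == 0 ? 0 : -ENODEV;	/* stand-in behavior */
}

static int dispatch(struct se_cmd *cmd, unsigned int lun)
{
	int ret = transport_lookup_cmd_lun_stub(cmd, lun);

	if (ret == -EACCES)
		fprintf(stderr, "write-protected LUN\n");
	else if (ret == -ENODEV)
		fprintf(stderr, "nonexistent LUN\n");
	return ret;	/* 0 on success */
}

int main(void)
{
	struct se_cmd cmd;
	dispatch(&cmd, 1);
	return 0;
}
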
-int transport_get_lun_for_tmr(
- struct se_cmd *se_cmd,
- u32 unpacked_lun)
+int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
{
- struct se_device *dev = NULL;
struct se_dev_entry *deve;
struct se_lun *se_lun = NULL;
- struct se_session *se_sess = SE_SESS(se_cmd);
+ struct se_session *se_sess = se_cmd->se_sess;
struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
+ unsigned long flags;
+
+ if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG) {
+ se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
+ se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ return -ENODEV;
+ }
+
+ spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags);
+ se_cmd->se_deve = &se_sess->se_node_acl->device_list[unpacked_lun];
+ deve = se_cmd->se_deve;
- spin_lock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);
- deve = se_cmd->se_deve =
- &SE_NODE_ACL(se_sess)->device_list[unpacked_lun];
if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
- se_lun = se_cmd->se_lun = se_tmr->tmr_lun = deve->se_lun;
- dev = se_tmr->tmr_dev = se_lun->lun_se_dev;
+ se_tmr->tmr_lun = deve->se_lun;
+ se_cmd->se_lun = deve->se_lun;
+ se_lun = deve->se_lun;
se_cmd->pr_res_key = deve->pr_res_key;
se_cmd->orig_fe_lun = unpacked_lun;
- se_cmd->se_orig_obj_ptr = SE_LUN(se_cmd)->lun_se_dev;
-/* se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; */
+ se_cmd->se_orig_obj_ptr = se_cmd->se_dev;
}
- spin_unlock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);
+ spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
if (!se_lun) {
- printk(KERN_INFO "TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
+ pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
" Access for 0x%08x\n",
- CMD_TFO(se_cmd)->get_fabric_name(),
+ se_cmd->se_tfo->get_fabric_name(),
unpacked_lun);
se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- return -1;
+ return -ENODEV;
}
/*
* Determine if the struct se_lun is online.
+ * FIXME: Check for LUN_RESET + UNIT Attention
*/
-/* #warning FIXME: Check for LUN_RESET + UNIT Attention */
if (se_dev_check_online(se_lun->lun_se_dev) != 0) {
se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- return -1;
+ return -ENODEV;
}
- spin_lock(&dev->se_tmr_lock);
- list_add_tail(&se_tmr->tmr_list, &dev->dev_tmr_list);
- spin_unlock(&dev->se_tmr_lock);
+ /* Directly associate cmd with se_dev */
+ se_cmd->se_dev = se_lun->lun_se_dev;
+ se_tmr->tmr_dev = se_lun->lun_se_dev;
+
+ spin_lock_irqsave(&se_tmr->tmr_dev->se_tmr_lock, flags);
+ list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list);
+ spin_unlock_irqrestore(&se_tmr->tmr_dev->se_tmr_lock, flags);
return 0;
}
-EXPORT_SYMBOL(transport_get_lun_for_tmr);
+EXPORT_SYMBOL(transport_lookup_tmr_lun);
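
The locking change threaded through both lookups deserves a note: spin_unlock_irq() unconditionally re-enables interrupts, which is only safe when the caller is known to run with IRQs on. The irqsave form saves and restores the caller's IRQ state instead, so these functions can now be called from any context. In sketch form (kernel-style, types abbreviated):

#include <linux/spinlock.h>

static void example(spinlock_t *lock)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);		/* records current IRQ state */
	/* ... critical section ... */
	spin_unlock_irqrestore(lock, flags);	/* restores, never just enables */
}
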
/*
* This function is called from core_scsi3_emulate_pro_register_and_move()
@@ -248,17 +255,17 @@ struct se_dev_entry *core_get_se_deve_from_rtpi(
continue;
lun = deve->se_lun;
- if (!(lun)) {
- printk(KERN_ERR "%s device entries device pointer is"
+ if (!lun) {
+ pr_err("%s device entries device pointer is"
" NULL, but Initiator has access.\n",
- TPG_TFO(tpg)->get_fabric_name());
+ tpg->se_tpg_tfo->get_fabric_name());
continue;
}
port = lun->lun_sep;
- if (!(port)) {
- printk(KERN_ERR "%s device entries device pointer is"
+ if (!port) {
+ pr_err("%s device entries device pointer is"
" NULL, but Initiator has access.\n",
- TPG_TFO(tpg)->get_fabric_name());
+ tpg->se_tpg_tfo->get_fabric_name());
continue;
}
if (port->sep_rtpi != rtpi)
@@ -294,9 +301,9 @@ int core_free_device_list_for_node(
continue;
if (!deve->se_lun) {
- printk(KERN_ERR "%s device entries device pointer is"
+ pr_err("%s device entries device pointer is"
" NULL, but Initiator has access.\n",
- TPG_TFO(tpg)->get_fabric_name());
+ tpg->se_tpg_tfo->get_fabric_name());
continue;
}
lun = deve->se_lun;
@@ -322,8 +329,6 @@ void core_dec_lacl_count(struct se_node_acl *se_nacl, struct se_cmd *se_cmd)
deve = &se_nacl->device_list[se_cmd->orig_fe_lun];
deve->deve_cmds--;
spin_unlock_irq(&se_nacl->device_list_lock);
-
- return;
}
void core_update_device_list_access(
@@ -343,8 +348,6 @@ void core_update_device_list_access(
deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
}
spin_unlock_irq(&nacl->device_list_lock);
-
- return;
}
/* core_update_device_list_for_node():
@@ -369,7 +372,7 @@ int core_update_device_list_for_node(
* struct se_dev_entry pointers below as logic in
* core_alua_do_transition_tg_pt() depends on these being present.
*/
- if (!(enable)) {
+ if (!enable) {
/*
* deve->se_lun_acl will be NULL for demo-mode created LUNs
* that have not been explicitly converted to MappedLUNs ->
@@ -392,18 +395,18 @@ int core_update_device_list_for_node(
*/
if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
if (deve->se_lun_acl != NULL) {
- printk(KERN_ERR "struct se_dev_entry->se_lun_acl"
+ pr_err("struct se_dev_entry->se_lun_acl"
" already set for demo mode -> explict"
" LUN ACL transition\n");
spin_unlock_irq(&nacl->device_list_lock);
- return -1;
+ return -EINVAL;
}
if (deve->se_lun != lun) {
- printk(KERN_ERR "struct se_dev_entry->se_lun does"
+ pr_err("struct se_dev_entry->se_lun does"
" match passed struct se_lun for demo mode"
" -> explict LUN ACL transition\n");
spin_unlock_irq(&nacl->device_list_lock);
- return -1;
+ return -EINVAL;
}
deve->se_lun_acl = lun_acl;
trans = 1;
@@ -491,8 +494,6 @@ void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
spin_lock_bh(&tpg->acl_node_lock);
}
spin_unlock_bh(&tpg->acl_node_lock);
-
- return;
}
static struct se_port *core_alloc_port(struct se_device *dev)
@@ -500,9 +501,9 @@ static struct se_port *core_alloc_port(struct se_device *dev)
struct se_port *port, *port_tmp;
port = kzalloc(sizeof(struct se_port), GFP_KERNEL);
- if (!(port)) {
- printk(KERN_ERR "Unable to allocate struct se_port\n");
- return NULL;
+ if (!port) {
+ pr_err("Unable to allocate struct se_port\n");
+ return ERR_PTR(-ENOMEM);
}
INIT_LIST_HEAD(&port->sep_alua_list);
INIT_LIST_HEAD(&port->sep_list);
@@ -512,10 +513,10 @@ static struct se_port *core_alloc_port(struct se_device *dev)
spin_lock(&dev->se_port_lock);
if (dev->dev_port_count == 0x0000ffff) {
- printk(KERN_WARNING "Reached dev->dev_port_count =="
+ pr_warn("Reached dev->dev_port_count =="
" 0x0000ffff\n");
spin_unlock(&dev->se_port_lock);
- return NULL;
+ return ERR_PTR(-ENOSPC);
}
again:
/*
@@ -531,7 +532,7 @@ again:
* 3h to FFFFh Relative port 3 through 65 535
*/
port->sep_rtpi = dev->dev_rpti_counter++;
- if (!(port->sep_rtpi))
+ if (!port->sep_rtpi)
goto again;
list_for_each_entry(port_tmp, &dev->dev_sep_list, sep_list) {
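
The RTPI assignment is a wrapping-counter allocator: the u16 counter is post-incremented, the reserved value 0h is skipped per the SPC-4 table quoted above, and dev_sep_list is scanned so an identifier already in use sends control back to the again: label. The same pattern in isolation (a sketch; the in_use callback stands in for the list walk, and the real code additionally bounds allocation with the dev_port_count check):

	static u16 next_rtpi_sketch(u16 *counter, bool (*in_use)(u16 id))
	{
		u16 id;

		do {
			id = (*counter)++;	/* u16 arithmetic wraps at 0xffff */
		} while (id == 0 || in_use(id));	/* 0h is reserved */

		return id;
	}
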
@@ -553,7 +554,7 @@ static void core_export_port(
struct se_port *port,
struct se_lun *lun)
{
- struct se_subsystem_dev *su_dev = SU_DEV(dev);
+ struct se_subsystem_dev *su_dev = dev->se_sub_dev;
struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem = NULL;
spin_lock(&dev->se_port_lock);
@@ -566,20 +567,20 @@ static void core_export_port(
list_add_tail(&port->sep_list, &dev->dev_sep_list);
spin_unlock(&dev->se_port_lock);
- if (T10_ALUA(su_dev)->alua_type == SPC3_ALUA_EMULATED) {
+ if (su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port);
if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) {
- printk(KERN_ERR "Unable to allocate t10_alua_tg_pt"
+ pr_err("Unable to allocate t10_alua_tg_pt"
"_gp_member_t\n");
return;
}
spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
- T10_ALUA(su_dev)->default_tg_pt_gp);
+ su_dev->t10_alua.default_tg_pt_gp);
spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
- printk(KERN_INFO "%s/%s: Adding to default ALUA Target Port"
+ pr_debug("%s/%s: Adding to default ALUA Target Port"
" Group: alua/default_tg_pt_gp\n",
- TRANSPORT(dev)->name, TPG_TFO(tpg)->get_fabric_name());
+ dev->transport->name, tpg->se_tpg_tfo->get_fabric_name());
}
dev->dev_port_count++;
@@ -606,8 +607,6 @@ static void core_release_port(struct se_device *dev, struct se_port *port)
list_del(&port->sep_list);
dev->dev_port_count--;
kfree(port);
-
- return;
}
int core_dev_export(
@@ -618,8 +617,8 @@ int core_dev_export(
struct se_port *port;
port = core_alloc_port(dev);
- if (!(port))
- return -1;
+ if (IS_ERR(port))
+ return PTR_ERR(port);
lun->lun_se_dev = dev;
se_dev_start(dev);
@@ -656,33 +655,35 @@ int transport_core_report_lun_response(struct se_cmd *se_cmd)
{
struct se_dev_entry *deve;
struct se_lun *se_lun;
- struct se_session *se_sess = SE_SESS(se_cmd);
+ struct se_session *se_sess = se_cmd->se_sess;
struct se_task *se_task;
- unsigned char *buf = (unsigned char *)T_TASK(se_cmd)->t_task_buf;
+ unsigned char *buf;
u32 cdb_offset = 0, lun_count = 0, offset = 8, i;
- list_for_each_entry(se_task, &T_TASK(se_cmd)->t_task_list, t_list)
+ list_for_each_entry(se_task, &se_cmd->t_task_list, t_list)
break;
- if (!(se_task)) {
- printk(KERN_ERR "Unable to locate struct se_task for struct se_cmd\n");
+ if (!se_task) {
+ pr_err("Unable to locate struct se_task for struct se_cmd\n");
return PYX_TRANSPORT_LU_COMM_FAILURE;
}
+ buf = transport_kmap_first_data_page(se_cmd);
+
/*
* If no struct se_session pointer is present, this struct se_cmd is
* coming via a target_core_mod PASSTHROUGH op, and not through
* a $FABRIC_MOD. In that case, report LUN=0 only.
*/
- if (!(se_sess)) {
+ if (!se_sess) {
int_to_scsilun(0, (struct scsi_lun *)&buf[offset]);
lun_count = 1;
goto done;
}
- spin_lock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);
+ spin_lock_irq(&se_sess->se_node_acl->device_list_lock);
for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
- deve = &SE_NODE_ACL(se_sess)->device_list[i];
+ deve = &se_sess->se_node_acl->device_list[i];
if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
continue;
se_lun = deve->se_lun;
@@ -699,12 +700,13 @@ int transport_core_report_lun_response(struct se_cmd *se_cmd)
offset += 8;
cdb_offset += 8;
}
- spin_unlock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);
+ spin_unlock_irq(&se_sess->se_node_acl->device_list_lock);
/*
* See SPC3 r07, page 159.
*/
done:
+ transport_kunmap_first_data_page(se_cmd);
lun_count *= 8;
buf[0] = ((lun_count >> 24) & 0xff);
buf[1] = ((lun_count >> 16) & 0xff);
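
The done: label encodes the SPC-3 REPORT LUNS header: bytes 0-3 carry the LUN LIST LENGTH (number of LUNs times 8) in big-endian, bytes 4-7 are reserved, and the first LUN entry starts at offset 8, which is why the loop above began writing at offset = 8. The open-coded shifts are equivalent to:

	#include <asm/unaligned.h>

	/* buf is the kmapped first data page, as in the function above. */
	put_unaligned_be32(lun_count * 8, &buf[0]);	/* LUN LIST LENGTH */
	/* buf[4..7] reserved; LUN entries begin at buf[8] */
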
@@ -743,26 +745,20 @@ void se_release_device_for_hba(struct se_device *dev)
core_scsi3_free_all_registrations(dev);
se_release_vpd_for_dev(dev);
- kfree(dev->dev_status_queue_obj);
- kfree(dev->dev_queue_obj);
kfree(dev);
-
- return;
}
void se_release_vpd_for_dev(struct se_device *dev)
{
struct t10_vpd *vpd, *vpd_tmp;
- spin_lock(&DEV_T10_WWN(dev)->t10_vpd_lock);
+ spin_lock(&dev->se_sub_dev->t10_wwn.t10_vpd_lock);
list_for_each_entry_safe(vpd, vpd_tmp,
- &DEV_T10_WWN(dev)->t10_vpd_list, vpd_list) {
+ &dev->se_sub_dev->t10_wwn.t10_vpd_list, vpd_list) {
list_del(&vpd->vpd_list);
kfree(vpd);
}
- spin_unlock(&DEV_T10_WWN(dev)->t10_vpd_lock);
-
- return;
+ spin_unlock(&dev->se_sub_dev->t10_wwn.t10_vpd_lock);
}
/* se_free_virtual_device():
@@ -821,12 +817,13 @@ static void se_dev_stop(struct se_device *dev)
int se_dev_check_online(struct se_device *dev)
{
+ unsigned long flags;
int ret;
- spin_lock_irq(&dev->dev_status_lock);
+ spin_lock_irqsave(&dev->dev_status_lock, flags);
ret = ((dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) ||
(dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED)) ? 0 : 1;
- spin_unlock_irq(&dev->dev_status_lock);
+ spin_unlock_irqrestore(&dev->dev_status_lock, flags);
return ret;
}
@@ -848,59 +845,61 @@ void se_dev_set_default_attribs(
{
struct queue_limits *limits = &dev_limits->limits;
- DEV_ATTRIB(dev)->emulate_dpo = DA_EMULATE_DPO;
- DEV_ATTRIB(dev)->emulate_fua_write = DA_EMULATE_FUA_WRITE;
- DEV_ATTRIB(dev)->emulate_fua_read = DA_EMULATE_FUA_READ;
- DEV_ATTRIB(dev)->emulate_write_cache = DA_EMULATE_WRITE_CACHE;
- DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL;
- DEV_ATTRIB(dev)->emulate_tas = DA_EMULATE_TAS;
- DEV_ATTRIB(dev)->emulate_tpu = DA_EMULATE_TPU;
- DEV_ATTRIB(dev)->emulate_tpws = DA_EMULATE_TPWS;
- DEV_ATTRIB(dev)->emulate_reservations = DA_EMULATE_RESERVATIONS;
- DEV_ATTRIB(dev)->emulate_alua = DA_EMULATE_ALUA;
- DEV_ATTRIB(dev)->enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
+ dev->se_sub_dev->se_dev_attrib.emulate_dpo = DA_EMULATE_DPO;
+ dev->se_sub_dev->se_dev_attrib.emulate_fua_write = DA_EMULATE_FUA_WRITE;
+ dev->se_sub_dev->se_dev_attrib.emulate_fua_read = DA_EMULATE_FUA_READ;
+ dev->se_sub_dev->se_dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE;
+ dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL;
+ dev->se_sub_dev->se_dev_attrib.emulate_tas = DA_EMULATE_TAS;
+ dev->se_sub_dev->se_dev_attrib.emulate_tpu = DA_EMULATE_TPU;
+ dev->se_sub_dev->se_dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
+ dev->se_sub_dev->se_dev_attrib.emulate_reservations = DA_EMULATE_RESERVATIONS;
+ dev->se_sub_dev->se_dev_attrib.emulate_alua = DA_EMULATE_ALUA;
+ dev->se_sub_dev->se_dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
+ dev->se_sub_dev->se_dev_attrib.is_nonrot = DA_IS_NONROT;
+ dev->se_sub_dev->se_dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD;
/*
* The TPU=1 and TPWS=1 settings will be set in TCM/IBLOCK
* iblock_create_virtdevice() from struct queue_limits values
* if blk_queue_discard()==1
*/
- DEV_ATTRIB(dev)->max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
- DEV_ATTRIB(dev)->max_unmap_block_desc_count =
- DA_MAX_UNMAP_BLOCK_DESC_COUNT;
- DEV_ATTRIB(dev)->unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
- DEV_ATTRIB(dev)->unmap_granularity_alignment =
+ dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
+ dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count =
+ DA_MAX_UNMAP_BLOCK_DESC_COUNT;
+ dev->se_sub_dev->se_dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
+ dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment =
DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
/*
* block_size is based on subsystem plugin dependent requirements.
*/
- DEV_ATTRIB(dev)->hw_block_size = limits->logical_block_size;
- DEV_ATTRIB(dev)->block_size = limits->logical_block_size;
+ dev->se_sub_dev->se_dev_attrib.hw_block_size = limits->logical_block_size;
+ dev->se_sub_dev->se_dev_attrib.block_size = limits->logical_block_size;
/*
* max_sectors is based on subsystem plugin dependent requirements.
*/
- DEV_ATTRIB(dev)->hw_max_sectors = limits->max_hw_sectors;
- DEV_ATTRIB(dev)->max_sectors = limits->max_sectors;
+ dev->se_sub_dev->se_dev_attrib.hw_max_sectors = limits->max_hw_sectors;
+ dev->se_sub_dev->se_dev_attrib.max_sectors = limits->max_sectors;
/*
* Set optimal_sectors from max_sectors, which can be lowered via
* configfs.
*/
- DEV_ATTRIB(dev)->optimal_sectors = limits->max_sectors;
+ dev->se_sub_dev->se_dev_attrib.optimal_sectors = limits->max_sectors;
/*
* queue_depth is based on subsystem plugin dependent requirements.
*/
- DEV_ATTRIB(dev)->hw_queue_depth = dev_limits->hw_queue_depth;
- DEV_ATTRIB(dev)->queue_depth = dev_limits->queue_depth;
+ dev->se_sub_dev->se_dev_attrib.hw_queue_depth = dev_limits->hw_queue_depth;
+ dev->se_sub_dev->se_dev_attrib.queue_depth = dev_limits->queue_depth;
}
int se_dev_set_task_timeout(struct se_device *dev, u32 task_timeout)
{
if (task_timeout > DA_TASK_TIMEOUT_MAX) {
- printk(KERN_ERR "dev[%p]: Passed task_timeout: %u larger then"
+ pr_err("dev[%p]: Passed task_timeout: %u larger then"
" DA_TASK_TIMEOUT_MAX\n", dev, task_timeout);
- return -1;
+ return -EINVAL;
} else {
- DEV_ATTRIB(dev)->task_timeout = task_timeout;
- printk(KERN_INFO "dev[%p]: Set SE Device task_timeout: %u\n",
+ dev->se_sub_dev->se_dev_attrib.task_timeout = task_timeout;
+ pr_debug("dev[%p]: Set SE Device task_timeout: %u\n",
dev, task_timeout);
}
@@ -911,9 +910,9 @@ int se_dev_set_max_unmap_lba_count(
struct se_device *dev,
u32 max_unmap_lba_count)
{
- DEV_ATTRIB(dev)->max_unmap_lba_count = max_unmap_lba_count;
- printk(KERN_INFO "dev[%p]: Set max_unmap_lba_count: %u\n",
- dev, DEV_ATTRIB(dev)->max_unmap_lba_count);
+ dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count = max_unmap_lba_count;
+ pr_debug("dev[%p]: Set max_unmap_lba_count: %u\n",
+ dev, dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count);
return 0;
}
@@ -921,9 +920,10 @@ int se_dev_set_max_unmap_block_desc_count(
struct se_device *dev,
u32 max_unmap_block_desc_count)
{
- DEV_ATTRIB(dev)->max_unmap_block_desc_count = max_unmap_block_desc_count;
- printk(KERN_INFO "dev[%p]: Set max_unmap_block_desc_count: %u\n",
- dev, DEV_ATTRIB(dev)->max_unmap_block_desc_count);
+ dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count =
+ max_unmap_block_desc_count;
+ pr_debug("dev[%p]: Set max_unmap_block_desc_count: %u\n",
+ dev, dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count);
return 0;
}
@@ -931,9 +931,9 @@ int se_dev_set_unmap_granularity(
struct se_device *dev,
u32 unmap_granularity)
{
- DEV_ATTRIB(dev)->unmap_granularity = unmap_granularity;
- printk(KERN_INFO "dev[%p]: Set unmap_granularity: %u\n",
- dev, DEV_ATTRIB(dev)->unmap_granularity);
+ dev->se_sub_dev->se_dev_attrib.unmap_granularity = unmap_granularity;
+ pr_debug("dev[%p]: Set unmap_granularity: %u\n",
+ dev, dev->se_sub_dev->se_dev_attrib.unmap_granularity);
return 0;
}
@@ -941,109 +941,109 @@ int se_dev_set_unmap_granularity_alignment(
struct se_device *dev,
u32 unmap_granularity_alignment)
{
- DEV_ATTRIB(dev)->unmap_granularity_alignment = unmap_granularity_alignment;
- printk(KERN_INFO "dev[%p]: Set unmap_granularity_alignment: %u\n",
- dev, DEV_ATTRIB(dev)->unmap_granularity_alignment);
+ dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment = unmap_granularity_alignment;
+ pr_debug("dev[%p]: Set unmap_granularity_alignment: %u\n",
+ dev, dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment);
return 0;
}
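
This run of hunks is the mechanical heart of the patch: the old accessor macros are gone and each site now spells out the full dereference chain. For reference, the retired helpers presumably read roughly as follows (a reconstruction, not quoted from this diff):

	#define DEV_ATTRIB(dev)	(&(dev)->se_sub_dev->se_dev_attrib)
	#define TPG_TFO(tpg)	((tpg)->se_tpg_tfo)
	#define TRANSPORT(dev)	((dev)->transport)

Dropping them makes the ownership chain visible at every call site instead of hiding it behind a pointer-returning macro.
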
int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
{
if ((flag != 0) && (flag != 1)) {
- printk(KERN_ERR "Illegal value %d\n", flag);
- return -1;
+ pr_err("Illegal value %d\n", flag);
+ return -EINVAL;
}
- if (TRANSPORT(dev)->dpo_emulated == NULL) {
- printk(KERN_ERR "TRANSPORT(dev)->dpo_emulated is NULL\n");
- return -1;
+ if (dev->transport->dpo_emulated == NULL) {
+ pr_err("dev->transport->dpo_emulated is NULL\n");
+ return -EINVAL;
}
- if (TRANSPORT(dev)->dpo_emulated(dev) == 0) {
- printk(KERN_ERR "TRANSPORT(dev)->dpo_emulated not supported\n");
- return -1;
+ if (dev->transport->dpo_emulated(dev) == 0) {
+ pr_err("dev->transport->dpo_emulated not supported\n");
+ return -EINVAL;
}
- DEV_ATTRIB(dev)->emulate_dpo = flag;
- printk(KERN_INFO "dev[%p]: SE Device Page Out (DPO) Emulation"
- " bit: %d\n", dev, DEV_ATTRIB(dev)->emulate_dpo);
+ dev->se_sub_dev->se_dev_attrib.emulate_dpo = flag;
+ pr_debug("dev[%p]: SE Device Page Out (DPO) Emulation"
+ " bit: %d\n", dev, dev->se_sub_dev->se_dev_attrib.emulate_dpo);
return 0;
}
int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
{
if ((flag != 0) && (flag != 1)) {
- printk(KERN_ERR "Illegal value %d\n", flag);
- return -1;
+ pr_err("Illegal value %d\n", flag);
+ return -EINVAL;
}
- if (TRANSPORT(dev)->fua_write_emulated == NULL) {
- printk(KERN_ERR "TRANSPORT(dev)->fua_write_emulated is NULL\n");
- return -1;
+ if (dev->transport->fua_write_emulated == NULL) {
+ pr_err("dev->transport->fua_write_emulated is NULL\n");
+ return -EINVAL;
}
- if (TRANSPORT(dev)->fua_write_emulated(dev) == 0) {
- printk(KERN_ERR "TRANSPORT(dev)->fua_write_emulated not supported\n");
- return -1;
+ if (dev->transport->fua_write_emulated(dev) == 0) {
+ pr_err("dev->transport->fua_write_emulated not supported\n");
+ return -EINVAL;
}
- DEV_ATTRIB(dev)->emulate_fua_write = flag;
- printk(KERN_INFO "dev[%p]: SE Device Forced Unit Access WRITEs: %d\n",
- dev, DEV_ATTRIB(dev)->emulate_fua_write);
+ dev->se_sub_dev->se_dev_attrib.emulate_fua_write = flag;
+ pr_debug("dev[%p]: SE Device Forced Unit Access WRITEs: %d\n",
+ dev, dev->se_sub_dev->se_dev_attrib.emulate_fua_write);
return 0;
}
int se_dev_set_emulate_fua_read(struct se_device *dev, int flag)
{
if ((flag != 0) && (flag != 1)) {
- printk(KERN_ERR "Illegal value %d\n", flag);
- return -1;
+ pr_err("Illegal value %d\n", flag);
+ return -EINVAL;
}
- if (TRANSPORT(dev)->fua_read_emulated == NULL) {
- printk(KERN_ERR "TRANSPORT(dev)->fua_read_emulated is NULL\n");
- return -1;
+ if (dev->transport->fua_read_emulated == NULL) {
+ pr_err("dev->transport->fua_read_emulated is NULL\n");
+ return -EINVAL;
}
- if (TRANSPORT(dev)->fua_read_emulated(dev) == 0) {
- printk(KERN_ERR "TRANSPORT(dev)->fua_read_emulated not supported\n");
- return -1;
+ if (dev->transport->fua_read_emulated(dev) == 0) {
+ pr_err("dev->transport->fua_read_emulated not supported\n");
+ return -EINVAL;
}
- DEV_ATTRIB(dev)->emulate_fua_read = flag;
- printk(KERN_INFO "dev[%p]: SE Device Forced Unit Access READs: %d\n",
- dev, DEV_ATTRIB(dev)->emulate_fua_read);
+ dev->se_sub_dev->se_dev_attrib.emulate_fua_read = flag;
+ pr_debug("dev[%p]: SE Device Forced Unit Access READs: %d\n",
+ dev, dev->se_sub_dev->se_dev_attrib.emulate_fua_read);
return 0;
}
int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
{
if ((flag != 0) && (flag != 1)) {
- printk(KERN_ERR "Illegal value %d\n", flag);
- return -1;
+ pr_err("Illegal value %d\n", flag);
+ return -EINVAL;
}
- if (TRANSPORT(dev)->write_cache_emulated == NULL) {
- printk(KERN_ERR "TRANSPORT(dev)->write_cache_emulated is NULL\n");
- return -1;
+ if (dev->transport->write_cache_emulated == NULL) {
+ pr_err("dev->transport->write_cache_emulated is NULL\n");
+ return -EINVAL;
}
- if (TRANSPORT(dev)->write_cache_emulated(dev) == 0) {
- printk(KERN_ERR "TRANSPORT(dev)->write_cache_emulated not supported\n");
- return -1;
+ if (dev->transport->write_cache_emulated(dev) == 0) {
+ pr_err("dev->transport->write_cache_emulated not supported\n");
+ return -EINVAL;
}
- DEV_ATTRIB(dev)->emulate_write_cache = flag;
- printk(KERN_INFO "dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n",
- dev, DEV_ATTRIB(dev)->emulate_write_cache);
+ dev->se_sub_dev->se_dev_attrib.emulate_write_cache = flag;
+ pr_debug("dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n",
+ dev, dev->se_sub_dev->se_dev_attrib.emulate_write_cache);
return 0;
}
int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag)
{
if ((flag != 0) && (flag != 1) && (flag != 2)) {
- printk(KERN_ERR "Illegal value %d\n", flag);
- return -1;
+ pr_err("Illegal value %d\n", flag);
+ return -EINVAL;
}
if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
- printk(KERN_ERR "dev[%p]: Unable to change SE Device"
+ pr_err("dev[%p]: Unable to change SE Device"
" UA_INTRLCK_CTRL while dev_export_obj: %d count"
" exists\n", dev,
atomic_read(&dev->dev_export_obj.obj_access_count));
- return -1;
+ return -EINVAL;
}
- DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl = flag;
- printk(KERN_INFO "dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n",
- dev, DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl);
+ dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl = flag;
+ pr_debug("dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n",
+ dev, dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl);
return 0;
}
@@ -1051,19 +1051,19 @@ int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag)
int se_dev_set_emulate_tas(struct se_device *dev, int flag)
{
if ((flag != 0) && (flag != 1)) {
- printk(KERN_ERR "Illegal value %d\n", flag);
- return -1;
+ pr_err("Illegal value %d\n", flag);
+ return -EINVAL;
}
if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
- printk(KERN_ERR "dev[%p]: Unable to change SE Device TAS while"
+ pr_err("dev[%p]: Unable to change SE Device TAS while"
" dev_export_obj: %d count exists\n", dev,
atomic_read(&dev->dev_export_obj.obj_access_count));
- return -1;
+ return -EINVAL;
}
- DEV_ATTRIB(dev)->emulate_tas = flag;
- printk(KERN_INFO "dev[%p]: SE Device TASK_ABORTED status bit: %s\n",
- dev, (DEV_ATTRIB(dev)->emulate_tas) ? "Enabled" : "Disabled");
+ dev->se_sub_dev->se_dev_attrib.emulate_tas = flag;
+ pr_debug("dev[%p]: SE Device TASK_ABORTED status bit: %s\n",
+ dev, (dev->se_sub_dev->se_dev_attrib.emulate_tas) ? "Enabled" : "Disabled");
return 0;
}
@@ -1071,20 +1071,20 @@ int se_dev_set_emulate_tas(struct se_device *dev, int flag)
int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
{
if ((flag != 0) && (flag != 1)) {
- printk(KERN_ERR "Illegal value %d\n", flag);
- return -1;
+ pr_err("Illegal value %d\n", flag);
+ return -EINVAL;
}
/*
* We expect this value to be non-zero when generic Block Layer
* Discard support is detected in iblock_create_virtdevice().
*/
- if (!(DEV_ATTRIB(dev)->max_unmap_block_desc_count)) {
- printk(KERN_ERR "Generic Block Discard not supported\n");
+ if (!dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
+ pr_err("Generic Block Discard not supported\n");
return -ENOSYS;
}
- DEV_ATTRIB(dev)->emulate_tpu = flag;
- printk(KERN_INFO "dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n",
+ dev->se_sub_dev->se_dev_attrib.emulate_tpu = flag;
+ pr_debug("dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n",
dev, flag);
return 0;
}
@@ -1092,20 +1092,20 @@ int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
{
if ((flag != 0) && (flag != 1)) {
- printk(KERN_ERR "Illegal value %d\n", flag);
- return -1;
+ pr_err("Illegal value %d\n", flag);
+ return -EINVAL;
}
/*
* We expect this value to be non-zero when generic Block Layer
* Discard support is detected in iblock_create_virtdevice().
*/
- if (!(DEV_ATTRIB(dev)->max_unmap_block_desc_count)) {
- printk(KERN_ERR "Generic Block Discard not supported\n");
+ if (!dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
+ pr_err("Generic Block Discard not supported\n");
return -ENOSYS;
}
- DEV_ATTRIB(dev)->emulate_tpws = flag;
- printk(KERN_INFO "dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n",
+ dev->se_sub_dev->se_dev_attrib.emulate_tpws = flag;
+ pr_debug("dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n",
dev, flag);
return 0;
}
@@ -1113,12 +1113,36 @@ int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag)
{
if ((flag != 0) && (flag != 1)) {
+ pr_err("Illegal value %d\n", flag);
+ return -EINVAL;
+ }
+ dev->se_sub_dev->se_dev_attrib.enforce_pr_isids = flag;
+ pr_debug("dev[%p]: SE Device enforce_pr_isids bit: %s\n", dev,
+ (dev->se_sub_dev->se_dev_attrib.enforce_pr_isids) ? "Enabled" : "Disabled");
+ return 0;
+}
+
+int se_dev_set_is_nonrot(struct se_device *dev, int flag)
+{
+ if ((flag != 0) && (flag != 1)) {
printk(KERN_ERR "Illegal value %d\n", flag);
- return -1;
+ return -EINVAL;
+ }
+ dev->se_sub_dev->se_dev_attrib.is_nonrot = flag;
+ pr_debug("dev[%p]: SE Device is_nonrot bit: %d\n",
+ dev, flag);
+ return 0;
+}
+
+int se_dev_set_emulate_rest_reord(struct se_device *dev, int flag)
+{
+ if (flag != 0) {
+ printk(KERN_ERR "dev[%p]: SE Device emulatation of restricted"
+ " reordering not implemented\n", dev);
+ return -ENOSYS;
}
- DEV_ATTRIB(dev)->enforce_pr_isids = flag;
- printk(KERN_INFO "dev[%p]: SE Device enforce_pr_isids bit: %s\n", dev,
- (DEV_ATTRIB(dev)->enforce_pr_isids) ? "Enabled" : "Disabled");
+ dev->se_sub_dev->se_dev_attrib.emulate_rest_reord = flag;
+ pr_debug("dev[%p]: SE Device emulate_rest_reord: %d\n", dev, flag);
return 0;
}
@@ -1130,44 +1154,44 @@ int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
u32 orig_queue_depth = dev->queue_depth;
if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
- printk(KERN_ERR "dev[%p]: Unable to change SE Device TCQ while"
+ pr_err("dev[%p]: Unable to change SE Device TCQ while"
" dev_export_obj: %d count exists\n", dev,
atomic_read(&dev->dev_export_obj.obj_access_count));
- return -1;
+ return -EINVAL;
}
- if (!(queue_depth)) {
- printk(KERN_ERR "dev[%p]: Illegal ZERO value for queue"
+ if (!queue_depth) {
+ pr_err("dev[%p]: Illegal ZERO value for queue"
"_depth\n", dev);
- return -1;
+ return -EINVAL;
}
- if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
- if (queue_depth > DEV_ATTRIB(dev)->hw_queue_depth) {
- printk(KERN_ERR "dev[%p]: Passed queue_depth: %u"
+ if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
+ if (queue_depth > dev->se_sub_dev->se_dev_attrib.hw_queue_depth) {
+ pr_err("dev[%p]: Passed queue_depth: %u"
" exceeds TCM/SE_Device TCQ: %u\n",
dev, queue_depth,
- DEV_ATTRIB(dev)->hw_queue_depth);
- return -1;
+ dev->se_sub_dev->se_dev_attrib.hw_queue_depth);
+ return -EINVAL;
}
} else {
- if (queue_depth > DEV_ATTRIB(dev)->queue_depth) {
- if (queue_depth > DEV_ATTRIB(dev)->hw_queue_depth) {
- printk(KERN_ERR "dev[%p]: Passed queue_depth:"
+ if (queue_depth > dev->se_sub_dev->se_dev_attrib.queue_depth) {
+ if (queue_depth > dev->se_sub_dev->se_dev_attrib.hw_queue_depth) {
+ pr_err("dev[%p]: Passed queue_depth:"
" %u exceeds TCM/SE_Device MAX"
" TCQ: %u\n", dev, queue_depth,
- DEV_ATTRIB(dev)->hw_queue_depth);
- return -1;
+ dev->se_sub_dev->se_dev_attrib.hw_queue_depth);
+ return -EINVAL;
}
}
}
- DEV_ATTRIB(dev)->queue_depth = dev->queue_depth = queue_depth;
+ dev->se_sub_dev->se_dev_attrib.queue_depth = dev->queue_depth = queue_depth;
if (queue_depth > orig_queue_depth)
atomic_add(queue_depth - orig_queue_depth, &dev->depth_left);
else if (queue_depth < orig_queue_depth)
atomic_sub(orig_queue_depth - queue_depth, &dev->depth_left);
- printk(KERN_INFO "dev[%p]: SE Device TCQ Depth changed to: %u\n",
+ pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n",
dev, queue_depth);
return 0;
}
@@ -1177,50 +1201,50 @@ int se_dev_set_max_sectors(struct se_device *dev, u32 max_sectors)
int force = 0; /* Force setting for VDEVS */
if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
- printk(KERN_ERR "dev[%p]: Unable to change SE Device"
+ pr_err("dev[%p]: Unable to change SE Device"
" max_sectors while dev_export_obj: %d count exists\n",
dev, atomic_read(&dev->dev_export_obj.obj_access_count));
- return -1;
+ return -EINVAL;
}
- if (!(max_sectors)) {
- printk(KERN_ERR "dev[%p]: Illegal ZERO value for"
+ if (!max_sectors) {
+ pr_err("dev[%p]: Illegal ZERO value for"
" max_sectors\n", dev);
- return -1;
+ return -EINVAL;
}
if (max_sectors < DA_STATUS_MAX_SECTORS_MIN) {
- printk(KERN_ERR "dev[%p]: Passed max_sectors: %u less than"
+ pr_err("dev[%p]: Passed max_sectors: %u less than"
" DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, max_sectors,
DA_STATUS_MAX_SECTORS_MIN);
- return -1;
+ return -EINVAL;
}
- if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
- if (max_sectors > DEV_ATTRIB(dev)->hw_max_sectors) {
- printk(KERN_ERR "dev[%p]: Passed max_sectors: %u"
+ if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
+ if (max_sectors > dev->se_sub_dev->se_dev_attrib.hw_max_sectors) {
+ pr_err("dev[%p]: Passed max_sectors: %u"
" greater than TCM/SE_Device max_sectors:"
" %u\n", dev, max_sectors,
- DEV_ATTRIB(dev)->hw_max_sectors);
- return -1;
+ dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
+ return -EINVAL;
}
} else {
- if (!(force) && (max_sectors >
- DEV_ATTRIB(dev)->hw_max_sectors)) {
- printk(KERN_ERR "dev[%p]: Passed max_sectors: %u"
+ if (!force && (max_sectors >
+ dev->se_sub_dev->se_dev_attrib.hw_max_sectors)) {
+ pr_err("dev[%p]: Passed max_sectors: %u"
" greater than TCM/SE_Device max_sectors"
": %u, use force=1 to override.\n", dev,
- max_sectors, DEV_ATTRIB(dev)->hw_max_sectors);
- return -1;
+ max_sectors, dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
+ return -EINVAL;
}
if (max_sectors > DA_STATUS_MAX_SECTORS_MAX) {
- printk(KERN_ERR "dev[%p]: Passed max_sectors: %u"
+ pr_err("dev[%p]: Passed max_sectors: %u"
" greater than DA_STATUS_MAX_SECTORS_MAX:"
" %u\n", dev, max_sectors,
DA_STATUS_MAX_SECTORS_MAX);
- return -1;
+ return -EINVAL;
}
}
- DEV_ATTRIB(dev)->max_sectors = max_sectors;
- printk("dev[%p]: SE Device max_sectors changed to %u\n",
+ dev->se_sub_dev->se_dev_attrib.max_sectors = max_sectors;
+ pr_debug("dev[%p]: SE Device max_sectors changed to %u\n",
dev, max_sectors);
return 0;
}
@@ -1228,25 +1252,25 @@ int se_dev_set_max_sectors(struct se_device *dev, u32 max_sectors)
int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
{
if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
- printk(KERN_ERR "dev[%p]: Unable to change SE Device"
+ pr_err("dev[%p]: Unable to change SE Device"
" optimal_sectors while dev_export_obj: %d count exists\n",
dev, atomic_read(&dev->dev_export_obj.obj_access_count));
return -EINVAL;
}
- if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
- printk(KERN_ERR "dev[%p]: Passed optimal_sectors cannot be"
+ if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
+ pr_err("dev[%p]: Passed optimal_sectors cannot be"
" changed for TCM/pSCSI\n", dev);
return -EINVAL;
}
- if (optimal_sectors > DEV_ATTRIB(dev)->max_sectors) {
- printk(KERN_ERR "dev[%p]: Passed optimal_sectors %u cannot be"
+ if (optimal_sectors > dev->se_sub_dev->se_dev_attrib.max_sectors) {
+ pr_err("dev[%p]: Passed optimal_sectors %u cannot be"
" greater than max_sectors: %u\n", dev,
- optimal_sectors, DEV_ATTRIB(dev)->max_sectors);
+ optimal_sectors, dev->se_sub_dev->se_dev_attrib.max_sectors);
return -EINVAL;
}
- DEV_ATTRIB(dev)->optimal_sectors = optimal_sectors;
- printk(KERN_INFO "dev[%p]: SE Device optimal_sectors changed to %u\n",
+ dev->se_sub_dev->se_dev_attrib.optimal_sectors = optimal_sectors;
+ pr_debug("dev[%p]: SE Device optimal_sectors changed to %u\n",
dev, optimal_sectors);
return 0;
}
@@ -1254,31 +1278,31 @@ int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
int se_dev_set_block_size(struct se_device *dev, u32 block_size)
{
if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
- printk(KERN_ERR "dev[%p]: Unable to change SE Device block_size"
+ pr_err("dev[%p]: Unable to change SE Device block_size"
" while dev_export_obj: %d count exists\n", dev,
atomic_read(&dev->dev_export_obj.obj_access_count));
- return -1;
+ return -EINVAL;
}
if ((block_size != 512) &&
(block_size != 1024) &&
(block_size != 2048) &&
(block_size != 4096)) {
- printk(KERN_ERR "dev[%p]: Illegal value for block_device: %u"
+ pr_err("dev[%p]: Illegal value for block_device: %u"
" for SE device, must be 512, 1024, 2048 or 4096\n",
dev, block_size);
- return -1;
+ return -EINVAL;
}
- if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
- printk(KERN_ERR "dev[%p]: Not allowed to change block_size for"
+ if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
+ pr_err("dev[%p]: Not allowed to change block_size for"
" Physical Device, use for Linux/SCSI to change"
" block_size for underlying hardware\n", dev);
- return -1;
+ return -EINVAL;
}
- DEV_ATTRIB(dev)->block_size = block_size;
- printk(KERN_INFO "dev[%p]: SE Device block_size changed to %u\n",
+ dev->se_sub_dev->se_dev_attrib.block_size = block_size;
+ pr_debug("dev[%p]: SE Device block_size changed to %u\n",
dev, block_size);
return 0;
}
@@ -1293,13 +1317,13 @@ struct se_lun *core_dev_add_lun(
u32 lun_access = 0;
if (atomic_read(&dev->dev_access_obj.obj_access_count) != 0) {
- printk(KERN_ERR "Unable to export struct se_device while dev_access_obj: %d\n",
+ pr_err("Unable to export struct se_device while dev_access_obj: %d\n",
atomic_read(&dev->dev_access_obj.obj_access_count));
return NULL;
}
lun_p = core_tpg_pre_addlun(tpg, lun);
- if ((IS_ERR(lun_p)) || !(lun_p))
+ if ((IS_ERR(lun_p)) || !lun_p)
return NULL;
if (dev->dev_flags & DF_READ_ONLY)
@@ -1310,15 +1334,15 @@ struct se_lun *core_dev_add_lun(
if (core_tpg_post_addlun(tpg, lun_p, lun_access, dev) < 0)
return NULL;
- printk(KERN_INFO "%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from"
- " CORE HBA: %u\n", TPG_TFO(tpg)->get_fabric_name(),
- TPG_TFO(tpg)->tpg_get_tag(tpg), lun_p->unpacked_lun,
- TPG_TFO(tpg)->get_fabric_name(), hba->hba_id);
+ pr_debug("%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from"
+ " CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
+ tpg->se_tpg_tfo->tpg_get_tag(tpg), lun_p->unpacked_lun,
+ tpg->se_tpg_tfo->get_fabric_name(), hba->hba_id);
/*
* Update LUN maps for dynamically added initiators when
* generate_node_acl is enabled.
*/
- if (TPG_TFO(tpg)->tpg_check_demo_mode(tpg)) {
+ if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) {
struct se_node_acl *acl;
spin_lock_bh(&tpg->acl_node_lock);
list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
@@ -1346,15 +1370,15 @@ int core_dev_del_lun(
int ret = 0;
lun = core_tpg_pre_dellun(tpg, unpacked_lun, &ret);
- if (!(lun))
+ if (!lun)
return ret;
core_tpg_post_dellun(tpg, lun);
- printk(KERN_INFO "%s_TPG[%u]_LUN[%u] - Deactivated %s Logical Unit from"
- " device object\n", TPG_TFO(tpg)->get_fabric_name(),
- TPG_TFO(tpg)->tpg_get_tag(tpg), unpacked_lun,
- TPG_TFO(tpg)->get_fabric_name());
+ pr_debug("%s_TPG[%u]_LUN[%u] - Deactivated %s Logical Unit from"
+ " device object\n", tpg->se_tpg_tfo->get_fabric_name(),
+ tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun,
+ tpg->se_tpg_tfo->get_fabric_name());
return 0;
}
@@ -1365,21 +1389,21 @@ struct se_lun *core_get_lun_from_tpg(struct se_portal_group *tpg, u32 unpacked_l
spin_lock(&tpg->tpg_lun_lock);
if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
- printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS"
+ pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS"
"_PER_TPG-1: %u for Target Portal Group: %hu\n",
- TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
+ tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
TRANSPORT_MAX_LUNS_PER_TPG-1,
- TPG_TFO(tpg)->tpg_get_tag(tpg));
+ tpg->se_tpg_tfo->tpg_get_tag(tpg));
spin_unlock(&tpg->tpg_lun_lock);
return NULL;
}
lun = &tpg->tpg_lun_list[unpacked_lun];
if (lun->lun_status != TRANSPORT_LUN_STATUS_FREE) {
- printk(KERN_ERR "%s Logical Unit Number: %u is not free on"
+ pr_err("%s Logical Unit Number: %u is not free on"
" Target Portal Group: %hu, ignoring request.\n",
- TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
- TPG_TFO(tpg)->tpg_get_tag(tpg));
+ tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
+ tpg->se_tpg_tfo->tpg_get_tag(tpg));
spin_unlock(&tpg->tpg_lun_lock);
return NULL;
}
@@ -1398,21 +1422,21 @@ static struct se_lun *core_dev_get_lun(struct se_portal_group *tpg, u32 unpacked
spin_lock(&tpg->tpg_lun_lock);
if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
- printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER"
+ pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER"
"_TPG-1: %u for Target Portal Group: %hu\n",
- TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
+ tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
TRANSPORT_MAX_LUNS_PER_TPG-1,
- TPG_TFO(tpg)->tpg_get_tag(tpg));
+ tpg->se_tpg_tfo->tpg_get_tag(tpg));
spin_unlock(&tpg->tpg_lun_lock);
return NULL;
}
lun = &tpg->tpg_lun_list[unpacked_lun];
if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
- printk(KERN_ERR "%s Logical Unit Number: %u is not active on"
+ pr_err("%s Logical Unit Number: %u is not active on"
" Target Portal Group: %hu, ignoring request.\n",
- TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
- TPG_TFO(tpg)->tpg_get_tag(tpg));
+ tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
+ tpg->se_tpg_tfo->tpg_get_tag(tpg));
spin_unlock(&tpg->tpg_lun_lock);
return NULL;
}
@@ -1430,20 +1454,20 @@ struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
struct se_lun_acl *lacl;
struct se_node_acl *nacl;
- if (strlen(initiatorname) > TRANSPORT_IQN_LEN) {
- printk(KERN_ERR "%s InitiatorName exceeds maximum size.\n",
- TPG_TFO(tpg)->get_fabric_name());
+ if (strlen(initiatorname) >= TRANSPORT_IQN_LEN) {
+ pr_err("%s InitiatorName exceeds maximum size.\n",
+ tpg->se_tpg_tfo->get_fabric_name());
*ret = -EOVERFLOW;
return NULL;
}
nacl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
- if (!(nacl)) {
+ if (!nacl) {
*ret = -EINVAL;
return NULL;
}
lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
- if (!(lacl)) {
- printk(KERN_ERR "Unable to allocate memory for struct se_lun_acl.\n");
+ if (!lacl) {
+ pr_err("Unable to allocate memory for struct se_lun_acl.\n");
*ret = -ENOMEM;
return NULL;
}
@@ -1466,16 +1490,16 @@ int core_dev_add_initiator_node_lun_acl(
struct se_node_acl *nacl;
lun = core_dev_get_lun(tpg, unpacked_lun);
- if (!(lun)) {
- printk(KERN_ERR "%s Logical Unit Number: %u is not active on"
+ if (!lun) {
+ pr_err("%s Logical Unit Number: %u is not active on"
" Target Portal Group: %hu, ignoring request.\n",
- TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
- TPG_TFO(tpg)->tpg_get_tag(tpg));
+ tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
+ tpg->se_tpg_tfo->tpg_get_tag(tpg));
return -EINVAL;
}
nacl = lacl->se_lun_nacl;
- if (!(nacl))
+ if (!nacl)
return -EINVAL;
if ((lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) &&
@@ -1494,9 +1518,9 @@ int core_dev_add_initiator_node_lun_acl(
smp_mb__after_atomic_inc();
spin_unlock(&lun->lun_acl_lock);
- printk(KERN_INFO "%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for "
- " InitiatorNode: %s\n", TPG_TFO(tpg)->get_fabric_name(),
- TPG_TFO(tpg)->tpg_get_tag(tpg), unpacked_lun, lacl->mapped_lun,
+ pr_debug("%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for "
+ " InitiatorNode: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
+ tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun, lacl->mapped_lun,
(lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) ? "RW" : "RO",
lacl->initiatorname);
/*
@@ -1519,7 +1543,7 @@ int core_dev_del_initiator_node_lun_acl(
struct se_node_acl *nacl;
nacl = lacl->se_lun_nacl;
- if (!(nacl))
+ if (!nacl)
return -EINVAL;
spin_lock(&lun->lun_acl_lock);
@@ -1533,10 +1557,10 @@ int core_dev_del_initiator_node_lun_acl(
lacl->se_lun = NULL;
- printk(KERN_INFO "%s_TPG[%hu]_LUN[%u] - Removed ACL for"
+ pr_debug("%s_TPG[%hu]_LUN[%u] - Removed ACL for"
" InitiatorNode: %s Mapped LUN: %u\n",
- TPG_TFO(tpg)->get_fabric_name(),
- TPG_TFO(tpg)->tpg_get_tag(tpg), lun->unpacked_lun,
+ tpg->se_tpg_tfo->get_fabric_name(),
+ tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
lacl->initiatorname, lacl->mapped_lun);
return 0;
@@ -1546,10 +1570,10 @@ void core_dev_free_initiator_node_lun_acl(
struct se_portal_group *tpg,
struct se_lun_acl *lacl)
{
- printk("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
- " Mapped LUN: %u\n", TPG_TFO(tpg)->get_fabric_name(),
- TPG_TFO(tpg)->tpg_get_tag(tpg),
- TPG_TFO(tpg)->get_fabric_name(),
+ pr_debug("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
+ " Mapped LUN: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
+ tpg->se_tpg_tfo->tpg_get_tag(tpg),
+ tpg->se_tpg_tfo->get_fabric_name(),
lacl->initiatorname, lacl->mapped_lun);
kfree(lacl);
@@ -1564,64 +1588,64 @@ int core_dev_setup_virtual_lun0(void)
char buf[16];
int ret;
- hba = core_alloc_hba("rd_dr", 0, HBA_FLAGS_INTERNAL_USE);
+ hba = core_alloc_hba("rd_mcp", 0, HBA_FLAGS_INTERNAL_USE);
if (IS_ERR(hba))
return PTR_ERR(hba);
- se_global->g_lun0_hba = hba;
+ lun0_hba = hba;
t = hba->transport;
se_dev = kzalloc(sizeof(struct se_subsystem_dev), GFP_KERNEL);
- if (!(se_dev)) {
- printk(KERN_ERR "Unable to allocate memory for"
+ if (!se_dev) {
+ pr_err("Unable to allocate memory for"
" struct se_subsystem_dev\n");
ret = -ENOMEM;
goto out;
}
- INIT_LIST_HEAD(&se_dev->g_se_dev_list);
+ INIT_LIST_HEAD(&se_dev->se_dev_node);
INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list);
spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock);
- INIT_LIST_HEAD(&se_dev->t10_reservation.registration_list);
- INIT_LIST_HEAD(&se_dev->t10_reservation.aptpl_reg_list);
- spin_lock_init(&se_dev->t10_reservation.registration_lock);
- spin_lock_init(&se_dev->t10_reservation.aptpl_reg_lock);
+ INIT_LIST_HEAD(&se_dev->t10_pr.registration_list);
+ INIT_LIST_HEAD(&se_dev->t10_pr.aptpl_reg_list);
+ spin_lock_init(&se_dev->t10_pr.registration_lock);
+ spin_lock_init(&se_dev->t10_pr.aptpl_reg_lock);
INIT_LIST_HEAD(&se_dev->t10_alua.tg_pt_gps_list);
spin_lock_init(&se_dev->t10_alua.tg_pt_gps_lock);
spin_lock_init(&se_dev->se_dev_lock);
- se_dev->t10_reservation.pr_aptpl_buf_len = PR_APTPL_BUF_LEN;
+ se_dev->t10_pr.pr_aptpl_buf_len = PR_APTPL_BUF_LEN;
se_dev->t10_wwn.t10_sub_dev = se_dev;
se_dev->t10_alua.t10_sub_dev = se_dev;
se_dev->se_dev_attrib.da_sub_dev = se_dev;
se_dev->se_dev_hba = hba;
se_dev->se_dev_su_ptr = t->allocate_virtdevice(hba, "virt_lun0");
- if (!(se_dev->se_dev_su_ptr)) {
- printk(KERN_ERR "Unable to locate subsystem dependent pointer"
+ if (!se_dev->se_dev_su_ptr) {
+ pr_err("Unable to locate subsystem dependent pointer"
" from allocate_virtdevice()\n");
ret = -ENOMEM;
goto out;
}
- se_global->g_lun0_su_dev = se_dev;
+ lun0_su_dev = se_dev;
memset(buf, 0, 16);
sprintf(buf, "rd_pages=8");
t->set_configfs_dev_params(hba, se_dev, buf, sizeof(buf));
dev = t->create_virtdevice(hba, se_dev, se_dev->se_dev_su_ptr);
- if (!(dev) || IS_ERR(dev)) {
- ret = -ENOMEM;
+ if (IS_ERR(dev)) {
+ ret = PTR_ERR(dev);
goto out;
}
se_dev->se_dev_ptr = dev;
- se_global->g_lun0_dev = dev;
+ g_lun0_dev = dev;
return 0;
out:
- se_global->g_lun0_su_dev = NULL;
+ lun0_su_dev = NULL;
kfree(se_dev);
- if (se_global->g_lun0_hba) {
- core_delete_hba(se_global->g_lun0_hba);
- se_global->g_lun0_hba = NULL;
+ if (lun0_hba) {
+ core_delete_hba(lun0_hba);
+ lun0_hba = NULL;
}
return ret;
}
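
core_alloc_port() and create_virtdevice() now follow the kernel's ERR_PTR convention, which is what lets core_dev_export() and the out: path above forward a specific errno instead of collapsing every failure to one value. The idiom in miniature (a sketch; linux/err.h provides the three helpers):

	#include <linux/err.h>
	#include <linux/slab.h>

	/* Producer: encode the errno inside the returned pointer. */
	static struct se_port *alloc_port_sketch(void)
	{
		struct se_port *port = kzalloc(sizeof(*port), GFP_KERNEL);

		if (!port)
			return ERR_PTR(-ENOMEM);
		return port;
	}

	/* Consumer: one IS_ERR() test replaces the NULL-or-(-1) dance. */
	port = alloc_port_sketch();
	if (IS_ERR(port))
		return PTR_ERR(port);
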
@@ -1629,14 +1653,14 @@ out:
void core_dev_release_virtual_lun0(void)
{
- struct se_hba *hba = se_global->g_lun0_hba;
- struct se_subsystem_dev *su_dev = se_global->g_lun0_su_dev;
+ struct se_hba *hba = lun0_hba;
+ struct se_subsystem_dev *su_dev = lun0_su_dev;
- if (!(hba))
+ if (!hba)
return;
- if (se_global->g_lun0_dev)
- se_free_virtual_device(se_global->g_lun0_dev, hba);
+ if (g_lun0_dev)
+ se_free_virtual_device(g_lun0_dev, hba);
kfree(su_dev);
core_delete_hba(hba);
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
index 07ab5a3bb8e..f1654694f4e 100644
--- a/drivers/target/target_core_fabric_configfs.c
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -60,7 +60,7 @@ static void target_fabric_setup_##_name##_cit(struct target_fabric_configfs *tf)
cit->ct_group_ops = _group_ops; \
cit->ct_attrs = _attrs; \
cit->ct_owner = tf->tf_module; \
- printk("Setup generic %s\n", __stringify(_name)); \
+ pr_debug("Setup generic %s\n", __stringify(_name)); \
}
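
Each target_fabric_setup_<name>_cit() helper is stamped out by this one macro; in mainline the surrounding wrapper is TF_CIT_SETUP(), so an instantiation looks roughly like the following (item-ops and attribute names are illustrative):

	TF_CIT_SETUP(tpg_port, &target_fabric_port_item_ops, NULL,
		     target_fabric_port_attrs);
	/* defines: static void target_fabric_setup_tpg_port_cit(tf) */
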
/* Start of tfc_tpg_mappedlun_cit */
@@ -80,8 +80,8 @@ static int target_fabric_mappedlun_link(
/*
* Ensure that the source port exists
*/
- if (!(lun->lun_sep) || !(lun->lun_sep->sep_tpg)) {
- printk(KERN_ERR "Source se_lun->lun_sep or lun->lun_sep->sep"
+ if (!lun->lun_sep || !lun->lun_sep->sep_tpg) {
+ pr_err("Source se_lun->lun_sep or lun->lun_sep->sep"
"_tpg does not exist\n");
return -EINVAL;
}
@@ -96,12 +96,12 @@ static int target_fabric_mappedlun_link(
* Make sure the SymLink is going to the same $FABRIC/$WWN/tpgt_$TPGT
*/
if (strcmp(config_item_name(wwn_ci), config_item_name(wwn_ci_s))) {
- printk(KERN_ERR "Illegal Initiator ACL SymLink outside of %s\n",
+ pr_err("Illegal Initiator ACL SymLink outside of %s\n",
config_item_name(wwn_ci));
return -EINVAL;
}
if (strcmp(config_item_name(tpg_ci), config_item_name(tpg_ci_s))) {
- printk(KERN_ERR "Illegal Initiator ACL Symlink outside of %s"
+ pr_err("Illegal Initiator ACL Symlink outside of %s"
" TPGT: %s\n", config_item_name(wwn_ci),
config_item_name(tpg_ci));
return -EINVAL;
@@ -118,7 +118,7 @@ static int target_fabric_mappedlun_link(
lun_access = deve->lun_flags;
else
lun_access =
- (TPG_TFO(se_tpg)->tpg_check_prod_mode_write_protect(
+ (se_tpg->se_tpg_tfo->tpg_check_prod_mode_write_protect(
se_tpg)) ? TRANSPORT_LUNFLAGS_READ_ONLY :
TRANSPORT_LUNFLAGS_READ_WRITE;
spin_unlock_irq(&lacl->se_lun_nacl->device_list_lock);
@@ -147,7 +147,7 @@ static int target_fabric_mappedlun_unlink(
/*
* Determine if the underlying MappedLUN has already been released..
*/
- if (!(deve->se_lun))
+ if (!deve->se_lun)
return 0;
lun = container_of(to_config_group(lun_ci), struct se_lun, lun_group);
@@ -202,9 +202,9 @@ static ssize_t target_fabric_mappedlun_store_write_protect(
TRANSPORT_LUNFLAGS_READ_WRITE,
lacl->se_lun_nacl);
- printk(KERN_INFO "%s_ConfigFS: Changed Initiator ACL: %s"
+ pr_debug("%s_ConfigFS: Changed Initiator ACL: %s"
" Mapped LUN: %u Write Protect bit to %s\n",
- TPG_TFO(se_tpg)->get_fabric_name(),
+ se_tpg->se_tpg_tfo->get_fabric_name(),
lacl->initiatorname, lacl->mapped_lun, (op) ? "ON" : "OFF");
return count;
@@ -327,14 +327,14 @@ static struct config_group *target_fabric_make_mappedlun(
int ret = 0;
acl_ci = &group->cg_item;
- if (!(acl_ci)) {
- printk(KERN_ERR "Unable to locatel acl_ci\n");
+ if (!acl_ci) {
+ pr_err("Unable to locatel acl_ci\n");
return NULL;
}
buf = kzalloc(strlen(name) + 1, GFP_KERNEL);
- if (!(buf)) {
- printk(KERN_ERR "Unable to allocate memory for name buf\n");
+ if (!buf) {
+ pr_err("Unable to allocate memory for name buf\n");
return ERR_PTR(-ENOMEM);
}
snprintf(buf, strlen(name) + 1, "%s", name);
@@ -342,7 +342,7 @@ static struct config_group *target_fabric_make_mappedlun(
* Make sure user is creating iscsi/$IQN/$TPGT/acls/$INITIATOR/lun_$ID.
*/
if (strstr(buf, "lun_") != buf) {
- printk(KERN_ERR "Unable to locate \"lun_\" from buf: %s"
+ pr_err("Unable to locate \"lun_\" from buf: %s"
" name: %s\n", buf, name);
ret = -EINVAL;
goto out;
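
The strstr(buf, "lun_") != buf test is the usual kernel idiom for a prefix match: strstr() returns a pointer to the first occurrence, which equals the buffer start only when the string begins with the prefix. Standalone, with the number parse that follows it (a sketch; the helper name is invented):

	/* Accept only "lun_<N>" and extract N. */
	static int parse_lun_name_sketch(const char *name, unsigned long *lun)
	{
		if (strstr(name, "lun_") != name)
			return -EINVAL;
		return kstrtoul(name + 4, 0, lun);
	}
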
@@ -358,7 +358,7 @@ static struct config_group *target_fabric_make_mappedlun(
lacl = core_dev_init_initiator_node_lun_acl(se_tpg, mapped_lun,
config_item_name(acl_ci), &ret);
- if (!(lacl)) {
+ if (!lacl) {
ret = -EINVAL;
goto out;
}
@@ -367,7 +367,7 @@ static struct config_group *target_fabric_make_mappedlun(
lacl_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
GFP_KERNEL);
if (!lacl_cg->default_groups) {
- printk(KERN_ERR "Unable to allocate lacl_cg->default_groups\n");
+ pr_err("Unable to allocate lacl_cg->default_groups\n");
ret = -ENOMEM;
goto out;
}
@@ -379,11 +379,11 @@ static struct config_group *target_fabric_make_mappedlun(
lacl_cg->default_groups[0] = &lacl->ml_stat_grps.stat_group;
lacl_cg->default_groups[1] = NULL;
- ml_stat_grp = &ML_STAT_GRPS(lacl)->stat_group;
+ ml_stat_grp = &lacl->ml_stat_grps.stat_group;
ml_stat_grp->default_groups = kzalloc(sizeof(struct config_group) * 3,
GFP_KERNEL);
if (!ml_stat_grp->default_groups) {
- printk(KERN_ERR "Unable to allocate ml_stat_grp->default_groups\n");
+ pr_err("Unable to allocate ml_stat_grp->default_groups\n");
ret = -ENOMEM;
goto out;
}
@@ -408,7 +408,7 @@ static void target_fabric_drop_mappedlun(
struct config_group *lacl_cg = NULL, *ml_stat_grp = NULL;
int i;
- ml_stat_grp = &ML_STAT_GRPS(lacl)->stat_group;
+ ml_stat_grp = &lacl->ml_stat_grps.stat_group;
for (i = 0; ml_stat_grp->default_groups[i]; i++) {
df_item = &ml_stat_grp->default_groups[i]->cg_item;
ml_stat_grp->default_groups[i] = NULL;
@@ -474,8 +474,8 @@ static struct config_group *target_fabric_make_nodeacl(
struct se_node_acl *se_nacl;
struct config_group *nacl_cg;
- if (!(tf->tf_ops.fabric_make_nodeacl)) {
- printk(KERN_ERR "tf->tf_ops.fabric_make_nodeacl is NULL\n");
+ if (!tf->tf_ops.fabric_make_nodeacl) {
+ pr_err("tf->tf_ops.fabric_make_nodeacl is NULL\n");
return ERR_PTR(-ENOSYS);
}
@@ -572,13 +572,13 @@ static struct config_group *target_fabric_make_np(
struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
struct se_tpg_np *se_tpg_np;
- if (!(tf->tf_ops.fabric_make_np)) {
- printk(KERN_ERR "tf->tf_ops.fabric_make_np is NULL\n");
+ if (!tf->tf_ops.fabric_make_np) {
+ pr_err("tf->tf_ops.fabric_make_np is NULL\n");
return ERR_PTR(-ENOSYS);
}
se_tpg_np = tf->tf_ops.fabric_make_np(se_tpg, group, name);
- if (!(se_tpg_np) || IS_ERR(se_tpg_np))
+ if (!se_tpg_np || IS_ERR(se_tpg_np))
return ERR_PTR(-EINVAL);
se_tpg_np->tpg_np_parent = se_tpg;
@@ -627,10 +627,7 @@ static ssize_t target_fabric_port_show_attr_alua_tg_pt_gp(
struct se_lun *lun,
char *page)
{
- if (!(lun))
- return -ENODEV;
-
- if (!(lun->lun_sep))
+ if (!lun || !lun->lun_sep)
return -ENODEV;
return core_alua_show_tg_pt_gp_info(lun->lun_sep, page);
@@ -641,10 +638,7 @@ static ssize_t target_fabric_port_store_attr_alua_tg_pt_gp(
const char *page,
size_t count)
{
- if (!(lun))
- return -ENODEV;
-
- if (!(lun->lun_sep))
+ if (!lun || !lun->lun_sep)
return -ENODEV;
return core_alua_store_tg_pt_gp_info(lun->lun_sep, page, count);
@@ -659,10 +653,7 @@ static ssize_t target_fabric_port_show_attr_alua_tg_pt_offline(
struct se_lun *lun,
char *page)
{
- if (!(lun))
- return -ENODEV;
-
- if (!(lun->lun_sep))
+ if (!lun || !lun->lun_sep)
return -ENODEV;
return core_alua_show_offline_bit(lun, page);
@@ -673,10 +664,7 @@ static ssize_t target_fabric_port_store_attr_alua_tg_pt_offline(
const char *page,
size_t count)
{
- if (!(lun))
- return -ENODEV;
-
- if (!(lun->lun_sep))
+ if (!lun || !lun->lun_sep)
return -ENODEV;
return core_alua_store_offline_bit(lun, page, count);
@@ -691,10 +679,7 @@ static ssize_t target_fabric_port_show_attr_alua_tg_pt_status(
struct se_lun *lun,
char *page)
{
- if (!(lun))
- return -ENODEV;
-
- if (!(lun->lun_sep))
+ if (!lun || !lun->lun_sep)
return -ENODEV;
return core_alua_show_secondary_status(lun, page);
@@ -705,10 +690,7 @@ static ssize_t target_fabric_port_store_attr_alua_tg_pt_status(
const char *page,
size_t count)
{
- if (!(lun))
- return -ENODEV;
-
- if (!(lun->lun_sep))
+ if (!lun || !lun->lun_sep)
return -ENODEV;
return core_alua_store_secondary_status(lun, page, count);
@@ -723,10 +705,7 @@ static ssize_t target_fabric_port_show_attr_alua_tg_pt_write_md(
struct se_lun *lun,
char *page)
{
- if (!(lun))
- return -ENODEV;
-
- if (!(lun->lun_sep))
+ if (!lun || !lun->lun_sep)
return -ENODEV;
return core_alua_show_secondary_write_metadata(lun, page);
@@ -737,10 +716,7 @@ static ssize_t target_fabric_port_store_attr_alua_tg_pt_write_md(
const char *page,
size_t count)
{
- if (!(lun))
- return -ENODEV;
-
- if (!(lun->lun_sep))
+ if (!lun || !lun->lun_sep)
return -ENODEV;
return core_alua_store_secondary_write_metadata(lun, page, count);
@@ -781,13 +757,13 @@ static int target_fabric_port_link(
tf = se_tpg->se_tpg_wwn->wwn_tf;
if (lun->lun_se_dev != NULL) {
- printk(KERN_ERR "Port Symlink already exists\n");
+ pr_err("Port Symlink already exists\n");
return -EEXIST;
}
dev = se_dev->se_dev_ptr;
- if (!(dev)) {
- printk(KERN_ERR "Unable to locate struct se_device pointer from"
+ if (!dev) {
+ pr_err("Unable to locate struct se_device pointer from"
" %s\n", config_item_name(se_dev_ci));
ret = -ENODEV;
goto out;
@@ -795,8 +771,8 @@ static int target_fabric_port_link(
lun_p = core_dev_add_lun(se_tpg, dev->se_hba, dev,
lun->unpacked_lun);
- if ((IS_ERR(lun_p)) || !(lun_p)) {
- printk(KERN_ERR "core_dev_add_lun() failed\n");
+ if (IS_ERR(lun_p) || !lun_p) {
+ pr_err("core_dev_add_lun() failed\n");
ret = -EINVAL;
goto out;
}
@@ -888,7 +864,7 @@ static struct config_group *target_fabric_make_lun(
int errno;
if (strstr(name, "lun_") != name) {
- printk(KERN_ERR "Unable to locate \'_\" in"
+ pr_err("Unable to locate \'_\" in"
" \"lun_$LUN_NUMBER\"\n");
return ERR_PTR(-EINVAL);
}
@@ -896,14 +872,14 @@ static struct config_group *target_fabric_make_lun(
return ERR_PTR(-EINVAL);
lun = core_get_lun_from_tpg(se_tpg, unpacked_lun);
- if (!(lun))
+ if (!lun)
return ERR_PTR(-EINVAL);
lun_cg = &lun->lun_group;
lun_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
GFP_KERNEL);
if (!lun_cg->default_groups) {
- printk(KERN_ERR "Unable to allocate lun_cg->default_groups\n");
+ pr_err("Unable to allocate lun_cg->default_groups\n");
return ERR_PTR(-ENOMEM);
}
@@ -914,11 +890,11 @@ static struct config_group *target_fabric_make_lun(
lun_cg->default_groups[0] = &lun->port_stat_grps.stat_group;
lun_cg->default_groups[1] = NULL;
- port_stat_grp = &PORT_STAT_GRP(lun)->stat_group;
+ port_stat_grp = &lun->port_stat_grps.stat_group;
port_stat_grp->default_groups = kzalloc(sizeof(struct config_group) * 3,
GFP_KERNEL);
if (!port_stat_grp->default_groups) {
- printk(KERN_ERR "Unable to allocate port_stat_grp->default_groups\n");
+ pr_err("Unable to allocate port_stat_grp->default_groups\n");
errno = -ENOMEM;
goto out;
}
@@ -941,7 +917,7 @@ static void target_fabric_drop_lun(
struct config_group *lun_cg, *port_stat_grp;
int i;
- port_stat_grp = &PORT_STAT_GRP(lun)->stat_group;
+ port_stat_grp = &lun->port_stat_grps.stat_group;
for (i = 0; port_stat_grp->default_groups[i]; i++) {
df_item = &port_stat_grp->default_groups[i]->cg_item;
port_stat_grp->default_groups[i] = NULL;
@@ -1031,13 +1007,13 @@ static struct config_group *target_fabric_make_tpg(
struct target_fabric_configfs *tf = wwn->wwn_tf;
struct se_portal_group *se_tpg;
- if (!(tf->tf_ops.fabric_make_tpg)) {
- printk(KERN_ERR "tf->tf_ops.fabric_make_tpg is NULL\n");
+ if (!tf->tf_ops.fabric_make_tpg) {
+ pr_err("tf->tf_ops.fabric_make_tpg is NULL\n");
return ERR_PTR(-ENOSYS);
}
se_tpg = tf->tf_ops.fabric_make_tpg(wwn, group, name);
- if (!(se_tpg) || IS_ERR(se_tpg))
+ if (!se_tpg || IS_ERR(se_tpg))
return ERR_PTR(-EINVAL);
/*
* Setup default groups from pre-allocated se_tpg->tpg_default_groups
@@ -1130,13 +1106,13 @@ static struct config_group *target_fabric_make_wwn(
struct target_fabric_configfs, tf_group);
struct se_wwn *wwn;
- if (!(tf->tf_ops.fabric_make_wwn)) {
- printk(KERN_ERR "tf->tf_ops.fabric_make_wwn is NULL\n");
+ if (!tf->tf_ops.fabric_make_wwn) {
+ pr_err("tf->tf_ops.fabric_make_wwn is NULL\n");
return ERR_PTR(-ENOSYS);
}
wwn = tf->tf_ops.fabric_make_wwn(tf, group, name);
- if (!(wwn) || IS_ERR(wwn))
+ if (!wwn || IS_ERR(wwn))
return ERR_PTR(-EINVAL);
wwn->wwn_tf = tf;
diff --git a/drivers/target/target_core_fabric_lib.c b/drivers/target/target_core_fabric_lib.c
index 1e193f32489..c4ea3a9a555 100644
--- a/drivers/target/target_core_fabric_lib.c
+++ b/drivers/target/target_core_fabric_lib.c
@@ -25,6 +25,7 @@
*
******************************************************************************/
+#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/spinlock.h>
@@ -61,9 +62,8 @@ u32 sas_get_pr_transport_id(
int *format_code,
unsigned char *buf)
{
- unsigned char binary, *ptr;
- int i;
- u32 off = 4;
+ unsigned char *ptr;
+
/*
* Set PROTOCOL IDENTIFIER to 6h for SAS
*/
@@ -74,10 +74,8 @@ u32 sas_get_pr_transport_id(
*/
ptr = &se_nacl->initiatorname[4]; /* Skip over 'naa. prefix */
- for (i = 0; i < 16; i += 2) {
- binary = transport_asciihex_to_binaryhex(&ptr[i]);
- buf[off++] = binary;
- }
+ hex2bin(&buf[4], ptr, 8);
+
/*
* The SAS Transport ID is a hardcoded 24-byte length
*/
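
hex2bin() from lib/hexdump.c takes the number of output bytes and consumes two ASCII hex characters per byte, so the single call replaces the removed per-byte loop:

	/* Equivalent to the removed loop:
	 *	for (i = 0; i < 16; i += 2)
	 *		buf[off++] = transport_asciihex_to_binaryhex(&ptr[i]);
	 * 16 hex chars after the "naa." prefix -> 8 binary bytes at buf[4].
	 */
	hex2bin(&buf[4], ptr, 8);
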
@@ -157,7 +155,7 @@ u32 fc_get_pr_transport_id(
int *format_code,
unsigned char *buf)
{
- unsigned char binary, *ptr;
+ unsigned char *ptr;
int i;
u32 off = 8;
/*
@@ -172,12 +170,11 @@ u32 fc_get_pr_transport_id(
ptr = &se_nacl->initiatorname[0];
for (i = 0; i < 24; ) {
- if (!(strncmp(&ptr[i], ":", 1))) {
+ if (!strncmp(&ptr[i], ":", 1)) {
i++;
continue;
}
- binary = transport_asciihex_to_binaryhex(&ptr[i]);
- buf[off++] = binary;
+ hex2bin(&buf[off++], &ptr[i], 1);
i += 2;
}
/*
@@ -386,7 +383,7 @@ char *iscsi_parse_pr_out_transport_id(
* Reserved
*/
if ((format_code != 0x00) && (format_code != 0x40)) {
- printk(KERN_ERR "Illegal format code: 0x%02x for iSCSI"
+ pr_err("Illegal format code: 0x%02x for iSCSI"
" Initiator Transport ID\n", format_code);
return NULL;
}
@@ -406,7 +403,7 @@ char *iscsi_parse_pr_out_transport_id(
tid_len += padding;
if ((add_len + 4) != tid_len) {
- printk(KERN_INFO "LIO-Target Extracted add_len: %hu "
+ pr_debug("LIO-Target Extracted add_len: %hu "
"does not match calculated tid_len: %u,"
" using tid_len instead\n", add_len+4, tid_len);
*out_tid_len = tid_len;
@@ -420,8 +417,8 @@ char *iscsi_parse_pr_out_transport_id(
*/
if (format_code == 0x40) {
p = strstr((char *)&buf[4], ",i,0x");
- if (!(p)) {
- printk(KERN_ERR "Unable to locate \",i,0x\" seperator"
+ if (!p) {
+ pr_err("Unable to locate \",i,0x\" seperator"
" for Initiator port identifier: %s\n",
(char *)&buf[4]);
return NULL;
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index 150c4305f38..bc1b33639b8 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -42,18 +42,6 @@
#include "target_core_file.h"
-#if 1
-#define DEBUG_FD_CACHE(x...) printk(x)
-#else
-#define DEBUG_FD_CACHE(x...)
-#endif
-
-#if 1
-#define DEBUG_FD_FUA(x...) printk(x)
-#else
-#define DEBUG_FD_FUA(x...)
-#endif
-
static struct se_subsystem_api fileio_template;
/* fd_attach_hba(): (Part of se_subsystem_api_t template)
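
The deleted block is the old compile-time debug switch; the rest of the patch replaces every DEBUG_FD_* call site with pr_debug(), which compiles out unless DEBUG is defined and can be toggled at runtime under CONFIG_DYNAMIC_DEBUG. A standalone sketch of the two patterns, with a printf()-based stand-in for the kernel's pr_debug():

    #include <stdio.h>

    /* Old pattern: edit the source to flip the switch, then rebuild. */
    #if 1
    #define DEBUG_FD_CACHE(x...) printf(x)
    #else
    #define DEBUG_FD_CACHE(x...)
    #endif

    /*
     * New pattern: one name everywhere. The real pr_debug() is a no-op
     * unless DEBUG or CONFIG_DYNAMIC_DEBUG is set; stand-in below.
     */
    #ifdef DEBUG
    #define pr_debug(x...) printf(x)
    #else
    #define pr_debug(x...) do { } while (0)
    #endif

    int main(void)
    {
        DEBUG_FD_CACHE("old-style debug: %d\n", 1);
        pr_debug("new-style debug: %d\n", 2);  /* silent unless -DDEBUG */
        return 0;
    }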
@@ -65,24 +53,21 @@ static int fd_attach_hba(struct se_hba *hba, u32 host_id)
struct fd_host *fd_host;
fd_host = kzalloc(sizeof(struct fd_host), GFP_KERNEL);
- if (!(fd_host)) {
- printk(KERN_ERR "Unable to allocate memory for struct fd_host\n");
- return -1;
+ if (!fd_host) {
+ pr_err("Unable to allocate memory for struct fd_host\n");
+ return -ENOMEM;
}
fd_host->fd_host_id = host_id;
- atomic_set(&hba->left_queue_depth, FD_HBA_QUEUE_DEPTH);
- atomic_set(&hba->max_queue_depth, FD_HBA_QUEUE_DEPTH);
- hba->hba_ptr = (void *) fd_host;
+ hba->hba_ptr = fd_host;
- printk(KERN_INFO "CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic"
+ pr_debug("CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic"
" Target Core Stack %s\n", hba->hba_id, FD_VERSION,
TARGET_CORE_MOD_VERSION);
- printk(KERN_INFO "CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic"
- " Target Core with TCQ Depth: %d MaxSectors: %u\n",
- hba->hba_id, fd_host->fd_host_id,
- atomic_read(&hba->max_queue_depth), FD_MAX_SECTORS);
+ pr_debug("CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic"
+ " MaxSectors: %u\n",
+ hba->hba_id, fd_host->fd_host_id, FD_MAX_SECTORS);
return 0;
}
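
fd_attach_hba() now returns -ENOMEM instead of a bare -1, a change repeated across the patch, so callers learn why the call failed and can propagate a real errno. A minimal userspace sketch of the convention; attach() and its host object are illustrative:

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Returns 0 on success or a negative errno, as the reworked hunks do. */
    static int attach(void **out)
    {
        void *host = calloc(1, 64);

        if (!host) {
            fprintf(stderr, "Unable to allocate host\n");
            return -ENOMEM;  /* not -1: the caller learns *why* */
        }
        *out = host;
        return 0;
    }

    int main(void)
    {
        void *host;
        int ret = attach(&host);

        if (ret < 0) {
            fprintf(stderr, "attach failed: %s\n", strerror(-ret));
            return 1;
        }
        free(host);
        return 0;
    }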
@@ -91,7 +76,7 @@ static void fd_detach_hba(struct se_hba *hba)
{
struct fd_host *fd_host = hba->hba_ptr;
- printk(KERN_INFO "CORE_HBA[%d] - Detached FILEIO HBA: %u from Generic"
+ pr_debug("CORE_HBA[%d] - Detached FILEIO HBA: %u from Generic"
" Target Core\n", hba->hba_id, fd_host->fd_host_id);
kfree(fd_host);
@@ -104,14 +89,14 @@ static void *fd_allocate_virtdevice(struct se_hba *hba, const char *name)
struct fd_host *fd_host = (struct fd_host *) hba->hba_ptr;
fd_dev = kzalloc(sizeof(struct fd_dev), GFP_KERNEL);
- if (!(fd_dev)) {
- printk(KERN_ERR "Unable to allocate memory for struct fd_dev\n");
+ if (!fd_dev) {
+ pr_err("Unable to allocate memory for struct fd_dev\n");
return NULL;
}
fd_dev->fd_host = fd_host;
- printk(KERN_INFO "FILEIO: Allocated fd_dev for %p\n", name);
+ pr_debug("FILEIO: Allocated fd_dev for %p\n", name);
return fd_dev;
}
@@ -144,7 +129,7 @@ static struct se_device *fd_create_virtdevice(
set_fs(old_fs);
if (IS_ERR(dev_p)) {
- printk(KERN_ERR "getname(%s) failed: %lu\n",
+ pr_err("getname(%s) failed: %lu\n",
fd_dev->fd_dev_name, IS_ERR(dev_p));
ret = PTR_ERR(dev_p);
goto fail;
@@ -167,12 +152,12 @@ static struct se_device *fd_create_virtdevice(
file = filp_open(dev_p, flags, 0600);
if (IS_ERR(file)) {
- printk(KERN_ERR "filp_open(%s) failed\n", dev_p);
+ pr_err("filp_open(%s) failed\n", dev_p);
ret = PTR_ERR(file);
goto fail;
}
if (!file || !file->f_dentry) {
- printk(KERN_ERR "filp_open(%s) failed\n", dev_p);
+ pr_err("filp_open(%s) failed\n", dev_p);
goto fail;
}
fd_dev->fd_file = file;
@@ -202,14 +187,14 @@ static struct se_device *fd_create_virtdevice(
fd_dev->fd_dev_size = (i_size_read(file->f_mapping->host) -
fd_dev->fd_block_size);
- printk(KERN_INFO "FILEIO: Using size: %llu bytes from struct"
+ pr_debug("FILEIO: Using size: %llu bytes from struct"
" block_device blocks: %llu logical_block_size: %d\n",
fd_dev->fd_dev_size,
div_u64(fd_dev->fd_dev_size, fd_dev->fd_block_size),
fd_dev->fd_block_size);
} else {
if (!(fd_dev->fbd_flags & FBDF_HAS_SIZE)) {
- printk(KERN_ERR "FILEIO: Missing fd_dev_size="
+ pr_err("FILEIO: Missing fd_dev_size="
" parameter, and no backing struct"
" block_device\n");
goto fail;
@@ -226,15 +211,15 @@ static struct se_device *fd_create_virtdevice(
dev_limits.queue_depth = FD_DEVICE_QUEUE_DEPTH;
dev = transport_add_device_to_core_hba(hba, &fileio_template,
- se_dev, dev_flags, (void *)fd_dev,
+ se_dev, dev_flags, fd_dev,
&dev_limits, "FILEIO", FD_VERSION);
- if (!(dev))
+ if (!dev)
goto fail;
fd_dev->fd_dev_id = fd_host->fd_host_dev_id_count++;
fd_dev->fd_queue_depth = dev->queue_depth;
- printk(KERN_INFO "CORE_FILE[%u] - Added TCM FILEIO Device ID: %u at %s,"
+ pr_debug("CORE_FILE[%u] - Added TCM FILEIO Device ID: %u at %s,"
" %llu total bytes\n", fd_host->fd_host_id, fd_dev->fd_dev_id,
fd_dev->fd_dev_name, fd_dev->fd_dev_size);
@@ -272,45 +257,45 @@ static inline struct fd_request *FILE_REQ(struct se_task *task)
static struct se_task *
-fd_alloc_task(struct se_cmd *cmd)
+fd_alloc_task(unsigned char *cdb)
{
struct fd_request *fd_req;
fd_req = kzalloc(sizeof(struct fd_request), GFP_KERNEL);
- if (!(fd_req)) {
- printk(KERN_ERR "Unable to allocate struct fd_request\n");
+ if (!fd_req) {
+ pr_err("Unable to allocate struct fd_request\n");
return NULL;
}
- fd_req->fd_dev = SE_DEV(cmd)->dev_ptr;
-
return &fd_req->fd_task;
}
static int fd_do_readv(struct se_task *task)
{
struct fd_request *req = FILE_REQ(task);
- struct file *fd = req->fd_dev->fd_file;
+ struct fd_dev *dev = req->fd_task.se_dev->dev_ptr;
+ struct file *fd = dev->fd_file;
struct scatterlist *sg = task->task_sg;
struct iovec *iov;
mm_segment_t old_fs;
- loff_t pos = (task->task_lba * DEV_ATTRIB(task->se_dev)->block_size);
+ loff_t pos = (task->task_lba *
+ task->se_dev->se_sub_dev->se_dev_attrib.block_size);
int ret = 0, i;
- iov = kzalloc(sizeof(struct iovec) * task->task_sg_num, GFP_KERNEL);
- if (!(iov)) {
- printk(KERN_ERR "Unable to allocate fd_do_readv iov[]\n");
- return -1;
+ iov = kzalloc(sizeof(struct iovec) * task->task_sg_nents, GFP_KERNEL);
+ if (!iov) {
+ pr_err("Unable to allocate fd_do_readv iov[]\n");
+ return -ENOMEM;
}
- for (i = 0; i < task->task_sg_num; i++) {
+ for (i = 0; i < task->task_sg_nents; i++) {
iov[i].iov_len = sg[i].length;
iov[i].iov_base = sg_virt(&sg[i]);
}
old_fs = get_fs();
set_fs(get_ds());
- ret = vfs_readv(fd, &iov[0], task->task_sg_num, &pos);
+ ret = vfs_readv(fd, &iov[0], task->task_sg_nents, &pos);
set_fs(old_fs);
kfree(iov);
@@ -321,16 +306,16 @@ static int fd_do_readv(struct se_task *task)
*/
if (S_ISBLK(fd->f_dentry->d_inode->i_mode)) {
if (ret < 0 || ret != task->task_size) {
- printk(KERN_ERR "vfs_readv() returned %d,"
+ pr_err("vfs_readv() returned %d,"
" expecting %d for S_ISBLK\n", ret,
(int)task->task_size);
- return -1;
+ return (ret < 0 ? ret : -EINVAL);
}
} else {
if (ret < 0) {
- printk(KERN_ERR "vfs_readv() returned %d for non"
+ pr_err("vfs_readv() returned %d for non"
" S_ISBLK\n", ret);
- return -1;
+ return ret;
}
}
@@ -340,34 +325,36 @@ static int fd_do_readv(struct se_task *task)
static int fd_do_writev(struct se_task *task)
{
struct fd_request *req = FILE_REQ(task);
- struct file *fd = req->fd_dev->fd_file;
+ struct fd_dev *dev = req->fd_task.se_dev->dev_ptr;
+ struct file *fd = dev->fd_file;
struct scatterlist *sg = task->task_sg;
struct iovec *iov;
mm_segment_t old_fs;
- loff_t pos = (task->task_lba * DEV_ATTRIB(task->se_dev)->block_size);
+ loff_t pos = (task->task_lba *
+ task->se_dev->se_sub_dev->se_dev_attrib.block_size);
int ret, i = 0;
- iov = kzalloc(sizeof(struct iovec) * task->task_sg_num, GFP_KERNEL);
- if (!(iov)) {
- printk(KERN_ERR "Unable to allocate fd_do_writev iov[]\n");
- return -1;
+ iov = kzalloc(sizeof(struct iovec) * task->task_sg_nents, GFP_KERNEL);
+ if (!iov) {
+ pr_err("Unable to allocate fd_do_writev iov[]\n");
+ return -ENOMEM;
}
- for (i = 0; i < task->task_sg_num; i++) {
+ for (i = 0; i < task->task_sg_nents; i++) {
iov[i].iov_len = sg[i].length;
iov[i].iov_base = sg_virt(&sg[i]);
}
old_fs = get_fs();
set_fs(get_ds());
- ret = vfs_writev(fd, &iov[0], task->task_sg_num, &pos);
+ ret = vfs_writev(fd, &iov[0], task->task_sg_nents, &pos);
set_fs(old_fs);
kfree(iov);
if (ret < 0 || ret != task->task_size) {
- printk(KERN_ERR "vfs_writev() returned %d\n", ret);
- return -1;
+ pr_err("vfs_writev() returned %d\n", ret);
+ return (ret < 0 ? ret : -EINVAL);
}
return 1;
@@ -375,10 +362,10 @@ static int fd_do_writev(struct se_task *task)
static void fd_emulate_sync_cache(struct se_task *task)
{
- struct se_cmd *cmd = TASK_CMD(task);
+ struct se_cmd *cmd = task->task_se_cmd;
struct se_device *dev = cmd->se_dev;
struct fd_dev *fd_dev = dev->dev_ptr;
- int immed = (cmd->t_task->t_task_cdb[1] & 0x2);
+ int immed = (cmd->t_task_cdb[1] & 0x2);
loff_t start, end;
int ret;
@@ -392,11 +379,11 @@ static void fd_emulate_sync_cache(struct se_task *task)
/*
* Determine if we will be flushing the entire device.
*/
- if (cmd->t_task->t_task_lba == 0 && cmd->data_length == 0) {
+ if (cmd->t_task_lba == 0 && cmd->data_length == 0) {
start = 0;
end = LLONG_MAX;
} else {
- start = cmd->t_task->t_task_lba * DEV_ATTRIB(dev)->block_size;
+ start = cmd->t_task_lba * dev->se_sub_dev->se_dev_attrib.block_size;
if (cmd->data_length)
end = start + cmd->data_length;
else
@@ -405,7 +392,7 @@ static void fd_emulate_sync_cache(struct se_task *task)
ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1);
if (ret != 0)
- printk(KERN_ERR "FILEIO: vfs_fsync_range() failed: %d\n", ret);
+ pr_err("FILEIO: vfs_fsync_range() failed: %d\n", ret);
if (!immed)
transport_complete_sync_cache(cmd, ret == 0);
@@ -446,16 +433,16 @@ static void fd_emulate_write_fua(struct se_cmd *cmd, struct se_task *task)
{
struct se_device *dev = cmd->se_dev;
struct fd_dev *fd_dev = dev->dev_ptr;
- loff_t start = task->task_lba * DEV_ATTRIB(dev)->block_size;
+ loff_t start = task->task_lba * dev->se_sub_dev->se_dev_attrib.block_size;
loff_t end = start + task->task_size;
int ret;
- DEBUG_FD_CACHE("FILEIO: FUA WRITE LBA: %llu, bytes: %u\n",
+ pr_debug("FILEIO: FUA WRITE LBA: %llu, bytes: %u\n",
task->task_lba, task->task_size);
ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1);
if (ret != 0)
- printk(KERN_ERR "FILEIO: vfs_fsync_range() failed: %d\n", ret);
+ pr_err("FILEIO: vfs_fsync_range() failed: %d\n", ret);
}
static int fd_do_task(struct se_task *task)
@@ -474,9 +461,9 @@ static int fd_do_task(struct se_task *task)
ret = fd_do_writev(task);
if (ret > 0 &&
- DEV_ATTRIB(dev)->emulate_write_cache > 0 &&
- DEV_ATTRIB(dev)->emulate_fua_write > 0 &&
- T_TASK(cmd)->t_tasks_fua) {
+ dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0 &&
+ dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
+ cmd->t_tasks_fua) {
/*
* We might need to be a bit smarter here
* and return some sense data to let the initiator
@@ -549,7 +536,7 @@ static ssize_t fd_set_configfs_dev_params(
snprintf(fd_dev->fd_dev_name, FD_MAX_DEV_NAME,
"%s", arg_p);
kfree(arg_p);
- printk(KERN_INFO "FILEIO: Referencing Path: %s\n",
+ pr_debug("FILEIO: Referencing Path: %s\n",
fd_dev->fd_dev_name);
fd_dev->fbd_flags |= FBDF_HAS_PATH;
break;
@@ -562,23 +549,23 @@ static ssize_t fd_set_configfs_dev_params(
ret = strict_strtoull(arg_p, 0, &fd_dev->fd_dev_size);
kfree(arg_p);
if (ret < 0) {
- printk(KERN_ERR "strict_strtoull() failed for"
+ pr_err("strict_strtoull() failed for"
" fd_dev_size=\n");
goto out;
}
- printk(KERN_INFO "FILEIO: Referencing Size: %llu"
+ pr_debug("FILEIO: Referencing Size: %llu"
" bytes\n", fd_dev->fd_dev_size);
fd_dev->fbd_flags |= FBDF_HAS_SIZE;
break;
case Opt_fd_buffered_io:
match_int(args, &arg);
if (arg != 1) {
- printk(KERN_ERR "bogus fd_buffered_io=%d value\n", arg);
+ pr_err("bogus fd_buffered_io=%d value\n", arg);
ret = -EINVAL;
goto out;
}
- printk(KERN_INFO "FILEIO: Using buffered I/O"
+ pr_debug("FILEIO: Using buffered I/O"
" operations for struct fd_dev\n");
fd_dev->fbd_flags |= FDBD_USE_BUFFERED_IO;
@@ -598,8 +585,8 @@ static ssize_t fd_check_configfs_dev_params(struct se_hba *hba, struct se_subsys
struct fd_dev *fd_dev = (struct fd_dev *) se_dev->se_dev_su_ptr;
if (!(fd_dev->fbd_flags & FBDF_HAS_PATH)) {
- printk(KERN_ERR "Missing fd_dev_name=\n");
- return -1;
+ pr_err("Missing fd_dev_name=\n");
+ return -EINVAL;
}
return 0;
@@ -654,7 +641,7 @@ static sector_t fd_get_blocks(struct se_device *dev)
{
struct fd_dev *fd_dev = dev->dev_ptr;
unsigned long long blocks_long = div_u64(fd_dev->fd_dev_size,
- DEV_ATTRIB(dev)->block_size);
+ dev->se_sub_dev->se_dev_attrib.block_size);
return blocks_long;
}
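
fd_do_readv()/fd_do_writev() now size the iovec array from task_sg_nents, point each entry at one scatterlist segment, and hand the whole array to vfs_readv()/vfs_writev(). A userspace analogue with writev(2), using heap buffers in place of scatterlist pages and keeping the same short-write check:

    #include <stdio.h>
    #include <string.h>
    #include <sys/uio.h>
    #include <unistd.h>

    int main(void)
    {
        /* Stand-ins for scatterlist segments. */
        char seg0[] = "FILEIO ";
        char seg1[] = "writev ";
        char seg2[] = "demo\n";
        struct iovec iov[3] = {
            { .iov_base = seg0, .iov_len = strlen(seg0) },
            { .iov_base = seg1, .iov_len = strlen(seg1) },
            { .iov_base = seg2, .iov_len = strlen(seg2) },
        };
        size_t total = iov[0].iov_len + iov[1].iov_len + iov[2].iov_len;
        ssize_t ret = writev(STDOUT_FILENO, iov, 3);

        /* Mirror the short-write check in fd_do_writev(). */
        if (ret < 0 || (size_t)ret != total) {
            fprintf(stderr, "writev() returned %zd\n", ret);
            return 1;
        }
        return 0;
    }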
diff --git a/drivers/target/target_core_file.h b/drivers/target/target_core_file.h
index ef4de2b4bd4..daebd710b89 100644
--- a/drivers/target/target_core_file.h
+++ b/drivers/target/target_core_file.h
@@ -4,8 +4,6 @@
#define FD_VERSION "4.0"
#define FD_MAX_DEV_NAME 256
-/* Maximum queuedepth for the FILEIO HBA */
-#define FD_HBA_QUEUE_DEPTH 256
#define FD_DEVICE_QUEUE_DEPTH 32
#define FD_MAX_DEVICE_QUEUE_DEPTH 128
#define FD_BLOCKSIZE 512
@@ -18,8 +16,6 @@ struct fd_request {
struct se_task fd_task;
/* SCSI CDB from iSCSI Command PDU */
unsigned char fd_scsi_cdb[TCM_MAX_COMMAND_SIZE];
- /* FILEIO device */
- struct fd_dev *fd_dev;
} ____cacheline_aligned;
#define FBDF_HAS_PATH 0x01
diff --git a/drivers/target/target_core_hba.c b/drivers/target/target_core_hba.c
index 0b8f8da8901..0639b975d6f 100644
--- a/drivers/target/target_core_hba.c
+++ b/drivers/target/target_core_hba.c
@@ -1,7 +1,7 @@
/*******************************************************************************
* Filename: target_core_hba.c
*
- * This file copntains the iSCSI HBA Transport related functions.
+ * This file contains the TCM HBA Transport related functions.
*
* Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
* Copyright (c) 2005, 2006, 2007 SBE, Inc.
@@ -45,6 +45,11 @@
static LIST_HEAD(subsystem_list);
static DEFINE_MUTEX(subsystem_mutex);
+static u32 hba_id_counter;
+
+static DEFINE_SPINLOCK(hba_lock);
+static LIST_HEAD(hba_list);
+
int transport_subsystem_register(struct se_subsystem_api *sub_api)
{
struct se_subsystem_api *s;
@@ -53,8 +58,8 @@ int transport_subsystem_register(struct se_subsystem_api *sub_api)
mutex_lock(&subsystem_mutex);
list_for_each_entry(s, &subsystem_list, sub_api_list) {
- if (!(strcmp(s->name, sub_api->name))) {
- printk(KERN_ERR "%p is already registered with"
+ if (!strcmp(s->name, sub_api->name)) {
+ pr_err("%p is already registered with"
" duplicate name %s, unable to process"
" request\n", s, s->name);
mutex_unlock(&subsystem_mutex);
@@ -64,7 +69,7 @@ int transport_subsystem_register(struct se_subsystem_api *sub_api)
list_add_tail(&sub_api->sub_api_list, &subsystem_list);
mutex_unlock(&subsystem_mutex);
- printk(KERN_INFO "TCM: Registered subsystem plugin: %s struct module:"
+ pr_debug("TCM: Registered subsystem plugin: %s struct module:"
" %p\n", sub_api->name, sub_api->owner);
return 0;
}
@@ -104,21 +109,17 @@ core_alloc_hba(const char *plugin_name, u32 plugin_dep_id, u32 hba_flags)
hba = kzalloc(sizeof(*hba), GFP_KERNEL);
if (!hba) {
- printk(KERN_ERR "Unable to allocate struct se_hba\n");
+ pr_err("Unable to allocate struct se_hba\n");
return ERR_PTR(-ENOMEM);
}
INIT_LIST_HEAD(&hba->hba_dev_list);
spin_lock_init(&hba->device_lock);
- spin_lock_init(&hba->hba_queue_lock);
mutex_init(&hba->hba_access_mutex);
hba->hba_index = scsi_get_new_index(SCSI_INST_INDEX);
hba->hba_flags |= hba_flags;
- atomic_set(&hba->max_queue_depth, 0);
- atomic_set(&hba->left_queue_depth, 0);
-
hba->transport = core_get_backend(plugin_name);
if (!hba->transport) {
ret = -EINVAL;
@@ -129,12 +130,12 @@ core_alloc_hba(const char *plugin_name, u32 plugin_dep_id, u32 hba_flags)
if (ret < 0)
goto out_module_put;
- spin_lock(&se_global->hba_lock);
- hba->hba_id = se_global->g_hba_id_counter++;
- list_add_tail(&hba->hba_list, &se_global->g_hba_list);
- spin_unlock(&se_global->hba_lock);
+ spin_lock(&hba_lock);
+ hba->hba_id = hba_id_counter++;
+ list_add_tail(&hba->hba_node, &hba_list);
+ spin_unlock(&hba_lock);
- printk(KERN_INFO "CORE_HBA[%d] - Attached HBA to Generic Target"
+ pr_debug("CORE_HBA[%d] - Attached HBA to Generic Target"
" Core\n", hba->hba_id);
return hba;
@@ -156,11 +157,11 @@ core_delete_hba(struct se_hba *hba)
hba->transport->detach_hba(hba);
- spin_lock(&se_global->hba_lock);
- list_del(&hba->hba_list);
- spin_unlock(&se_global->hba_lock);
+ spin_lock(&hba_lock);
+ list_del(&hba->hba_node);
+ spin_unlock(&hba_lock);
- printk(KERN_INFO "CORE_HBA[%d] - Detached HBA from Generic Target"
+ pr_debug("CORE_HBA[%d] - Detached HBA from Generic Target"
" Core\n", hba->hba_id);
if (hba->transport->owner)
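
The hba.c hunks replace the se_global->g_hba_list/hba_lock/g_hba_id_counter fields with static file-scope state. A userspace analogue of the assign-ID-and-link-under-lock pattern, with a pthread mutex standing in for the spinlock; the struct and function names are illustrative:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* File-scope state, mirroring static hba_lock/hba_list/hba_id_counter. */
    static pthread_mutex_t hba_lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned int hba_id_counter;

    struct hba {
        unsigned int id;
        struct hba *next;
    };
    static struct hba *hba_list;

    static struct hba *alloc_hba(void)
    {
        struct hba *hba = calloc(1, sizeof(*hba));

        if (!hba)
            return NULL;
        pthread_mutex_lock(&hba_lock);
        hba->id = hba_id_counter++;  /* assign ID and link under the lock */
        hba->next = hba_list;
        hba_list = hba;
        pthread_mutex_unlock(&hba_lock);
        return hba;
    }

    int main(void)
    {
        struct hba *a = alloc_hba(), *b = alloc_hba();

        if (!a || !b)
            return 1;
        printf("CORE_HBA[%u] and CORE_HBA[%u] attached\n", a->id, b->id);
        free(b);
        free(a);
        return 0;
    }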
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 86639004af9..7e123410544 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -47,12 +47,6 @@
#include "target_core_iblock.h"
-#if 0
-#define DEBUG_IBLOCK(x...) printk(x)
-#else
-#define DEBUG_IBLOCK(x...)
-#endif
-
static struct se_subsystem_api iblock_template;
static void iblock_bio_done(struct bio *, int);
@@ -66,25 +60,22 @@ static int iblock_attach_hba(struct se_hba *hba, u32 host_id)
struct iblock_hba *ib_host;
ib_host = kzalloc(sizeof(struct iblock_hba), GFP_KERNEL);
- if (!(ib_host)) {
- printk(KERN_ERR "Unable to allocate memory for"
+ if (!ib_host) {
+ pr_err("Unable to allocate memory for"
" struct iblock_hba\n");
return -ENOMEM;
}
ib_host->iblock_host_id = host_id;
- atomic_set(&hba->left_queue_depth, IBLOCK_HBA_QUEUE_DEPTH);
- atomic_set(&hba->max_queue_depth, IBLOCK_HBA_QUEUE_DEPTH);
- hba->hba_ptr = (void *) ib_host;
+ hba->hba_ptr = ib_host;
- printk(KERN_INFO "CORE_HBA[%d] - TCM iBlock HBA Driver %s on"
+ pr_debug("CORE_HBA[%d] - TCM iBlock HBA Driver %s on"
" Generic Target Core Stack %s\n", hba->hba_id,
IBLOCK_VERSION, TARGET_CORE_MOD_VERSION);
- printk(KERN_INFO "CORE_HBA[%d] - Attached iBlock HBA: %u to Generic"
- " Target Core TCQ Depth: %d\n", hba->hba_id,
- ib_host->iblock_host_id, atomic_read(&hba->max_queue_depth));
+ pr_debug("CORE_HBA[%d] - Attached iBlock HBA: %u to Generic\n",
+ hba->hba_id, ib_host->iblock_host_id);
return 0;
}
@@ -93,7 +84,7 @@ static void iblock_detach_hba(struct se_hba *hba)
{
struct iblock_hba *ib_host = hba->hba_ptr;
- printk(KERN_INFO "CORE_HBA[%d] - Detached iBlock HBA: %u from Generic"
+ pr_debug("CORE_HBA[%d] - Detached iBlock HBA: %u from Generic"
" Target Core\n", hba->hba_id, ib_host->iblock_host_id);
kfree(ib_host);
@@ -106,13 +97,13 @@ static void *iblock_allocate_virtdevice(struct se_hba *hba, const char *name)
struct iblock_hba *ib_host = hba->hba_ptr;
ib_dev = kzalloc(sizeof(struct iblock_dev), GFP_KERNEL);
- if (!(ib_dev)) {
- printk(KERN_ERR "Unable to allocate struct iblock_dev\n");
+ if (!ib_dev) {
+ pr_err("Unable to allocate struct iblock_dev\n");
return NULL;
}
ib_dev->ibd_host = ib_host;
- printk(KERN_INFO "IBLOCK: Allocated ib_dev for %s\n", name);
+ pr_debug( "IBLOCK: Allocated ib_dev for %s\n", name);
return ib_dev;
}
@@ -131,8 +122,8 @@ static struct se_device *iblock_create_virtdevice(
u32 dev_flags = 0;
int ret = -EINVAL;
- if (!(ib_dev)) {
- printk(KERN_ERR "Unable to locate struct iblock_dev parameter\n");
+ if (!ib_dev) {
+ pr_err("Unable to locate struct iblock_dev parameter\n");
return ERR_PTR(ret);
}
memset(&dev_limits, 0, sizeof(struct se_dev_limits));
@@ -140,16 +131,16 @@ static struct se_device *iblock_create_virtdevice(
* These settings need to be made tunable..
*/
ib_dev->ibd_bio_set = bioset_create(32, 64);
- if (!(ib_dev->ibd_bio_set)) {
- printk(KERN_ERR "IBLOCK: Unable to create bioset()\n");
+ if (!ib_dev->ibd_bio_set) {
+ pr_err("IBLOCK: Unable to create bioset()\n");
return ERR_PTR(-ENOMEM);
}
- printk(KERN_INFO "IBLOCK: Created bio_set()\n");
+ pr_debug("IBLOCK: Created bio_set()\n");
/*
* iblock_check_configfs_dev_params() ensures that ib_dev->ibd_udev_path
* must already have been set in order for echo 1 > $HBA/$DEV/enable to run.
*/
- printk(KERN_INFO "IBLOCK: Claiming struct block_device: %s\n",
+ pr_debug( "IBLOCK: Claiming struct block_device: %s\n",
ib_dev->ibd_udev_path);
bd = blkdev_get_by_path(ib_dev->ibd_udev_path,
@@ -167,42 +158,41 @@ static struct se_device *iblock_create_virtdevice(
limits->logical_block_size = bdev_logical_block_size(bd);
limits->max_hw_sectors = queue_max_hw_sectors(q);
limits->max_sectors = queue_max_sectors(q);
- dev_limits.hw_queue_depth = IBLOCK_MAX_DEVICE_QUEUE_DEPTH;
- dev_limits.queue_depth = IBLOCK_DEVICE_QUEUE_DEPTH;
+ dev_limits.hw_queue_depth = q->nr_requests;
+ dev_limits.queue_depth = q->nr_requests;
- ib_dev->ibd_major = MAJOR(bd->bd_dev);
- ib_dev->ibd_minor = MINOR(bd->bd_dev);
ib_dev->ibd_bd = bd;
dev = transport_add_device_to_core_hba(hba,
- &iblock_template, se_dev, dev_flags, (void *)ib_dev,
+ &iblock_template, se_dev, dev_flags, ib_dev,
&dev_limits, "IBLOCK", IBLOCK_VERSION);
- if (!(dev))
+ if (!dev)
goto failed;
- ib_dev->ibd_depth = dev->queue_depth;
-
/*
* Check if the underlying struct block_device request_queue supports
* the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM
* in ATA and we need to set TPE=1
*/
if (blk_queue_discard(q)) {
- DEV_ATTRIB(dev)->max_unmap_lba_count =
+ dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count =
q->limits.max_discard_sectors;
/*
* Currently hardcoded to 1 in Linux/SCSI code..
*/
- DEV_ATTRIB(dev)->max_unmap_block_desc_count = 1;
- DEV_ATTRIB(dev)->unmap_granularity =
+ dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count = 1;
+ dev->se_sub_dev->se_dev_attrib.unmap_granularity =
q->limits.discard_granularity;
- DEV_ATTRIB(dev)->unmap_granularity_alignment =
+ dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment =
q->limits.discard_alignment;
- printk(KERN_INFO "IBLOCK: BLOCK Discard support available,"
+ pr_debug("IBLOCK: BLOCK Discard support available,"
" disabled by default\n");
}
+ if (blk_queue_nonrot(q))
+ dev->se_sub_dev->se_dev_attrib.is_nonrot = 1;
+
return dev;
failed:
@@ -211,8 +201,6 @@ failed:
ib_dev->ibd_bio_set = NULL;
}
ib_dev->ibd_bd = NULL;
- ib_dev->ibd_major = 0;
- ib_dev->ibd_minor = 0;
return ERR_PTR(ret);
}
@@ -233,17 +221,16 @@ static inline struct iblock_req *IBLOCK_REQ(struct se_task *task)
}
static struct se_task *
-iblock_alloc_task(struct se_cmd *cmd)
+iblock_alloc_task(unsigned char *cdb)
{
struct iblock_req *ib_req;
ib_req = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
- if (!(ib_req)) {
- printk(KERN_ERR "Unable to allocate memory for struct iblock_req\n");
+ if (!ib_req) {
+ pr_err("Unable to allocate memory for struct iblock_req\n");
return NULL;
}
- ib_req->ib_dev = SE_DEV(cmd)->dev_ptr;
atomic_set(&ib_req->ib_bio_cnt, 0);
return &ib_req->ib_task;
}
@@ -257,12 +244,12 @@ static unsigned long long iblock_emulate_read_cap_with_block_size(
bdev_logical_block_size(bd)) - 1);
u32 block_size = bdev_logical_block_size(bd);
- if (block_size == DEV_ATTRIB(dev)->block_size)
+ if (block_size == dev->se_sub_dev->se_dev_attrib.block_size)
return blocks_long;
switch (block_size) {
case 4096:
- switch (DEV_ATTRIB(dev)->block_size) {
+ switch (dev->se_sub_dev->se_dev_attrib.block_size) {
case 2048:
blocks_long <<= 1;
break;
@@ -276,7 +263,7 @@ static unsigned long long iblock_emulate_read_cap_with_block_size(
}
break;
case 2048:
- switch (DEV_ATTRIB(dev)->block_size) {
+ switch (dev->se_sub_dev->se_dev_attrib.block_size) {
case 4096:
blocks_long >>= 1;
break;
@@ -291,7 +278,7 @@ static unsigned long long iblock_emulate_read_cap_with_block_size(
}
break;
case 1024:
- switch (DEV_ATTRIB(dev)->block_size) {
+ switch (dev->se_sub_dev->se_dev_attrib.block_size) {
case 4096:
blocks_long >>= 2;
break;
@@ -306,7 +293,7 @@ static unsigned long long iblock_emulate_read_cap_with_block_size(
}
break;
case 512:
- switch (DEV_ATTRIB(dev)->block_size) {
+ switch (dev->se_sub_dev->se_dev_attrib.block_size) {
case 4096:
blocks_long >>= 3;
break;
@@ -332,9 +319,9 @@ static unsigned long long iblock_emulate_read_cap_with_block_size(
*/
static void iblock_emulate_sync_cache(struct se_task *task)
{
- struct se_cmd *cmd = TASK_CMD(task);
+ struct se_cmd *cmd = task->task_se_cmd;
struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr;
- int immed = (T_TASK(cmd)->t_task_cdb[1] & 0x2);
+ int immed = (cmd->t_task_cdb[1] & 0x2);
sector_t error_sector;
int ret;
@@ -351,7 +338,7 @@ static void iblock_emulate_sync_cache(struct se_task *task)
*/
ret = blkdev_issue_flush(ib_dev->ibd_bd, GFP_KERNEL, &error_sector);
if (ret != 0) {
- printk(KERN_ERR "IBLOCK: block_issue_flush() failed: %d "
+ pr_err("IBLOCK: block_issue_flush() failed: %d "
" error_sector: %llu\n", ret,
(unsigned long long)error_sector);
}
@@ -401,9 +388,9 @@ static int iblock_do_task(struct se_task *task)
* Force data to disk if we pretend to not have a volatile
* write cache, or the initiator set the Force Unit Access bit.
*/
- if (DEV_ATTRIB(dev)->emulate_write_cache == 0 ||
- (DEV_ATTRIB(dev)->emulate_fua_write > 0 &&
- T_TASK(task->task_se_cmd)->t_tasks_fua))
+ if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache == 0 ||
+ (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
+ task->task_se_cmd->t_tasks_fua))
rw = WRITE_FUA;
else
rw = WRITE;
@@ -415,8 +402,9 @@ static int iblock_do_task(struct se_task *task)
while (bio) {
nbio = bio->bi_next;
bio->bi_next = NULL;
- DEBUG_IBLOCK("Calling submit_bio() task: %p bio: %p"
- " bio->bi_sector: %llu\n", task, bio, bio->bi_sector);
+ pr_debug("Calling submit_bio() task: %p bio: %p"
+ " bio->bi_sector: %llu\n", task, bio,
+ (unsigned long long)bio->bi_sector);
submit_bio(rw, bio);
bio = nbio;
@@ -470,7 +458,7 @@ static ssize_t iblock_set_configfs_dev_params(struct se_hba *hba,
struct iblock_dev *ib_dev = se_dev->se_dev_su_ptr;
char *orig, *ptr, *arg_p, *opts;
substring_t args[MAX_OPT_ARGS];
- int ret = 0, arg, token;
+ int ret = 0, token;
opts = kstrdup(page, GFP_KERNEL);
if (!opts)
@@ -486,7 +474,7 @@ static ssize_t iblock_set_configfs_dev_params(struct se_hba *hba,
switch (token) {
case Opt_udev_path:
if (ib_dev->ibd_bd) {
- printk(KERN_ERR "Unable to set udev_path= while"
+ pr_err("Unable to set udev_path= while"
" ib_dev->ibd_bd exists\n");
ret = -EEXIST;
goto out;
@@ -499,15 +487,11 @@ static ssize_t iblock_set_configfs_dev_params(struct se_hba *hba,
snprintf(ib_dev->ibd_udev_path, SE_UDEV_PATH_LEN,
"%s", arg_p);
kfree(arg_p);
- printk(KERN_INFO "IBLOCK: Referencing UDEV path: %s\n",
+ pr_debug("IBLOCK: Referencing UDEV path: %s\n",
ib_dev->ibd_udev_path);
ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH;
break;
case Opt_force:
- match_int(args, &arg);
- ib_dev->ibd_force = arg;
- printk(KERN_INFO "IBLOCK: Set force=%d\n",
- ib_dev->ibd_force);
break;
default:
break;
@@ -526,8 +510,8 @@ static ssize_t iblock_check_configfs_dev_params(
struct iblock_dev *ibd = se_dev->se_dev_su_ptr;
if (!(ibd->ibd_flags & IBDF_HAS_UDEV_PATH)) {
- printk(KERN_ERR "Missing udev_path= parameters for IBLOCK\n");
- return -1;
+ pr_err("Missing udev_path= parameters for IBLOCK\n");
+ return -EINVAL;
}
return 0;
@@ -555,12 +539,11 @@ static ssize_t iblock_show_configfs_dev_params(
bl += sprintf(b + bl, " ");
if (bd) {
bl += sprintf(b + bl, "Major: %d Minor: %d %s\n",
- ibd->ibd_major, ibd->ibd_minor, (!bd->bd_contains) ?
+ MAJOR(bd->bd_dev), MINOR(bd->bd_dev), (!bd->bd_contains) ?
"" : (bd->bd_holder == (struct iblock_dev *)ibd) ?
"CLAIMED: IBLOCK" : "CLAIMED: OS");
} else {
- bl += sprintf(b + bl, "Major: %d Minor: %d\n",
- ibd->ibd_major, ibd->ibd_minor);
+ bl += sprintf(b + bl, "Major: 0 Minor: 0\n");
}
return bl;
@@ -585,103 +568,103 @@ static struct bio *iblock_get_bio(
struct bio *bio;
bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set);
- if (!(bio)) {
- printk(KERN_ERR "Unable to allocate memory for bio\n");
+ if (!bio) {
+ pr_err("Unable to allocate memory for bio\n");
*ret = PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
return NULL;
}
- DEBUG_IBLOCK("Allocated bio: %p task_sg_num: %u using ibd_bio_set:"
- " %p\n", bio, task->task_sg_num, ib_dev->ibd_bio_set);
- DEBUG_IBLOCK("Allocated bio: %p task_size: %u\n", bio, task->task_size);
+ pr_debug("Allocated bio: %p task_sg_nents: %u using ibd_bio_set:"
+ " %p\n", bio, task->task_sg_nents, ib_dev->ibd_bio_set);
+ pr_debug("Allocated bio: %p task_size: %u\n", bio, task->task_size);
bio->bi_bdev = ib_dev->ibd_bd;
- bio->bi_private = (void *) task;
+ bio->bi_private = task;
bio->bi_destructor = iblock_bio_destructor;
bio->bi_end_io = &iblock_bio_done;
bio->bi_sector = lba;
atomic_inc(&ib_req->ib_bio_cnt);
- DEBUG_IBLOCK("Set bio->bi_sector: %llu\n", bio->bi_sector);
- DEBUG_IBLOCK("Set ib_req->ib_bio_cnt: %d\n",
+ pr_debug("Set bio->bi_sector: %llu\n", (unsigned long long)bio->bi_sector);
+ pr_debug("Set ib_req->ib_bio_cnt: %d\n",
atomic_read(&ib_req->ib_bio_cnt));
return bio;
}
-static int iblock_map_task_SG(struct se_task *task)
+static int iblock_map_data_SG(struct se_task *task)
{
struct se_cmd *cmd = task->task_se_cmd;
- struct se_device *dev = SE_DEV(cmd);
+ struct se_device *dev = cmd->se_dev;
struct iblock_dev *ib_dev = task->se_dev->dev_ptr;
struct iblock_req *ib_req = IBLOCK_REQ(task);
struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
struct scatterlist *sg;
int ret = 0;
- u32 i, sg_num = task->task_sg_num;
+ u32 i, sg_num = task->task_sg_nents;
sector_t block_lba;
/*
* Do starting conversion up from non 512-byte blocksize with
* struct se_task SCSI blocksize into Linux/Block 512 units for BIO.
*/
- if (DEV_ATTRIB(dev)->block_size == 4096)
+ if (dev->se_sub_dev->se_dev_attrib.block_size == 4096)
block_lba = (task->task_lba << 3);
- else if (DEV_ATTRIB(dev)->block_size == 2048)
+ else if (dev->se_sub_dev->se_dev_attrib.block_size == 2048)
block_lba = (task->task_lba << 2);
- else if (DEV_ATTRIB(dev)->block_size == 1024)
+ else if (dev->se_sub_dev->se_dev_attrib.block_size == 1024)
block_lba = (task->task_lba << 1);
- else if (DEV_ATTRIB(dev)->block_size == 512)
+ else if (dev->se_sub_dev->se_dev_attrib.block_size == 512)
block_lba = task->task_lba;
else {
- printk(KERN_ERR "Unsupported SCSI -> BLOCK LBA conversion:"
- " %u\n", DEV_ATTRIB(dev)->block_size);
+ pr_err("Unsupported SCSI -> BLOCK LBA conversion:"
+ " %u\n", dev->se_sub_dev->se_dev_attrib.block_size);
return PYX_TRANSPORT_LU_COMM_FAILURE;
}
bio = iblock_get_bio(task, ib_req, ib_dev, &ret, block_lba, sg_num);
- if (!(bio))
+ if (!bio)
return ret;
ib_req->ib_bio = bio;
hbio = tbio = bio;
/*
* Use fs/bio.c:bio_add_page() to set up the bio_vec maplist
- * from TCM struct se_mem -> task->task_sg -> struct scatterlist memory.
+ * from task->task_sg -> struct scatterlist memory.
*/
- for_each_sg(task->task_sg, sg, task->task_sg_num, i) {
- DEBUG_IBLOCK("task: %p bio: %p Calling bio_add_page(): page:"
+ for_each_sg(task->task_sg, sg, task->task_sg_nents, i) {
+ pr_debug("task: %p bio: %p Calling bio_add_page(): page:"
" %p len: %u offset: %u\n", task, bio, sg_page(sg),
sg->length, sg->offset);
again:
ret = bio_add_page(bio, sg_page(sg), sg->length, sg->offset);
if (ret != sg->length) {
- DEBUG_IBLOCK("*** Set bio->bi_sector: %llu\n",
- bio->bi_sector);
- DEBUG_IBLOCK("** task->task_size: %u\n",
+ pr_debug("*** Set bio->bi_sector: %llu\n",
+ (unsigned long long)bio->bi_sector);
+ pr_debug("** task->task_size: %u\n",
task->task_size);
- DEBUG_IBLOCK("*** bio->bi_max_vecs: %u\n",
+ pr_debug("*** bio->bi_max_vecs: %u\n",
bio->bi_max_vecs);
- DEBUG_IBLOCK("*** bio->bi_vcnt: %u\n",
+ pr_debug("*** bio->bi_vcnt: %u\n",
bio->bi_vcnt);
bio = iblock_get_bio(task, ib_req, ib_dev, &ret,
block_lba, sg_num);
- if (!(bio))
+ if (!bio)
goto fail;
tbio = tbio->bi_next = bio;
- DEBUG_IBLOCK("-----------------> Added +1 bio: %p to"
+ pr_debug("-----------------> Added +1 bio: %p to"
" list, Going to again\n", bio);
goto again;
}
/* Always in 512 byte units for Linux/Block */
block_lba += sg->length >> IBLOCK_LBA_SHIFT;
sg_num--;
- DEBUG_IBLOCK("task: %p bio-add_page() passed!, decremented"
+ pr_debug("task: %p bio-add_page() passed!, decremented"
" sg_num to %u\n", task, sg_num);
- DEBUG_IBLOCK("task: %p bio_add_page() passed!, increased lba"
- " to %llu\n", task, block_lba);
- DEBUG_IBLOCK("task: %p bio_add_page() passed!, bio->bi_vcnt:"
+ pr_debug("task: %p bio_add_page() passed!, increased lba"
+ " to %llu\n", task, (unsigned long long)block_lba);
+ pr_debug("task: %p bio_add_page() passed!, bio->bi_vcnt:"
" %u\n", task, bio->bi_vcnt);
}
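
When bio_add_page() cannot accept the whole segment, the loop above allocates a fresh bio via iblock_get_bio(), links it after the current tail, and retries through the again: label. A userspace analogue that packs variable-length segments into fixed-capacity chained buffers; all names and sizes are illustrative:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define BUF_CAP 8  /* stand-in for a bio's capacity */

    struct buf {
        char data[BUF_CAP];
        size_t used;
        struct buf *next;
    };

    static struct buf *new_buf(void)
    {
        return calloc(1, sizeof(struct buf));
    }

    /* Mirrors bio_add_page(): returns bytes accepted (all or nothing here). */
    static size_t buf_add(struct buf *b, const char *src, size_t len)
    {
        if (b->used + len > BUF_CAP)
            return 0;
        memcpy(b->data + b->used, src, len);
        b->used += len;
        return len;
    }

    int main(void)
    {
        const char *segs[] = { "abcd", "efgh", "ijkl" };  /* "scatterlist" */
        struct buf *head = new_buf(), *tail = head;
        int i;

        if (!head)
            return 1;
        for (i = 0; i < 3; i++) {
            size_t len = strlen(segs[i]);
    again:
            if (buf_add(tail, segs[i], len) != len) {
                /* Chain a fresh buffer and retry, as the hunk does. */
                tail = tail->next = new_buf();
                if (!tail)
                    return 1;
                goto again;
            }
        }
        for (tail = head; tail; tail = tail->next)
            printf("buf: %.*s\n", (int)tail->used, tail->data);
        return 0;
    }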
@@ -727,11 +710,11 @@ static void iblock_bio_done(struct bio *bio, int err)
/*
* Set -EIO if !BIO_UPTODATE and the passed err is still 0
*/
- if (!(test_bit(BIO_UPTODATE, &bio->bi_flags)) && !(err))
+ if (!test_bit(BIO_UPTODATE, &bio->bi_flags) && !err)
err = -EIO;
if (err != 0) {
- printk(KERN_ERR "test_bit(BIO_UPTODATE) failed for bio: %p,"
+ pr_err("test_bit(BIO_UPTODATE) failed for bio: %p,"
" err: %d\n", bio, err);
/*
* Bump the ib_bio_err_cnt and release bio.
@@ -742,15 +725,15 @@ static void iblock_bio_done(struct bio *bio, int err)
/*
* Wait to complete the task until the last bio has completed.
*/
- if (!(atomic_dec_and_test(&ibr->ib_bio_cnt)))
+ if (!atomic_dec_and_test(&ibr->ib_bio_cnt))
return;
ibr->ib_bio = NULL;
transport_complete_task(task, 0);
return;
}
- DEBUG_IBLOCK("done[%p] bio: %p task_lba: %llu bio_lba: %llu err=%d\n",
- task, bio, task->task_lba, bio->bi_sector, err);
+ pr_debug("done[%p] bio: %p task_lba: %llu bio_lba: %llu err=%d\n",
+ task, bio, task->task_lba, (unsigned long long)bio->bi_sector, err);
/*
* bio_put() will call iblock_bio_destructor() to release the bio back
* to ibr->ib_bio_set.
@@ -759,7 +742,7 @@ static void iblock_bio_done(struct bio *bio, int err)
/*
* Wait to complete the task until the last bio has completed.
*/
- if (!(atomic_dec_and_test(&ibr->ib_bio_cnt)))
+ if (!atomic_dec_and_test(&ibr->ib_bio_cnt))
return;
/*
* Return GOOD status for task if zero ib_bio_err_cnt exists.
@@ -772,7 +755,7 @@ static struct se_subsystem_api iblock_template = {
.name = "iblock",
.owner = THIS_MODULE,
.transport_type = TRANSPORT_PLUGIN_VHBA_PDEV,
- .map_task_SG = iblock_map_task_SG,
+ .map_data_SG = iblock_map_data_SG,
.attach_hba = iblock_attach_hba,
.detach_hba = iblock_detach_hba,
.allocate_virtdevice = iblock_allocate_virtdevice,
diff --git a/drivers/target/target_core_iblock.h b/drivers/target/target_core_iblock.h
index 64c1f4d69f7..a121cd1b657 100644
--- a/drivers/target/target_core_iblock.h
+++ b/drivers/target/target_core_iblock.h
@@ -3,9 +3,6 @@
#define IBLOCK_VERSION "4.0"
-#define IBLOCK_HBA_QUEUE_DEPTH 512
-#define IBLOCK_DEVICE_QUEUE_DEPTH 32
-#define IBLOCK_MAX_DEVICE_QUEUE_DEPTH 128
#define IBLOCK_MAX_CDBS 16
#define IBLOCK_LBA_SHIFT 9
@@ -15,18 +12,12 @@ struct iblock_req {
atomic_t ib_bio_cnt;
atomic_t ib_bio_err_cnt;
struct bio *ib_bio;
- struct iblock_dev *ib_dev;
} ____cacheline_aligned;
#define IBDF_HAS_UDEV_PATH 0x01
-#define IBDF_HAS_FORCE 0x02
struct iblock_dev {
unsigned char ibd_udev_path[SE_UDEV_PATH_LEN];
- int ibd_force;
- int ibd_major;
- int ibd_minor;
- u32 ibd_depth;
u32 ibd_flags;
struct bio_set *ibd_bio_set;
struct block_device *ibd_bd;
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index a79f518ca6e..1c1b849cd4f 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -62,7 +62,7 @@ int core_pr_dump_initiator_port(
char *buf,
u32 size)
{
- if (!(pr_reg->isid_present_at_reg))
+ if (!pr_reg->isid_present_at_reg)
return 0;
snprintf(buf, size, ",i,0x%s", &pr_reg->pr_reg_isid[0]);
@@ -95,7 +95,7 @@ static int core_scsi2_reservation_check(struct se_cmd *cmd, u32 *pr_reg_type)
struct se_session *sess = cmd->se_sess;
int ret;
- if (!(sess))
+ if (!sess)
return 0;
spin_lock(&dev->dev_reservation_lock);
@@ -105,13 +105,13 @@ static int core_scsi2_reservation_check(struct se_cmd *cmd, u32 *pr_reg_type)
}
if (dev->dev_reserved_node_acl != sess->se_node_acl) {
spin_unlock(&dev->dev_reservation_lock);
- return -1;
+ return -EINVAL;
}
if (!(dev->dev_flags & DF_SPC2_RESERVATIONS_WITH_ISID)) {
spin_unlock(&dev->dev_reservation_lock);
return 0;
}
- ret = (dev->dev_res_bin_isid == sess->sess_bin_isid) ? 0 : -1;
+ ret = (dev->dev_res_bin_isid == sess->sess_bin_isid) ? 0 : -EINVAL;
spin_unlock(&dev->dev_reservation_lock);
return ret;
@@ -123,7 +123,7 @@ static int core_scsi2_reservation_release(struct se_cmd *cmd)
struct se_session *sess = cmd->se_sess;
struct se_portal_group *tpg = sess->se_tpg;
- if (!(sess) || !(tpg))
+ if (!sess || !tpg)
return 0;
spin_lock(&dev->dev_reservation_lock);
@@ -142,9 +142,9 @@ static int core_scsi2_reservation_release(struct se_cmd *cmd)
dev->dev_res_bin_isid = 0;
dev->dev_flags &= ~DF_SPC2_RESERVATIONS_WITH_ISID;
}
- printk(KERN_INFO "SCSI-2 Released reservation for %s LUN: %u ->"
- " MAPPED LUN: %u for %s\n", TPG_TFO(tpg)->get_fabric_name(),
- SE_LUN(cmd)->unpacked_lun, cmd->se_deve->mapped_lun,
+ pr_debug("SCSI-2 Released reservation for %s LUN: %u ->"
+ " MAPPED LUN: %u for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
+ cmd->se_lun->unpacked_lun, cmd->se_deve->mapped_lun,
sess->se_node_acl->initiatorname);
spin_unlock(&dev->dev_reservation_lock);
@@ -157,9 +157,9 @@ static int core_scsi2_reservation_reserve(struct se_cmd *cmd)
struct se_session *sess = cmd->se_sess;
struct se_portal_group *tpg = sess->se_tpg;
- if ((T_TASK(cmd)->t_task_cdb[1] & 0x01) &&
- (T_TASK(cmd)->t_task_cdb[1] & 0x02)) {
- printk(KERN_ERR "LongIO and Obselete Bits set, returning"
+ if ((cmd->t_task_cdb[1] & 0x01) &&
+ (cmd->t_task_cdb[1] & 0x02)) {
+ pr_err("LongIO and Obselete Bits set, returning"
" ILLEGAL_REQUEST\n");
return PYX_TRANSPORT_ILLEGAL_REQUEST;
}
@@ -167,19 +167,19 @@ static int core_scsi2_reservation_reserve(struct se_cmd *cmd)
* This is currently the case for target_core_mod passthrough struct se_cmd
* ops
*/
- if (!(sess) || !(tpg))
+ if (!sess || !tpg)
return 0;
spin_lock(&dev->dev_reservation_lock);
if (dev->dev_reserved_node_acl &&
(dev->dev_reserved_node_acl != sess->se_node_acl)) {
- printk(KERN_ERR "SCSI-2 RESERVATION CONFLIFT for %s fabric\n",
- TPG_TFO(tpg)->get_fabric_name());
- printk(KERN_ERR "Original reserver LUN: %u %s\n",
- SE_LUN(cmd)->unpacked_lun,
+ pr_err("SCSI-2 RESERVATION CONFLIFT for %s fabric\n",
+ tpg->se_tpg_tfo->get_fabric_name());
+ pr_err("Original reserver LUN: %u %s\n",
+ cmd->se_lun->unpacked_lun,
dev->dev_reserved_node_acl->initiatorname);
- printk(KERN_ERR "Current attempt - LUN: %u -> MAPPED LUN: %u"
- " from %s \n", SE_LUN(cmd)->unpacked_lun,
+ pr_err("Current attempt - LUN: %u -> MAPPED LUN: %u"
+ " from %s \n", cmd->se_lun->unpacked_lun,
cmd->se_deve->mapped_lun,
sess->se_node_acl->initiatorname);
spin_unlock(&dev->dev_reservation_lock);
@@ -192,9 +192,9 @@ static int core_scsi2_reservation_reserve(struct se_cmd *cmd)
dev->dev_res_bin_isid = sess->sess_bin_isid;
dev->dev_flags |= DF_SPC2_RESERVATIONS_WITH_ISID;
}
- printk(KERN_INFO "SCSI-2 Reserved %s LUN: %u -> MAPPED LUN: %u"
- " for %s\n", TPG_TFO(tpg)->get_fabric_name(),
- SE_LUN(cmd)->unpacked_lun, cmd->se_deve->mapped_lun,
+ pr_debug("SCSI-2 Reserved %s LUN: %u -> MAPPED LUN: %u"
+ " for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
+ cmd->se_lun->unpacked_lun, cmd->se_deve->mapped_lun,
sess->se_node_acl->initiatorname);
spin_unlock(&dev->dev_reservation_lock);
@@ -215,15 +215,15 @@ int core_scsi2_emulate_crh(struct se_cmd *cmd)
struct se_session *se_sess = cmd->se_sess;
struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev;
struct t10_pr_registration *pr_reg;
- struct t10_reservation_template *pr_tmpl = &su_dev->t10_reservation;
- unsigned char *cdb = &T_TASK(cmd)->t_task_cdb[0];
- int crh = (T10_RES(su_dev)->res_type == SPC3_PERSISTENT_RESERVATIONS);
+ struct t10_reservation *pr_tmpl = &su_dev->t10_pr;
+ unsigned char *cdb = &cmd->t_task_cdb[0];
+ int crh = (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS);
int conflict = 0;
- if (!(se_sess))
+ if (!se_sess)
return 0;
- if (!(crh))
+ if (!crh)
goto after_crh;
pr_reg = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl,
@@ -280,7 +280,7 @@ int core_scsi2_emulate_crh(struct se_cmd *cmd)
}
if (conflict) {
- printk(KERN_ERR "Received legacy SPC-2 RESERVE/RELEASE"
+ pr_err("Received legacy SPC-2 RESERVE/RELEASE"
" while active SPC-3 registrations exist,"
" returning RESERVATION_CONFLICT\n");
return PYX_TRANSPORT_RESERVATION_CONFLICT;
@@ -307,7 +307,7 @@ static int core_scsi3_pr_seq_non_holder(
u32 pr_reg_type)
{
struct se_dev_entry *se_deve;
- struct se_session *se_sess = SE_SESS(cmd);
+ struct se_session *se_sess = cmd->se_sess;
int other_cdb = 0, ignore_reg;
int registered_nexus = 0, ret = 1; /* Conflict by default */
int all_reg = 0, reg_only = 0; /* ALL_REG, REG_ONLY */
@@ -362,7 +362,7 @@ static int core_scsi3_pr_seq_non_holder(
registered_nexus = 1;
break;
default:
- return -1;
+ return -EINVAL;
}
/*
* Referenced from spc4r17 table 45 for *NON* PR holder access
@@ -412,9 +412,9 @@ static int core_scsi3_pr_seq_non_holder(
ret = (registered_nexus) ? 0 : 1;
break;
default:
- printk(KERN_ERR "Unknown PERSISTENT_RESERVE_OUT service"
+ pr_err("Unknown PERSISTENT_RESERVE_OUT service"
" action: 0x%02x\n", cdb[1] & 0x1f);
- return -1;
+ return -EINVAL;
}
break;
case RELEASE:
@@ -459,9 +459,9 @@ static int core_scsi3_pr_seq_non_holder(
ret = 0; /* Allowed */
break;
default:
- printk(KERN_ERR "Unknown MI Service Action: 0x%02x\n",
+ pr_err("Unknown MI Service Action: 0x%02x\n",
(cdb[1] & 0x1f));
- return -1;
+ return -EINVAL;
}
break;
case ACCESS_CONTROL_IN:
@@ -481,9 +481,9 @@ static int core_scsi3_pr_seq_non_holder(
* Case where the CDB is explicitly allowed in the above switch
* statement.
*/
- if (!(ret) && !(other_cdb)) {
+ if (!ret && !other_cdb) {
#if 0
- printk(KERN_INFO "Allowing explict CDB: 0x%02x for %s"
+ pr_debug("Allowing explict CDB: 0x%02x for %s"
" reservation holder\n", cdb[0],
core_scsi3_pr_dump_type(pr_reg_type));
#endif
@@ -498,7 +498,7 @@ static int core_scsi3_pr_seq_non_holder(
/*
* Conflict for write exclusive
*/
- printk(KERN_INFO "%s Conflict for unregistered nexus"
+ pr_debug("%s Conflict for unregistered nexus"
" %s CDB: 0x%02x to %s reservation\n",
transport_dump_cmd_direction(cmd),
se_sess->se_node_acl->initiatorname, cdb[0],
@@ -515,8 +515,8 @@ static int core_scsi3_pr_seq_non_holder(
* nexuses to issue CDBs.
*/
#if 0
- if (!(registered_nexus)) {
- printk(KERN_INFO "Allowing implict CDB: 0x%02x"
+ if (!registered_nexus) {
+ pr_debug("Allowing implict CDB: 0x%02x"
" for %s reservation on unregistered"
" nexus\n", cdb[0],
core_scsi3_pr_dump_type(pr_reg_type));
@@ -531,14 +531,14 @@ static int core_scsi3_pr_seq_non_holder(
* allow commands from registered nexuses.
*/
#if 0
- printk(KERN_INFO "Allowing implict CDB: 0x%02x for %s"
+ pr_debug("Allowing implict CDB: 0x%02x for %s"
" reservation\n", cdb[0],
core_scsi3_pr_dump_type(pr_reg_type));
#endif
return 0;
}
}
- printk(KERN_INFO "%s Conflict for %sregistered nexus %s CDB: 0x%2x"
+ pr_debug("%s Conflict for %sregistered nexus %s CDB: 0x%2x"
" for %s reservation\n", transport_dump_cmd_direction(cmd),
(registered_nexus) ? "" : "un",
se_sess->se_node_acl->initiatorname, cdb[0],
@@ -549,7 +549,7 @@ static int core_scsi3_pr_seq_non_holder(
static u32 core_scsi3_pr_generation(struct se_device *dev)
{
- struct se_subsystem_dev *su_dev = SU_DEV(dev);
+ struct se_subsystem_dev *su_dev = dev->se_sub_dev;
u32 prg;
/*
* PRGeneration field shall contain the value of a 32-bit wrapping
@@ -561,7 +561,7 @@ static u32 core_scsi3_pr_generation(struct se_device *dev)
* See spc4r17 section 6.3.12 READ_KEYS service action
*/
spin_lock(&dev->dev_reservation_lock);
- prg = T10_RES(su_dev)->pr_generation++;
+ prg = su_dev->t10_pr.pr_generation++;
spin_unlock(&dev->dev_reservation_lock);
return prg;
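
core_scsi3_pr_generation() post-increments a 32-bit wrapping counter under dev_reservation_lock, per the spc4r17 PRGeneration rules described in the comment above. A userspace sketch with a mutex standing in for the spinlock:

    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>

    static pthread_mutex_t reservation_lock = PTHREAD_MUTEX_INITIALIZER;
    static uint32_t pr_generation;

    /* Post-increment under the lock; uint32_t wraps naturally at 2^32. */
    static uint32_t next_pr_generation(void)
    {
        uint32_t prg;

        pthread_mutex_lock(&reservation_lock);
        prg = pr_generation++;
        pthread_mutex_unlock(&reservation_lock);
        return prg;
    }

    int main(void)
    {
        printf("PRgeneration: 0x%08x\n", next_pr_generation());
        printf("PRgeneration: 0x%08x\n", next_pr_generation());
        return 0;
    }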
@@ -575,7 +575,7 @@ static int core_scsi3_pr_reservation_check(
struct se_session *sess = cmd->se_sess;
int ret;
- if (!(sess))
+ if (!sess)
return 0;
/*
* A legacy SPC-2 reservation is being held.
@@ -584,7 +584,7 @@ static int core_scsi3_pr_reservation_check(
return core_scsi2_reservation_check(cmd, pr_reg_type);
spin_lock(&dev->dev_reservation_lock);
- if (!(dev->dev_pr_res_holder)) {
+ if (!dev->dev_pr_res_holder) {
spin_unlock(&dev->dev_reservation_lock);
return 0;
}
@@ -592,14 +592,14 @@ static int core_scsi3_pr_reservation_check(
cmd->pr_res_key = dev->dev_pr_res_holder->pr_res_key;
if (dev->dev_pr_res_holder->pr_reg_nacl != sess->se_node_acl) {
spin_unlock(&dev->dev_reservation_lock);
- return -1;
+ return -EINVAL;
}
- if (!(dev->dev_pr_res_holder->isid_present_at_reg)) {
+ if (!dev->dev_pr_res_holder->isid_present_at_reg) {
spin_unlock(&dev->dev_reservation_lock);
return 0;
}
ret = (dev->dev_pr_res_holder->pr_reg_bin_isid ==
- sess->sess_bin_isid) ? 0 : -1;
+ sess->sess_bin_isid) ? 0 : -EINVAL;
/*
* Use bit in *pr_reg_type to notify ISID mismatch in
* core_scsi3_pr_seq_non_holder().
@@ -620,19 +620,19 @@ static struct t10_pr_registration *__core_scsi3_do_alloc_registration(
int all_tg_pt,
int aptpl)
{
- struct se_subsystem_dev *su_dev = SU_DEV(dev);
+ struct se_subsystem_dev *su_dev = dev->se_sub_dev;
struct t10_pr_registration *pr_reg;
pr_reg = kmem_cache_zalloc(t10_pr_reg_cache, GFP_ATOMIC);
- if (!(pr_reg)) {
- printk(KERN_ERR "Unable to allocate struct t10_pr_registration\n");
+ if (!pr_reg) {
+ pr_err("Unable to allocate struct t10_pr_registration\n");
return NULL;
}
- pr_reg->pr_aptpl_buf = kzalloc(T10_RES(su_dev)->pr_aptpl_buf_len,
+ pr_reg->pr_aptpl_buf = kzalloc(su_dev->t10_pr.pr_aptpl_buf_len,
GFP_ATOMIC);
- if (!(pr_reg->pr_aptpl_buf)) {
- printk(KERN_ERR "Unable to allocate pr_reg->pr_aptpl_buf\n");
+ if (!pr_reg->pr_aptpl_buf) {
+ pr_err("Unable to allocate pr_reg->pr_aptpl_buf\n");
kmem_cache_free(t10_pr_reg_cache, pr_reg);
return NULL;
}
@@ -692,12 +692,12 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
*/
pr_reg = __core_scsi3_do_alloc_registration(dev, nacl, deve, isid,
sa_res_key, all_tg_pt, aptpl);
- if (!(pr_reg))
+ if (!pr_reg)
return NULL;
/*
* Return pointer to pr_reg for ALL_TG_PT=0
*/
- if (!(all_tg_pt))
+ if (!all_tg_pt)
return pr_reg;
/*
* Create list of matching SCSI Initiator Port registrations
@@ -717,7 +717,7 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
* that have not been made explicit via a ConfigFS
* MappedLUN group for the SCSI Initiator Node ACL.
*/
- if (!(deve_tmp->se_lun_acl))
+ if (!deve_tmp->se_lun_acl)
continue;
nacl_tmp = deve_tmp->se_lun_acl->se_lun_nacl;
@@ -751,7 +751,7 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
*/
ret = core_scsi3_lunacl_depend_item(deve_tmp);
if (ret < 0) {
- printk(KERN_ERR "core_scsi3_lunacl_depend"
+ pr_err("core_scsi3_lunacl_depend"
"_item() failed\n");
atomic_dec(&port->sep_tg_pt_ref_cnt);
smp_mb__after_atomic_dec();
@@ -769,7 +769,7 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
pr_reg_atp = __core_scsi3_do_alloc_registration(dev,
nacl_tmp, deve_tmp, NULL,
sa_res_key, all_tg_pt, aptpl);
- if (!(pr_reg_atp)) {
+ if (!pr_reg_atp) {
atomic_dec(&port->sep_tg_pt_ref_cnt);
smp_mb__after_atomic_dec();
atomic_dec(&deve_tmp->pr_ref_count);
@@ -803,7 +803,7 @@ out:
}
int core_scsi3_alloc_aptpl_registration(
- struct t10_reservation_template *pr_tmpl,
+ struct t10_reservation *pr_tmpl,
u64 sa_res_key,
unsigned char *i_port,
unsigned char *isid,
@@ -817,15 +817,15 @@ int core_scsi3_alloc_aptpl_registration(
{
struct t10_pr_registration *pr_reg;
- if (!(i_port) || !(t_port) || !(sa_res_key)) {
- printk(KERN_ERR "Illegal parameters for APTPL registration\n");
- return -1;
+ if (!i_port || !t_port || !sa_res_key) {
+ pr_err("Illegal parameters for APTPL registration\n");
+ return -EINVAL;
}
pr_reg = kmem_cache_zalloc(t10_pr_reg_cache, GFP_KERNEL);
- if (!(pr_reg)) {
- printk(KERN_ERR "Unable to allocate struct t10_pr_registration\n");
- return -1;
+ if (!pr_reg) {
+ pr_err("Unable to allocate struct t10_pr_registration\n");
+ return -ENOMEM;
}
pr_reg->pr_aptpl_buf = kzalloc(pr_tmpl->pr_aptpl_buf_len, GFP_KERNEL);
@@ -869,7 +869,7 @@ int core_scsi3_alloc_aptpl_registration(
pr_reg->pr_res_holder = res_holder;
list_add_tail(&pr_reg->pr_reg_aptpl_list, &pr_tmpl->aptpl_reg_list);
- printk(KERN_INFO "SPC-3 PR APTPL Successfully added registration%s from"
+ pr_debug("SPC-3 PR APTPL Successfully added registration%s from"
" metadata\n", (res_holder) ? "+reservation" : "");
return 0;
}
@@ -891,13 +891,13 @@ static void core_scsi3_aptpl_reserve(
dev->dev_pr_res_holder = pr_reg;
spin_unlock(&dev->dev_reservation_lock);
- printk(KERN_INFO "SPC-3 PR [%s] Service Action: APTPL RESERVE created"
+ pr_debug("SPC-3 PR [%s] Service Action: APTPL RESERVE created"
" new reservation holder TYPE: %s ALL_TG_PT: %d\n",
- TPG_TFO(tpg)->get_fabric_name(),
+ tpg->se_tpg_tfo->get_fabric_name(),
core_scsi3_pr_dump_type(pr_reg->pr_res_type),
(pr_reg->pr_reg_all_tg_pt) ? 1 : 0);
- printk(KERN_INFO "SPC-3 PR [%s] RESERVE Node: %s%s\n",
- TPG_TFO(tpg)->get_fabric_name(), node_acl->initiatorname,
+ pr_debug("SPC-3 PR [%s] RESERVE Node: %s%s\n",
+ tpg->se_tpg_tfo->get_fabric_name(), node_acl->initiatorname,
(prf_isid) ? &i_buf[0] : "");
}
@@ -913,7 +913,7 @@ static int __core_scsi3_check_aptpl_registration(
struct se_dev_entry *deve)
{
struct t10_pr_registration *pr_reg, *pr_reg_tmp;
- struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
+ struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
unsigned char i_port[PR_APTPL_MAX_IPORT_LEN];
unsigned char t_port[PR_APTPL_MAX_TPORT_LEN];
u16 tpgt;
@@ -925,8 +925,8 @@ static int __core_scsi3_check_aptpl_registration(
*/
snprintf(i_port, PR_APTPL_MAX_IPORT_LEN, "%s", nacl->initiatorname);
snprintf(t_port, PR_APTPL_MAX_TPORT_LEN, "%s",
- TPG_TFO(tpg)->tpg_get_wwn(tpg));
- tpgt = TPG_TFO(tpg)->tpg_get_tag(tpg);
+ tpg->se_tpg_tfo->tpg_get_wwn(tpg));
+ tpgt = tpg->se_tpg_tfo->tpg_get_tag(tpg);
/*
* Look for the matching registrations+reservation from those
* created from APTPL metadata. Note that multiple registrations
@@ -936,7 +936,7 @@ static int __core_scsi3_check_aptpl_registration(
spin_lock(&pr_tmpl->aptpl_reg_lock);
list_for_each_entry_safe(pr_reg, pr_reg_tmp, &pr_tmpl->aptpl_reg_list,
pr_reg_aptpl_list) {
- if (!(strcmp(pr_reg->pr_iport, i_port)) &&
+ if (!strcmp(pr_reg->pr_iport, i_port) &&
(pr_reg->pr_res_mapped_lun == deve->mapped_lun) &&
!(strcmp(pr_reg->pr_tport, t_port)) &&
(pr_reg->pr_reg_tpgt == tpgt) &&
@@ -980,11 +980,11 @@ int core_scsi3_check_aptpl_registration(
struct se_lun *lun,
struct se_lun_acl *lun_acl)
{
- struct se_subsystem_dev *su_dev = SU_DEV(dev);
+ struct se_subsystem_dev *su_dev = dev->se_sub_dev;
struct se_node_acl *nacl = lun_acl->se_lun_nacl;
struct se_dev_entry *deve = &nacl->device_list[lun_acl->mapped_lun];
- if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
+ if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
return 0;
return __core_scsi3_check_aptpl_registration(dev, tpg, lun,
@@ -1006,19 +1006,19 @@ static void __core_scsi3_dump_registration(
prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
PR_REG_ISID_ID_LEN);
- printk(KERN_INFO "SPC-3 PR [%s] Service Action: REGISTER%s Initiator"
+ pr_debug("SPC-3 PR [%s] Service Action: REGISTER%s Initiator"
" Node: %s%s\n", tfo->get_fabric_name(), (register_type == 2) ?
"_AND_MOVE" : (register_type == 1) ?
"_AND_IGNORE_EXISTING_KEY" : "", nacl->initiatorname,
(prf_isid) ? i_buf : "");
- printk(KERN_INFO "SPC-3 PR [%s] registration on Target Port: %s,0x%04x\n",
+ pr_debug("SPC-3 PR [%s] registration on Target Port: %s,0x%04x\n",
tfo->get_fabric_name(), tfo->tpg_get_wwn(se_tpg),
tfo->tpg_get_tag(se_tpg));
- printk(KERN_INFO "SPC-3 PR [%s] for %s TCM Subsystem %s Object Target"
+ pr_debug("SPC-3 PR [%s] for %s TCM Subsystem %s Object Target"
" Port(s)\n", tfo->get_fabric_name(),
(pr_reg->pr_reg_all_tg_pt) ? "ALL" : "SINGLE",
- TRANSPORT(dev)->name);
- printk(KERN_INFO "SPC-3 PR [%s] SA Res Key: 0x%016Lx PRgeneration:"
+ dev->transport->name);
+ pr_debug("SPC-3 PR [%s] SA Res Key: 0x%016Lx PRgeneration:"
" 0x%08x APTPL: %d\n", tfo->get_fabric_name(),
pr_reg->pr_res_key, pr_reg->pr_res_generation,
pr_reg->pr_reg_aptpl);
@@ -1035,10 +1035,10 @@ static void __core_scsi3_add_registration(
int register_type,
int register_move)
{
- struct se_subsystem_dev *su_dev = SU_DEV(dev);
+ struct se_subsystem_dev *su_dev = dev->se_sub_dev;
struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo;
struct t10_pr_registration *pr_reg_tmp, *pr_reg_tmp_safe;
- struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
+ struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
/*
* Increment PRgeneration counter for struct se_device upon a successful
@@ -1050,7 +1050,7 @@ static void __core_scsi3_add_registration(
* for the REGISTER.
*/
pr_reg->pr_res_generation = (register_move) ?
- T10_RES(su_dev)->pr_generation++ :
+ su_dev->t10_pr.pr_generation++ :
core_scsi3_pr_generation(dev);
spin_lock(&pr_tmpl->registration_lock);
@@ -1062,7 +1062,7 @@ static void __core_scsi3_add_registration(
/*
* Skip extra processing for ALL_TG_PT=0 or REGISTER_AND_MOVE.
*/
- if (!(pr_reg->pr_reg_all_tg_pt) || (register_move))
+ if (!pr_reg->pr_reg_all_tg_pt || register_move)
return;
/*
* Walk pr_reg->pr_reg_atp_list and add registrations for ALL_TG_PT=1
@@ -1106,8 +1106,8 @@ static int core_scsi3_alloc_registration(
pr_reg = __core_scsi3_alloc_registration(dev, nacl, deve, isid,
sa_res_key, all_tg_pt, aptpl);
- if (!(pr_reg))
- return -1;
+ if (!pr_reg)
+ return -EPERM;
__core_scsi3_add_registration(dev, nacl, pr_reg,
register_type, register_move);
@@ -1119,7 +1119,7 @@ static struct t10_pr_registration *__core_scsi3_locate_pr_reg(
struct se_node_acl *nacl,
unsigned char *isid)
{
- struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
+ struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
struct t10_pr_registration *pr_reg, *pr_reg_tmp;
struct se_portal_group *tpg;
@@ -1137,14 +1137,14 @@ static struct t10_pr_registration *__core_scsi3_locate_pr_reg(
* If this registration does NOT contain a fabric provided
* ISID, then we have found a match.
*/
- if (!(pr_reg->isid_present_at_reg)) {
+ if (!pr_reg->isid_present_at_reg) {
/*
* Determine if this SCSI device server requires that
* SCSI Initiator TransportID w/ ISIDs is enforced
* for fabric modules (iSCSI) requiring them.
*/
- if (TPG_TFO(tpg)->sess_get_initiator_sid != NULL) {
- if (DEV_ATTRIB(dev)->enforce_pr_isids)
+ if (tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) {
+ if (dev->se_sub_dev->se_dev_attrib.enforce_pr_isids)
continue;
}
atomic_inc(&pr_reg->pr_res_holders);
@@ -1157,7 +1157,7 @@ static struct t10_pr_registration *__core_scsi3_locate_pr_reg(
* SCSI Initiator Port TransportIDs, then we expect a valid
* matching ISID to be provided by the local SCSI Initiator Port.
*/
- if (!(isid))
+ if (!isid)
continue;
if (strcmp(isid, pr_reg->pr_reg_isid))
continue;
@@ -1180,9 +1180,9 @@ static struct t10_pr_registration *core_scsi3_locate_pr_reg(
struct se_portal_group *tpg = nacl->se_tpg;
unsigned char buf[PR_REG_ISID_LEN], *isid_ptr = NULL;
- if (TPG_TFO(tpg)->sess_get_initiator_sid != NULL) {
+ if (tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) {
memset(&buf[0], 0, PR_REG_ISID_LEN);
- TPG_TFO(tpg)->sess_get_initiator_sid(sess, &buf[0],
+ tpg->se_tpg_tfo->sess_get_initiator_sid(sess, &buf[0],
PR_REG_ISID_LEN);
isid_ptr = &buf[0];
}
@@ -1206,7 +1206,7 @@ static int core_scsi3_check_implict_release(
spin_lock(&dev->dev_reservation_lock);
pr_res_holder = dev->dev_pr_res_holder;
- if (!(pr_res_holder)) {
+ if (!pr_res_holder) {
spin_unlock(&dev->dev_reservation_lock);
return ret;
}
@@ -1236,11 +1236,11 @@ static int core_scsi3_check_implict_release(
(!strcmp(pr_res_holder->pr_reg_nacl->initiatorname,
pr_reg->pr_reg_nacl->initiatorname)) &&
(pr_res_holder->pr_res_key == pr_reg->pr_res_key)) {
- printk(KERN_ERR "SPC-3 PR: Unable to perform ALL_TG_PT=1"
+ pr_err("SPC-3 PR: Unable to perform ALL_TG_PT=1"
" UNREGISTER while existing reservation with matching"
" key 0x%016Lx is present from another SCSI Initiator"
" Port\n", pr_reg->pr_res_key);
- ret = -1;
+ ret = -EPERM;
}
spin_unlock(&dev->dev_reservation_lock);
@@ -1248,7 +1248,7 @@ static int core_scsi3_check_implict_release(
}
/*
- * Called with struct t10_reservation_template->registration_lock held.
+ * Called with struct t10_reservation->registration_lock held.
*/
static void __core_scsi3_free_registration(
struct se_device *dev,
@@ -1258,7 +1258,7 @@ static void __core_scsi3_free_registration(
{
struct target_core_fabric_ops *tfo =
pr_reg->pr_reg_nacl->se_tpg->se_tpg_tfo;
- struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
+ struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
char i_buf[PR_REG_ISID_ID_LEN];
int prf_isid;
@@ -1283,25 +1283,25 @@ static void __core_scsi3_free_registration(
*/
while (atomic_read(&pr_reg->pr_res_holders) != 0) {
spin_unlock(&pr_tmpl->registration_lock);
- printk("SPC-3 PR [%s] waiting for pr_res_holders\n",
+ pr_debug("SPC-3 PR [%s] waiting for pr_res_holders\n",
tfo->get_fabric_name());
cpu_relax();
spin_lock(&pr_tmpl->registration_lock);
}
- printk(KERN_INFO "SPC-3 PR [%s] Service Action: UNREGISTER Initiator"
+ pr_debug("SPC-3 PR [%s] Service Action: UNREGISTER Initiator"
" Node: %s%s\n", tfo->get_fabric_name(),
pr_reg->pr_reg_nacl->initiatorname,
(prf_isid) ? &i_buf[0] : "");
- printk(KERN_INFO "SPC-3 PR [%s] for %s TCM Subsystem %s Object Target"
+ pr_debug("SPC-3 PR [%s] for %s TCM Subsystem %s Object Target"
" Port(s)\n", tfo->get_fabric_name(),
(pr_reg->pr_reg_all_tg_pt) ? "ALL" : "SINGLE",
- TRANSPORT(dev)->name);
- printk(KERN_INFO "SPC-3 PR [%s] SA Res Key: 0x%016Lx PRgeneration:"
+ dev->transport->name);
+ pr_debug("SPC-3 PR [%s] SA Res Key: 0x%016Lx PRgeneration:"
" 0x%08x\n", tfo->get_fabric_name(), pr_reg->pr_res_key,
pr_reg->pr_res_generation);
- if (!(preempt_and_abort_list)) {
+ if (!preempt_and_abort_list) {
pr_reg->pr_reg_deve = NULL;
pr_reg->pr_reg_nacl = NULL;
kfree(pr_reg->pr_aptpl_buf);
@@ -1319,7 +1319,7 @@ void core_scsi3_free_pr_reg_from_nacl(
struct se_device *dev,
struct se_node_acl *nacl)
{
- struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
+ struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_res_holder;
/*
* If the passed se_node_acl matches the reservation holder,
@@ -1349,7 +1349,7 @@ void core_scsi3_free_pr_reg_from_nacl(
void core_scsi3_free_all_registrations(
struct se_device *dev)
{
- struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
+ struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_res_holder;
spin_lock(&dev->dev_reservation_lock);
@@ -1381,13 +1381,13 @@ void core_scsi3_free_all_registrations(
static int core_scsi3_tpg_depend_item(struct se_portal_group *tpg)
{
- return configfs_depend_item(TPG_TFO(tpg)->tf_subsys,
+ return configfs_depend_item(tpg->se_tpg_tfo->tf_subsys,
&tpg->tpg_group.cg_item);
}
static void core_scsi3_tpg_undepend_item(struct se_portal_group *tpg)
{
- configfs_undepend_item(TPG_TFO(tpg)->tf_subsys,
+ configfs_undepend_item(tpg->se_tpg_tfo->tf_subsys,
&tpg->tpg_group.cg_item);
atomic_dec(&tpg->tpg_pr_ref_count);
@@ -1401,7 +1401,7 @@ static int core_scsi3_nodeacl_depend_item(struct se_node_acl *nacl)
if (nacl->dynamic_node_acl)
return 0;
- return configfs_depend_item(TPG_TFO(tpg)->tf_subsys,
+ return configfs_depend_item(tpg->se_tpg_tfo->tf_subsys,
&nacl->acl_group.cg_item);
}
@@ -1415,7 +1415,7 @@ static void core_scsi3_nodeacl_undepend_item(struct se_node_acl *nacl)
return;
}
- configfs_undepend_item(TPG_TFO(tpg)->tf_subsys,
+ configfs_undepend_item(tpg->se_tpg_tfo->tf_subsys,
&nacl->acl_group.cg_item);
atomic_dec(&nacl->acl_pr_ref_count);
@@ -1430,13 +1430,13 @@ static int core_scsi3_lunacl_depend_item(struct se_dev_entry *se_deve)
/*
* For nacl->dynamic_node_acl=1
*/
- if (!(lun_acl))
+ if (!lun_acl)
return 0;
nacl = lun_acl->se_lun_nacl;
tpg = nacl->se_tpg;
- return configfs_depend_item(TPG_TFO(tpg)->tf_subsys,
+ return configfs_depend_item(tpg->se_tpg_tfo->tf_subsys,
&lun_acl->se_lun_group.cg_item);
}
@@ -1448,7 +1448,7 @@ static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve)
/*
* For nacl->dynamic_node_acl=1
*/
- if (!(lun_acl)) {
+ if (!lun_acl) {
atomic_dec(&se_deve->pr_ref_count);
smp_mb__after_atomic_dec();
return;
@@ -1456,7 +1456,7 @@ static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve)
nacl = lun_acl->se_lun_nacl;
tpg = nacl->se_tpg;
- configfs_undepend_item(TPG_TFO(tpg)->tf_subsys,
+ configfs_undepend_item(tpg->se_tpg_tfo->tf_subsys,
&lun_acl->se_lun_group.cg_item);
atomic_dec(&se_deve->pr_ref_count);
@@ -1471,10 +1471,10 @@ static int core_scsi3_decode_spec_i_port(
int all_tg_pt,
int aptpl)
{
- struct se_device *dev = SE_DEV(cmd);
+ struct se_device *dev = cmd->se_dev;
struct se_port *tmp_port;
struct se_portal_group *dest_tpg = NULL, *tmp_tpg;
- struct se_session *se_sess = SE_SESS(cmd);
+ struct se_session *se_sess = cmd->se_sess;
struct se_node_acl *dest_node_acl = NULL;
struct se_dev_entry *dest_se_deve = NULL, *local_se_deve;
struct t10_pr_registration *dest_pr_reg, *local_pr_reg, *pr_reg_e;
@@ -1482,7 +1482,7 @@ static int core_scsi3_decode_spec_i_port(
struct list_head tid_dest_list;
struct pr_transport_id_holder *tidh_new, *tidh, *tidh_tmp;
struct target_core_fabric_ops *tmp_tf_ops;
- unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf;
+ unsigned char *buf;
unsigned char *ptr, *i_str = NULL, proto_ident, tmp_proto_ident;
char *iport_ptr = NULL, dest_iport[64], i_buf[PR_REG_ISID_ID_LEN];
u32 tpdl, tid_len = 0;
@@ -1500,8 +1500,8 @@ static int core_scsi3_decode_spec_i_port(
* processing in the loop of tid_dest_list below.
*/
tidh_new = kzalloc(sizeof(struct pr_transport_id_holder), GFP_KERNEL);
- if (!(tidh_new)) {
- printk(KERN_ERR "Unable to allocate tidh_new\n");
+ if (!tidh_new) {
+ pr_err("Unable to allocate tidh_new\n");
return PYX_TRANSPORT_LU_COMM_FAILURE;
}
INIT_LIST_HEAD(&tidh_new->dest_list);
@@ -1509,10 +1509,10 @@ static int core_scsi3_decode_spec_i_port(
tidh_new->dest_node_acl = se_sess->se_node_acl;
tidh_new->dest_se_deve = local_se_deve;
- local_pr_reg = __core_scsi3_alloc_registration(SE_DEV(cmd),
+ local_pr_reg = __core_scsi3_alloc_registration(cmd->se_dev,
se_sess->se_node_acl, local_se_deve, l_isid,
sa_res_key, all_tg_pt, aptpl);
- if (!(local_pr_reg)) {
+ if (!local_pr_reg) {
kfree(tidh_new);
return PYX_TRANSPORT_LU_COMM_FAILURE;
}
@@ -1524,6 +1524,8 @@ static int core_scsi3_decode_spec_i_port(
*/
tidh_new->dest_local_nexus = 1;
list_add_tail(&tidh_new->dest_list, &tid_dest_list);
+
+ buf = transport_kmap_first_data_page(cmd);
/*
* For a PERSISTENT RESERVE OUT specify initiator ports payload,
* first extract TransportID Parameter Data Length, and make sure
@@ -1535,7 +1537,7 @@ static int core_scsi3_decode_spec_i_port(
tpdl |= buf[27] & 0xff;
if ((tpdl + 28) != cmd->data_length) {
- printk(KERN_ERR "SPC-3 PR: Illegal tpdl: %u + 28 byte header"
+ pr_err("SPC-3 PR: Illegal tpdl: %u + 28 byte header"
" does not equal CDB data_length: %u\n", tpdl,
cmd->data_length);
ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
@@ -1555,13 +1557,13 @@ static int core_scsi3_decode_spec_i_port(
spin_lock(&dev->se_port_lock);
list_for_each_entry(tmp_port, &dev->dev_sep_list, sep_list) {
tmp_tpg = tmp_port->sep_tpg;
- if (!(tmp_tpg))
+ if (!tmp_tpg)
continue;
- tmp_tf_ops = TPG_TFO(tmp_tpg);
- if (!(tmp_tf_ops))
+ tmp_tf_ops = tmp_tpg->se_tpg_tfo;
+ if (!tmp_tf_ops)
continue;
- if (!(tmp_tf_ops->get_fabric_proto_ident) ||
- !(tmp_tf_ops->tpg_parse_pr_out_transport_id))
+ if (!tmp_tf_ops->get_fabric_proto_ident ||
+ !tmp_tf_ops->tpg_parse_pr_out_transport_id)
continue;
/*
* Look for the matching proto_ident provided by
@@ -1575,7 +1577,7 @@ static int core_scsi3_decode_spec_i_port(
i_str = tmp_tf_ops->tpg_parse_pr_out_transport_id(
tmp_tpg, (const char *)ptr, &tid_len,
&iport_ptr);
- if (!(i_str))
+ if (!i_str)
continue;
atomic_inc(&tmp_tpg->tpg_pr_ref_count);
@@ -1584,7 +1586,7 @@ static int core_scsi3_decode_spec_i_port(
ret = core_scsi3_tpg_depend_item(tmp_tpg);
if (ret != 0) {
- printk(KERN_ERR " core_scsi3_tpg_depend_item()"
+ pr_err(" core_scsi3_tpg_depend_item()"
" for tmp_tpg\n");
atomic_dec(&tmp_tpg->tpg_pr_ref_count);
smp_mb__after_atomic_dec();
@@ -1605,7 +1607,7 @@ static int core_scsi3_decode_spec_i_port(
}
spin_unlock_bh(&tmp_tpg->acl_node_lock);
- if (!(dest_node_acl)) {
+ if (!dest_node_acl) {
core_scsi3_tpg_undepend_item(tmp_tpg);
spin_lock(&dev->se_port_lock);
continue;
@@ -1613,7 +1615,7 @@ static int core_scsi3_decode_spec_i_port(
ret = core_scsi3_nodeacl_depend_item(dest_node_acl);
if (ret != 0) {
- printk(KERN_ERR "configfs_depend_item() failed"
+ pr_err("configfs_depend_item() failed"
" for dest_node_acl->acl_group\n");
atomic_dec(&dest_node_acl->acl_pr_ref_count);
smp_mb__after_atomic_dec();
@@ -1623,9 +1625,9 @@ static int core_scsi3_decode_spec_i_port(
}
dest_tpg = tmp_tpg;
- printk(KERN_INFO "SPC-3 PR SPEC_I_PT: Located %s Node:"
+ pr_debug("SPC-3 PR SPEC_I_PT: Located %s Node:"
" %s Port RTPI: %hu\n",
- TPG_TFO(dest_tpg)->get_fabric_name(),
+ dest_tpg->se_tpg_tfo->get_fabric_name(),
dest_node_acl->initiatorname, dest_rtpi);
spin_lock(&dev->se_port_lock);
@@ -1633,20 +1635,20 @@ static int core_scsi3_decode_spec_i_port(
}
spin_unlock(&dev->se_port_lock);
- if (!(dest_tpg)) {
- printk(KERN_ERR "SPC-3 PR SPEC_I_PT: Unable to locate"
+ if (!dest_tpg) {
+ pr_err("SPC-3 PR SPEC_I_PT: Unable to locate"
" dest_tpg\n");
ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
goto out;
}
#if 0
- printk("SPC-3 PR SPEC_I_PT: Got %s data_length: %u tpdl: %u"
+ pr_debug("SPC-3 PR SPEC_I_PT: Got %s data_length: %u tpdl: %u"
" tid_len: %d for %s + %s\n",
- TPG_TFO(dest_tpg)->get_fabric_name(), cmd->data_length,
+ dest_tpg->se_tpg_tfo->get_fabric_name(), cmd->data_length,
tpdl, tid_len, i_str, iport_ptr);
#endif
if (tid_len > tpdl) {
- printk(KERN_ERR "SPC-3 PR SPEC_I_PT: Illegal tid_len:"
+ pr_err("SPC-3 PR SPEC_I_PT: Illegal tid_len:"
" %u for Transport ID: %s\n", tid_len, ptr);
core_scsi3_nodeacl_undepend_item(dest_node_acl);
core_scsi3_tpg_undepend_item(dest_tpg);
@@ -1660,10 +1662,10 @@ static int core_scsi3_decode_spec_i_port(
*/
dest_se_deve = core_get_se_deve_from_rtpi(dest_node_acl,
dest_rtpi);
- if (!(dest_se_deve)) {
- printk(KERN_ERR "Unable to locate %s dest_se_deve"
+ if (!dest_se_deve) {
+ pr_err("Unable to locate %s dest_se_deve"
" from destination RTPI: %hu\n",
- TPG_TFO(dest_tpg)->get_fabric_name(),
+ dest_tpg->se_tpg_tfo->get_fabric_name(),
dest_rtpi);
core_scsi3_nodeacl_undepend_item(dest_node_acl);
@@ -1674,7 +1676,7 @@ static int core_scsi3_decode_spec_i_port(
ret = core_scsi3_lunacl_depend_item(dest_se_deve);
if (ret < 0) {
- printk(KERN_ERR "core_scsi3_lunacl_depend_item()"
+ pr_err("core_scsi3_lunacl_depend_item()"
" failed\n");
atomic_dec(&dest_se_deve->pr_ref_count);
smp_mb__after_atomic_dec();
@@ -1684,9 +1686,9 @@ static int core_scsi3_decode_spec_i_port(
goto out;
}
#if 0
- printk(KERN_INFO "SPC-3 PR SPEC_I_PT: Located %s Node: %s"
+ pr_debug("SPC-3 PR SPEC_I_PT: Located %s Node: %s"
" dest_se_deve mapped_lun: %u\n",
- TPG_TFO(dest_tpg)->get_fabric_name(),
+ dest_tpg->se_tpg_tfo->get_fabric_name(),
dest_node_acl->initiatorname, dest_se_deve->mapped_lun);
#endif
/*
@@ -1712,8 +1714,8 @@ static int core_scsi3_decode_spec_i_port(
*/
tidh_new = kzalloc(sizeof(struct pr_transport_id_holder),
GFP_KERNEL);
- if (!(tidh_new)) {
- printk(KERN_ERR "Unable to allocate tidh_new\n");
+ if (!tidh_new) {
+ pr_err("Unable to allocate tidh_new\n");
core_scsi3_lunacl_undepend_item(dest_se_deve);
core_scsi3_nodeacl_undepend_item(dest_node_acl);
core_scsi3_tpg_undepend_item(dest_tpg);
@@ -1741,10 +1743,10 @@ static int core_scsi3_decode_spec_i_port(
* and then call __core_scsi3_add_registration() in the
* 2nd loop which will never fail.
*/
- dest_pr_reg = __core_scsi3_alloc_registration(SE_DEV(cmd),
+ dest_pr_reg = __core_scsi3_alloc_registration(cmd->se_dev,
dest_node_acl, dest_se_deve, iport_ptr,
sa_res_key, all_tg_pt, aptpl);
- if (!(dest_pr_reg)) {
+ if (!dest_pr_reg) {
core_scsi3_lunacl_undepend_item(dest_se_deve);
core_scsi3_nodeacl_undepend_item(dest_node_acl);
core_scsi3_tpg_undepend_item(dest_tpg);
@@ -1760,6 +1762,9 @@ static int core_scsi3_decode_spec_i_port(
tid_len = 0;
}
+
+ transport_kunmap_first_data_page(cmd);
+
/*
* Go ahead and create registrations from tid_dest_list for the
* SPEC_I_PT provided TransportID for the *tidh referenced dest_node_acl
@@ -1787,12 +1792,12 @@ static int core_scsi3_decode_spec_i_port(
prf_isid = core_pr_dump_initiator_port(dest_pr_reg, &i_buf[0],
PR_REG_ISID_ID_LEN);
- __core_scsi3_add_registration(SE_DEV(cmd), dest_node_acl,
+ __core_scsi3_add_registration(cmd->se_dev, dest_node_acl,
dest_pr_reg, 0, 0);
- printk(KERN_INFO "SPC-3 PR [%s] SPEC_I_PT: Successfully"
+ pr_debug("SPC-3 PR [%s] SPEC_I_PT: Successfully"
" registered Transport ID for Node: %s%s Mapped LUN:"
- " %u\n", TPG_TFO(dest_tpg)->get_fabric_name(),
+ " %u\n", dest_tpg->se_tpg_tfo->get_fabric_name(),
dest_node_acl->initiatorname, (prf_isid) ?
&i_buf[0] : "", dest_se_deve->mapped_lun);
@@ -1806,6 +1811,7 @@ static int core_scsi3_decode_spec_i_port(
return 0;
out:
+ transport_kunmap_first_data_page(cmd);
/*
* For the failure case, release everything from tid_dest_list
* including *dest_pr_reg and the configfs dependencies..
@@ -1855,7 +1861,7 @@ static int __core_scsi3_update_aptpl_buf(
{
struct se_lun *lun;
struct se_portal_group *tpg;
- struct se_subsystem_dev *su_dev = SU_DEV(dev);
+ struct se_subsystem_dev *su_dev = dev->se_sub_dev;
struct t10_pr_registration *pr_reg;
unsigned char tmp[512], isid_buf[32];
ssize_t len = 0;
@@ -1873,8 +1879,8 @@ static int __core_scsi3_update_aptpl_buf(
/*
* Walk the registration list..
*/
- spin_lock(&T10_RES(su_dev)->registration_lock);
- list_for_each_entry(pr_reg, &T10_RES(su_dev)->registration_list,
+ spin_lock(&su_dev->t10_pr.registration_lock);
+ list_for_each_entry(pr_reg, &su_dev->t10_pr.registration_list,
pr_reg_list) {
tmp[0] = '\0';
@@ -1900,7 +1906,7 @@ static int __core_scsi3_update_aptpl_buf(
"res_holder=1\nres_type=%02x\n"
"res_scope=%02x\nres_all_tg_pt=%d\n"
"mapped_lun=%u\n", reg_count,
- TPG_TFO(tpg)->get_fabric_name(),
+ tpg->se_tpg_tfo->get_fabric_name(),
pr_reg->pr_reg_nacl->initiatorname, isid_buf,
pr_reg->pr_res_key, pr_reg->pr_res_type,
pr_reg->pr_res_scope, pr_reg->pr_reg_all_tg_pt,
@@ -1910,17 +1916,17 @@ static int __core_scsi3_update_aptpl_buf(
"initiator_fabric=%s\ninitiator_node=%s\n%s"
"sa_res_key=%llu\nres_holder=0\n"
"res_all_tg_pt=%d\nmapped_lun=%u\n",
- reg_count, TPG_TFO(tpg)->get_fabric_name(),
+ reg_count, tpg->se_tpg_tfo->get_fabric_name(),
pr_reg->pr_reg_nacl->initiatorname, isid_buf,
pr_reg->pr_res_key, pr_reg->pr_reg_all_tg_pt,
pr_reg->pr_res_mapped_lun);
}
- if ((len + strlen(tmp) > pr_aptpl_buf_len)) {
- printk(KERN_ERR "Unable to update renaming"
+ if ((len + strlen(tmp) >= pr_aptpl_buf_len)) {
+ pr_err("Unable to update renaming"
" APTPL metadata\n");
- spin_unlock(&T10_RES(su_dev)->registration_lock);
- return -1;
+ spin_unlock(&su_dev->t10_pr.registration_lock);
+ return -EMSGSIZE;
}
len += sprintf(buf+len, "%s", tmp);
@@ -1929,23 +1935,23 @@ static int __core_scsi3_update_aptpl_buf(
*/
snprintf(tmp, 512, "target_fabric=%s\ntarget_node=%s\n"
"tpgt=%hu\nport_rtpi=%hu\ntarget_lun=%u\nPR_REG_END:"
- " %d\n", TPG_TFO(tpg)->get_fabric_name(),
- TPG_TFO(tpg)->tpg_get_wwn(tpg),
- TPG_TFO(tpg)->tpg_get_tag(tpg),
+ " %d\n", tpg->se_tpg_tfo->get_fabric_name(),
+ tpg->se_tpg_tfo->tpg_get_wwn(tpg),
+ tpg->se_tpg_tfo->tpg_get_tag(tpg),
lun->lun_sep->sep_rtpi, lun->unpacked_lun, reg_count);
- if ((len + strlen(tmp) > pr_aptpl_buf_len)) {
- printk(KERN_ERR "Unable to update renaming"
+ if ((len + strlen(tmp) >= pr_aptpl_buf_len)) {
+ pr_err("Unable to update renaming"
" APTPL metadata\n");
- spin_unlock(&T10_RES(su_dev)->registration_lock);
- return -1;
+ spin_unlock(&su_dev->t10_pr.registration_lock);
+ return -EMSGSIZE;
}
len += sprintf(buf+len, "%s", tmp);
reg_count++;
}
- spin_unlock(&T10_RES(su_dev)->registration_lock);
+ spin_unlock(&su_dev->t10_pr.registration_lock);
- if (!(reg_count))
+ if (!reg_count)
len += sprintf(buf+len, "No Registrations or Reservations");
return 0;
@@ -1975,7 +1981,7 @@ static int __core_scsi3_write_aptpl_to_file(
unsigned char *buf,
u32 pr_aptpl_buf_len)
{
- struct t10_wwn *wwn = &SU_DEV(dev)->t10_wwn;
+ struct t10_wwn *wwn = &dev->se_sub_dev->t10_wwn;
struct file *file;
struct iovec iov[1];
mm_segment_t old_fs;
@@ -1986,22 +1992,22 @@ static int __core_scsi3_write_aptpl_to_file(
memset(iov, 0, sizeof(struct iovec));
memset(path, 0, 512);
- if (strlen(&wwn->unit_serial[0]) > 512) {
- printk(KERN_ERR "WWN value for struct se_device does not fit"
+ if (strlen(&wwn->unit_serial[0]) >= 512) {
+ pr_err("WWN value for struct se_device does not fit"
" into path buffer\n");
- return -1;
+ return -EMSGSIZE;
}
snprintf(path, 512, "/var/target/pr/aptpl_%s", &wwn->unit_serial[0]);
file = filp_open(path, flags, 0600);
if (IS_ERR(file) || !file || !file->f_dentry) {
- printk(KERN_ERR "filp_open(%s) for APTPL metadata"
+ pr_err("filp_open(%s) for APTPL metadata"
" failed\n", path);
- return -1;
+ return (PTR_ERR(file) < 0 ? PTR_ERR(file) : -ENOENT);
}
iov[0].iov_base = &buf[0];
- if (!(pr_aptpl_buf_len))
+ if (!pr_aptpl_buf_len)
iov[0].iov_len = (strlen(&buf[0]) + 1); /* Add extra for NULL */
else
iov[0].iov_len = pr_aptpl_buf_len;
@@ -2012,9 +2018,9 @@ static int __core_scsi3_write_aptpl_to_file(
set_fs(old_fs);
if (ret < 0) {
- printk("Error writing APTPL metadata file: %s\n", path);
+ pr_debug("Error writing APTPL metadata file: %s\n", path);
filp_close(file, NULL);
- return -1;
+ return -EIO;
}
filp_close(file, NULL);
@@ -2032,7 +2038,7 @@ static int core_scsi3_update_and_write_aptpl(
/*
* Can be called with a NULL pointer from PROUT service action CLEAR
*/
- if (!(in_buf)) {
+ if (!in_buf) {
memset(null_buf, 0, 64);
buf = &null_buf[0];
/*
@@ -2049,14 +2055,14 @@ static int core_scsi3_update_and_write_aptpl(
ret = core_scsi3_update_aptpl_buf(dev, buf, pr_aptpl_buf_len,
clear_aptpl_metadata);
if (ret != 0)
- return -1;
+ return ret;
/*
* __core_scsi3_write_aptpl_to_file() will call strlen()
* on the passed buf to determine pr_aptpl_buf_len.
*/
ret = __core_scsi3_write_aptpl_to_file(dev, buf, 0);
if (ret != 0)
- return -1;
+ return ret;
return ret;
}
@@ -2070,28 +2076,28 @@ static int core_scsi3_emulate_pro_register(
int spec_i_pt,
int ignore_key)
{
- struct se_session *se_sess = SE_SESS(cmd);
- struct se_device *dev = SE_DEV(cmd);
+ struct se_session *se_sess = cmd->se_sess;
+ struct se_device *dev = cmd->se_dev;
struct se_dev_entry *se_deve;
- struct se_lun *se_lun = SE_LUN(cmd);
+ struct se_lun *se_lun = cmd->se_lun;
struct se_portal_group *se_tpg;
struct t10_pr_registration *pr_reg, *pr_reg_p, *pr_reg_tmp, *pr_reg_e;
- struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
+ struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
/* Used for APTPL metadata w/ UNREGISTER */
unsigned char *pr_aptpl_buf = NULL;
unsigned char isid_buf[PR_REG_ISID_LEN], *isid_ptr = NULL;
int pr_holder = 0, ret = 0, type;
- if (!(se_sess) || !(se_lun)) {
- printk(KERN_ERR "SPC-3 PR: se_sess || struct se_lun is NULL!\n");
+ if (!se_sess || !se_lun) {
+ pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
return PYX_TRANSPORT_LU_COMM_FAILURE;
}
se_tpg = se_sess->se_tpg;
se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
- if (TPG_TFO(se_tpg)->sess_get_initiator_sid != NULL) {
+ if (se_tpg->se_tpg_tfo->sess_get_initiator_sid) {
memset(&isid_buf[0], 0, PR_REG_ISID_LEN);
- TPG_TFO(se_tpg)->sess_get_initiator_sid(se_sess, &isid_buf[0],
+ se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess, &isid_buf[0],
PR_REG_ISID_LEN);
isid_ptr = &isid_buf[0];
}
@@ -2099,30 +2105,30 @@ static int core_scsi3_emulate_pro_register(
* Follow logic from spc4r17 Section 5.7.7, Register Behaviors Table 47
*/
pr_reg_e = core_scsi3_locate_pr_reg(dev, se_sess->se_node_acl, se_sess);
- if (!(pr_reg_e)) {
+ if (!pr_reg_e) {
if (res_key) {
- printk(KERN_WARNING "SPC-3 PR: Reservation Key non-zero"
+ pr_warn("SPC-3 PR: Reservation Key non-zero"
" for SA REGISTER, returning CONFLICT\n");
return PYX_TRANSPORT_RESERVATION_CONFLICT;
}
/*
* Do nothing but return GOOD status.
*/
- if (!(sa_res_key))
+ if (!sa_res_key)
return PYX_TRANSPORT_SENT_TO_TRANSPORT;
- if (!(spec_i_pt)) {
+ if (!spec_i_pt) {
/*
* Perform the Service Action REGISTER on the Initiator
* Port Endpoint that the PRO was received from on the
* Logical Unit of the SCSI device server.
*/
- ret = core_scsi3_alloc_registration(SE_DEV(cmd),
+ ret = core_scsi3_alloc_registration(cmd->se_dev,
se_sess->se_node_acl, se_deve, isid_ptr,
sa_res_key, all_tg_pt, aptpl,
ignore_key, 0);
if (ret != 0) {
- printk(KERN_ERR "Unable to allocate"
+ pr_err("Unable to allocate"
" struct t10_pr_registration\n");
return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
}
@@ -2143,10 +2149,10 @@ static int core_scsi3_emulate_pro_register(
/*
* Nothing left to do for the APTPL=0 case.
*/
- if (!(aptpl)) {
+ if (!aptpl) {
pr_tmpl->pr_aptpl_active = 0;
- core_scsi3_update_and_write_aptpl(SE_DEV(cmd), NULL, 0);
- printk("SPC-3 PR: Set APTPL Bit Deactivated for"
+ core_scsi3_update_and_write_aptpl(cmd->se_dev, NULL, 0);
+ pr_debug("SPC-3 PR: Set APTPL Bit Deactivated for"
" REGISTER\n");
return 0;
}
@@ -2155,15 +2161,15 @@ static int core_scsi3_emulate_pro_register(
* update the APTPL metadata information using its
* preallocated *pr_reg->pr_aptpl_buf.
*/
- pr_reg = core_scsi3_locate_pr_reg(SE_DEV(cmd),
+ pr_reg = core_scsi3_locate_pr_reg(cmd->se_dev,
se_sess->se_node_acl, se_sess);
- ret = core_scsi3_update_and_write_aptpl(SE_DEV(cmd),
+ ret = core_scsi3_update_and_write_aptpl(cmd->se_dev,
&pr_reg->pr_aptpl_buf[0],
pr_tmpl->pr_aptpl_buf_len);
- if (!(ret)) {
+ if (!ret) {
pr_tmpl->pr_aptpl_active = 1;
- printk("SPC-3 PR: Set APTPL Bit Activated for REGISTER\n");
+ pr_debug("SPC-3 PR: Set APTPL Bit Activated for REGISTER\n");
}
core_scsi3_put_pr_reg(pr_reg);
@@ -2175,9 +2181,9 @@ static int core_scsi3_emulate_pro_register(
pr_reg = pr_reg_e;
type = pr_reg->pr_res_type;
- if (!(ignore_key)) {
+ if (!ignore_key) {
if (res_key != pr_reg->pr_res_key) {
- printk(KERN_ERR "SPC-3 PR REGISTER: Received"
+ pr_err("SPC-3 PR REGISTER: Received"
" res_key: 0x%016Lx does not match"
" existing SA REGISTER res_key:"
" 0x%016Lx\n", res_key,
@@ -2187,7 +2193,7 @@ static int core_scsi3_emulate_pro_register(
}
}
if (spec_i_pt) {
- printk(KERN_ERR "SPC-3 PR UNREGISTER: SPEC_I_PT"
+ pr_err("SPC-3 PR UNREGISTER: SPEC_I_PT"
" set while sa_res_key=0\n");
core_scsi3_put_pr_reg(pr_reg);
return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
@@ -2197,7 +2203,7 @@ static int core_scsi3_emulate_pro_register(
* must also set ALL_TG_PT=1 in the incoming PROUT.
*/
if (pr_reg->pr_reg_all_tg_pt && !(all_tg_pt)) {
- printk(KERN_ERR "SPC-3 PR UNREGISTER: ALL_TG_PT=1"
+ pr_err("SPC-3 PR UNREGISTER: ALL_TG_PT=1"
" registration exists, but ALL_TG_PT=1 bit not"
" present in received PROUT\n");
core_scsi3_put_pr_reg(pr_reg);
@@ -2209,8 +2215,8 @@ static int core_scsi3_emulate_pro_register(
if (aptpl) {
pr_aptpl_buf = kzalloc(pr_tmpl->pr_aptpl_buf_len,
GFP_KERNEL);
- if (!(pr_aptpl_buf)) {
- printk(KERN_ERR "Unable to allocate"
+ if (!pr_aptpl_buf) {
+ pr_err("Unable to allocate"
" pr_aptpl_buf\n");
core_scsi3_put_pr_reg(pr_reg);
return PYX_TRANSPORT_LU_COMM_FAILURE;
@@ -2221,9 +2227,9 @@ static int core_scsi3_emulate_pro_register(
* Nexus sa_res_key=1 Change Reservation Key for registered I_T
* Nexus.
*/
- if (!(sa_res_key)) {
+ if (!sa_res_key) {
pr_holder = core_scsi3_check_implict_release(
- SE_DEV(cmd), pr_reg);
+ cmd->se_dev, pr_reg);
if (pr_holder < 0) {
kfree(pr_aptpl_buf);
core_scsi3_put_pr_reg(pr_reg);
@@ -2240,7 +2246,7 @@ static int core_scsi3_emulate_pro_register(
&pr_tmpl->registration_list,
pr_reg_list) {
- if (!(pr_reg_p->pr_reg_all_tg_pt))
+ if (!pr_reg_p->pr_reg_all_tg_pt)
continue;
if (pr_reg_p->pr_res_key != res_key)
@@ -2260,7 +2266,7 @@ static int core_scsi3_emulate_pro_register(
/*
* Release the calling I_T Nexus registration now..
*/
- __core_scsi3_free_registration(SE_DEV(cmd), pr_reg,
+ __core_scsi3_free_registration(cmd->se_dev, pr_reg,
NULL, 1);
/*
* From spc4r17, section 5.7.11.3 Unregistering
@@ -2289,10 +2295,10 @@ static int core_scsi3_emulate_pro_register(
}
spin_unlock(&pr_tmpl->registration_lock);
- if (!(aptpl)) {
+ if (!aptpl) {
pr_tmpl->pr_aptpl_active = 0;
core_scsi3_update_and_write_aptpl(dev, NULL, 0);
- printk("SPC-3 PR: Set APTPL Bit Deactivated"
+ pr_debug("SPC-3 PR: Set APTPL Bit Deactivated"
" for UNREGISTER\n");
return 0;
}
@@ -2300,9 +2306,9 @@ static int core_scsi3_emulate_pro_register(
ret = core_scsi3_update_and_write_aptpl(dev,
&pr_aptpl_buf[0],
pr_tmpl->pr_aptpl_buf_len);
- if (!(ret)) {
+ if (!ret) {
pr_tmpl->pr_aptpl_active = 1;
- printk("SPC-3 PR: Set APTPL Bit Activated"
+ pr_debug("SPC-3 PR: Set APTPL Bit Activated"
" for UNREGISTER\n");
}
@@ -2315,20 +2321,20 @@ static int core_scsi3_emulate_pro_register(
* READ_KEYS service action.
*/
pr_reg->pr_res_generation = core_scsi3_pr_generation(
- SE_DEV(cmd));
+ cmd->se_dev);
pr_reg->pr_res_key = sa_res_key;
- printk("SPC-3 PR [%s] REGISTER%s: Changed Reservation"
+ pr_debug("SPC-3 PR [%s] REGISTER%s: Changed Reservation"
" Key for %s to: 0x%016Lx PRgeneration:"
- " 0x%08x\n", CMD_TFO(cmd)->get_fabric_name(),
+ " 0x%08x\n", cmd->se_tfo->get_fabric_name(),
(ignore_key) ? "_AND_IGNORE_EXISTING_KEY" : "",
pr_reg->pr_reg_nacl->initiatorname,
pr_reg->pr_res_key, pr_reg->pr_res_generation);
- if (!(aptpl)) {
+ if (!aptpl) {
pr_tmpl->pr_aptpl_active = 0;
core_scsi3_update_and_write_aptpl(dev, NULL, 0);
core_scsi3_put_pr_reg(pr_reg);
- printk("SPC-3 PR: Set APTPL Bit Deactivated"
+ pr_debug("SPC-3 PR: Set APTPL Bit Deactivated"
" for REGISTER\n");
return 0;
}
@@ -2336,9 +2342,9 @@ static int core_scsi3_emulate_pro_register(
ret = core_scsi3_update_and_write_aptpl(dev,
&pr_aptpl_buf[0],
pr_tmpl->pr_aptpl_buf_len);
- if (!(ret)) {
+ if (!ret) {
pr_tmpl->pr_aptpl_active = 1;
- printk("SPC-3 PR: Set APTPL Bit Activated"
+ pr_debug("SPC-3 PR: Set APTPL Bit Activated"
" for REGISTER\n");
}
@@ -2378,19 +2384,19 @@ static int core_scsi3_pro_reserve(
int scope,
u64 res_key)
{
- struct se_session *se_sess = SE_SESS(cmd);
+ struct se_session *se_sess = cmd->se_sess;
struct se_dev_entry *se_deve;
- struct se_lun *se_lun = SE_LUN(cmd);
+ struct se_lun *se_lun = cmd->se_lun;
struct se_portal_group *se_tpg;
struct t10_pr_registration *pr_reg, *pr_res_holder;
- struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
+ struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
char i_buf[PR_REG_ISID_ID_LEN];
int ret, prf_isid;
memset(i_buf, 0, PR_REG_ISID_ID_LEN);
- if (!(se_sess) || !(se_lun)) {
- printk(KERN_ERR "SPC-3 PR: se_sess || struct se_lun is NULL!\n");
+ if (!se_sess || !se_lun) {
+ pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
return PYX_TRANSPORT_LU_COMM_FAILURE;
}
se_tpg = se_sess->se_tpg;
@@ -2398,10 +2404,10 @@ static int core_scsi3_pro_reserve(
/*
* Locate the existing *pr_reg via struct se_node_acl pointers
*/
- pr_reg = core_scsi3_locate_pr_reg(SE_DEV(cmd), se_sess->se_node_acl,
+ pr_reg = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl,
se_sess);
- if (!(pr_reg)) {
- printk(KERN_ERR "SPC-3 PR: Unable to locate"
+ if (!pr_reg) {
+ pr_err("SPC-3 PR: Unable to locate"
" PR_REGISTERED *pr_reg for RESERVE\n");
return PYX_TRANSPORT_LU_COMM_FAILURE;
}
@@ -2415,7 +2421,7 @@ static int core_scsi3_pro_reserve(
* registered with the logical unit for the I_T nexus; and
*/
if (res_key != pr_reg->pr_res_key) {
- printk(KERN_ERR "SPC-3 PR RESERVE: Received res_key: 0x%016Lx"
+ pr_err("SPC-3 PR RESERVE: Received res_key: 0x%016Lx"
" does not match existing SA REGISTER res_key:"
" 0x%016Lx\n", res_key, pr_reg->pr_res_key);
core_scsi3_put_pr_reg(pr_reg);
@@ -2432,7 +2438,7 @@ static int core_scsi3_pro_reserve(
* and that persistent reservation has a scope of LU_SCOPE.
*/
if (scope != PR_SCOPE_LU_SCOPE) {
- printk(KERN_ERR "SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope);
+ pr_err("SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope);
core_scsi3_put_pr_reg(pr_reg);
return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
}
@@ -2456,12 +2462,12 @@ static int core_scsi3_pro_reserve(
*/
if (pr_res_holder != pr_reg) {
struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl;
- printk(KERN_ERR "SPC-3 PR: Attempted RESERVE from"
+ pr_err("SPC-3 PR: Attempted RESERVE from"
" [%s]: %s while reservation already held by"
" [%s]: %s, returning RESERVATION_CONFLICT\n",
- CMD_TFO(cmd)->get_fabric_name(),
+ cmd->se_tfo->get_fabric_name(),
se_sess->se_node_acl->initiatorname,
- TPG_TFO(pr_res_nacl->se_tpg)->get_fabric_name(),
+ pr_res_nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
pr_res_holder->pr_reg_nacl->initiatorname);
spin_unlock(&dev->dev_reservation_lock);
@@ -2478,13 +2484,13 @@ static int core_scsi3_pro_reserve(
if ((pr_res_holder->pr_res_type != type) ||
(pr_res_holder->pr_res_scope != scope)) {
struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl;
- printk(KERN_ERR "SPC-3 PR: Attempted RESERVE from"
+ pr_err("SPC-3 PR: Attempted RESERVE from"
" [%s]: %s trying to change TYPE and/or SCOPE,"
" while reservation already held by [%s]: %s,"
" returning RESERVATION_CONFLICT\n",
- CMD_TFO(cmd)->get_fabric_name(),
+ cmd->se_tfo->get_fabric_name(),
se_sess->se_node_acl->initiatorname,
- TPG_TFO(pr_res_nacl->se_tpg)->get_fabric_name(),
+ pr_res_nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
pr_res_holder->pr_reg_nacl->initiatorname);
spin_unlock(&dev->dev_reservation_lock);
@@ -2516,22 +2522,22 @@ static int core_scsi3_pro_reserve(
prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
PR_REG_ISID_ID_LEN);
- printk(KERN_INFO "SPC-3 PR [%s] Service Action: RESERVE created new"
+ pr_debug("SPC-3 PR [%s] Service Action: RESERVE created new"
" reservation holder TYPE: %s ALL_TG_PT: %d\n",
- CMD_TFO(cmd)->get_fabric_name(), core_scsi3_pr_dump_type(type),
+ cmd->se_tfo->get_fabric_name(), core_scsi3_pr_dump_type(type),
(pr_reg->pr_reg_all_tg_pt) ? 1 : 0);
- printk(KERN_INFO "SPC-3 PR [%s] RESERVE Node: %s%s\n",
- CMD_TFO(cmd)->get_fabric_name(),
+ pr_debug("SPC-3 PR [%s] RESERVE Node: %s%s\n",
+ cmd->se_tfo->get_fabric_name(),
se_sess->se_node_acl->initiatorname,
(prf_isid) ? &i_buf[0] : "");
spin_unlock(&dev->dev_reservation_lock);
if (pr_tmpl->pr_aptpl_active) {
- ret = core_scsi3_update_and_write_aptpl(SE_DEV(cmd),
+ ret = core_scsi3_update_and_write_aptpl(cmd->se_dev,
&pr_reg->pr_aptpl_buf[0],
pr_tmpl->pr_aptpl_buf_len);
- if (!(ret))
- printk(KERN_INFO "SPC-3 PR: Updated APTPL metadata"
+ if (!ret)
+ pr_debug("SPC-3 PR: Updated APTPL metadata"
" for RESERVE\n");
}
@@ -2558,7 +2564,7 @@ static int core_scsi3_emulate_pro_reserve(
ret = core_scsi3_pro_reserve(cmd, dev, type, scope, res_key);
break;
default:
- printk(KERN_ERR "SPC-3 PR: Unknown Service Action RESERVE Type:"
+ pr_err("SPC-3 PR: Unknown Service Action RESERVE Type:"
" 0x%02x\n", type);
return PYX_TRANSPORT_INVALID_CDB_FIELD;
}
@@ -2587,12 +2593,12 @@ static void __core_scsi3_complete_pro_release(
*/
dev->dev_pr_res_holder = NULL;
- printk(KERN_INFO "SPC-3 PR [%s] Service Action: %s RELEASE cleared"
+ pr_debug("SPC-3 PR [%s] Service Action: %s RELEASE cleared"
" reservation holder TYPE: %s ALL_TG_PT: %d\n",
tfo->get_fabric_name(), (explict) ? "explict" : "implict",
core_scsi3_pr_dump_type(pr_reg->pr_res_type),
(pr_reg->pr_reg_all_tg_pt) ? 1 : 0);
- printk(KERN_INFO "SPC-3 PR [%s] RELEASE Node: %s%s\n",
+ pr_debug("SPC-3 PR [%s] RELEASE Node: %s%s\n",
tfo->get_fabric_name(), se_nacl->initiatorname,
(prf_isid) ? &i_buf[0] : "");
/*
@@ -2608,22 +2614,22 @@ static int core_scsi3_emulate_pro_release(
u64 res_key)
{
struct se_device *dev = cmd->se_dev;
- struct se_session *se_sess = SE_SESS(cmd);
- struct se_lun *se_lun = SE_LUN(cmd);
+ struct se_session *se_sess = cmd->se_sess;
+ struct se_lun *se_lun = cmd->se_lun;
struct t10_pr_registration *pr_reg, *pr_reg_p, *pr_res_holder;
- struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
+ struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
int ret, all_reg = 0;
- if (!(se_sess) || !(se_lun)) {
- printk(KERN_ERR "SPC-3 PR: se_sess || struct se_lun is NULL!\n");
+ if (!se_sess || !se_lun) {
+ pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
return PYX_TRANSPORT_LU_COMM_FAILURE;
}
/*
* Locate the existing *pr_reg via struct se_node_acl pointers
*/
pr_reg = core_scsi3_locate_pr_reg(dev, se_sess->se_node_acl, se_sess);
- if (!(pr_reg)) {
- printk(KERN_ERR "SPC-3 PR: Unable to locate"
+ if (!pr_reg) {
+ pr_err("SPC-3 PR: Unable to locate"
" PR_REGISTERED *pr_reg for RELEASE\n");
return PYX_TRANSPORT_LU_COMM_FAILURE;
}
@@ -2641,7 +2647,7 @@ static int core_scsi3_emulate_pro_release(
*/
spin_lock(&dev->dev_reservation_lock);
pr_res_holder = dev->dev_pr_res_holder;
- if (!(pr_res_holder)) {
+ if (!pr_res_holder) {
/*
* No persistent reservation, return GOOD status.
*/
@@ -2678,7 +2684,7 @@ static int core_scsi3_emulate_pro_release(
* that is registered with the logical unit for the I_T nexus;
*/
if (res_key != pr_reg->pr_res_key) {
- printk(KERN_ERR "SPC-3 PR RELEASE: Received res_key: 0x%016Lx"
+ pr_err("SPC-3 PR RELEASE: Received res_key: 0x%016Lx"
" does not match existing SA REGISTER res_key:"
" 0x%016Lx\n", res_key, pr_reg->pr_res_key);
spin_unlock(&dev->dev_reservation_lock);
@@ -2694,13 +2700,13 @@ static int core_scsi3_emulate_pro_release(
if ((pr_res_holder->pr_res_type != type) ||
(pr_res_holder->pr_res_scope != scope)) {
struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl;
- printk(KERN_ERR "SPC-3 PR RELEASE: Attempted to release"
+ pr_err("SPC-3 PR RELEASE: Attempted to release"
" reservation from [%s]: %s with different TYPE "
"and/or SCOPE while reservation already held by"
" [%s]: %s, returning RESERVATION_CONFLICT\n",
- CMD_TFO(cmd)->get_fabric_name(),
+ cmd->se_tfo->get_fabric_name(),
se_sess->se_node_acl->initiatorname,
- TPG_TFO(pr_res_nacl->se_tpg)->get_fabric_name(),
+ pr_res_nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
pr_res_holder->pr_reg_nacl->initiatorname);
spin_unlock(&dev->dev_reservation_lock);
@@ -2758,11 +2764,11 @@ static int core_scsi3_emulate_pro_release(
write_aptpl:
if (pr_tmpl->pr_aptpl_active) {
- ret = core_scsi3_update_and_write_aptpl(SE_DEV(cmd),
+ ret = core_scsi3_update_and_write_aptpl(cmd->se_dev,
&pr_reg->pr_aptpl_buf[0],
pr_tmpl->pr_aptpl_buf_len);
- if (!(ret))
- printk("SPC-3 PR: Updated APTPL metadata for RELEASE\n");
+ if (!ret)
+ pr_debug("SPC-3 PR: Updated APTPL metadata for RELEASE\n");
}
core_scsi3_put_pr_reg(pr_reg);
@@ -2775,18 +2781,18 @@ static int core_scsi3_emulate_pro_clear(
{
struct se_device *dev = cmd->se_dev;
struct se_node_acl *pr_reg_nacl;
- struct se_session *se_sess = SE_SESS(cmd);
- struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
+ struct se_session *se_sess = cmd->se_sess;
+ struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_reg_n, *pr_res_holder;
u32 pr_res_mapped_lun = 0;
int calling_it_nexus = 0;
/*
* Locate the existing *pr_reg via struct se_node_acl pointers
*/
- pr_reg_n = core_scsi3_locate_pr_reg(SE_DEV(cmd),
+ pr_reg_n = core_scsi3_locate_pr_reg(cmd->se_dev,
se_sess->se_node_acl, se_sess);
- if (!(pr_reg_n)) {
- printk(KERN_ERR "SPC-3 PR: Unable to locate"
+ if (!pr_reg_n) {
+ pr_err("SPC-3 PR: Unable to locate"
" PR_REGISTERED *pr_reg for CLEAR\n");
return PYX_TRANSPORT_LU_COMM_FAILURE;
}
@@ -2802,7 +2808,7 @@ static int core_scsi3_emulate_pro_clear(
* that is registered with the logical unit for the I_T nexus.
*/
if (res_key != pr_reg_n->pr_res_key) {
- printk(KERN_ERR "SPC-3 PR REGISTER: Received"
+ pr_err("SPC-3 PR REGISTER: Received"
" res_key: 0x%016Lx does not match"
" existing SA REGISTER res_key:"
" 0x%016Lx\n", res_key, pr_reg_n->pr_res_key);
@@ -2839,18 +2845,18 @@ static int core_scsi3_emulate_pro_clear(
* command with CLEAR service action was received, with the
* additional sense code set to RESERVATIONS PREEMPTED.
*/
- if (!(calling_it_nexus))
+ if (!calling_it_nexus)
core_scsi3_ua_allocate(pr_reg_nacl, pr_res_mapped_lun,
0x2A, ASCQ_2AH_RESERVATIONS_PREEMPTED);
}
spin_unlock(&pr_tmpl->registration_lock);
- printk(KERN_INFO "SPC-3 PR [%s] Service Action: CLEAR complete\n",
- CMD_TFO(cmd)->get_fabric_name());
+ pr_debug("SPC-3 PR [%s] Service Action: CLEAR complete\n",
+ cmd->se_tfo->get_fabric_name());
if (pr_tmpl->pr_aptpl_active) {
- core_scsi3_update_and_write_aptpl(SE_DEV(cmd), NULL, 0);
- printk(KERN_INFO "SPC-3 PR: Updated APTPL metadata"
+ core_scsi3_update_and_write_aptpl(cmd->se_dev, NULL, 0);
+ pr_debug("SPC-3 PR: Updated APTPL metadata"
" for CLEAR\n");
}
@@ -2889,12 +2895,12 @@ static void __core_scsi3_complete_pro_preempt(
pr_reg->pr_res_type = type;
pr_reg->pr_res_scope = scope;
- printk(KERN_INFO "SPC-3 PR [%s] Service Action: PREEMPT%s created new"
+ pr_debug("SPC-3 PR [%s] Service Action: PREEMPT%s created new"
" reservation holder TYPE: %s ALL_TG_PT: %d\n",
tfo->get_fabric_name(), (abort) ? "_AND_ABORT" : "",
core_scsi3_pr_dump_type(type),
(pr_reg->pr_reg_all_tg_pt) ? 1 : 0);
- printk(KERN_INFO "SPC-3 PR [%s] PREEMPT%s from Node: %s%s\n",
+ pr_debug("SPC-3 PR [%s] PREEMPT%s from Node: %s%s\n",
tfo->get_fabric_name(), (abort) ? "_AND_ABORT" : "",
nacl->initiatorname, (prf_isid) ? &i_buf[0] : "");
/*
@@ -2920,7 +2926,7 @@ static void core_scsi3_release_preempt_and_abort(
if (pr_reg_holder == pr_reg)
continue;
if (pr_reg->pr_res_holder) {
- printk(KERN_WARNING "pr_reg->pr_res_holder still set\n");
+ pr_warn("pr_reg->pr_res_holder still set\n");
continue;
}
@@ -2954,25 +2960,25 @@ static int core_scsi3_pro_preempt(
u64 sa_res_key,
int abort)
{
- struct se_device *dev = SE_DEV(cmd);
+ struct se_device *dev = cmd->se_dev;
struct se_dev_entry *se_deve;
struct se_node_acl *pr_reg_nacl;
- struct se_session *se_sess = SE_SESS(cmd);
+ struct se_session *se_sess = cmd->se_sess;
struct list_head preempt_and_abort_list;
struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_reg_n, *pr_res_holder;
- struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
+ struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
u32 pr_res_mapped_lun = 0;
int all_reg = 0, calling_it_nexus = 0, released_regs = 0;
int prh_type = 0, prh_scope = 0, ret;
- if (!(se_sess))
+ if (!se_sess)
return PYX_TRANSPORT_LU_COMM_FAILURE;
se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
- pr_reg_n = core_scsi3_locate_pr_reg(SE_DEV(cmd), se_sess->se_node_acl,
+ pr_reg_n = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl,
se_sess);
- if (!(pr_reg_n)) {
- printk(KERN_ERR "SPC-3 PR: Unable to locate"
+ if (!pr_reg_n) {
+ pr_err("SPC-3 PR: Unable to locate"
" PR_REGISTERED *pr_reg for PREEMPT%s\n",
(abort) ? "_AND_ABORT" : "");
return PYX_TRANSPORT_RESERVATION_CONFLICT;
@@ -2982,7 +2988,7 @@ static int core_scsi3_pro_preempt(
return PYX_TRANSPORT_RESERVATION_CONFLICT;
}
if (scope != PR_SCOPE_LU_SCOPE) {
- printk(KERN_ERR "SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope);
+ pr_err("SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope);
core_scsi3_put_pr_reg(pr_reg_n);
return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
}
@@ -2995,7 +3001,7 @@ static int core_scsi3_pro_preempt(
(pr_res_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)))
all_reg = 1;
- if (!(all_reg) && !(sa_res_key)) {
+ if (!all_reg && !sa_res_key) {
spin_unlock(&dev->dev_reservation_lock);
core_scsi3_put_pr_reg(pr_reg_n);
return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
@@ -3009,7 +3015,7 @@ static int core_scsi3_pro_preempt(
* server shall perform a preempt by doing the following in an
* uninterrupted series of actions. (See below..)
*/
- if (!(pr_res_holder) || (pr_res_holder->pr_res_key != sa_res_key)) {
+ if (!pr_res_holder || (pr_res_holder->pr_res_key != sa_res_key)) {
/*
* No existing or SA Reservation Key matching reservations..
*
@@ -3036,7 +3042,7 @@ static int core_scsi3_pro_preempt(
* was received, with the additional sense code set
* to REGISTRATIONS PREEMPTED.
*/
- if (!(all_reg)) {
+ if (!all_reg) {
if (pr_reg->pr_res_key != sa_res_key)
continue;
@@ -3076,7 +3082,7 @@ static int core_scsi3_pro_preempt(
NULL, 0);
released_regs++;
}
- if (!(calling_it_nexus))
+ if (!calling_it_nexus)
core_scsi3_ua_allocate(pr_reg_nacl,
pr_res_mapped_lun, 0x2A,
ASCQ_2AH_RESERVATIONS_PREEMPTED);
@@ -3089,7 +3095,7 @@ static int core_scsi3_pro_preempt(
* registered reservation key, then the device server shall
* complete the command with RESERVATION CONFLICT status.
*/
- if (!(released_regs)) {
+ if (!released_regs) {
spin_unlock(&dev->dev_reservation_lock);
core_scsi3_put_pr_reg(pr_reg_n);
return PYX_TRANSPORT_RESERVATION_CONFLICT;
@@ -3111,17 +3117,17 @@ static int core_scsi3_pro_preempt(
spin_unlock(&dev->dev_reservation_lock);
if (pr_tmpl->pr_aptpl_active) {
- ret = core_scsi3_update_and_write_aptpl(SE_DEV(cmd),
+ ret = core_scsi3_update_and_write_aptpl(cmd->se_dev,
&pr_reg_n->pr_aptpl_buf[0],
pr_tmpl->pr_aptpl_buf_len);
- if (!(ret))
- printk(KERN_INFO "SPC-3 PR: Updated APTPL"
+ if (!ret)
+ pr_debug("SPC-3 PR: Updated APTPL"
" metadata for PREEMPT%s\n", (abort) ?
"_AND_ABORT" : "");
}
core_scsi3_put_pr_reg(pr_reg_n);
- core_scsi3_pr_generation(SE_DEV(cmd));
+ core_scsi3_pr_generation(cmd->se_dev);
return 0;
}
/*
@@ -3247,16 +3253,16 @@ static int core_scsi3_pro_preempt(
}
if (pr_tmpl->pr_aptpl_active) {
- ret = core_scsi3_update_and_write_aptpl(SE_DEV(cmd),
+ ret = core_scsi3_update_and_write_aptpl(cmd->se_dev,
&pr_reg_n->pr_aptpl_buf[0],
pr_tmpl->pr_aptpl_buf_len);
- if (!(ret))
- printk("SPC-3 PR: Updated APTPL metadata for PREEMPT"
+ if (!ret)
+ pr_debug("SPC-3 PR: Updated APTPL metadata for PREEMPT"
"%s\n", (abort) ? "_AND_ABORT" : "");
}
core_scsi3_put_pr_reg(pr_reg_n);
- core_scsi3_pr_generation(SE_DEV(cmd));
+ core_scsi3_pr_generation(cmd->se_dev);
return 0;
}
@@ -3281,7 +3287,7 @@ static int core_scsi3_emulate_pro_preempt(
res_key, sa_res_key, abort);
break;
default:
- printk(KERN_ERR "SPC-3 PR: Unknown Service Action PREEMPT%s"
+ pr_err("SPC-3 PR: Unknown Service Action PREEMPT%s"
" Type: 0x%02x\n", (abort) ? "_AND_ABORT" : "", type);
return PYX_TRANSPORT_INVALID_CDB_FIELD;
}
@@ -3297,17 +3303,17 @@ static int core_scsi3_emulate_pro_register_and_move(
int aptpl,
int unreg)
{
- struct se_session *se_sess = SE_SESS(cmd);
- struct se_device *dev = SE_DEV(cmd);
+ struct se_session *se_sess = cmd->se_sess;
+ struct se_device *dev = cmd->se_dev;
struct se_dev_entry *se_deve, *dest_se_deve = NULL;
- struct se_lun *se_lun = SE_LUN(cmd);
+ struct se_lun *se_lun = cmd->se_lun;
struct se_node_acl *pr_res_nacl, *pr_reg_nacl, *dest_node_acl = NULL;
struct se_port *se_port;
struct se_portal_group *se_tpg, *dest_se_tpg = NULL;
struct target_core_fabric_ops *dest_tf_ops = NULL, *tf_ops;
struct t10_pr_registration *pr_reg, *pr_res_holder, *dest_pr_reg;
- struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
- unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf;
+ struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
+ unsigned char *buf;
unsigned char *initiator_str;
char *iport_ptr = NULL, dest_iport[64], i_buf[PR_REG_ISID_ID_LEN];
u32 tid_len, tmp_tid_len;
@@ -3315,14 +3321,14 @@ static int core_scsi3_emulate_pro_register_and_move(
unsigned short rtpi;
unsigned char proto_ident;
- if (!(se_sess) || !(se_lun)) {
- printk(KERN_ERR "SPC-3 PR: se_sess || struct se_lun is NULL!\n");
+ if (!se_sess || !se_lun) {
+ pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
return PYX_TRANSPORT_LU_COMM_FAILURE;
}
memset(dest_iport, 0, 64);
memset(i_buf, 0, PR_REG_ISID_ID_LEN);
se_tpg = se_sess->se_tpg;
- tf_ops = TPG_TFO(se_tpg);
+ tf_ops = se_tpg->se_tpg_tfo;
se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
/*
* Follow logic from spc4r17 Section 5.7.8, Table 50 --
@@ -3330,10 +3336,10 @@ static int core_scsi3_emulate_pro_register_and_move(
*
* Locate the existing *pr_reg via struct se_node_acl pointers
*/
- pr_reg = core_scsi3_locate_pr_reg(SE_DEV(cmd), se_sess->se_node_acl,
+ pr_reg = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl,
se_sess);
- if (!(pr_reg)) {
- printk(KERN_ERR "SPC-3 PR: Unable to locate PR_REGISTERED"
+ if (!pr_reg) {
+ pr_err("SPC-3 PR: Unable to locate PR_REGISTERED"
" *pr_reg for REGISTER_AND_MOVE\n");
return PYX_TRANSPORT_LU_COMM_FAILURE;
}
@@ -3342,7 +3348,7 @@ static int core_scsi3_emulate_pro_register_and_move(
* provided during this initiator's I_T nexus registration.
*/
if (res_key != pr_reg->pr_res_key) {
- printk(KERN_WARNING "SPC-3 PR REGISTER_AND_MOVE: Received"
+ pr_warn("SPC-3 PR REGISTER_AND_MOVE: Received"
" res_key: 0x%016Lx does not match existing SA REGISTER"
" res_key: 0x%016Lx\n", res_key, pr_reg->pr_res_key);
core_scsi3_put_pr_reg(pr_reg);
@@ -3351,26 +3357,30 @@ static int core_scsi3_emulate_pro_register_and_move(
/*
* The service action reservation key needs to be non-zero
*/
- if (!(sa_res_key)) {
- printk(KERN_WARNING "SPC-3 PR REGISTER_AND_MOVE: Received zero"
+ if (!sa_res_key) {
+ pr_warn("SPC-3 PR REGISTER_AND_MOVE: Received zero"
" sa_res_key\n");
core_scsi3_put_pr_reg(pr_reg);
return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
}
+
/*
* Determine the Relative Target Port Identifier where the reservation
* will be moved to for the TransportID containing SCSI initiator WWN
* information.
*/
+ buf = transport_kmap_first_data_page(cmd);
rtpi = (buf[18] & 0xff) << 8;
rtpi |= buf[19] & 0xff;
tid_len = (buf[20] & 0xff) << 24;
tid_len |= (buf[21] & 0xff) << 16;
tid_len |= (buf[22] & 0xff) << 8;
tid_len |= buf[23] & 0xff;
+ transport_kunmap_first_data_page(cmd);
+ buf = NULL;
if ((tid_len + 24) != cmd->data_length) {
- printk(KERN_ERR "SPC-3 PR: Illegal tid_len: %u + 24 byte header"
+ pr_err("SPC-3 PR: Illegal tid_len: %u + 24 byte header"
" does not equal CDB data_length: %u\n", tid_len,
cmd->data_length);
core_scsi3_put_pr_reg(pr_reg);
@@ -3382,10 +3392,10 @@ static int core_scsi3_emulate_pro_register_and_move(
if (se_port->sep_rtpi != rtpi)
continue;
dest_se_tpg = se_port->sep_tpg;
- if (!(dest_se_tpg))
+ if (!dest_se_tpg)
continue;
- dest_tf_ops = TPG_TFO(dest_se_tpg);
- if (!(dest_tf_ops))
+ dest_tf_ops = dest_se_tpg->se_tpg_tfo;
+ if (!dest_tf_ops)
continue;
atomic_inc(&dest_se_tpg->tpg_pr_ref_count);
@@ -3394,7 +3404,7 @@ static int core_scsi3_emulate_pro_register_and_move(
ret = core_scsi3_tpg_depend_item(dest_se_tpg);
if (ret != 0) {
- printk(KERN_ERR "core_scsi3_tpg_depend_item() failed"
+ pr_err("core_scsi3_tpg_depend_item() failed"
" for dest_se_tpg\n");
atomic_dec(&dest_se_tpg->tpg_pr_ref_count);
smp_mb__after_atomic_dec();
@@ -3407,20 +3417,22 @@ static int core_scsi3_emulate_pro_register_and_move(
}
spin_unlock(&dev->se_port_lock);
- if (!(dest_se_tpg) || (!dest_tf_ops)) {
- printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: Unable to locate"
+ if (!dest_se_tpg || !dest_tf_ops) {
+ pr_err("SPC-3 PR REGISTER_AND_MOVE: Unable to locate"
" fabric ops from Relative Target Port Identifier:"
" %hu\n", rtpi);
core_scsi3_put_pr_reg(pr_reg);
return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
}
+
+ buf = transport_kmap_first_data_page(cmd);
proto_ident = (buf[24] & 0x0f);
#if 0
- printk("SPC-3 PR REGISTER_AND_MOVE: Extracted Protocol Identifier:"
+ pr_debug("SPC-3 PR REGISTER_AND_MOVE: Extracted Protocol Identifier:"
" 0x%02x\n", proto_ident);
#endif
if (proto_ident != dest_tf_ops->get_fabric_proto_ident(dest_se_tpg)) {
- printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: Received"
+ pr_err("SPC-3 PR REGISTER_AND_MOVE: Received"
" proto_ident: 0x%02x does not match ident: 0x%02x"
" from fabric: %s\n", proto_ident,
dest_tf_ops->get_fabric_proto_ident(dest_se_tpg),
@@ -3429,7 +3441,7 @@ static int core_scsi3_emulate_pro_register_and_move(
goto out;
}
if (dest_tf_ops->tpg_parse_pr_out_transport_id == NULL) {
- printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: Fabric does not"
+ pr_err("SPC-3 PR REGISTER_AND_MOVE: Fabric does not"
" containg a valid tpg_parse_pr_out_transport_id"
" function pointer\n");
ret = PYX_TRANSPORT_LU_COMM_FAILURE;
@@ -3437,14 +3449,17 @@ static int core_scsi3_emulate_pro_register_and_move(
}
initiator_str = dest_tf_ops->tpg_parse_pr_out_transport_id(dest_se_tpg,
(const char *)&buf[24], &tmp_tid_len, &iport_ptr);
- if (!(initiator_str)) {
- printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: Unable to locate"
+ if (!initiator_str) {
+ pr_err("SPC-3 PR REGISTER_AND_MOVE: Unable to locate"
" initiator_str from Transport ID\n");
ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
goto out;
}
- printk(KERN_INFO "SPC-3 PR [%s] Extracted initiator %s identifier: %s"
+ transport_kunmap_first_data_page(cmd);
+ buf = NULL;
+
+ pr_debug("SPC-3 PR [%s] Extracted initiator %s identifier: %s"
" %s\n", dest_tf_ops->get_fabric_name(), (iport_ptr != NULL) ?
"port" : "device", initiator_str, (iport_ptr != NULL) ?
iport_ptr : "");
@@ -3459,18 +3474,18 @@ static int core_scsi3_emulate_pro_register_and_move(
pr_reg_nacl = pr_reg->pr_reg_nacl;
matching_iname = (!strcmp(initiator_str,
pr_reg_nacl->initiatorname)) ? 1 : 0;
- if (!(matching_iname))
+ if (!matching_iname)
goto after_iport_check;
- if (!(iport_ptr) || !(pr_reg->isid_present_at_reg)) {
- printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: TransportID: %s"
+ if (!iport_ptr || !pr_reg->isid_present_at_reg) {
+ pr_err("SPC-3 PR REGISTER_AND_MOVE: TransportID: %s"
" matches: %s on received I_T Nexus\n", initiator_str,
pr_reg_nacl->initiatorname);
ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
goto out;
}
- if (!(strcmp(iport_ptr, pr_reg->pr_reg_isid))) {
- printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: TransportID: %s %s"
+ if (!strcmp(iport_ptr, pr_reg->pr_reg_isid)) {
+ pr_err("SPC-3 PR REGISTER_AND_MOVE: TransportID: %s %s"
" matches: %s %s on received I_T Nexus\n",
initiator_str, iport_ptr, pr_reg_nacl->initiatorname,
pr_reg->pr_reg_isid);
@@ -3490,8 +3505,8 @@ after_iport_check:
}
spin_unlock_bh(&dest_se_tpg->acl_node_lock);
- if (!(dest_node_acl)) {
- printk(KERN_ERR "Unable to locate %s dest_node_acl for"
+ if (!dest_node_acl) {
+ pr_err("Unable to locate %s dest_node_acl for"
" TransportID%s\n", dest_tf_ops->get_fabric_name(),
initiator_str);
ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
@@ -3499,7 +3514,7 @@ after_iport_check:
}
ret = core_scsi3_nodeacl_depend_item(dest_node_acl);
if (ret != 0) {
- printk(KERN_ERR "core_scsi3_nodeacl_depend_item() for"
+ pr_err("core_scsi3_nodeacl_depend_item() for"
" dest_node_acl\n");
atomic_dec(&dest_node_acl->acl_pr_ref_count);
smp_mb__after_atomic_dec();
@@ -3508,7 +3523,7 @@ after_iport_check:
goto out;
}
#if 0
- printk(KERN_INFO "SPC-3 PR REGISTER_AND_MOVE: Found %s dest_node_acl:"
+ pr_debug("SPC-3 PR REGISTER_AND_MOVE: Found %s dest_node_acl:"
" %s from TransportID\n", dest_tf_ops->get_fabric_name(),
dest_node_acl->initiatorname);
#endif
@@ -3517,8 +3532,8 @@ after_iport_check:
* PORT IDENTIFIER.
*/
dest_se_deve = core_get_se_deve_from_rtpi(dest_node_acl, rtpi);
- if (!(dest_se_deve)) {
- printk(KERN_ERR "Unable to locate %s dest_se_deve from RTPI:"
+ if (!dest_se_deve) {
+ pr_err("Unable to locate %s dest_se_deve from RTPI:"
" %hu\n", dest_tf_ops->get_fabric_name(), rtpi);
ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
goto out;
@@ -3526,7 +3541,7 @@ after_iport_check:
ret = core_scsi3_lunacl_depend_item(dest_se_deve);
if (ret < 0) {
- printk(KERN_ERR "core_scsi3_lunacl_depend_item() failed\n");
+ pr_err("core_scsi3_lunacl_depend_item() failed\n");
atomic_dec(&dest_se_deve->pr_ref_count);
smp_mb__after_atomic_dec();
dest_se_deve = NULL;
@@ -3534,7 +3549,7 @@ after_iport_check:
goto out;
}
#if 0
- printk(KERN_INFO "SPC-3 PR REGISTER_AND_MOVE: Located %s node %s LUN"
+ pr_debug("SPC-3 PR REGISTER_AND_MOVE: Located %s node %s LUN"
" ACL for dest_se_deve->mapped_lun: %u\n",
dest_tf_ops->get_fabric_name(), dest_node_acl->initiatorname,
dest_se_deve->mapped_lun);
@@ -3545,8 +3560,8 @@ after_iport_check:
*/
spin_lock(&dev->dev_reservation_lock);
pr_res_holder = dev->dev_pr_res_holder;
- if (!(pr_res_holder)) {
- printk(KERN_WARNING "SPC-3 PR REGISTER_AND_MOVE: No reservation"
+ if (!pr_res_holder) {
+ pr_warn("SPC-3 PR REGISTER_AND_MOVE: No reservation"
" currently held\n");
spin_unlock(&dev->dev_reservation_lock);
ret = PYX_TRANSPORT_INVALID_CDB_FIELD;
@@ -3559,7 +3574,7 @@ after_iport_check:
* Register behaviors for a REGISTER AND MOVE service action
*/
if (pr_res_holder != pr_reg) {
- printk(KERN_WARNING "SPC-3 PR REGISTER_AND_MOVE: Calling I_T"
+ pr_warn("SPC-3 PR REGISTER_AND_MOVE: Calling I_T"
" Nexus is not reservation holder\n");
spin_unlock(&dev->dev_reservation_lock);
ret = PYX_TRANSPORT_RESERVATION_CONFLICT;
@@ -3576,7 +3591,7 @@ after_iport_check:
*/
if ((pr_res_holder->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) ||
(pr_res_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)) {
- printk(KERN_WARNING "SPC-3 PR REGISTER_AND_MOVE: Unable to move"
+ pr_warn("SPC-3 PR REGISTER_AND_MOVE: Unable to move"
" reservation for type: %s\n",
core_scsi3_pr_dump_type(pr_res_holder->pr_res_type));
spin_unlock(&dev->dev_reservation_lock);
@@ -3611,8 +3626,8 @@ after_iport_check:
*/
dest_pr_reg = __core_scsi3_locate_pr_reg(dev, dest_node_acl,
iport_ptr);
- if (!(dest_pr_reg)) {
- ret = core_scsi3_alloc_registration(SE_DEV(cmd),
+ if (!dest_pr_reg) {
+ ret = core_scsi3_alloc_registration(cmd->se_dev,
dest_node_acl, dest_se_deve, iport_ptr,
sa_res_key, 0, aptpl, 2, 1);
if (ret != 0) {
@@ -3644,16 +3659,16 @@ after_iport_check:
/*
* Increment PRGeneration for existing registrations..
*/
- if (!(new_reg))
+ if (!new_reg)
dest_pr_reg->pr_res_generation = pr_tmpl->pr_generation++;
spin_unlock(&dev->dev_reservation_lock);
- printk(KERN_INFO "SPC-3 PR [%s] Service Action: REGISTER_AND_MOVE"
+ pr_debug("SPC-3 PR [%s] Service Action: REGISTER_AND_MOVE"
" created new reservation holder TYPE: %s on object RTPI:"
" %hu PRGeneration: 0x%08x\n", dest_tf_ops->get_fabric_name(),
core_scsi3_pr_dump_type(type), rtpi,
dest_pr_reg->pr_res_generation);
- printk(KERN_INFO "SPC-3 PR Successfully moved reservation from"
+ pr_debug("SPC-3 PR Successfully moved reservation from"
" %s Fabric Node: %s%s -> %s Fabric Node: %s %s\n",
tf_ops->get_fabric_name(), pr_reg_nacl->initiatorname,
(prf_isid) ? &i_buf[0] : "", dest_tf_ops->get_fabric_name(),
@@ -3681,24 +3696,28 @@ after_iport_check:
* Clear the APTPL metadata if APTPL has been disabled, otherwise
* write out the updated metadata to struct file for this SCSI device.
*/
- if (!(aptpl)) {
+ if (!aptpl) {
pr_tmpl->pr_aptpl_active = 0;
- core_scsi3_update_and_write_aptpl(SE_DEV(cmd), NULL, 0);
- printk("SPC-3 PR: Set APTPL Bit Deactivated for"
+ core_scsi3_update_and_write_aptpl(cmd->se_dev, NULL, 0);
+ pr_debug("SPC-3 PR: Set APTPL Bit Deactivated for"
" REGISTER_AND_MOVE\n");
} else {
pr_tmpl->pr_aptpl_active = 1;
- ret = core_scsi3_update_and_write_aptpl(SE_DEV(cmd),
+ ret = core_scsi3_update_and_write_aptpl(cmd->se_dev,
&dest_pr_reg->pr_aptpl_buf[0],
pr_tmpl->pr_aptpl_buf_len);
- if (!(ret))
- printk("SPC-3 PR: Set APTPL Bit Activated for"
+ if (!ret)
+ pr_debug("SPC-3 PR: Set APTPL Bit Activated for"
" REGISTER_AND_MOVE\n");
}
+ transport_kunmap_first_data_page(cmd);
+
core_scsi3_put_pr_reg(dest_pr_reg);
return 0;
out:
+ if (buf)
+ transport_kunmap_first_data_page(cmd);
if (dest_se_deve)
core_scsi3_lunacl_undepend_item(dest_se_deve);
if (dest_node_acl)
@@ -3723,7 +3742,7 @@ static unsigned long long core_scsi3_extract_reservation_key(unsigned char *cdb)
*/
static int core_scsi3_emulate_pr_out(struct se_cmd *cmd, unsigned char *cdb)
{
- unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf;
+ unsigned char *buf;
u64 res_key, sa_res_key;
int sa, scope, type, aptpl;
int spec_i_pt = 0, all_tg_pt = 0, unreg = 0;
@@ -3731,11 +3750,11 @@ static int core_scsi3_emulate_pr_out(struct se_cmd *cmd, unsigned char *cdb)
* FIXME: A NULL struct se_session pointer means this is not coming from
* a $FABRIC_MOD's nexus, but from internal passthrough ops.
*/
- if (!(SE_SESS(cmd)))
+ if (!cmd->se_sess)
return PYX_TRANSPORT_LU_COMM_FAILURE;
if (cmd->data_length < 24) {
- printk(KERN_WARNING "SPC-PR: Received PR OUT parameter list"
+ pr_warn("SPC-PR: Received PR OUT parameter list"
" length too small: %u\n", cmd->data_length);
return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
}
@@ -3745,6 +3764,8 @@ static int core_scsi3_emulate_pr_out(struct se_cmd *cmd, unsigned char *cdb)
sa = (cdb[1] & 0x1f);
scope = (cdb[2] & 0xf0);
type = (cdb[2] & 0x0f);
+
+ buf = transport_kmap_first_data_page(cmd);
/*
* From PERSISTENT_RESERVE_OUT parameter list (payload)
*/
@@ -3762,6 +3783,9 @@ static int core_scsi3_emulate_pr_out(struct se_cmd *cmd, unsigned char *cdb)
aptpl = (buf[17] & 0x01);
unreg = (buf[17] & 0x02);
}
+ transport_kunmap_first_data_page(cmd);
+ buf = NULL;
+
/*
* SPEC_I_PT=1 is only valid for Service action: REGISTER
*/
@@ -3776,9 +3800,9 @@ static int core_scsi3_emulate_pr_out(struct se_cmd *cmd, unsigned char *cdb)
* the sense key set to ILLEGAL REQUEST, and the additional sense
* code set to PARAMETER LIST LENGTH ERROR.
*/
- if (!(spec_i_pt) && ((cdb[1] & 0x1f) != PRO_REGISTER_AND_MOVE) &&
+ if (!spec_i_pt && ((cdb[1] & 0x1f) != PRO_REGISTER_AND_MOVE) &&
(cmd->data_length != 24)) {
- printk(KERN_WARNING "SPC-PR: Received PR OUT illegal parameter"
+ pr_warn("SPC-PR: Received PR OUT illegal parameter"
" list length: %u\n", cmd->data_length);
return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
}
@@ -3812,7 +3836,7 @@ static int core_scsi3_emulate_pr_out(struct se_cmd *cmd, unsigned char *cdb)
return core_scsi3_emulate_pro_register_and_move(cmd, res_key,
sa_res_key, aptpl, unreg);
default:
- printk(KERN_ERR "Unknown PERSISTENT_RESERVE_OUT service"
+ pr_err("Unknown PERSISTENT_RESERVE_OUT service"
" action: 0x%02x\n", cdb[1] & 0x1f);
return PYX_TRANSPORT_INVALID_CDB_FIELD;
}
@@ -3827,25 +3851,26 @@ static int core_scsi3_emulate_pr_out(struct se_cmd *cmd, unsigned char *cdb)
*/
static int core_scsi3_pri_read_keys(struct se_cmd *cmd)
{
- struct se_device *se_dev = SE_DEV(cmd);
- struct se_subsystem_dev *su_dev = SU_DEV(se_dev);
+ struct se_device *se_dev = cmd->se_dev;
+ struct se_subsystem_dev *su_dev = se_dev->se_sub_dev;
struct t10_pr_registration *pr_reg;
- unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf;
+ unsigned char *buf;
u32 add_len = 0, off = 8;
if (cmd->data_length < 8) {
- printk(KERN_ERR "PRIN SA READ_KEYS SCSI Data Length: %u"
+ pr_err("PRIN SA READ_KEYS SCSI Data Length: %u"
" too small\n", cmd->data_length);
return PYX_TRANSPORT_INVALID_CDB_FIELD;
}
- buf[0] = ((T10_RES(su_dev)->pr_generation >> 24) & 0xff);
- buf[1] = ((T10_RES(su_dev)->pr_generation >> 16) & 0xff);
- buf[2] = ((T10_RES(su_dev)->pr_generation >> 8) & 0xff);
- buf[3] = (T10_RES(su_dev)->pr_generation & 0xff);
+ buf = transport_kmap_first_data_page(cmd);
+ buf[0] = ((su_dev->t10_pr.pr_generation >> 24) & 0xff);
+ buf[1] = ((su_dev->t10_pr.pr_generation >> 16) & 0xff);
+ buf[2] = ((su_dev->t10_pr.pr_generation >> 8) & 0xff);
+ buf[3] = (su_dev->t10_pr.pr_generation & 0xff);
- spin_lock(&T10_RES(su_dev)->registration_lock);
- list_for_each_entry(pr_reg, &T10_RES(su_dev)->registration_list,
+ spin_lock(&su_dev->t10_pr.registration_lock);
+ list_for_each_entry(pr_reg, &su_dev->t10_pr.registration_list,
pr_reg_list) {
/*
* Check for overflow of 8byte PRI READ_KEYS payload and
@@ -3865,13 +3890,15 @@ static int core_scsi3_pri_read_keys(struct se_cmd *cmd)
add_len += 8;
}
- spin_unlock(&T10_RES(su_dev)->registration_lock);
+ spin_unlock(&su_dev->t10_pr.registration_lock);
buf[4] = ((add_len >> 24) & 0xff);
buf[5] = ((add_len >> 16) & 0xff);
buf[6] = ((add_len >> 8) & 0xff);
buf[7] = (add_len & 0xff);
+ transport_kunmap_first_data_page(cmd);
+
return 0;
}
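/*
 * The four shift-and-mask stores above build the big-endian READ_KEYS
 * header by hand.  An equivalent sketch using put_unaligned_be32() from
 * <asm/unaligned.h>, which this patch does not use but which expresses
 * the same byte layout; the helper name below is hypothetical.
 */
static void example_read_keys_header(unsigned char *buf, u32 pr_generation,
				     u32 add_len)
{
	put_unaligned_be32(pr_generation, &buf[0]);	/* PRGENERATION */
	put_unaligned_be32(add_len, &buf[4]);		/* ADDITIONAL LENGTH */
}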
@@ -3882,23 +3909,24 @@ static int core_scsi3_pri_read_keys(struct se_cmd *cmd)
*/
static int core_scsi3_pri_read_reservation(struct se_cmd *cmd)
{
- struct se_device *se_dev = SE_DEV(cmd);
- struct se_subsystem_dev *su_dev = SU_DEV(se_dev);
+ struct se_device *se_dev = cmd->se_dev;
+ struct se_subsystem_dev *su_dev = se_dev->se_sub_dev;
struct t10_pr_registration *pr_reg;
- unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf;
+ unsigned char *buf;
u64 pr_res_key;
u32 add_len = 16; /* Hardcoded to 16 when a reservation is held. */
if (cmd->data_length < 8) {
- printk(KERN_ERR "PRIN SA READ_RESERVATIONS SCSI Data Length: %u"
+ pr_err("PRIN SA READ_RESERVATIONS SCSI Data Length: %u"
" too small\n", cmd->data_length);
return PYX_TRANSPORT_INVALID_CDB_FIELD;
}
- buf[0] = ((T10_RES(su_dev)->pr_generation >> 24) & 0xff);
- buf[1] = ((T10_RES(su_dev)->pr_generation >> 16) & 0xff);
- buf[2] = ((T10_RES(su_dev)->pr_generation >> 8) & 0xff);
- buf[3] = (T10_RES(su_dev)->pr_generation & 0xff);
+ buf = transport_kmap_first_data_page(cmd);
+ buf[0] = ((su_dev->t10_pr.pr_generation >> 24) & 0xff);
+ buf[1] = ((su_dev->t10_pr.pr_generation >> 16) & 0xff);
+ buf[2] = ((su_dev->t10_pr.pr_generation >> 8) & 0xff);
+ buf[3] = (su_dev->t10_pr.pr_generation & 0xff);
spin_lock(&se_dev->dev_reservation_lock);
pr_reg = se_dev->dev_pr_res_holder;
@@ -3911,10 +3939,9 @@ static int core_scsi3_pri_read_reservation(struct se_cmd *cmd)
buf[6] = ((add_len >> 8) & 0xff);
buf[7] = (add_len & 0xff);
- if (cmd->data_length < 22) {
- spin_unlock(&se_dev->dev_reservation_lock);
- return 0;
- }
+ if (cmd->data_length < 22)
+ goto err;
+
/*
* Set the Reservation key.
*
@@ -3951,7 +3978,10 @@ static int core_scsi3_pri_read_reservation(struct se_cmd *cmd)
buf[21] = (pr_reg->pr_res_scope & 0xf0) |
(pr_reg->pr_res_type & 0x0f);
}
+
+err:
spin_unlock(&se_dev->dev_reservation_lock);
+ transport_kunmap_first_data_page(cmd);
return 0;
}
@@ -3963,17 +3993,19 @@ static int core_scsi3_pri_read_reservation(struct se_cmd *cmd)
*/
static int core_scsi3_pri_report_capabilities(struct se_cmd *cmd)
{
- struct se_device *dev = SE_DEV(cmd);
- struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
- unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf;
+ struct se_device *dev = cmd->se_dev;
+ struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
+ unsigned char *buf;
u16 add_len = 8; /* Hardcoded to 8. */
if (cmd->data_length < 6) {
- printk(KERN_ERR "PRIN SA REPORT_CAPABILITIES SCSI Data Length:"
+ pr_err("PRIN SA REPORT_CAPABILITIES SCSI Data Length:"
" %u too small\n", cmd->data_length);
return PYX_TRANSPORT_INVALID_CDB_FIELD;
}
+ buf = transport_kmap_first_data_page(cmd);
+
buf[0] = ((add_len >> 8) & 0xff);
buf[1] = (add_len & 0xff);
buf[2] |= 0x10; /* CRH: Compatible Reservation Handling bit. */
@@ -4004,6 +4036,8 @@ static int core_scsi3_pri_report_capabilities(struct se_cmd *cmd)
buf[4] |= 0x02; /* PR_TYPE_WRITE_EXCLUSIVE */
buf[5] |= 0x01; /* PR_TYPE_EXCLUSIVE_ACCESS_ALLREG */
+ transport_kunmap_first_data_page(cmd);
+
return 0;
}
@@ -4014,27 +4048,29 @@ static int core_scsi3_pri_report_capabilities(struct se_cmd *cmd)
*/
static int core_scsi3_pri_read_full_status(struct se_cmd *cmd)
{
- struct se_device *se_dev = SE_DEV(cmd);
+ struct se_device *se_dev = cmd->se_dev;
struct se_node_acl *se_nacl;
- struct se_subsystem_dev *su_dev = SU_DEV(se_dev);
+ struct se_subsystem_dev *su_dev = se_dev->se_sub_dev;
struct se_portal_group *se_tpg;
struct t10_pr_registration *pr_reg, *pr_reg_tmp;
- struct t10_reservation_template *pr_tmpl = &SU_DEV(se_dev)->t10_reservation;
- unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf;
+ struct t10_reservation *pr_tmpl = &se_dev->se_sub_dev->t10_pr;
+ unsigned char *buf;
u32 add_desc_len = 0, add_len = 0, desc_len, exp_desc_len;
u32 off = 8; /* off into first Full Status descriptor */
int format_code = 0;
if (cmd->data_length < 8) {
- printk(KERN_ERR "PRIN SA READ_FULL_STATUS SCSI Data Length: %u"
+ pr_err("PRIN SA READ_FULL_STATUS SCSI Data Length: %u"
" too small\n", cmd->data_length);
return PYX_TRANSPORT_INVALID_CDB_FIELD;
}
- buf[0] = ((T10_RES(su_dev)->pr_generation >> 24) & 0xff);
- buf[1] = ((T10_RES(su_dev)->pr_generation >> 16) & 0xff);
- buf[2] = ((T10_RES(su_dev)->pr_generation >> 8) & 0xff);
- buf[3] = (T10_RES(su_dev)->pr_generation & 0xff);
+ buf = transport_kmap_first_data_page(cmd);
+
+ buf[0] = ((su_dev->t10_pr.pr_generation >> 24) & 0xff);
+ buf[1] = ((su_dev->t10_pr.pr_generation >> 16) & 0xff);
+ buf[2] = ((su_dev->t10_pr.pr_generation >> 8) & 0xff);
+ buf[3] = (su_dev->t10_pr.pr_generation & 0xff);
spin_lock(&pr_tmpl->registration_lock);
list_for_each_entry_safe(pr_reg, pr_reg_tmp,
@@ -4051,11 +4087,11 @@ static int core_scsi3_pri_read_full_status(struct se_cmd *cmd)
* Determine expected length of $FABRIC_MOD specific
* TransportID full status descriptor.
*/
- exp_desc_len = TPG_TFO(se_tpg)->tpg_get_pr_transport_id_len(
+ exp_desc_len = se_tpg->se_tpg_tfo->tpg_get_pr_transport_id_len(
se_tpg, se_nacl, pr_reg, &format_code);
if ((exp_desc_len + add_len) > cmd->data_length) {
- printk(KERN_WARNING "SPC-3 PRIN READ_FULL_STATUS ran"
+ pr_warn("SPC-3 PRIN READ_FULL_STATUS ran"
" out of buffer: %d\n", cmd->data_length);
spin_lock(&pr_tmpl->registration_lock);
atomic_dec(&pr_reg->pr_res_holders);
@@ -4105,7 +4141,7 @@ static int core_scsi3_pri_read_full_status(struct se_cmd *cmd)
* bit is set to one, the contents of the RELATIVE TARGET PORT
* IDENTIFIER field are not defined by this standard.
*/
- if (!(pr_reg->pr_reg_all_tg_pt)) {
+ if (!pr_reg->pr_reg_all_tg_pt) {
struct se_port *port = pr_reg->pr_reg_tg_pt_lun->lun_sep;
buf[off++] = ((port->sep_rtpi >> 8) & 0xff);
@@ -4116,7 +4152,7 @@ static int core_scsi3_pri_read_full_status(struct se_cmd *cmd)
/*
* Now, have the $FABRIC_MOD fill in the protocol identifier
*/
- desc_len = TPG_TFO(se_tpg)->tpg_get_pr_transport_id(se_tpg,
+ desc_len = se_tpg->se_tpg_tfo->tpg_get_pr_transport_id(se_tpg,
se_nacl, pr_reg, &format_code, &buf[off+4]);
spin_lock(&pr_tmpl->registration_lock);
@@ -4150,6 +4186,8 @@ static int core_scsi3_pri_read_full_status(struct se_cmd *cmd)
buf[6] = ((add_len >> 8) & 0xff);
buf[7] = (add_len & 0xff);
+ transport_kunmap_first_data_page(cmd);
+
return 0;
}
@@ -4165,7 +4203,7 @@ static int core_scsi3_emulate_pr_in(struct se_cmd *cmd, unsigned char *cdb)
case PRI_READ_FULL_STATUS:
return core_scsi3_pri_read_full_status(cmd);
default:
- printk(KERN_ERR "Unknown PERSISTENT_RESERVE_IN service"
+ pr_err("Unknown PERSISTENT_RESERVE_IN service"
" action: 0x%02x\n", cdb[1] & 0x1f);
return PYX_TRANSPORT_INVALID_CDB_FIELD;
}
@@ -4174,7 +4212,7 @@ static int core_scsi3_emulate_pr_in(struct se_cmd *cmd, unsigned char *cdb)
int core_scsi3_emulate_pr(struct se_cmd *cmd)
{
- unsigned char *cdb = &T_TASK(cmd)->t_task_cdb[0];
+ unsigned char *cdb = &cmd->t_task_cdb[0];
struct se_device *dev = cmd->se_dev;
/*
* Following spc2r20 5.5.1 Reservations overview:
@@ -4186,7 +4224,7 @@ int core_scsi3_emulate_pr(struct se_cmd *cmd)
* CONFLICT status.
*/
if (dev->dev_flags & DF_SPC2_RESERVATIONS) {
- printk(KERN_ERR "Received PERSISTENT_RESERVE CDB while legacy"
+ pr_err("Received PERSISTENT_RESERVE CDB while legacy"
" SPC-2 reservation is held, returning"
" RESERVATION_CONFLICT\n");
return PYX_TRANSPORT_RESERVATION_CONFLICT;
@@ -4213,39 +4251,39 @@ static int core_pt_seq_non_holder(
int core_setup_reservations(struct se_device *dev, int force_pt)
{
struct se_subsystem_dev *su_dev = dev->se_sub_dev;
- struct t10_reservation_template *rest = &su_dev->t10_reservation;
+ struct t10_reservation *rest = &su_dev->t10_pr;
/*
* If this device is from Target_Core_Mod/pSCSI, use the reservations
* of the underlying SCSI hardware. In Linux/SCSI terms, this can
* cause a problem because libata and some SATA RAID HBAs appear
* under Linux/SCSI, but emulate reservations themselves.
*/
- if (((TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) &&
- !(DEV_ATTRIB(dev)->emulate_reservations)) || force_pt) {
+ if (((dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) &&
+ !(dev->se_sub_dev->se_dev_attrib.emulate_reservations)) || force_pt) {
rest->res_type = SPC_PASSTHROUGH;
rest->pr_ops.t10_reservation_check = &core_pt_reservation_check;
rest->pr_ops.t10_seq_non_holder = &core_pt_seq_non_holder;
- printk(KERN_INFO "%s: Using SPC_PASSTHROUGH, no reservation"
- " emulation\n", TRANSPORT(dev)->name);
+ pr_debug("%s: Using SPC_PASSTHROUGH, no reservation"
+ " emulation\n", dev->transport->name);
return 0;
}
/*
* If SPC-3 or above is reported by real or emulated struct se_device,
* use emulated Persistent Reservations.
*/
- if (TRANSPORT(dev)->get_device_rev(dev) >= SCSI_3) {
+ if (dev->transport->get_device_rev(dev) >= SCSI_3) {
rest->res_type = SPC3_PERSISTENT_RESERVATIONS;
rest->pr_ops.t10_reservation_check = &core_scsi3_pr_reservation_check;
rest->pr_ops.t10_seq_non_holder = &core_scsi3_pr_seq_non_holder;
- printk(KERN_INFO "%s: Using SPC3_PERSISTENT_RESERVATIONS"
- " emulation\n", TRANSPORT(dev)->name);
+ pr_debug("%s: Using SPC3_PERSISTENT_RESERVATIONS"
+ " emulation\n", dev->transport->name);
} else {
rest->res_type = SPC2_RESERVATIONS;
rest->pr_ops.t10_reservation_check = &core_scsi2_reservation_check;
rest->pr_ops.t10_seq_non_holder =
&core_scsi2_reservation_seq_non_holder;
- printk(KERN_INFO "%s: Using SPC2_RESERVATIONS emulation\n",
- TRANSPORT(dev)->name);
+ pr_debug("%s: Using SPC2_RESERVATIONS emulation\n",
+ dev->transport->name);
}
return 0;
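/*
 * Condensed, the selection implemented above reduces to a three-way
 * decision on transport type and reported SCSI level.  A sketch
 * restating it with the same fields the patch dereferences; the
 * function name is hypothetical.
 */
static int example_pick_res_type(struct se_device *dev, int force_pt)
{
	if ((dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV &&
	     !dev->se_sub_dev->se_dev_attrib.emulate_reservations) || force_pt)
		return SPC_PASSTHROUGH;		/* let the real LUN handle it */
	if (dev->transport->get_device_rev(dev) >= SCSI_3)
		return SPC3_PERSISTENT_RESERVATIONS;
	return SPC2_RESERVATIONS;
}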
diff --git a/drivers/target/target_core_pr.h b/drivers/target/target_core_pr.h
index 5603bcfd86d..c8f47d06458 100644
--- a/drivers/target/target_core_pr.h
+++ b/drivers/target/target_core_pr.h
@@ -49,7 +49,7 @@ extern int core_pr_dump_initiator_port(struct t10_pr_registration *,
char *, u32);
extern int core_scsi2_emulate_crh(struct se_cmd *);
extern int core_scsi3_alloc_aptpl_registration(
- struct t10_reservation_template *, u64,
+ struct t10_reservation *, u64,
unsigned char *, unsigned char *, u32,
unsigned char *, u16, u32, int, int, u8);
extern int core_scsi3_check_aptpl_registration(struct se_device *,
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 331d423fd0e..2b7b0da9146 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -55,24 +55,6 @@ static struct se_subsystem_api pscsi_template;
static void pscsi_req_done(struct request *, int);
-/* pscsi_get_sh():
- *
- *
- */
-static struct Scsi_Host *pscsi_get_sh(u32 host_no)
-{
- struct Scsi_Host *sh = NULL;
-
- sh = scsi_host_lookup(host_no);
- if (IS_ERR(sh)) {
- printk(KERN_ERR "Unable to locate SCSI HBA with Host ID:"
- " %u\n", host_no);
- return NULL;
- }
-
- return sh;
-}
-
/* pscsi_attach_hba():
*
* pscsi_get_sh() used scsi_host_lookup() to locate struct Scsi_Host.
@@ -80,28 +62,23 @@ static struct Scsi_Host *pscsi_get_sh(u32 host_no)
*/
static int pscsi_attach_hba(struct se_hba *hba, u32 host_id)
{
- int hba_depth;
struct pscsi_hba_virt *phv;
phv = kzalloc(sizeof(struct pscsi_hba_virt), GFP_KERNEL);
- if (!(phv)) {
- printk(KERN_ERR "Unable to allocate struct pscsi_hba_virt\n");
- return -1;
+ if (!phv) {
+ pr_err("Unable to allocate struct pscsi_hba_virt\n");
+ return -ENOMEM;
}
phv->phv_host_id = host_id;
phv->phv_mode = PHV_VIRUTAL_HOST_ID;
- hba_depth = PSCSI_VIRTUAL_HBA_DEPTH;
- atomic_set(&hba->left_queue_depth, hba_depth);
- atomic_set(&hba->max_queue_depth, hba_depth);
- hba->hba_ptr = (void *)phv;
+ hba->hba_ptr = phv;
- printk(KERN_INFO "CORE_HBA[%d] - TCM SCSI HBA Driver %s on"
+ pr_debug("CORE_HBA[%d] - TCM SCSI HBA Driver %s on"
" Generic Target Core Stack %s\n", hba->hba_id,
PSCSI_VERSION, TARGET_CORE_MOD_VERSION);
- printk(KERN_INFO "CORE_HBA[%d] - Attached SCSI HBA to Generic"
- " Target Core with TCQ Depth: %d\n", hba->hba_id,
- atomic_read(&hba->max_queue_depth));
+ pr_debug("CORE_HBA[%d] - Attached SCSI HBA to Generic\n",
+ hba->hba_id);
return 0;
}
@@ -114,12 +91,12 @@ static void pscsi_detach_hba(struct se_hba *hba)
if (scsi_host) {
scsi_host_put(scsi_host);
- printk(KERN_INFO "CORE_HBA[%d] - Detached SCSI HBA: %s from"
+ pr_debug("CORE_HBA[%d] - Detached SCSI HBA: %s from"
" Generic Target Core\n", hba->hba_id,
(scsi_host->hostt->name) ? (scsi_host->hostt->name) :
"Unknown");
} else
- printk(KERN_INFO "CORE_HBA[%d] - Detached Virtual SCSI HBA"
+ pr_debug("CORE_HBA[%d] - Detached Virtual SCSI HBA"
" from Generic Target Core\n", hba->hba_id);
kfree(phv);
@@ -130,20 +107,17 @@ static int pscsi_pmode_enable_hba(struct se_hba *hba, unsigned long mode_flag)
{
struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)hba->hba_ptr;
struct Scsi_Host *sh = phv->phv_lld_host;
- int hba_depth = PSCSI_VIRTUAL_HBA_DEPTH;
/*
* Release the struct Scsi_Host
*/
- if (!(mode_flag)) {
- if (!(sh))
+ if (!mode_flag) {
+ if (!sh)
return 0;
phv->phv_lld_host = NULL;
phv->phv_mode = PHV_VIRUTAL_HOST_ID;
- atomic_set(&hba->left_queue_depth, hba_depth);
- atomic_set(&hba->max_queue_depth, hba_depth);
- printk(KERN_INFO "CORE_HBA[%d] - Disabled pSCSI HBA Passthrough"
+ pr_debug("CORE_HBA[%d] - Disabled pSCSI HBA Passthrough"
" %s\n", hba->hba_id, (sh->hostt->name) ?
(sh->hostt->name) : "Unknown");
@@ -154,27 +128,17 @@ static int pscsi_pmode_enable_hba(struct se_hba *hba, unsigned long mode_flag)
* Otherwise, locate struct Scsi_Host from the original passed
* pSCSI Host ID and enable for phba mode
*/
- sh = pscsi_get_sh(phv->phv_host_id);
- if (!(sh)) {
- printk(KERN_ERR "pSCSI: Unable to locate SCSI Host for"
+ sh = scsi_host_lookup(phv->phv_host_id);
+ if (IS_ERR(sh)) {
+ pr_err("pSCSI: Unable to locate SCSI Host for"
" phv_host_id: %d\n", phv->phv_host_id);
- return -1;
+ return PTR_ERR(sh);
}
- /*
- * Usually the SCSI LLD will use the hostt->can_queue value to define
- * its HBA TCQ depth. Some other drivers (like 2.6 megaraid) don't set
- * this at all and set sh->can_queue at runtime.
- */
- hba_depth = (sh->hostt->can_queue > sh->can_queue) ?
- sh->hostt->can_queue : sh->can_queue;
-
- atomic_set(&hba->left_queue_depth, hba_depth);
- atomic_set(&hba->max_queue_depth, hba_depth);
phv->phv_lld_host = sh;
phv->phv_mode = PHV_LLD_SCSI_HOST_NO;
- printk(KERN_INFO "CORE_HBA[%d] - Enabled pSCSI HBA Passthrough %s\n",
+ pr_debug("CORE_HBA[%d] - Enabled pSCSI HBA Passthrough %s\n",
hba->hba_id, (sh->hostt->name) ? (sh->hostt->name) : "Unknown");
return 1;
@@ -236,7 +200,7 @@ pscsi_get_inquiry_vpd_serial(struct scsi_device *sdev, struct t10_wwn *wwn)
buf = kzalloc(INQUIRY_VPD_SERIAL_LEN, GFP_KERNEL);
if (!buf)
- return -1;
+ return -ENOMEM;
memset(cdb, 0, MAX_COMMAND_SIZE);
cdb[0] = INQUIRY;
@@ -259,7 +223,7 @@ pscsi_get_inquiry_vpd_serial(struct scsi_device *sdev, struct t10_wwn *wwn)
out_free:
kfree(buf);
- return -1;
+ return -EPERM;
}
static void
@@ -293,15 +257,15 @@ pscsi_get_inquiry_vpd_device_ident(struct scsi_device *sdev,
page_83 = &buf[off];
ident_len = page_83[3];
if (!ident_len) {
- printk(KERN_ERR "page_83[3]: identifier"
+ pr_err("page_83[3]: identifier"
" length zero!\n");
break;
}
- printk(KERN_INFO "T10 VPD Identifer Length: %d\n", ident_len);
+ pr_debug("T10 VPD Identifer Length: %d\n", ident_len);
vpd = kzalloc(sizeof(struct t10_vpd), GFP_KERNEL);
if (!vpd) {
- printk(KERN_ERR "Unable to allocate memory for"
+ pr_err("Unable to allocate memory for"
" struct t10_vpd\n");
goto out;
}
@@ -353,7 +317,7 @@ static struct se_device *pscsi_add_device_to_list(
if (!sd->queue_depth) {
sd->queue_depth = PSCSI_DEFAULT_QUEUEDEPTH;
- printk(KERN_ERR "Set broken SCSI Device %d:%d:%d"
+ pr_err("Set broken SCSI Device %d:%d:%d"
" queue_depth to %d\n", sd->channel, sd->id,
sd->lun, sd->queue_depth);
}
@@ -364,10 +328,8 @@ static struct se_device *pscsi_add_device_to_list(
q = sd->request_queue;
limits = &dev_limits.limits;
limits->logical_block_size = sd->sector_size;
- limits->max_hw_sectors = (sd->host->max_sectors > queue_max_hw_sectors(q)) ?
- queue_max_hw_sectors(q) : sd->host->max_sectors;
- limits->max_sectors = (sd->host->max_sectors > queue_max_sectors(q)) ?
- queue_max_sectors(q) : sd->host->max_sectors;
+ limits->max_hw_sectors = min_t(int, sd->host->max_sectors, queue_max_hw_sectors(q));
+ limits->max_sectors = min_t(int, sd->host->max_sectors, queue_max_sectors(q));
dev_limits.hw_queue_depth = sd->queue_depth;
dev_limits.queue_depth = sd->queue_depth;
/*
@@ -391,9 +353,9 @@ static struct se_device *pscsi_add_device_to_list(
pdv->pdv_sd = sd;
dev = transport_add_device_to_core_hba(hba, &pscsi_template,
- se_dev, dev_flags, (void *)pdv,
+ se_dev, dev_flags, pdv,
&dev_limits, NULL, NULL);
- if (!(dev)) {
+ if (!dev) {
pdv->pdv_sd = NULL;
return NULL;
}
@@ -423,14 +385,14 @@ static void *pscsi_allocate_virtdevice(struct se_hba *hba, const char *name)
struct pscsi_dev_virt *pdv;
pdv = kzalloc(sizeof(struct pscsi_dev_virt), GFP_KERNEL);
- if (!(pdv)) {
- printk(KERN_ERR "Unable to allocate memory for struct pscsi_dev_virt\n");
+ if (!pdv) {
+ pr_err("Unable to allocate memory for struct pscsi_dev_virt\n");
return NULL;
}
pdv->pdv_se_hba = hba;
- printk(KERN_INFO "PSCSI: Allocated pdv: %p for %s\n", pdv, name);
- return (void *)pdv;
+ pr_debug("PSCSI: Allocated pdv: %p for %s\n", pdv, name);
+ return pdv;
}
/*
@@ -450,7 +412,7 @@ static struct se_device *pscsi_create_type_disk(
u32 dev_flags = 0;
if (scsi_device_get(sd)) {
- printk(KERN_ERR "scsi_device_get() failed for %d:%d:%d:%d\n",
+ pr_err("scsi_device_get() failed for %d:%d:%d:%d\n",
sh->host_no, sd->channel, sd->id, sd->lun);
spin_unlock_irq(sh->host_lock);
return NULL;
@@ -463,19 +425,19 @@ static struct se_device *pscsi_create_type_disk(
bd = blkdev_get_by_path(se_dev->se_dev_udev_path,
FMODE_WRITE|FMODE_READ|FMODE_EXCL, pdv);
if (IS_ERR(bd)) {
- printk(KERN_ERR "pSCSI: blkdev_get_by_path() failed\n");
+ pr_err("pSCSI: blkdev_get_by_path() failed\n");
scsi_device_put(sd);
return NULL;
}
pdv->pdv_bd = bd;
dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags);
- if (!(dev)) {
+ if (!dev) {
blkdev_put(pdv->pdv_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
scsi_device_put(sd);
return NULL;
}
- printk(KERN_INFO "CORE_PSCSI[%d] - Added TYPE_DISK for %d:%d:%d:%d\n",
+ pr_debug("CORE_PSCSI[%d] - Added TYPE_DISK for %d:%d:%d:%d\n",
phv->phv_host_id, sh->host_no, sd->channel, sd->id, sd->lun);
return dev;
@@ -497,7 +459,7 @@ static struct se_device *pscsi_create_type_rom(
u32 dev_flags = 0;
if (scsi_device_get(sd)) {
- printk(KERN_ERR "scsi_device_get() failed for %d:%d:%d:%d\n",
+ pr_err("scsi_device_get() failed for %d:%d:%d:%d\n",
sh->host_no, sd->channel, sd->id, sd->lun);
spin_unlock_irq(sh->host_lock);
return NULL;
@@ -505,11 +467,11 @@ static struct se_device *pscsi_create_type_rom(
spin_unlock_irq(sh->host_lock);
dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags);
- if (!(dev)) {
+ if (!dev) {
scsi_device_put(sd);
return NULL;
}
- printk(KERN_INFO "CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n",
+ pr_debug("CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n",
phv->phv_host_id, scsi_device_type(sd->type), sh->host_no,
sd->channel, sd->id, sd->lun);
@@ -533,10 +495,10 @@ static struct se_device *pscsi_create_type_other(
spin_unlock_irq(sh->host_lock);
dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags);
- if (!(dev))
+ if (!dev)
return NULL;
- printk(KERN_INFO "CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n",
+ pr_debug("CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n",
phv->phv_host_id, scsi_device_type(sd->type), sh->host_no,
sd->channel, sd->id, sd->lun);
@@ -555,8 +517,8 @@ static struct se_device *pscsi_create_virtdevice(
struct Scsi_Host *sh = phv->phv_lld_host;
int legacy_mode_enable = 0;
- if (!(pdv)) {
- printk(KERN_ERR "Unable to locate struct pscsi_dev_virt"
+ if (!pdv) {
+ pr_err("Unable to locate struct pscsi_dev_virt"
" parameter\n");
return ERR_PTR(-EINVAL);
}
@@ -564,9 +526,9 @@ static struct se_device *pscsi_create_virtdevice(
* If not running in PHV_LLD_SCSI_HOST_NO mode, locate the
* struct Scsi_Host we will need to bring the TCM/pSCSI object online
*/
- if (!(sh)) {
+ if (!sh) {
if (phv->phv_mode == PHV_LLD_SCSI_HOST_NO) {
- printk(KERN_ERR "pSCSI: Unable to locate struct"
+ pr_err("pSCSI: Unable to locate struct"
" Scsi_Host for PHV_LLD_SCSI_HOST_NO\n");
return ERR_PTR(-ENODEV);
}
@@ -575,7 +537,7 @@ static struct se_device *pscsi_create_virtdevice(
* reference, we enforce that udev_path has been set
*/
if (!(se_dev->su_dev_flags & SDF_USING_UDEV_PATH)) {
- printk(KERN_ERR "pSCSI: udev_path attribute has not"
+ pr_err("pSCSI: udev_path attribute has not"
" been set before ENABLE=1\n");
return ERR_PTR(-EINVAL);
}
@@ -586,8 +548,8 @@ static struct se_device *pscsi_create_virtdevice(
*/
if (!(pdv->pdv_flags & PDF_HAS_VIRT_HOST_ID)) {
spin_lock(&hba->device_lock);
- if (!(list_empty(&hba->hba_dev_list))) {
- printk(KERN_ERR "pSCSI: Unable to set hba_mode"
+ if (!list_empty(&hba->hba_dev_list)) {
+ pr_err("pSCSI: Unable to set hba_mode"
" with active devices\n");
spin_unlock(&hba->device_lock);
return ERR_PTR(-EEXIST);
@@ -601,16 +563,16 @@ static struct se_device *pscsi_create_virtdevice(
hba->hba_flags |= HBA_FLAGS_PSCSI_MODE;
sh = phv->phv_lld_host;
} else {
- sh = pscsi_get_sh(pdv->pdv_host_id);
- if (!(sh)) {
- printk(KERN_ERR "pSCSI: Unable to locate"
+ sh = scsi_host_lookup(pdv->pdv_host_id);
+ if (IS_ERR(sh)) {
+ pr_err("pSCSI: Unable to locate"
" pdv_host_id: %d\n", pdv->pdv_host_id);
- return ERR_PTR(-ENODEV);
+				return ERR_CAST(sh);
}
}
} else {
if (phv->phv_mode == PHV_VIRUTAL_HOST_ID) {
- printk(KERN_ERR "pSCSI: PHV_VIRUTAL_HOST_ID set while"
+ pr_err("pSCSI: PHV_VIRUTAL_HOST_ID set while"
" struct Scsi_Host exists\n");
return ERR_PTR(-EEXIST);
}
@@ -639,7 +601,7 @@ static struct se_device *pscsi_create_virtdevice(
break;
}
- if (!(dev)) {
+ if (!dev) {
if (phv->phv_mode == PHV_VIRUTAL_HOST_ID)
scsi_host_put(sh);
else if (legacy_mode_enable) {
@@ -653,7 +615,7 @@ static struct se_device *pscsi_create_virtdevice(
}
spin_unlock_irq(sh->host_lock);
- printk(KERN_ERR "pSCSI: Unable to locate %d:%d:%d:%d\n", sh->host_no,
+ pr_err("pSCSI: Unable to locate %d:%d:%d:%d\n", sh->host_no,
pdv->pdv_channel_id, pdv->pdv_target_id, pdv->pdv_lun_id);
if (phv->phv_mode == PHV_VIRUTAL_HOST_ID)
@@ -728,13 +690,12 @@ static int pscsi_transport_complete(struct se_task *task)
*/
if (((cdb[0] == MODE_SENSE) || (cdb[0] == MODE_SENSE_10)) &&
(status_byte(result) << 1) == SAM_STAT_GOOD) {
- if (!TASK_CMD(task)->se_deve)
+ if (!task->task_se_cmd->se_deve)
goto after_mode_sense;
- if (TASK_CMD(task)->se_deve->lun_flags &
+ if (task->task_se_cmd->se_deve->lun_flags &
TRANSPORT_LUNFLAGS_READ_ONLY) {
- unsigned char *buf = (unsigned char *)
- T_TASK(task->task_se_cmd)->t_task_buf;
+ unsigned char *buf = transport_kmap_first_data_page(task->task_se_cmd);
if (cdb[0] == MODE_SENSE_10) {
if (!(buf[3] & 0x80))
@@ -743,6 +704,8 @@ static int pscsi_transport_complete(struct se_task *task)
if (!(buf[2] & 0x80))
buf[2] |= 0x80;
}
+
+ transport_kunmap_first_data_page(task->task_se_cmd);
}
}
after_mode_sense:
@@ -766,8 +729,8 @@ after_mode_sense:
u32 blocksize;
buf = sg_virt(&sg[0]);
- if (!(buf)) {
- printk(KERN_ERR "Unable to get buf for scatterlist\n");
+ if (!buf) {
+ pr_err("Unable to get buf for scatterlist\n");
goto after_mode_select;
}
@@ -797,34 +760,20 @@ after_mode_select:
}
static struct se_task *
-pscsi_alloc_task(struct se_cmd *cmd)
+pscsi_alloc_task(unsigned char *cdb)
{
struct pscsi_plugin_task *pt;
- unsigned char *cdb = T_TASK(cmd)->t_task_cdb;
- pt = kzalloc(sizeof(struct pscsi_plugin_task), GFP_KERNEL);
+ /*
+ * Dynamically alloc cdb space, since it may be larger than
+ * TCM_MAX_COMMAND_SIZE
+ */
+ pt = kzalloc(sizeof(*pt) + scsi_command_size(cdb), GFP_KERNEL);
if (!pt) {
- printk(KERN_ERR "Unable to allocate struct pscsi_plugin_task\n");
+ pr_err("Unable to allocate struct pscsi_plugin_task\n");
return NULL;
}
- /*
- * If TCM Core is signaling a > TCM_MAX_COMMAND_SIZE allocation,
- * allocate the extended CDB buffer for per struct se_task context
- * pt->pscsi_cdb now.
- */
- if (T_TASK(cmd)->t_task_cdb != T_TASK(cmd)->__t_task_cdb) {
-
- pt->pscsi_cdb = kzalloc(scsi_command_size(cdb), GFP_KERNEL);
- if (!(pt->pscsi_cdb)) {
- printk(KERN_ERR "pSCSI: Unable to allocate extended"
- " pt->pscsi_cdb\n");
- kfree(pt);
- return NULL;
- }
- } else
- pt->pscsi_cdb = &pt->__pscsi_cdb[0];
-
return &pt->pscsi_task;
}
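/*
 * The new allocation folds the variable-length CDB into the task
 * allocation itself via the trailing pscsi_cdb[0] member added to
 * struct pscsi_plugin_task below.  A generic sketch of the idiom with
 * hypothetical names; one runtime-sized kzalloc() replaces the old
 * separate extended-CDB allocation.
 */
struct example_task {
	int direction;
	unsigned char cdb[];	/* storage comes from the over-allocation */
};

static struct example_task *example_alloc_task(const unsigned char *cdb,
					       size_t cdb_len)
{
	struct example_task *t = kzalloc(sizeof(*t) + cdb_len, GFP_KERNEL);

	if (!t)
		return NULL;
	memcpy(t->cdb, cdb, cdb_len);	/* CDB lives inline with the task */
	return t;
}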
@@ -849,7 +798,7 @@ static inline void pscsi_blk_init_request(
* also set the end_io_data pointer to struct se_task.
*/
req->end_io = pscsi_req_done;
- req->end_io_data = (void *)task;
+ req->end_io_data = task;
/*
* Load the referenced struct se_task's SCSI CDB into
* include/linux/blkdev.h:struct request->cmd
@@ -859,7 +808,7 @@ static inline void pscsi_blk_init_request(
/*
* Setup pointer for outgoing sense data.
*/
- req->sense = (void *)&pt->pscsi_sense[0];
+ req->sense = &pt->pscsi_sense[0];
req->sense_len = 0;
}
@@ -874,8 +823,8 @@ static int pscsi_blk_get_request(struct se_task *task)
pt->pscsi_req = blk_get_request(pdv->pdv_sd->request_queue,
(task->task_data_direction == DMA_TO_DEVICE),
GFP_KERNEL);
- if (!(pt->pscsi_req) || IS_ERR(pt->pscsi_req)) {
- printk(KERN_ERR "PSCSI: blk_get_request() failed: %ld\n",
+ if (!pt->pscsi_req || IS_ERR(pt->pscsi_req)) {
+ pr_err("PSCSI: blk_get_request() failed: %ld\n",
PTR_ERR(pt->pscsi_req));
return PYX_TRANSPORT_LU_COMM_FAILURE;
}
@@ -920,15 +869,8 @@ static int pscsi_do_task(struct se_task *task)
static void pscsi_free_task(struct se_task *task)
{
struct pscsi_plugin_task *pt = PSCSI_TASK(task);
- struct se_cmd *cmd = task->task_se_cmd;
/*
- * Release the extended CDB allocation from pscsi_alloc_task()
- * if one exists.
- */
- if (T_TASK(cmd)->t_task_cdb != T_TASK(cmd)->__t_task_cdb)
- kfree(pt->pscsi_cdb);
- /*
* We do not release the bio(s) here associated with this task, as
* this is handled by bio_put() and pscsi_bi_endio().
*/
@@ -973,7 +915,7 @@ static ssize_t pscsi_set_configfs_dev_params(struct se_hba *hba,
switch (token) {
case Opt_scsi_host_id:
if (phv->phv_mode == PHV_LLD_SCSI_HOST_NO) {
- printk(KERN_ERR "PSCSI[%d]: Unable to accept"
+ pr_err("PSCSI[%d]: Unable to accept"
" scsi_host_id while phv_mode =="
" PHV_LLD_SCSI_HOST_NO\n",
phv->phv_host_id);
@@ -982,14 +924,14 @@ static ssize_t pscsi_set_configfs_dev_params(struct se_hba *hba,
}
match_int(args, &arg);
pdv->pdv_host_id = arg;
- printk(KERN_INFO "PSCSI[%d]: Referencing SCSI Host ID:"
+ pr_debug("PSCSI[%d]: Referencing SCSI Host ID:"
" %d\n", phv->phv_host_id, pdv->pdv_host_id);
pdv->pdv_flags |= PDF_HAS_VIRT_HOST_ID;
break;
case Opt_scsi_channel_id:
match_int(args, &arg);
pdv->pdv_channel_id = arg;
- printk(KERN_INFO "PSCSI[%d]: Referencing SCSI Channel"
+ pr_debug("PSCSI[%d]: Referencing SCSI Channel"
" ID: %d\n", phv->phv_host_id,
pdv->pdv_channel_id);
pdv->pdv_flags |= PDF_HAS_CHANNEL_ID;
@@ -997,7 +939,7 @@ static ssize_t pscsi_set_configfs_dev_params(struct se_hba *hba,
case Opt_scsi_target_id:
match_int(args, &arg);
pdv->pdv_target_id = arg;
- printk(KERN_INFO "PSCSI[%d]: Referencing SCSI Target"
+ pr_debug("PSCSI[%d]: Referencing SCSI Target"
" ID: %d\n", phv->phv_host_id,
pdv->pdv_target_id);
pdv->pdv_flags |= PDF_HAS_TARGET_ID;
@@ -1005,7 +947,7 @@ static ssize_t pscsi_set_configfs_dev_params(struct se_hba *hba,
case Opt_scsi_lun_id:
match_int(args, &arg);
pdv->pdv_lun_id = arg;
- printk(KERN_INFO "PSCSI[%d]: Referencing SCSI LUN ID:"
+ pr_debug("PSCSI[%d]: Referencing SCSI LUN ID:"
" %d\n", phv->phv_host_id, pdv->pdv_lun_id);
pdv->pdv_flags |= PDF_HAS_LUN_ID;
break;
@@ -1028,9 +970,9 @@ static ssize_t pscsi_check_configfs_dev_params(
if (!(pdv->pdv_flags & PDF_HAS_CHANNEL_ID) ||
!(pdv->pdv_flags & PDF_HAS_TARGET_ID) ||
!(pdv->pdv_flags & PDF_HAS_LUN_ID)) {
- printk(KERN_ERR "Missing scsi_channel_id=, scsi_target_id= and"
+ pr_err("Missing scsi_channel_id=, scsi_target_id= and"
" scsi_lun_id= parameters\n");
- return -1;
+ return -EINVAL;
}
return 0;
@@ -1090,7 +1032,7 @@ static void pscsi_bi_endio(struct bio *bio, int error)
bio_put(bio);
}
-static inline struct bio *pscsi_get_bio(struct pscsi_dev_virt *pdv, int sg_num)
+static inline struct bio *pscsi_get_bio(int sg_num)
{
struct bio *bio;
/*
@@ -1098,8 +1040,8 @@ static inline struct bio *pscsi_get_bio(struct pscsi_dev_virt *pdv, int sg_num)
* in block/blk-core.c:blk_make_request()
*/
bio = bio_kmalloc(GFP_KERNEL, sg_num);
- if (!(bio)) {
- printk(KERN_ERR "PSCSI: bio_kmalloc() failed\n");
+ if (!bio) {
+ pr_err("PSCSI: bio_kmalloc() failed\n");
return NULL;
}
bio->bi_end_io = pscsi_bi_endio;
@@ -1107,13 +1049,7 @@ static inline struct bio *pscsi_get_bio(struct pscsi_dev_virt *pdv, int sg_num)
return bio;
}
-#if 0
-#define DEBUG_PSCSI(x...) printk(x)
-#else
-#define DEBUG_PSCSI(x...)
-#endif
-
-static int __pscsi_map_task_SG(
+static int __pscsi_map_SG(
struct se_task *task,
struct scatterlist *task_sg,
u32 task_sg_num,
@@ -1134,7 +1070,7 @@ static int __pscsi_map_task_SG(
return 0;
/*
* For SCF_SCSI_DATA_SG_IO_CDB, use fs/bio.c:bio_add_page() to set up
- * the bio_vec maplist from TC< struct se_mem -> task->task_sg ->
+ * the bio_vec maplist from task->task_sg ->
* struct scatterlist memory. The struct se_task->task_sg[] currently needs
* to be attached to struct bios for submission to Linux/SCSI using
* struct request to struct scsi_device->request_queue.
@@ -1143,34 +1079,34 @@ static int __pscsi_map_task_SG(
* is ported to upstream SCSI passthrough functionality that accepts
* struct scatterlist->page_link or struct page as a parameter.
*/
- DEBUG_PSCSI("PSCSI: nr_pages: %d\n", nr_pages);
+ pr_debug("PSCSI: nr_pages: %d\n", nr_pages);
for_each_sg(task_sg, sg, task_sg_num, i) {
page = sg_page(sg);
off = sg->offset;
len = sg->length;
- DEBUG_PSCSI("PSCSI: i: %d page: %p len: %d off: %d\n", i,
+ pr_debug("PSCSI: i: %d page: %p len: %d off: %d\n", i,
page, len, off);
while (len > 0 && data_len > 0) {
bytes = min_t(unsigned int, len, PAGE_SIZE - off);
bytes = min(bytes, data_len);
- if (!(bio)) {
+ if (!bio) {
nr_vecs = min_t(int, BIO_MAX_PAGES, nr_pages);
nr_pages -= nr_vecs;
/*
* Calls bio_kmalloc() and sets bio->bi_end_io()
*/
- bio = pscsi_get_bio(pdv, nr_vecs);
- if (!(bio))
+ bio = pscsi_get_bio(nr_vecs);
+ if (!bio)
goto fail;
if (rw)
bio->bi_rw |= REQ_WRITE;
- DEBUG_PSCSI("PSCSI: Allocated bio: %p,"
+ pr_debug("PSCSI: Allocated bio: %p,"
" dir: %s nr_vecs: %d\n", bio,
(rw) ? "rw" : "r", nr_vecs);
/*
@@ -1185,7 +1121,7 @@ static int __pscsi_map_task_SG(
tbio = tbio->bi_next = bio;
}
- DEBUG_PSCSI("PSCSI: Calling bio_add_pc_page() i: %d"
+ pr_debug("PSCSI: Calling bio_add_pc_page() i: %d"
" bio: %p page: %p len: %d off: %d\n", i, bio,
page, len, off);
@@ -1194,11 +1130,11 @@ static int __pscsi_map_task_SG(
if (rc != bytes)
goto fail;
- DEBUG_PSCSI("PSCSI: bio->bi_vcnt: %d nr_vecs: %d\n",
+ pr_debug("PSCSI: bio->bi_vcnt: %d nr_vecs: %d\n",
bio->bi_vcnt, nr_vecs);
if (bio->bi_vcnt > nr_vecs) {
- DEBUG_PSCSI("PSCSI: Reached bio->bi_vcnt max:"
+ pr_debug("PSCSI: Reached bio->bi_vcnt max:"
" %d i: %d bio: %p, allocating another"
" bio\n", bio->bi_vcnt, i, bio);
/*
@@ -1220,15 +1156,15 @@ static int __pscsi_map_task_SG(
* Setup the primary pt->pscsi_req used for non-BIDI and BIDI-COMMAND
* primary SCSI WRITE payload mapped for struct se_task->task_sg[]
*/
- if (!(bidi_read)) {
+ if (!bidi_read) {
/*
* Starting with v2.6.31, call blk_make_request() passing in *hbio to
* allocate the pSCSI task a struct request.
*/
pt->pscsi_req = blk_make_request(pdv->pdv_sd->request_queue,
hbio, GFP_KERNEL);
- if (!(pt->pscsi_req)) {
- printk(KERN_ERR "pSCSI: blk_make_request() failed\n");
+ if (!pt->pscsi_req) {
+ pr_err("pSCSI: blk_make_request() failed\n");
goto fail;
}
/*
@@ -1237,7 +1173,7 @@ static int __pscsi_map_task_SG(
*/
pscsi_blk_init_request(task, pt, pt->pscsi_req, 0);
- return task->task_sg_num;
+ return task->task_sg_nents;
}
/*
* Setup the secondary pt->pscsi_req->next_rq used for the extra BIDI-COMMAND
@@ -1245,13 +1181,13 @@ static int __pscsi_map_task_SG(
*/
pt->pscsi_req->next_rq = blk_make_request(pdv->pdv_sd->request_queue,
hbio, GFP_KERNEL);
- if (!(pt->pscsi_req->next_rq)) {
- printk(KERN_ERR "pSCSI: blk_make_request() failed for BIDI\n");
+ if (!pt->pscsi_req->next_rq) {
+ pr_err("pSCSI: blk_make_request() failed for BIDI\n");
goto fail;
}
pscsi_blk_init_request(task, pt, pt->pscsi_req->next_rq, 1);
- return task->task_sg_num;
+ return task->task_sg_nents;
fail:
while (hbio) {
bio = hbio;
@@ -1262,7 +1198,10 @@ fail:
return ret;
}
-static int pscsi_map_task_SG(struct se_task *task)
+/*
+ * pSCSI maps both ->map_control_SG() and ->map_data_SG() to a single call.
+ */
+static int pscsi_map_SG(struct se_task *task)
{
int ret;
@@ -1270,14 +1209,14 @@ static int pscsi_map_task_SG(struct se_task *task)
* Setup the main struct request for the task->task_sg[] payload
*/
- ret = __pscsi_map_task_SG(task, task->task_sg, task->task_sg_num, 0);
+ ret = __pscsi_map_SG(task, task->task_sg, task->task_sg_nents, 0);
if (ret >= 0 && task->task_sg_bidi) {
/*
* If present, set up the extra BIDI-COMMAND SCSI READ
* struct request and payload.
*/
- ret = __pscsi_map_task_SG(task, task->task_sg_bidi,
- task->task_sg_num, 1);
+ ret = __pscsi_map_SG(task, task->task_sg_bidi,
+ task->task_sg_nents, 1);
}
if (ret < 0)
@@ -1285,33 +1224,6 @@ static int pscsi_map_task_SG(struct se_task *task)
return 0;
}
-/* pscsi_map_task_non_SG():
- *
- *
- */
-static int pscsi_map_task_non_SG(struct se_task *task)
-{
- struct se_cmd *cmd = TASK_CMD(task);
- struct pscsi_plugin_task *pt = PSCSI_TASK(task);
- struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr;
- int ret = 0;
-
- if (pscsi_blk_get_request(task) < 0)
- return PYX_TRANSPORT_LU_COMM_FAILURE;
-
- if (!task->task_size)
- return 0;
-
- ret = blk_rq_map_kern(pdv->pdv_sd->request_queue,
- pt->pscsi_req, T_TASK(cmd)->t_task_buf,
- task->task_size, GFP_KERNEL);
- if (ret < 0) {
- printk(KERN_ERR "PSCSI: blk_rq_map_kern() failed: %d\n", ret);
- return PYX_TRANSPORT_LU_COMM_FAILURE;
- }
- return 0;
-}
-
static int pscsi_CDB_none(struct se_task *task)
{
return pscsi_blk_get_request(task);
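/*
 * For reference, the overall shape of __pscsi_map_SG() with BIDI and
 * the bio_vec-overflow handling stripped out: walk the task SGL, pack
 * pages into a kmalloc'd bio, and wrap the result in a struct request.
 * A sketch under those simplifications; 'q' and 'sgl' are stand-ins
 * for the device queue and task->task_sg, and error propagation is
 * elided (blk_make_request() actually returns an ERR_PTR on failure).
 */
static struct request *example_map_sg(struct request_queue *q,
				      struct scatterlist *sgl, int nents)
{
	struct bio *bio = bio_kmalloc(GFP_KERNEL, nents);
	struct scatterlist *sg;
	int i;

	if (!bio)
		return NULL;
	for_each_sg(sgl, sg, nents, i) {
		/* bio_add_pc_page() returns the bytes actually added */
		if (bio_add_pc_page(q, bio, sg_page(sg), sg->length,
				    sg->offset) != sg->length) {
			bio_put(bio);
			return NULL;
		}
	}
	/* blk_make_request() consumes the bio chain on success */
	return blk_make_request(q, bio, GFP_KERNEL);
}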
@@ -1383,9 +1295,9 @@ static inline void pscsi_process_SAM_status(
struct pscsi_plugin_task *pt)
{
task->task_scsi_status = status_byte(pt->pscsi_result);
- if ((task->task_scsi_status)) {
+ if (task->task_scsi_status) {
task->task_scsi_status <<= 1;
- printk(KERN_INFO "PSCSI Status Byte exception at task: %p CDB:"
+ pr_debug("PSCSI Status Byte exception at task: %p CDB:"
" 0x%02x Result: 0x%08x\n", task, pt->pscsi_cdb[0],
pt->pscsi_result);
}
@@ -1395,18 +1307,16 @@ static inline void pscsi_process_SAM_status(
transport_complete_task(task, (!task->task_scsi_status));
break;
default:
- printk(KERN_INFO "PSCSI Host Byte exception at task: %p CDB:"
+ pr_debug("PSCSI Host Byte exception at task: %p CDB:"
" 0x%02x Result: 0x%08x\n", task, pt->pscsi_cdb[0],
pt->pscsi_result);
task->task_scsi_status = SAM_STAT_CHECK_CONDITION;
task->task_error_status = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
- TASK_CMD(task)->transport_error_status =
+ task->task_se_cmd->transport_error_status =
PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
transport_complete_task(task, 0);
break;
}
-
- return;
}
static void pscsi_req_done(struct request *req, int uptodate)
@@ -1433,8 +1343,8 @@ static struct se_subsystem_api pscsi_template = {
.owner = THIS_MODULE,
.transport_type = TRANSPORT_PLUGIN_PHBA_PDEV,
.cdb_none = pscsi_CDB_none,
- .map_task_non_SG = pscsi_map_task_non_SG,
- .map_task_SG = pscsi_map_task_SG,
+ .map_control_SG = pscsi_map_SG,
+ .map_data_SG = pscsi_map_SG,
.attach_hba = pscsi_attach_hba,
.detach_hba = pscsi_detach_hba,
.pmode_enable_hba = pscsi_pmode_enable_hba,
diff --git a/drivers/target/target_core_pscsi.h b/drivers/target/target_core_pscsi.h
index a4cd5d352c3..ebf4f1ae2c8 100644
--- a/drivers/target/target_core_pscsi.h
+++ b/drivers/target/target_core_pscsi.h
@@ -2,7 +2,6 @@
#define TARGET_CORE_PSCSI_H
#define PSCSI_VERSION "v4.0"
-#define PSCSI_VIRTUAL_HBA_DEPTH 2048
/* used in pscsi_find_alloc_len() */
#ifndef INQUIRY_DATA_SIZE
@@ -24,13 +23,12 @@
struct pscsi_plugin_task {
struct se_task pscsi_task;
- unsigned char *pscsi_cdb;
- unsigned char __pscsi_cdb[TCM_MAX_COMMAND_SIZE];
unsigned char pscsi_sense[SCSI_SENSE_BUFFERSIZE];
int pscsi_direction;
int pscsi_result;
u32 pscsi_resid;
struct request *pscsi_req;
+ unsigned char pscsi_cdb[0];
} ____cacheline_aligned;
#define PDF_HAS_CHANNEL_ID 0x01
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index 7837dd365a9..3dd81d24d9a 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -44,12 +44,8 @@
#include "target_core_rd.h"
-static struct se_subsystem_api rd_dr_template;
static struct se_subsystem_api rd_mcp_template;
-/* #define DEBUG_RAMDISK_MCP */
-/* #define DEBUG_RAMDISK_DR */
-
/* rd_attach_hba(): (Part of se_subsystem_api_t template)
*
*
@@ -59,24 +55,21 @@ static int rd_attach_hba(struct se_hba *hba, u32 host_id)
struct rd_host *rd_host;
rd_host = kzalloc(sizeof(struct rd_host), GFP_KERNEL);
- if (!(rd_host)) {
- printk(KERN_ERR "Unable to allocate memory for struct rd_host\n");
+ if (!rd_host) {
+ pr_err("Unable to allocate memory for struct rd_host\n");
return -ENOMEM;
}
rd_host->rd_host_id = host_id;
- atomic_set(&hba->left_queue_depth, RD_HBA_QUEUE_DEPTH);
- atomic_set(&hba->max_queue_depth, RD_HBA_QUEUE_DEPTH);
- hba->hba_ptr = (void *) rd_host;
+ hba->hba_ptr = rd_host;
- printk(KERN_INFO "CORE_HBA[%d] - TCM Ramdisk HBA Driver %s on"
+ pr_debug("CORE_HBA[%d] - TCM Ramdisk HBA Driver %s on"
" Generic Target Core Stack %s\n", hba->hba_id,
RD_HBA_VERSION, TARGET_CORE_MOD_VERSION);
- printk(KERN_INFO "CORE_HBA[%d] - Attached Ramdisk HBA: %u to Generic"
- " Target Core TCQ Depth: %d MaxSectors: %u\n", hba->hba_id,
- rd_host->rd_host_id, atomic_read(&hba->max_queue_depth),
- RD_MAX_SECTORS);
+ pr_debug("CORE_HBA[%d] - Attached Ramdisk HBA: %u to Generic"
+ " MaxSectors: %u\n", hba->hba_id,
+ rd_host->rd_host_id, RD_MAX_SECTORS);
return 0;
}
@@ -85,7 +78,7 @@ static void rd_detach_hba(struct se_hba *hba)
{
struct rd_host *rd_host = hba->hba_ptr;
- printk(KERN_INFO "CORE_HBA[%d] - Detached Ramdisk HBA: %u from"
+ pr_debug("CORE_HBA[%d] - Detached Ramdisk HBA: %u from"
" Generic Target Core\n", hba->hba_id, rd_host->rd_host_id);
kfree(rd_host);
@@ -114,7 +107,7 @@ static void rd_release_device_space(struct rd_dev *rd_dev)
for (j = 0; j < sg_per_table; j++) {
pg = sg_page(&sg[j]);
- if ((pg)) {
+ if (pg) {
__free_page(pg);
page_count++;
}
@@ -123,7 +116,7 @@ static void rd_release_device_space(struct rd_dev *rd_dev)
kfree(sg);
}
- printk(KERN_INFO "CORE_RD[%u] - Released device space for Ramdisk"
+ pr_debug("CORE_RD[%u] - Released device space for Ramdisk"
" Device ID: %u, pages %u in %u tables total bytes %lu\n",
rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,
rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE);
@@ -148,7 +141,7 @@ static int rd_build_device_space(struct rd_dev *rd_dev)
struct scatterlist *sg;
if (rd_dev->rd_page_count <= 0) {
- printk(KERN_ERR "Illegal page count: %u for Ramdisk device\n",
+ pr_err("Illegal page count: %u for Ramdisk device\n",
rd_dev->rd_page_count);
return -EINVAL;
}
@@ -157,8 +150,8 @@ static int rd_build_device_space(struct rd_dev *rd_dev)
sg_tables = (total_sg_needed / max_sg_per_table) + 1;
sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL);
- if (!(sg_table)) {
- printk(KERN_ERR "Unable to allocate memory for Ramdisk"
+ if (!sg_table) {
+ pr_err("Unable to allocate memory for Ramdisk"
" scatterlist tables\n");
return -ENOMEM;
}
@@ -172,13 +165,13 @@ static int rd_build_device_space(struct rd_dev *rd_dev)
sg = kzalloc(sg_per_table * sizeof(struct scatterlist),
GFP_KERNEL);
- if (!(sg)) {
- printk(KERN_ERR "Unable to allocate scatterlist array"
+ if (!sg) {
+ pr_err("Unable to allocate scatterlist array"
" for struct rd_dev\n");
return -ENOMEM;
}
- sg_init_table((struct scatterlist *)&sg[0], sg_per_table);
+ sg_init_table(sg, sg_per_table);
sg_table[i].sg_table = sg;
sg_table[i].rd_sg_count = sg_per_table;
@@ -188,8 +181,8 @@ static int rd_build_device_space(struct rd_dev *rd_dev)
for (j = 0; j < sg_per_table; j++) {
pg = alloc_pages(GFP_KERNEL, 0);
- if (!(pg)) {
- printk(KERN_ERR "Unable to allocate scatterlist"
+ if (!pg) {
+ pr_err("Unable to allocate scatterlist"
" pages for struct rd_dev_sg_table\n");
return -ENOMEM;
}
@@ -201,7 +194,7 @@ static int rd_build_device_space(struct rd_dev *rd_dev)
total_sg_needed -= sg_per_table;
}
- printk(KERN_INFO "CORE_RD[%u] - Built Ramdisk Device ID: %u space of"
+ pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u space of"
" %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
rd_dev->rd_dev_id, rd_dev->rd_page_count,
rd_dev->sg_table_count);
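/*
 * A note on the sizing above: (total_sg_needed / max_sg_per_table) + 1
 * allocates one spare table whenever total_sg_needed is an exact
 * multiple of max_sg_per_table.  The exact form, should that ever
 * matter, is the standard kernel helper:
 *
 *	sg_tables = DIV_ROUND_UP(total_sg_needed, max_sg_per_table);
 */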
@@ -218,8 +211,8 @@ static void *rd_allocate_virtdevice(
struct rd_host *rd_host = hba->hba_ptr;
rd_dev = kzalloc(sizeof(struct rd_dev), GFP_KERNEL);
- if (!(rd_dev)) {
- printk(KERN_ERR "Unable to allocate memory for struct rd_dev\n");
+ if (!rd_dev) {
+ pr_err("Unable to allocate memory for struct rd_dev\n");
return NULL;
}
@@ -229,11 +222,6 @@ static void *rd_allocate_virtdevice(
return rd_dev;
}
-static void *rd_DIRECT_allocate_virtdevice(struct se_hba *hba, const char *name)
-{
- return rd_allocate_virtdevice(hba, name, 1);
-}
-
static void *rd_MEMCPY_allocate_virtdevice(struct se_hba *hba, const char *name)
{
return rd_allocate_virtdevice(hba, name, 0);
@@ -273,16 +261,15 @@ static struct se_device *rd_create_virtdevice(
dev_limits.queue_depth = RD_DEVICE_QUEUE_DEPTH;
dev = transport_add_device_to_core_hba(hba,
- (rd_dev->rd_direct) ? &rd_dr_template :
- &rd_mcp_template, se_dev, dev_flags, (void *)rd_dev,
+ &rd_mcp_template, se_dev, dev_flags, rd_dev,
&dev_limits, prod, rev);
- if (!(dev))
+ if (!dev)
goto fail;
rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++;
rd_dev->rd_queue_depth = dev->queue_depth;
- printk(KERN_INFO "CORE_RD[%u] - Added TCM %s Ramdisk Device ID: %u of"
+ pr_debug("CORE_RD[%u] - Added TCM %s Ramdisk Device ID: %u of"
" %u pages in %u tables, %lu total bytes\n",
rd_host->rd_host_id, (!rd_dev->rd_direct) ? "MEMCPY" :
"DIRECT", rd_dev->rd_dev_id, rd_dev->rd_page_count,
@@ -296,14 +283,6 @@ fail:
return ERR_PTR(ret);
}
-static struct se_device *rd_DIRECT_create_virtdevice(
- struct se_hba *hba,
- struct se_subsystem_dev *se_dev,
- void *p)
-{
- return rd_create_virtdevice(hba, se_dev, p, 1);
-}
-
static struct se_device *rd_MEMCPY_create_virtdevice(
struct se_hba *hba,
struct se_subsystem_dev *se_dev,
@@ -330,16 +309,15 @@ static inline struct rd_request *RD_REQ(struct se_task *task)
}
static struct se_task *
-rd_alloc_task(struct se_cmd *cmd)
+rd_alloc_task(unsigned char *cdb)
{
struct rd_request *rd_req;
rd_req = kzalloc(sizeof(struct rd_request), GFP_KERNEL);
if (!rd_req) {
- printk(KERN_ERR "Unable to allocate struct rd_request\n");
+ pr_err("Unable to allocate struct rd_request\n");
return NULL;
}
- rd_req->rd_dev = SE_DEV(cmd)->dev_ptr;
return &rd_req->rd_task;
}
@@ -360,7 +338,7 @@ static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
return sg_table;
}
- printk(KERN_ERR "Unable to locate struct rd_dev_sg_table for page: %u\n",
+ pr_err("Unable to locate struct rd_dev_sg_table for page: %u\n",
page);
return NULL;
@@ -373,7 +351,7 @@ static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
static int rd_MEMCPY_read(struct rd_request *req)
{
struct se_task *task = &req->rd_task;
- struct rd_dev *dev = req->rd_dev;
+ struct rd_dev *dev = req->rd_task.se_dev->dev_ptr;
struct rd_dev_sg_table *table;
struct scatterlist *sg_d, *sg_s;
void *dst, *src;
@@ -382,32 +360,32 @@ static int rd_MEMCPY_read(struct rd_request *req)
u32 rd_offset = req->rd_offset;
table = rd_get_sg_table(dev, req->rd_page);
- if (!(table))
- return -1;
+ if (!table)
+ return -EINVAL;
table_sg_end = (table->page_end_offset - req->rd_page);
sg_d = task->task_sg;
sg_s = &table->sg_table[req->rd_page - table->page_start_offset];
-#ifdef DEBUG_RAMDISK_MCP
- printk(KERN_INFO "RD[%u]: Read LBA: %llu, Size: %u Page: %u, Offset:"
+
+ pr_debug("RD[%u]: Read LBA: %llu, Size: %u Page: %u, Offset:"
" %u\n", dev->rd_dev_id, task->task_lba, req->rd_size,
req->rd_page, req->rd_offset);
-#endif
+
src_offset = rd_offset;
while (req->rd_size) {
if ((sg_d[i].length - dst_offset) <
(sg_s[j].length - src_offset)) {
length = (sg_d[i].length - dst_offset);
-#ifdef DEBUG_RAMDISK_MCP
- printk(KERN_INFO "Step 1 - sg_d[%d]: %p length: %d"
+
+ pr_debug("Step 1 - sg_d[%d]: %p length: %d"
" offset: %u sg_s[%d].length: %u\n", i,
&sg_d[i], sg_d[i].length, sg_d[i].offset, j,
sg_s[j].length);
- printk(KERN_INFO "Step 1 - length: %u dst_offset: %u"
+ pr_debug("Step 1 - length: %u dst_offset: %u"
" src_offset: %u\n", length, dst_offset,
src_offset);
-#endif
+
if (length > req->rd_size)
length = req->rd_size;
@@ -424,15 +402,15 @@ static int rd_MEMCPY_read(struct rd_request *req)
page_end = 0;
} else {
length = (sg_s[j].length - src_offset);
-#ifdef DEBUG_RAMDISK_MCP
- printk(KERN_INFO "Step 2 - sg_d[%d]: %p length: %d"
+
+ pr_debug("Step 2 - sg_d[%d]: %p length: %d"
" offset: %u sg_s[%d].length: %u\n", i,
&sg_d[i], sg_d[i].length, sg_d[i].offset,
j, sg_s[j].length);
- printk(KERN_INFO "Step 2 - length: %u dst_offset: %u"
+ pr_debug("Step 2 - length: %u dst_offset: %u"
" src_offset: %u\n", length, dst_offset,
src_offset);
-#endif
+
if (length > req->rd_size)
length = req->rd_size;
@@ -456,32 +434,29 @@ static int rd_MEMCPY_read(struct rd_request *req)
memcpy(dst, src, length);
-#ifdef DEBUG_RAMDISK_MCP
- printk(KERN_INFO "page: %u, remaining size: %u, length: %u,"
+ pr_debug("page: %u, remaining size: %u, length: %u,"
" i: %u, j: %u\n", req->rd_page,
(req->rd_size - length), length, i, j);
-#endif
+
req->rd_size -= length;
- if (!(req->rd_size))
+ if (!req->rd_size)
return 0;
if (!page_end)
continue;
if (++req->rd_page <= table->page_end_offset) {
-#ifdef DEBUG_RAMDISK_MCP
- printk(KERN_INFO "page: %u in same page table\n",
+ pr_debug("page: %u in same page table\n",
req->rd_page);
-#endif
continue;
}
-#ifdef DEBUG_RAMDISK_MCP
- printk(KERN_INFO "getting new page table for page: %u\n",
+
+ pr_debug("getting new page table for page: %u\n",
req->rd_page);
-#endif
+
table = rd_get_sg_table(dev, req->rd_page);
- if (!(table))
- return -1;
+ if (!table)
+ return -EINVAL;
sg_s = &table->sg_table[j = 0];
}
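/*
 * Both rd_MEMCPY_read() above and rd_MEMCPY_write() below are instances
 * of one two-cursor copy between scatterlists: each step copies
 * min(space left in the current destination entry, data left in the
 * current source entry, bytes remaining), then advances whichever
 * cursor was exhausted.  A sketch with hypothetical names, assuming all
 * pages are already mapped (the ramdisk allocates lowmem pages with
 * GFP_KERNEL, so sg_virt() is safe here).
 */
static void example_sg_copy(struct scatterlist *sg_d,
			    struct scatterlist *sg_s, u32 size)
{
	u32 i = 0, j = 0, d_off = 0, s_off = 0;

	while (size) {
		u32 len = min(sg_d[i].length - d_off, sg_s[j].length - s_off);

		len = min(len, size);
		memcpy(sg_virt(&sg_d[i]) + d_off,
		       sg_virt(&sg_s[j]) + s_off, len);
		size -= len;

		d_off += len;			/* bump each cursor; roll to */
		if (d_off == sg_d[i].length) {	/* the next entry when full  */
			i++;
			d_off = 0;
		}
		s_off += len;
		if (s_off == sg_s[j].length) {
			j++;
			s_off = 0;
		}
	}
}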
@@ -496,7 +471,7 @@ static int rd_MEMCPY_read(struct rd_request *req)
static int rd_MEMCPY_write(struct rd_request *req)
{
struct se_task *task = &req->rd_task;
- struct rd_dev *dev = req->rd_dev;
+ struct rd_dev *dev = req->rd_task.se_dev->dev_ptr;
struct rd_dev_sg_table *table;
struct scatterlist *sg_d, *sg_s;
void *dst, *src;
@@ -505,32 +480,32 @@ static int rd_MEMCPY_write(struct rd_request *req)
u32 rd_offset = req->rd_offset;
table = rd_get_sg_table(dev, req->rd_page);
- if (!(table))
- return -1;
+ if (!table)
+ return -EINVAL;
table_sg_end = (table->page_end_offset - req->rd_page);
sg_d = &table->sg_table[req->rd_page - table->page_start_offset];
sg_s = task->task_sg;
-#ifdef DEBUG_RAMDISK_MCP
- printk(KERN_INFO "RD[%d] Write LBA: %llu, Size: %u, Page: %u,"
+
+ pr_debug("RD[%d] Write LBA: %llu, Size: %u, Page: %u,"
" Offset: %u\n", dev->rd_dev_id, task->task_lba, req->rd_size,
req->rd_page, req->rd_offset);
-#endif
+
dst_offset = rd_offset;
while (req->rd_size) {
if ((sg_s[i].length - src_offset) <
(sg_d[j].length - dst_offset)) {
length = (sg_s[i].length - src_offset);
-#ifdef DEBUG_RAMDISK_MCP
- printk(KERN_INFO "Step 1 - sg_s[%d]: %p length: %d"
+
+ pr_debug("Step 1 - sg_s[%d]: %p length: %d"
" offset: %d sg_d[%d].length: %u\n", i,
&sg_s[i], sg_s[i].length, sg_s[i].offset,
j, sg_d[j].length);
- printk(KERN_INFO "Step 1 - length: %u src_offset: %u"
+ pr_debug("Step 1 - length: %u src_offset: %u"
" dst_offset: %u\n", length, src_offset,
dst_offset);
-#endif
+
if (length > req->rd_size)
length = req->rd_size;
@@ -547,15 +522,15 @@ static int rd_MEMCPY_write(struct rd_request *req)
page_end = 0;
} else {
length = (sg_d[j].length - dst_offset);
-#ifdef DEBUG_RAMDISK_MCP
- printk(KERN_INFO "Step 2 - sg_s[%d]: %p length: %d"
+
+ pr_debug("Step 2 - sg_s[%d]: %p length: %d"
" offset: %d sg_d[%d].length: %u\n", i,
&sg_s[i], sg_s[i].length, sg_s[i].offset,
j, sg_d[j].length);
- printk(KERN_INFO "Step 2 - length: %u src_offset: %u"
+ pr_debug("Step 2 - length: %u src_offset: %u"
" dst_offset: %u\n", length, src_offset,
dst_offset);
-#endif
+
if (length > req->rd_size)
length = req->rd_size;
@@ -579,32 +554,29 @@ static int rd_MEMCPY_write(struct rd_request *req)
memcpy(dst, src, length);
-#ifdef DEBUG_RAMDISK_MCP
- printk(KERN_INFO "page: %u, remaining size: %u, length: %u,"
+ pr_debug("page: %u, remaining size: %u, length: %u,"
" i: %u, j: %u\n", req->rd_page,
(req->rd_size - length), length, i, j);
-#endif
+
req->rd_size -= length;
- if (!(req->rd_size))
+ if (!req->rd_size)
return 0;
if (!page_end)
continue;
if (++req->rd_page <= table->page_end_offset) {
-#ifdef DEBUG_RAMDISK_MCP
- printk(KERN_INFO "page: %u in same page table\n",
+ pr_debug("page: %u in same page table\n",
req->rd_page);
-#endif
continue;
}
-#ifdef DEBUG_RAMDISK_MCP
- printk(KERN_INFO "getting new page table for page: %u\n",
+
+ pr_debug("getting new page table for page: %u\n",
req->rd_page);
-#endif
+
table = rd_get_sg_table(dev, req->rd_page);
- if (!(table))
- return -1;
+ if (!table)
+ return -EINVAL;
sg_d = &table->sg_table[j = 0];
}
@@ -623,11 +595,11 @@ static int rd_MEMCPY_do_task(struct se_task *task)
unsigned long long lba;
int ret;
- req->rd_page = (task->task_lba * DEV_ATTRIB(dev)->block_size) / PAGE_SIZE;
+ req->rd_page = (task->task_lba * dev->se_sub_dev->se_dev_attrib.block_size) / PAGE_SIZE;
lba = task->task_lba;
req->rd_offset = (do_div(lba,
- (PAGE_SIZE / DEV_ATTRIB(dev)->block_size))) *
- DEV_ATTRIB(dev)->block_size;
+ (PAGE_SIZE / dev->se_sub_dev->se_dev_attrib.block_size))) *
+ dev->se_sub_dev->se_dev_attrib.block_size;
req->rd_size = task->task_size;
if (task->task_data_direction == DMA_FROM_DEVICE)
@@ -644,274 +616,6 @@ static int rd_MEMCPY_do_task(struct se_task *task)
return PYX_TRANSPORT_SENT_TO_TRANSPORT;
}
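/*
 * The page/offset computation at the top of rd_MEMCPY_do_task() maps a
 * logical block address onto the ramdisk's page array.  Restated as a
 * sketch with hypothetical names; do_div() both divides in place and
 * returns the remainder, which is why the open-coded version above
 * reads the way it does.  With 512-byte blocks and 4 KiB pages, for
 * example, LBA 11 lands on page 1 at byte offset 3 * 512 = 1536.
 */
static void example_lba_to_page(unsigned long long lba, u32 block_size,
				u32 *page, u32 *offset)
{
	u32 blocks_per_page = PAGE_SIZE / block_size;

	/* do_div() leaves the quotient in lba and returns the remainder */
	*offset = do_div(lba, blocks_per_page) * block_size;
	*page = lba;
}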
-/* rd_DIRECT_with_offset():
- *
- *
- */
-static int rd_DIRECT_with_offset(
- struct se_task *task,
- struct list_head *se_mem_list,
- u32 *se_mem_cnt,
- u32 *task_offset)
-{
- struct rd_request *req = RD_REQ(task);
- struct rd_dev *dev = req->rd_dev;
- struct rd_dev_sg_table *table;
- struct se_mem *se_mem;
- struct scatterlist *sg_s;
- u32 j = 0, set_offset = 1;
- u32 get_next_table = 0, offset_length, table_sg_end;
-
- table = rd_get_sg_table(dev, req->rd_page);
- if (!(table))
- return -1;
-
- table_sg_end = (table->page_end_offset - req->rd_page);
- sg_s = &table->sg_table[req->rd_page - table->page_start_offset];
-#ifdef DEBUG_RAMDISK_DR
- printk(KERN_INFO "%s DIRECT LBA: %llu, Size: %u Page: %u, Offset: %u\n",
- (task->task_data_direction == DMA_TO_DEVICE) ?
- "Write" : "Read",
- task->task_lba, req->rd_size, req->rd_page, req->rd_offset);
-#endif
- while (req->rd_size) {
- se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL);
- if (!(se_mem)) {
- printk(KERN_ERR "Unable to allocate struct se_mem\n");
- return -1;
- }
- INIT_LIST_HEAD(&se_mem->se_list);
-
- if (set_offset) {
- offset_length = sg_s[j].length - req->rd_offset;
- if (offset_length > req->rd_size)
- offset_length = req->rd_size;
-
- se_mem->se_page = sg_page(&sg_s[j++]);
- se_mem->se_off = req->rd_offset;
- se_mem->se_len = offset_length;
-
- set_offset = 0;
- get_next_table = (j > table_sg_end);
- goto check_eot;
- }
-
- offset_length = (req->rd_size < req->rd_offset) ?
- req->rd_size : req->rd_offset;
-
- se_mem->se_page = sg_page(&sg_s[j]);
- se_mem->se_len = offset_length;
-
- set_offset = 1;
-
-check_eot:
-#ifdef DEBUG_RAMDISK_DR
- printk(KERN_INFO "page: %u, size: %u, offset_length: %u, j: %u"
- " se_mem: %p, se_page: %p se_off: %u se_len: %u\n",
- req->rd_page, req->rd_size, offset_length, j, se_mem,
- se_mem->se_page, se_mem->se_off, se_mem->se_len);
-#endif
- list_add_tail(&se_mem->se_list, se_mem_list);
- (*se_mem_cnt)++;
-
- req->rd_size -= offset_length;
- if (!(req->rd_size))
- goto out;
-
- if (!set_offset && !get_next_table)
- continue;
-
- if (++req->rd_page <= table->page_end_offset) {
-#ifdef DEBUG_RAMDISK_DR
- printk(KERN_INFO "page: %u in same page table\n",
- req->rd_page);
-#endif
- continue;
- }
-#ifdef DEBUG_RAMDISK_DR
- printk(KERN_INFO "getting new page table for page: %u\n",
- req->rd_page);
-#endif
- table = rd_get_sg_table(dev, req->rd_page);
- if (!(table))
- return -1;
-
- sg_s = &table->sg_table[j = 0];
- }
-
-out:
- T_TASK(task->task_se_cmd)->t_tasks_se_num += *se_mem_cnt;
-#ifdef DEBUG_RAMDISK_DR
- printk(KERN_INFO "RD_DR - Allocated %u struct se_mem segments for task\n",
- *se_mem_cnt);
-#endif
- return 0;
-}
-
-/* rd_DIRECT_without_offset():
- *
- *
- */
-static int rd_DIRECT_without_offset(
- struct se_task *task,
- struct list_head *se_mem_list,
- u32 *se_mem_cnt,
- u32 *task_offset)
-{
- struct rd_request *req = RD_REQ(task);
- struct rd_dev *dev = req->rd_dev;
- struct rd_dev_sg_table *table;
- struct se_mem *se_mem;
- struct scatterlist *sg_s;
- u32 length, j = 0;
-
- table = rd_get_sg_table(dev, req->rd_page);
- if (!(table))
- return -1;
-
- sg_s = &table->sg_table[req->rd_page - table->page_start_offset];
-#ifdef DEBUG_RAMDISK_DR
- printk(KERN_INFO "%s DIRECT LBA: %llu, Size: %u, Page: %u\n",
- (task->task_data_direction == DMA_TO_DEVICE) ?
- "Write" : "Read",
- task->task_lba, req->rd_size, req->rd_page);
-#endif
- while (req->rd_size) {
- se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL);
- if (!(se_mem)) {
- printk(KERN_ERR "Unable to allocate struct se_mem\n");
- return -1;
- }
- INIT_LIST_HEAD(&se_mem->se_list);
-
- length = (req->rd_size < sg_s[j].length) ?
- req->rd_size : sg_s[j].length;
-
- se_mem->se_page = sg_page(&sg_s[j++]);
- se_mem->se_len = length;
-
-#ifdef DEBUG_RAMDISK_DR
- printk(KERN_INFO "page: %u, size: %u, j: %u se_mem: %p,"
- " se_page: %p se_off: %u se_len: %u\n", req->rd_page,
- req->rd_size, j, se_mem, se_mem->se_page,
- se_mem->se_off, se_mem->se_len);
-#endif
- list_add_tail(&se_mem->se_list, se_mem_list);
- (*se_mem_cnt)++;
-
- req->rd_size -= length;
- if (!(req->rd_size))
- goto out;
-
- if (++req->rd_page <= table->page_end_offset) {
-#ifdef DEBUG_RAMDISK_DR
- printk("page: %u in same page table\n",
- req->rd_page);
-#endif
- continue;
- }
-#ifdef DEBUG_RAMDISK_DR
- printk(KERN_INFO "getting new page table for page: %u\n",
- req->rd_page);
-#endif
- table = rd_get_sg_table(dev, req->rd_page);
- if (!(table))
- return -1;
-
- sg_s = &table->sg_table[j = 0];
- }
-
-out:
- T_TASK(task->task_se_cmd)->t_tasks_se_num += *se_mem_cnt;
-#ifdef DEBUG_RAMDISK_DR
- printk(KERN_INFO "RD_DR - Allocated %u struct se_mem segments for task\n",
- *se_mem_cnt);
-#endif
- return 0;
-}
-
-/* rd_DIRECT_do_se_mem_map():
- *
- *
- */
-static int rd_DIRECT_do_se_mem_map(
- struct se_task *task,
- struct list_head *se_mem_list,
- void *in_mem,
- struct se_mem *in_se_mem,
- struct se_mem **out_se_mem,
- u32 *se_mem_cnt,
- u32 *task_offset_in)
-{
- struct se_cmd *cmd = task->task_se_cmd;
- struct rd_request *req = RD_REQ(task);
- u32 task_offset = *task_offset_in;
- unsigned long long lba;
- int ret;
-
- req->rd_page = ((task->task_lba * DEV_ATTRIB(task->se_dev)->block_size) /
- PAGE_SIZE);
- lba = task->task_lba;
- req->rd_offset = (do_div(lba,
- (PAGE_SIZE / DEV_ATTRIB(task->se_dev)->block_size))) *
- DEV_ATTRIB(task->se_dev)->block_size;
- req->rd_size = task->task_size;
-
- if (req->rd_offset)
- ret = rd_DIRECT_with_offset(task, se_mem_list, se_mem_cnt,
- task_offset_in);
- else
- ret = rd_DIRECT_without_offset(task, se_mem_list, se_mem_cnt,
- task_offset_in);
-
- if (ret < 0)
- return ret;
-
- if (CMD_TFO(cmd)->task_sg_chaining == 0)
- return 0;
- /*
- * Currently prevent writers from multiple HW fabrics doing
- * pci_map_sg() to RD_DR's internal scatterlist memory.
- */
- if (cmd->data_direction == DMA_TO_DEVICE) {
- printk(KERN_ERR "DMA_TO_DEVICE not supported for"
- " RAMDISK_DR with task_sg_chaining=1\n");
- return -1;
- }
- /*
- * Special case for if task_sg_chaining is enabled, then
- * we setup struct se_task->task_sg[], as it will be used by
- * transport_do_task_sg_chain() for creating chainged SGLs
- * across multiple struct se_task->task_sg[].
- */
- if (!(transport_calc_sg_num(task,
- list_entry(T_TASK(cmd)->t_mem_list->next,
- struct se_mem, se_list),
- task_offset)))
- return -1;
-
- return transport_map_mem_to_sg(task, se_mem_list, task->task_sg,
- list_entry(T_TASK(cmd)->t_mem_list->next,
- struct se_mem, se_list),
- out_se_mem, se_mem_cnt, task_offset_in);
-}
-
-/* rd_DIRECT_do_task(): (Part of se_subsystem_api_t template)
- *
- *
- */
-static int rd_DIRECT_do_task(struct se_task *task)
-{
- /*
- * At this point the locally allocated RD tables have been mapped
- * to struct se_mem elements in rd_DIRECT_do_se_mem_map().
- */
- task->task_scsi_status = GOOD;
- transport_complete_task(task, 1);
-
- return PYX_TRANSPORT_SENT_TO_TRANSPORT;
-}
-
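For reference, the removed rd_DIRECT path mapped a task's starting LBA onto a ramdisk page and an intra-page offset before walking the scatterlist tables. A minimal userspace sketch of that arithmetic (assuming 4 KiB pages and 512-byte blocks; the kernel code uses do_div() for the 64-bit division) is:

    #include <stdio.h>

    #define PAGE_SZ   4096ULL
    #define BLOCK_SZ  512ULL

    int main(void)
    {
        unsigned long long lba = 1027;  /* arbitrary example LBA */

        /* Which ramdisk page holds the first byte of this LBA. */
        unsigned long long rd_page = (lba * BLOCK_SZ) / PAGE_SZ;
        /* Byte offset of the LBA within that page. */
        unsigned long long rd_offset =
                (lba % (PAGE_SZ / BLOCK_SZ)) * BLOCK_SZ;

        printf("lba %llu -> page %llu, offset %llu\n",
               lba, rd_page, rd_offset);
        return 0;
    }

A zero rd_offset selected the without_offset variant above; anything else took the with_offset path.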
/* rd_free_task(): (Part of se_subsystem_api_t template)
*
*
@@ -956,7 +660,7 @@ static ssize_t rd_set_configfs_dev_params(
case Opt_rd_pages:
match_int(args, &arg);
rd_dev->rd_page_count = arg;
- printk(KERN_INFO "RAMDISK: Referencing Page"
+ pr_debug("RAMDISK: Referencing Page"
" Count: %u\n", rd_dev->rd_page_count);
rd_dev->rd_flags |= RDF_HAS_PAGE_COUNT;
break;
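The printk(KERN_INFO ...) to pr_debug() conversions in this file (and throughout the patch) move these messages under the dynamic debug facility: with CONFIG_DYNAMIC_DEBUG they can be enabled per call site at runtime, and without it (or a local DEBUG define) they compile away. A minimal sketch of the idiom, with an illustrative pr_fmt() prefix:

    /* pr_fmt(), if defined before the include, prefixes every pr_*()
     * message from this file; the prefix here is just an example. */
    #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
    #include <linux/printk.h>

    static void report_page_count(unsigned int page_count)
    {
        /* No-op unless DEBUG or dynamic debug enables this site. */
        pr_debug("RAMDISK: Referencing Page Count: %u\n", page_count);
    }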
@@ -974,8 +678,8 @@ static ssize_t rd_check_configfs_dev_params(struct se_hba *hba, struct se_subsys
struct rd_dev *rd_dev = se_dev->se_dev_su_ptr;
if (!(rd_dev->rd_flags & RDF_HAS_PAGE_COUNT)) {
- printk(KERN_INFO "Missing rd_pages= parameter\n");
- return -1;
+ pr_debug("Missing rd_pages= parameter\n");
+ return -EINVAL;
}
return 0;
@@ -1021,32 +725,11 @@ static sector_t rd_get_blocks(struct se_device *dev)
{
struct rd_dev *rd_dev = dev->dev_ptr;
unsigned long long blocks_long = ((rd_dev->rd_page_count * PAGE_SIZE) /
- DEV_ATTRIB(dev)->block_size) - 1;
+ dev->se_sub_dev->se_dev_attrib.block_size) - 1;
return blocks_long;
}
-static struct se_subsystem_api rd_dr_template = {
- .name = "rd_dr",
- .transport_type = TRANSPORT_PLUGIN_VHBA_VDEV,
- .attach_hba = rd_attach_hba,
- .detach_hba = rd_detach_hba,
- .allocate_virtdevice = rd_DIRECT_allocate_virtdevice,
- .create_virtdevice = rd_DIRECT_create_virtdevice,
- .free_device = rd_free_device,
- .alloc_task = rd_alloc_task,
- .do_task = rd_DIRECT_do_task,
- .free_task = rd_free_task,
- .check_configfs_dev_params = rd_check_configfs_dev_params,
- .set_configfs_dev_params = rd_set_configfs_dev_params,
- .show_configfs_dev_params = rd_show_configfs_dev_params,
- .get_cdb = rd_get_cdb,
- .get_device_rev = rd_get_device_rev,
- .get_device_type = rd_get_device_type,
- .get_blocks = rd_get_blocks,
- .do_se_mem_map = rd_DIRECT_do_se_mem_map,
-};
-
static struct se_subsystem_api rd_mcp_template = {
.name = "rd_mcp",
.transport_type = TRANSPORT_PLUGIN_VHBA_VDEV,
@@ -1071,13 +754,8 @@ int __init rd_module_init(void)
{
int ret;
- ret = transport_subsystem_register(&rd_dr_template);
- if (ret < 0)
- return ret;
-
ret = transport_subsystem_register(&rd_mcp_template);
if (ret < 0) {
- transport_subsystem_release(&rd_dr_template);
return ret;
}
@@ -1086,6 +764,5 @@ int __init rd_module_init(void)
void rd_module_exit(void)
{
- transport_subsystem_release(&rd_dr_template);
transport_subsystem_release(&rd_mcp_template);
}
diff --git a/drivers/target/target_core_rd.h b/drivers/target/target_core_rd.h
index 3ea19e29d8e..0d027732cd0 100644
--- a/drivers/target/target_core_rd.h
+++ b/drivers/target/target_core_rd.h
@@ -7,8 +7,6 @@
/* Largest piece of memory kmalloc can allocate */
#define RD_MAX_ALLOCATION_SIZE 65536
-/* Maximum queuedepth for the Ramdisk HBA */
-#define RD_HBA_QUEUE_DEPTH 256
#define RD_DEVICE_QUEUE_DEPTH 32
#define RD_MAX_DEVICE_QUEUE_DEPTH 128
#define RD_BLOCKSIZE 512
@@ -34,8 +32,6 @@ struct rd_request {
u32 rd_page_count;
/* Scatterlist count */
u32 rd_size;
- /* Ramdisk device */
- struct rd_dev *rd_dev;
} ____cacheline_aligned;
struct rd_dev_sg_table {
diff --git a/drivers/target/target_core_scdb.c b/drivers/target/target_core_scdb.c
index dc6fed037ab..72843441d4f 100644
--- a/drivers/target/target_core_scdb.c
+++ b/drivers/target/target_core_scdb.c
@@ -42,13 +42,13 @@
*/
void split_cdb_XX_6(
unsigned long long lba,
- u32 *sectors,
+ u32 sectors,
unsigned char *cdb)
{
cdb[1] = (lba >> 16) & 0x1f;
cdb[2] = (lba >> 8) & 0xff;
cdb[3] = lba & 0xff;
- cdb[4] = *sectors & 0xff;
+ cdb[4] = sectors & 0xff;
}
/* split_cdb_XX_10():
@@ -57,11 +57,11 @@ void split_cdb_XX_6(
*/
void split_cdb_XX_10(
unsigned long long lba,
- u32 *sectors,
+ u32 sectors,
unsigned char *cdb)
{
put_unaligned_be32(lba, &cdb[2]);
- put_unaligned_be16(*sectors, &cdb[7]);
+ put_unaligned_be16(sectors, &cdb[7]);
}
/* split_cdb_XX_12():
@@ -70,11 +70,11 @@ void split_cdb_XX_10(
*/
void split_cdb_XX_12(
unsigned long long lba,
- u32 *sectors,
+ u32 sectors,
unsigned char *cdb)
{
put_unaligned_be32(lba, &cdb[2]);
- put_unaligned_be32(*sectors, &cdb[6]);
+ put_unaligned_be32(sectors, &cdb[6]);
}
/* split_cdb_XX_16():
@@ -83,11 +83,11 @@ void split_cdb_XX_12(
*/
void split_cdb_XX_16(
unsigned long long lba,
- u32 *sectors,
+ u32 sectors,
unsigned char *cdb)
{
put_unaligned_be64(lba, &cdb[2]);
- put_unaligned_be32(*sectors, &cdb[10]);
+ put_unaligned_be32(sectors, &cdb[10]);
}
/*
@@ -97,9 +97,9 @@ void split_cdb_XX_16(
*/
void split_cdb_XX_32(
unsigned long long lba,
- u32 *sectors,
+ u32 sectors,
unsigned char *cdb)
{
put_unaligned_be64(lba, &cdb[12]);
- put_unaligned_be32(*sectors, &cdb[28]);
+ put_unaligned_be32(sectors, &cdb[28]);
}
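None of the split_cdb_XX_* helpers ever wrote through the old u32 *sectors argument, so passing the count by value is strictly an interface cleanup. A hypothetical caller (the function name and opcode constant below are illustrative, not from this patch) now reads:

    #include <linux/types.h>
    /* prototypes from target_core_scdb.h, as changed below */

    static void build_read_10_cdb(unsigned char *cdb,
                                  unsigned long long lba, u32 sectors)
    {
        cdb[0] = 0x28;                      /* READ(10) opcode */
        split_cdb_XX_10(lba, sectors, cdb); /* sectors now by value */
    }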
diff --git a/drivers/target/target_core_scdb.h b/drivers/target/target_core_scdb.h
index 98cd1c01ed8..48e9ccc9585 100644
--- a/drivers/target/target_core_scdb.h
+++ b/drivers/target/target_core_scdb.h
@@ -1,10 +1,10 @@
#ifndef TARGET_CORE_SCDB_H
#define TARGET_CORE_SCDB_H
-extern void split_cdb_XX_6(unsigned long long, u32 *, unsigned char *);
-extern void split_cdb_XX_10(unsigned long long, u32 *, unsigned char *);
-extern void split_cdb_XX_12(unsigned long long, u32 *, unsigned char *);
-extern void split_cdb_XX_16(unsigned long long, u32 *, unsigned char *);
-extern void split_cdb_XX_32(unsigned long long, u32 *, unsigned char *);
+extern void split_cdb_XX_6(unsigned long long, u32, unsigned char *);
+extern void split_cdb_XX_10(unsigned long long, u32, unsigned char *);
+extern void split_cdb_XX_12(unsigned long long, u32, unsigned char *);
+extern void split_cdb_XX_16(unsigned long long, u32, unsigned char *);
+extern void split_cdb_XX_32(unsigned long long, u32, unsigned char *);
#endif /* TARGET_CORE_SCDB_H */
diff --git a/drivers/target/target_core_stat.c b/drivers/target/target_core_stat.c
index 5e3a067a747..a8d6e1dee93 100644
--- a/drivers/target/target_core_stat.c
+++ b/drivers/target/target_core_stat.c
@@ -402,8 +402,8 @@ static ssize_t target_stat_scsi_lu_show_attr_lu_name(
return -ENODEV;
/* scsiLuWwnName */
return snprintf(page, PAGE_SIZE, "%s\n",
- (strlen(DEV_T10_WWN(dev)->unit_serial)) ?
- (char *)&DEV_T10_WWN(dev)->unit_serial[0] : "None");
+ (strlen(dev->se_sub_dev->t10_wwn.unit_serial)) ?
+ dev->se_sub_dev->t10_wwn.unit_serial : "None");
}
DEV_STAT_SCSI_LU_ATTR_RO(lu_name);
@@ -413,17 +413,17 @@ static ssize_t target_stat_scsi_lu_show_attr_vend(
struct se_subsystem_dev *se_subdev = container_of(sgrps,
struct se_subsystem_dev, dev_stat_grps);
struct se_device *dev = se_subdev->se_dev_ptr;
- int j;
- char str[28];
+ int i;
+ char str[sizeof(dev->se_sub_dev->t10_wwn.vendor)+1];
if (!dev)
return -ENODEV;
+
/* scsiLuVendorId */
- memcpy(&str[0], (void *)DEV_T10_WWN(dev), 28);
- for (j = 0; j < 8; j++)
- str[j] = ISPRINT(DEV_T10_WWN(dev)->vendor[j]) ?
- DEV_T10_WWN(dev)->vendor[j] : 0x20;
- str[8] = 0;
+ for (i = 0; i < sizeof(dev->se_sub_dev->t10_wwn.vendor); i++)
+ str[i] = ISPRINT(dev->se_sub_dev->t10_wwn.vendor[i]) ?
+ dev->se_sub_dev->t10_wwn.vendor[i] : ' ';
+ str[i] = '\0';
return snprintf(page, PAGE_SIZE, "%s\n", str);
}
DEV_STAT_SCSI_LU_ATTR_RO(vend);
@@ -434,18 +434,17 @@ static ssize_t target_stat_scsi_lu_show_attr_prod(
struct se_subsystem_dev *se_subdev = container_of(sgrps,
struct se_subsystem_dev, dev_stat_grps);
struct se_device *dev = se_subdev->se_dev_ptr;
- int j;
- char str[28];
+ int i;
+ char str[sizeof(dev->se_sub_dev->t10_wwn.model)+1];
if (!dev)
return -ENODEV;
/* scsiLuProductId */
- memcpy(&str[0], (void *)DEV_T10_WWN(dev), 28);
- for (j = 0; j < 16; j++)
- str[j] = ISPRINT(DEV_T10_WWN(dev)->model[j]) ?
- DEV_T10_WWN(dev)->model[j] : 0x20;
- str[16] = 0;
+	for (i = 0; i < sizeof(dev->se_sub_dev->t10_wwn.model); i++)
+ str[i] = ISPRINT(dev->se_sub_dev->t10_wwn.model[i]) ?
+ dev->se_sub_dev->t10_wwn.model[i] : ' ';
+ str[i] = '\0';
return snprintf(page, PAGE_SIZE, "%s\n", str);
}
DEV_STAT_SCSI_LU_ATTR_RO(prod);
@@ -456,18 +455,17 @@ static ssize_t target_stat_scsi_lu_show_attr_rev(
struct se_subsystem_dev *se_subdev = container_of(sgrps,
struct se_subsystem_dev, dev_stat_grps);
struct se_device *dev = se_subdev->se_dev_ptr;
- int j;
- char str[28];
+ int i;
+ char str[sizeof(dev->se_sub_dev->t10_wwn.revision)+1];
if (!dev)
return -ENODEV;
/* scsiLuRevisionId */
- memcpy(&str[0], (void *)DEV_T10_WWN(dev), 28);
- for (j = 0; j < 4; j++)
- str[j] = ISPRINT(DEV_T10_WWN(dev)->revision[j]) ?
- DEV_T10_WWN(dev)->revision[j] : 0x20;
- str[4] = 0;
+ for (i = 0; i < sizeof(dev->se_sub_dev->t10_wwn.revision); i++)
+ str[i] = ISPRINT(dev->se_sub_dev->t10_wwn.revision[i]) ?
+ dev->se_sub_dev->t10_wwn.revision[i] : ' ';
+ str[i] = '\0';
return snprintf(page, PAGE_SIZE, "%s\n", str);
}
DEV_STAT_SCSI_LU_ATTR_RO(rev);
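The rewritten vendor/model/revision attributes all follow one pattern: size the scratch buffer from the t10_wwn field itself, map unprintable bytes to spaces, and NUL-terminate, replacing the old fixed-size memcpy() from the start of the structure. A standalone userspace sketch of that pattern (the kernel code uses its ISPRINT macro instead of ctype):

    #include <ctype.h>
    #include <stdio.h>

    /* Copy a fixed-width T10 field, replacing unprintable bytes with
     * spaces; dst must hold len + 1 bytes for the terminating NUL. */
    static void t10_field_to_str(char *dst, const char *src, size_t len)
    {
        size_t i;

        for (i = 0; i < len; i++)
            dst[i] = isprint((unsigned char)src[i]) ? src[i] : ' ';
        dst[len] = '\0';
    }

    int main(void)
    {
        char vendor[8] = { 'L', 'I', 'O', '-', 'O', 'R', 'G', 0x01 };
        char str[sizeof(vendor) + 1];

        t10_field_to_str(str, vendor, sizeof(vendor));
        printf("\"%s\"\n", str);   /* prints "LIO-ORG " */
        return 0;
    }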
@@ -484,7 +482,7 @@ static ssize_t target_stat_scsi_lu_show_attr_dev_type(
/* scsiLuPeripheralType */
return snprintf(page, PAGE_SIZE, "%u\n",
- TRANSPORT(dev)->get_device_type(dev));
+ dev->transport->get_device_type(dev));
}
DEV_STAT_SCSI_LU_ATTR_RO(dev_type);
@@ -668,18 +666,18 @@ static struct config_item_type target_stat_scsi_lu_cit = {
*/
void target_stat_setup_dev_default_groups(struct se_subsystem_dev *se_subdev)
{
- struct config_group *dev_stat_grp = &DEV_STAT_GRP(se_subdev)->stat_group;
+ struct config_group *dev_stat_grp = &se_subdev->dev_stat_grps.stat_group;
- config_group_init_type_name(&DEV_STAT_GRP(se_subdev)->scsi_dev_group,
+ config_group_init_type_name(&se_subdev->dev_stat_grps.scsi_dev_group,
"scsi_dev", &target_stat_scsi_dev_cit);
- config_group_init_type_name(&DEV_STAT_GRP(se_subdev)->scsi_tgt_dev_group,
+ config_group_init_type_name(&se_subdev->dev_stat_grps.scsi_tgt_dev_group,
"scsi_tgt_dev", &target_stat_scsi_tgt_dev_cit);
- config_group_init_type_name(&DEV_STAT_GRP(se_subdev)->scsi_lu_group,
+ config_group_init_type_name(&se_subdev->dev_stat_grps.scsi_lu_group,
"scsi_lu", &target_stat_scsi_lu_cit);
- dev_stat_grp->default_groups[0] = &DEV_STAT_GRP(se_subdev)->scsi_dev_group;
- dev_stat_grp->default_groups[1] = &DEV_STAT_GRP(se_subdev)->scsi_tgt_dev_group;
- dev_stat_grp->default_groups[2] = &DEV_STAT_GRP(se_subdev)->scsi_lu_group;
+ dev_stat_grp->default_groups[0] = &se_subdev->dev_stat_grps.scsi_dev_group;
+ dev_stat_grp->default_groups[1] = &se_subdev->dev_stat_grps.scsi_tgt_dev_group;
+ dev_stat_grp->default_groups[2] = &se_subdev->dev_stat_grps.scsi_lu_group;
dev_stat_grp->default_groups[3] = NULL;
}
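With the DEV_STAT_GRP() accessor macro gone, the standard configfs wiring is spelled out directly: initialise each child group with its config_item_type, then link it into the parent's NULL-terminated default_groups[] so it is created alongside the parent. The generic shape, with illustrative names:

    #include <linux/configfs.h>

    static void wire_child_group(struct config_group *parent,
                                 struct config_group *child,
                                 struct config_item_type *type)
    {
        config_group_init_type_name(child, "example_child", type);
        parent->default_groups[0] = child;
        parent->default_groups[1] = NULL; /* array must be NULL-terminated */
    }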
@@ -922,7 +920,7 @@ static ssize_t target_stat_scsi_tgt_port_show_attr_name(
tpg = sep->sep_tpg;
ret = snprintf(page, PAGE_SIZE, "%sPort#%u\n",
- TPG_TFO(tpg)->get_fabric_name(), sep->sep_index);
+ tpg->se_tpg_tfo->get_fabric_name(), sep->sep_index);
spin_unlock(&lun->lun_sep_lock);
return ret;
}
@@ -945,8 +943,8 @@ static ssize_t target_stat_scsi_tgt_port_show_attr_port_index(
tpg = sep->sep_tpg;
ret = snprintf(page, PAGE_SIZE, "%s%s%d\n",
- TPG_TFO(tpg)->tpg_get_wwn(tpg), "+t+",
- TPG_TFO(tpg)->tpg_get_tag(tpg));
+ tpg->se_tpg_tfo->tpg_get_wwn(tpg), "+t+",
+ tpg->se_tpg_tfo->tpg_get_tag(tpg));
spin_unlock(&lun->lun_sep_lock);
return ret;
}
@@ -1128,7 +1126,7 @@ static ssize_t target_stat_scsi_transport_show_attr_device(
tpg = sep->sep_tpg;
/* scsiTransportType */
ret = snprintf(page, PAGE_SIZE, "scsiTransport%s\n",
- TPG_TFO(tpg)->get_fabric_name());
+ tpg->se_tpg_tfo->get_fabric_name());
spin_unlock(&lun->lun_sep_lock);
return ret;
}
@@ -1150,7 +1148,7 @@ static ssize_t target_stat_scsi_transport_show_attr_indx(
}
tpg = sep->sep_tpg;
ret = snprintf(page, PAGE_SIZE, "%u\n",
- TPG_TFO(tpg)->tpg_get_inst_index(tpg));
+ tpg->se_tpg_tfo->tpg_get_inst_index(tpg));
spin_unlock(&lun->lun_sep_lock);
return ret;
}
@@ -1173,10 +1171,10 @@ static ssize_t target_stat_scsi_transport_show_attr_dev_name(
return -ENODEV;
}
tpg = sep->sep_tpg;
- wwn = DEV_T10_WWN(dev);
+ wwn = &dev->se_sub_dev->t10_wwn;
/* scsiTransportDevName */
ret = snprintf(page, PAGE_SIZE, "%s+%s\n",
- TPG_TFO(tpg)->tpg_get_wwn(tpg),
+ tpg->se_tpg_tfo->tpg_get_wwn(tpg),
(strlen(wwn->unit_serial)) ? wwn->unit_serial :
wwn->vendor);
spin_unlock(&lun->lun_sep_lock);
@@ -1212,18 +1210,18 @@ static struct config_item_type target_stat_scsi_transport_cit = {
*/
void target_stat_setup_port_default_groups(struct se_lun *lun)
{
- struct config_group *port_stat_grp = &PORT_STAT_GRP(lun)->stat_group;
+ struct config_group *port_stat_grp = &lun->port_stat_grps.stat_group;
- config_group_init_type_name(&PORT_STAT_GRP(lun)->scsi_port_group,
+ config_group_init_type_name(&lun->port_stat_grps.scsi_port_group,
"scsi_port", &target_stat_scsi_port_cit);
- config_group_init_type_name(&PORT_STAT_GRP(lun)->scsi_tgt_port_group,
+ config_group_init_type_name(&lun->port_stat_grps.scsi_tgt_port_group,
"scsi_tgt_port", &target_stat_scsi_tgt_port_cit);
- config_group_init_type_name(&PORT_STAT_GRP(lun)->scsi_transport_group,
+ config_group_init_type_name(&lun->port_stat_grps.scsi_transport_group,
"scsi_transport", &target_stat_scsi_transport_cit);
- port_stat_grp->default_groups[0] = &PORT_STAT_GRP(lun)->scsi_port_group;
- port_stat_grp->default_groups[1] = &PORT_STAT_GRP(lun)->scsi_tgt_port_group;
- port_stat_grp->default_groups[2] = &PORT_STAT_GRP(lun)->scsi_transport_group;
+ port_stat_grp->default_groups[0] = &lun->port_stat_grps.scsi_port_group;
+ port_stat_grp->default_groups[1] = &lun->port_stat_grps.scsi_tgt_port_group;
+ port_stat_grp->default_groups[2] = &lun->port_stat_grps.scsi_transport_group;
port_stat_grp->default_groups[3] = NULL;
}
@@ -1264,7 +1262,7 @@ static ssize_t target_stat_scsi_auth_intr_show_attr_inst(
tpg = nacl->se_tpg;
/* scsiInstIndex */
ret = snprintf(page, PAGE_SIZE, "%u\n",
- TPG_TFO(tpg)->tpg_get_inst_index(tpg));
+ tpg->se_tpg_tfo->tpg_get_inst_index(tpg));
spin_unlock_irq(&nacl->device_list_lock);
return ret;
}
@@ -1314,7 +1312,7 @@ static ssize_t target_stat_scsi_auth_intr_show_attr_port(
}
tpg = nacl->se_tpg;
/* scsiAuthIntrTgtPortIndex */
- ret = snprintf(page, PAGE_SIZE, "%u\n", TPG_TFO(tpg)->tpg_get_tag(tpg));
+ ret = snprintf(page, PAGE_SIZE, "%u\n", tpg->se_tpg_tfo->tpg_get_tag(tpg));
spin_unlock_irq(&nacl->device_list_lock);
return ret;
}
@@ -1632,7 +1630,7 @@ static ssize_t target_stat_scsi_att_intr_port_show_attr_inst(
tpg = nacl->se_tpg;
/* scsiInstIndex */
ret = snprintf(page, PAGE_SIZE, "%u\n",
- TPG_TFO(tpg)->tpg_get_inst_index(tpg));
+ tpg->se_tpg_tfo->tpg_get_inst_index(tpg));
spin_unlock_irq(&nacl->device_list_lock);
return ret;
}
@@ -1682,7 +1680,7 @@ static ssize_t target_stat_scsi_att_intr_port_show_attr_port(
}
tpg = nacl->se_tpg;
/* scsiPortIndex */
- ret = snprintf(page, PAGE_SIZE, "%u\n", TPG_TFO(tpg)->tpg_get_tag(tpg));
+ ret = snprintf(page, PAGE_SIZE, "%u\n", tpg->se_tpg_tfo->tpg_get_tag(tpg));
spin_unlock_irq(&nacl->device_list_lock);
return ret;
}
@@ -1708,7 +1706,7 @@ static ssize_t target_stat_scsi_att_intr_port_show_attr_indx(
tpg = nacl->se_tpg;
/* scsiAttIntrPortIndex */
ret = snprintf(page, PAGE_SIZE, "%u\n",
- TPG_TFO(tpg)->sess_get_index(se_sess));
+ tpg->se_tpg_tfo->sess_get_index(se_sess));
spin_unlock_irq(&nacl->nacl_sess_lock);
return ret;
}
@@ -1757,8 +1755,8 @@ static ssize_t target_stat_scsi_att_intr_port_show_attr_port_ident(
tpg = nacl->se_tpg;
/* scsiAttIntrPortName+scsiAttIntrPortIdentifier */
memset(buf, 0, 64);
- if (TPG_TFO(tpg)->sess_get_initiator_sid != NULL)
- TPG_TFO(tpg)->sess_get_initiator_sid(se_sess,
+ if (tpg->se_tpg_tfo->sess_get_initiator_sid != NULL)
+ tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess,
(unsigned char *)&buf[0], 64);
ret = snprintf(page, PAGE_SIZE, "%s+i+%s\n", nacl->initiatorname, buf);
@@ -1797,14 +1795,14 @@ static struct config_item_type target_stat_scsi_att_intr_port_cit = {
*/
void target_stat_setup_mappedlun_default_groups(struct se_lun_acl *lacl)
{
- struct config_group *ml_stat_grp = &ML_STAT_GRPS(lacl)->stat_group;
+ struct config_group *ml_stat_grp = &lacl->ml_stat_grps.stat_group;
- config_group_init_type_name(&ML_STAT_GRPS(lacl)->scsi_auth_intr_group,
+ config_group_init_type_name(&lacl->ml_stat_grps.scsi_auth_intr_group,
"scsi_auth_intr", &target_stat_scsi_auth_intr_cit);
- config_group_init_type_name(&ML_STAT_GRPS(lacl)->scsi_att_intr_port_group,
+ config_group_init_type_name(&lacl->ml_stat_grps.scsi_att_intr_port_group,
"scsi_att_intr_port", &target_stat_scsi_att_intr_port_cit);
- ml_stat_grp->default_groups[0] = &ML_STAT_GRPS(lacl)->scsi_auth_intr_group;
- ml_stat_grp->default_groups[1] = &ML_STAT_GRPS(lacl)->scsi_att_intr_port_group;
+ ml_stat_grp->default_groups[0] = &lacl->ml_stat_grps.scsi_auth_intr_group;
+ ml_stat_grp->default_groups[1] = &lacl->ml_stat_grps.scsi_att_intr_port_group;
ml_stat_grp->default_groups[2] = NULL;
}
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index 59b8b9c5ad7..27d4925e51c 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -41,13 +41,6 @@
#include "target_core_alua.h"
#include "target_core_pr.h"
-#define DEBUG_LUN_RESET
-#ifdef DEBUG_LUN_RESET
-#define DEBUG_LR(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_LR(x...)
-#endif
-
struct se_tmr_req *core_tmr_alloc_req(
struct se_cmd *se_cmd,
void *fabric_tmr_ptr,
@@ -57,8 +50,8 @@ struct se_tmr_req *core_tmr_alloc_req(
tmr = kmem_cache_zalloc(se_tmr_req_cache, (in_interrupt()) ?
GFP_ATOMIC : GFP_KERNEL);
- if (!(tmr)) {
- printk(KERN_ERR "Unable to allocate struct se_tmr_req\n");
+ if (!tmr) {
+ pr_err("Unable to allocate struct se_tmr_req\n");
return ERR_PTR(-ENOMEM);
}
tmr->task_cmd = se_cmd;
@@ -75,10 +68,16 @@ void core_tmr_release_req(
{
struct se_device *dev = tmr->tmr_dev;
- spin_lock(&dev->se_tmr_lock);
+ if (!dev) {
+ kmem_cache_free(se_tmr_req_cache, tmr);
+ return;
+ }
+
+ spin_lock_irq(&dev->se_tmr_lock);
list_del(&tmr->tmr_list);
+ spin_unlock_irq(&dev->se_tmr_lock);
+
kmem_cache_free(se_tmr_req_cache, tmr);
- spin_unlock(&dev->se_tmr_lock);
}
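Two fixes are folded into core_tmr_release_req() here: a guard for TMR requests that were never associated with a device (tmr_dev == NULL), and irq-disabling lock primitives so the list removal cannot deadlock against irq-context users of se_tmr_lock. The free also moves out of the critical section, which now covers only the list_del(). In outline:

    /* Outline of the fixed teardown, matching the hunk above. */
    static void release_req_outline(struct se_tmr_req *tmr)
    {
        struct se_device *dev = tmr->tmr_dev;

        if (!dev) {                 /* request never reached a device */
            kmem_cache_free(se_tmr_req_cache, tmr);
            return;
        }

        spin_lock_irq(&dev->se_tmr_lock);
        list_del(&tmr->tmr_list);   /* only this needs the lock */
        spin_unlock_irq(&dev->se_tmr_lock);

        kmem_cache_free(se_tmr_req_cache, tmr);
    }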
static void core_tmr_handle_tas_abort(
@@ -87,14 +86,14 @@ static void core_tmr_handle_tas_abort(
int tas,
int fe_count)
{
- if (!(fe_count)) {
+ if (!fe_count) {
transport_cmd_finish_abort(cmd, 1);
return;
}
/*
* TASK ABORTED status (TAS) bit support
*/
- if (((tmr_nacl != NULL) &&
+ if ((tmr_nacl &&
(tmr_nacl == cmd->se_sess->se_node_acl)) || tas)
transport_send_task_abort(cmd);
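This helper implements the spc4r17 TAS rules: a command with no frontend references is simply finished, otherwise TASK ABORTED status is sent back if the aborting initiator port matches the command's own ACL or the device's emulate_tas attribute is set. A sketch of the full decision (the final finish-abort call is not visible in this hunk and is assumed here):

    static void handle_tas_abort_sketch(struct se_node_acl *tmr_nacl,
                                        struct se_cmd *cmd,
                                        int tas, int fe_count)
    {
        if (!fe_count) {
            transport_cmd_finish_abort(cmd, 1);
            return;
        }
        /* TASK ABORTED status (TAS), spc4r17 section 7.4.6 */
        if ((tmr_nacl && tmr_nacl == cmd->se_sess->se_node_acl) || tas)
            transport_send_task_abort(cmd);

        transport_cmd_finish_abort(cmd, 0); /* assumed tail of the helper */
    }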
@@ -107,15 +106,14 @@ int core_tmr_lun_reset(
struct list_head *preempt_and_abort_list,
struct se_cmd *prout_cmd)
{
- struct se_cmd *cmd;
- struct se_queue_req *qr, *qr_tmp;
+ struct se_cmd *cmd, *tcmd;
struct se_node_acl *tmr_nacl = NULL;
struct se_portal_group *tmr_tpg = NULL;
- struct se_queue_obj *qobj = dev->dev_queue_obj;
+ struct se_queue_obj *qobj = &dev->dev_queue_obj;
struct se_tmr_req *tmr_p, *tmr_pp;
struct se_task *task, *task_tmp;
unsigned long flags;
- int fe_count, state, tas;
+ int fe_count, tas;
/*
	 * TASK_ABORTED status bit; this is configurable via ConfigFS
* struct se_device attributes. spc4r17 section 7.4.6 Control mode page
@@ -127,7 +125,7 @@ int core_tmr_lun_reset(
* which the command was received shall be completed with TASK ABORTED
* status (see SAM-4).
*/
- tas = DEV_ATTRIB(dev)->emulate_tas;
+ tas = dev->se_sub_dev->se_dev_attrib.emulate_tas;
/*
* Determine if this se_tmr is coming from a $FABRIC_MOD
* or struct se_device passthrough..
@@ -136,20 +134,20 @@ int core_tmr_lun_reset(
tmr_nacl = tmr->task_cmd->se_sess->se_node_acl;
tmr_tpg = tmr->task_cmd->se_sess->se_tpg;
if (tmr_nacl && tmr_tpg) {
- DEBUG_LR("LUN_RESET: TMR caller fabric: %s"
+ pr_debug("LUN_RESET: TMR caller fabric: %s"
" initiator port %s\n",
- TPG_TFO(tmr_tpg)->get_fabric_name(),
+ tmr_tpg->se_tpg_tfo->get_fabric_name(),
tmr_nacl->initiatorname);
}
}
- DEBUG_LR("LUN_RESET: %s starting for [%s], tas: %d\n",
+ pr_debug("LUN_RESET: %s starting for [%s], tas: %d\n",
(preempt_and_abort_list) ? "Preempt" : "TMR",
- TRANSPORT(dev)->name, tas);
+ dev->transport->name, tas);
/*
* Release all pending and outgoing TMRs aside from the received
* LUN_RESET tmr..
*/
- spin_lock(&dev->se_tmr_lock);
+ spin_lock_irq(&dev->se_tmr_lock);
list_for_each_entry_safe(tmr_p, tmr_pp, &dev->dev_tmr_list, tmr_list) {
/*
* Allow the received TMR to return with FUNCTION_COMPLETE.
@@ -158,8 +156,8 @@ int core_tmr_lun_reset(
continue;
cmd = tmr_p->task_cmd;
- if (!(cmd)) {
- printk(KERN_ERR "Unable to locate struct se_cmd for TMR\n");
+ if (!cmd) {
+ pr_err("Unable to locate struct se_cmd for TMR\n");
continue;
}
/*
@@ -167,33 +165,33 @@ int core_tmr_lun_reset(
	 * parameter (e.g. for the PROUT PREEMPT_AND_ABORT service action),
	 * skip TMRs that do not match a registration key.
*/
- if ((preempt_and_abort_list != NULL) &&
+ if (preempt_and_abort_list &&
(core_scsi3_check_cdb_abort_and_preempt(
preempt_and_abort_list, cmd) != 0))
continue;
- spin_unlock(&dev->se_tmr_lock);
+ spin_unlock_irq(&dev->se_tmr_lock);
- spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
- if (!(atomic_read(&T_TASK(cmd)->t_transport_active))) {
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
- spin_lock(&dev->se_tmr_lock);
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+ if (!atomic_read(&cmd->t_transport_active)) {
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+ spin_lock_irq(&dev->se_tmr_lock);
continue;
}
if (cmd->t_state == TRANSPORT_ISTATE_PROCESSING) {
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
- spin_lock(&dev->se_tmr_lock);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+ spin_lock_irq(&dev->se_tmr_lock);
continue;
}
- DEBUG_LR("LUN_RESET: %s releasing TMR %p Function: 0x%02x,"
+ pr_debug("LUN_RESET: %s releasing TMR %p Function: 0x%02x,"
" Response: 0x%02x, t_state: %d\n",
(preempt_and_abort_list) ? "Preempt" : "", tmr_p,
tmr_p->function, tmr_p->response, cmd->t_state);
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
transport_cmd_finish_abort_tmr(cmd);
- spin_lock(&dev->se_tmr_lock);
+ spin_lock_irq(&dev->se_tmr_lock);
}
- spin_unlock(&dev->se_tmr_lock);
+ spin_unlock_irq(&dev->se_tmr_lock);
/*
* Complete outstanding struct se_task CDBs with TASK_ABORTED SAM status.
* This is following sam4r17, section 5.6 Aborting commands, Table 38
@@ -218,23 +216,17 @@ int core_tmr_lun_reset(
spin_lock_irqsave(&dev->execute_task_lock, flags);
list_for_each_entry_safe(task, task_tmp, &dev->state_task_list,
t_state_list) {
- if (!(TASK_CMD(task))) {
- printk(KERN_ERR "TASK_CMD(task) is NULL!\n");
+ if (!task->task_se_cmd) {
+ pr_err("task->task_se_cmd is NULL!\n");
continue;
}
- cmd = TASK_CMD(task);
+ cmd = task->task_se_cmd;
- if (!T_TASK(cmd)) {
- printk(KERN_ERR "T_TASK(cmd) is NULL for task: %p cmd:"
- " %p ITT: 0x%08x\n", task, cmd,
- CMD_TFO(cmd)->get_task_tag(cmd));
- continue;
- }
/*
* For PREEMPT_AND_ABORT usage, only process commands
* with a matching reservation key.
*/
- if ((preempt_and_abort_list != NULL) &&
+ if (preempt_and_abort_list &&
(core_scsi3_check_cdb_abort_and_preempt(
preempt_and_abort_list, cmd) != 0))
continue;
@@ -248,38 +240,38 @@ int core_tmr_lun_reset(
atomic_set(&task->task_state_active, 0);
spin_unlock_irqrestore(&dev->execute_task_lock, flags);
- spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
- DEBUG_LR("LUN_RESET: %s cmd: %p task: %p"
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+ pr_debug("LUN_RESET: %s cmd: %p task: %p"
" ITT/CmdSN: 0x%08x/0x%08x, i_state: %d, t_state/"
"def_t_state: %d/%d cdb: 0x%02x\n",
(preempt_and_abort_list) ? "Preempt" : "", cmd, task,
- CMD_TFO(cmd)->get_task_tag(cmd), 0,
- CMD_TFO(cmd)->get_cmd_state(cmd), cmd->t_state,
- cmd->deferred_t_state, T_TASK(cmd)->t_task_cdb[0]);
- DEBUG_LR("LUN_RESET: ITT[0x%08x] - pr_res_key: 0x%016Lx"
+ cmd->se_tfo->get_task_tag(cmd), 0,
+ cmd->se_tfo->get_cmd_state(cmd), cmd->t_state,
+ cmd->deferred_t_state, cmd->t_task_cdb[0]);
+ pr_debug("LUN_RESET: ITT[0x%08x] - pr_res_key: 0x%016Lx"
" t_task_cdbs: %d t_task_cdbs_left: %d"
" t_task_cdbs_sent: %d -- t_transport_active: %d"
" t_transport_stop: %d t_transport_sent: %d\n",
- CMD_TFO(cmd)->get_task_tag(cmd), cmd->pr_res_key,
- T_TASK(cmd)->t_task_cdbs,
- atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
- atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
- atomic_read(&T_TASK(cmd)->t_transport_active),
- atomic_read(&T_TASK(cmd)->t_transport_stop),
- atomic_read(&T_TASK(cmd)->t_transport_sent));
+ cmd->se_tfo->get_task_tag(cmd), cmd->pr_res_key,
+ cmd->t_task_list_num,
+ atomic_read(&cmd->t_task_cdbs_left),
+ atomic_read(&cmd->t_task_cdbs_sent),
+ atomic_read(&cmd->t_transport_active),
+ atomic_read(&cmd->t_transport_stop),
+ atomic_read(&cmd->t_transport_sent));
if (atomic_read(&task->task_active)) {
atomic_set(&task->task_stop, 1);
spin_unlock_irqrestore(
- &T_TASK(cmd)->t_state_lock, flags);
+ &cmd->t_state_lock, flags);
- DEBUG_LR("LUN_RESET: Waiting for task: %p to shutdown"
+ pr_debug("LUN_RESET: Waiting for task: %p to shutdown"
" for dev: %p\n", task, dev);
wait_for_completion(&task->task_stop_comp);
- DEBUG_LR("LUN_RESET Completed task: %p shutdown for"
+ pr_debug("LUN_RESET Completed task: %p shutdown for"
" dev: %p\n", task, dev);
- spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
- atomic_dec(&T_TASK(cmd)->t_task_cdbs_left);
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+ atomic_dec(&cmd->t_task_cdbs_left);
atomic_set(&task->task_active, 0);
atomic_set(&task->task_stop, 0);
@@ -289,34 +281,34 @@ int core_tmr_lun_reset(
}
__transport_stop_task_timer(task, &flags);
- if (!(atomic_dec_and_test(&T_TASK(cmd)->t_task_cdbs_ex_left))) {
+ if (!atomic_dec_and_test(&cmd->t_task_cdbs_ex_left)) {
spin_unlock_irqrestore(
- &T_TASK(cmd)->t_state_lock, flags);
- DEBUG_LR("LUN_RESET: Skipping task: %p, dev: %p for"
+ &cmd->t_state_lock, flags);
+ pr_debug("LUN_RESET: Skipping task: %p, dev: %p for"
" t_task_cdbs_ex_left: %d\n", task, dev,
- atomic_read(&T_TASK(cmd)->t_task_cdbs_ex_left));
+ atomic_read(&cmd->t_task_cdbs_ex_left));
spin_lock_irqsave(&dev->execute_task_lock, flags);
continue;
}
- fe_count = atomic_read(&T_TASK(cmd)->t_fe_count);
+ fe_count = atomic_read(&cmd->t_fe_count);
- if (atomic_read(&T_TASK(cmd)->t_transport_active)) {
- DEBUG_LR("LUN_RESET: got t_transport_active = 1 for"
+ if (atomic_read(&cmd->t_transport_active)) {
+ pr_debug("LUN_RESET: got t_transport_active = 1 for"
" task: %p, t_fe_count: %d dev: %p\n", task,
fe_count, dev);
- atomic_set(&T_TASK(cmd)->t_transport_aborted, 1);
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
+ atomic_set(&cmd->t_transport_aborted, 1);
+ spin_unlock_irqrestore(&cmd->t_state_lock,
flags);
core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
spin_lock_irqsave(&dev->execute_task_lock, flags);
continue;
}
- DEBUG_LR("LUN_RESET: Got t_transport_active = 0 for task: %p,"
+ pr_debug("LUN_RESET: Got t_transport_active = 0 for task: %p,"
" t_fe_count: %d dev: %p\n", task, fe_count, dev);
- atomic_set(&T_TASK(cmd)->t_transport_aborted, 1);
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ atomic_set(&cmd->t_transport_aborted, 1);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
spin_lock_irqsave(&dev->execute_task_lock, flags);
@@ -331,25 +323,12 @@ int core_tmr_lun_reset(
* reference, otherwise the struct se_cmd is released.
*/
spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
- list_for_each_entry_safe(qr, qr_tmp, &qobj->qobj_list, qr_list) {
- cmd = (struct se_cmd *)qr->cmd;
- if (!(cmd)) {
- /*
- * Skip these for non PREEMPT_AND_ABORT usage..
- */
- if (preempt_and_abort_list != NULL)
- continue;
-
- atomic_dec(&qobj->queue_cnt);
- list_del(&qr->qr_list);
- kfree(qr);
- continue;
- }
+ list_for_each_entry_safe(cmd, tcmd, &qobj->qobj_list, se_queue_node) {
/*
* For PREEMPT_AND_ABORT usage, only process commands
* with a matching reservation key.
*/
- if ((preempt_and_abort_list != NULL) &&
+ if (preempt_and_abort_list &&
(core_scsi3_check_cdb_abort_and_preempt(
preempt_and_abort_list, cmd) != 0))
continue;
@@ -359,30 +338,22 @@ int core_tmr_lun_reset(
if (prout_cmd == cmd)
continue;
- atomic_dec(&T_TASK(cmd)->t_transport_queue_active);
+ atomic_dec(&cmd->t_transport_queue_active);
atomic_dec(&qobj->queue_cnt);
- list_del(&qr->qr_list);
+ list_del(&cmd->se_queue_node);
spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
- state = qr->state;
- kfree(qr);
-
- DEBUG_LR("LUN_RESET: %s from Device Queue: cmd: %p t_state:"
+ pr_debug("LUN_RESET: %s from Device Queue: cmd: %p t_state:"
" %d t_fe_count: %d\n", (preempt_and_abort_list) ?
- "Preempt" : "", cmd, state,
- atomic_read(&T_TASK(cmd)->t_fe_count));
+ "Preempt" : "", cmd, cmd->t_state,
+ atomic_read(&cmd->t_fe_count));
/*
* Signal that the command has failed via cmd->se_cmd_flags,
- * and call TFO->new_cmd_failure() to wakeup any fabric
- * dependent code used to wait for unsolicited data out
- * allocation to complete. The fabric module is expected
- * to dump any remaining unsolicited data out for the aborted
- * command at this point.
*/
transport_new_cmd_failure(cmd);
core_tmr_handle_tas_abort(tmr_nacl, cmd, tas,
- atomic_read(&T_TASK(cmd)->t_fe_count));
+ atomic_read(&cmd->t_fe_count));
spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
}
spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
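The queue drain above is simplified by the removal of the intermediate struct se_queue_req: commands now sit on the device queue through an embedded se_queue_node list_head, so the walk has no wrapper to kfree() and no NULL-cmd case to special-case. The generic embedded-node idiom, for reference:

    #include <linux/list.h>

    struct item {
        int payload;
        struct list_head node;  /* embedded, like se_cmd->se_queue_node */
    };

    static void drain(struct list_head *queue)
    {
        struct item *it, *tmp;

        /* _safe variant: 'it' may be unlinked while iterating. */
        list_for_each_entry_safe(it, tmp, queue, node) {
            list_del(&it->node);
            /* process 'it'; its storage is owned by its container */
        }
    }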
@@ -390,21 +361,21 @@ int core_tmr_lun_reset(
* Clear any legacy SPC-2 reservation when called during
* LOGICAL UNIT RESET
*/
- if (!(preempt_and_abort_list) &&
+ if (!preempt_and_abort_list &&
(dev->dev_flags & DF_SPC2_RESERVATIONS)) {
spin_lock(&dev->dev_reservation_lock);
dev->dev_reserved_node_acl = NULL;
dev->dev_flags &= ~DF_SPC2_RESERVATIONS;
spin_unlock(&dev->dev_reservation_lock);
- printk(KERN_INFO "LUN_RESET: SCSI-2 Released reservation\n");
+ pr_debug("LUN_RESET: SCSI-2 Released reservation\n");
}
spin_lock_irq(&dev->stats_lock);
dev->num_resets++;
spin_unlock_irq(&dev->stats_lock);
- DEBUG_LR("LUN_RESET: %s for [%s] Complete\n",
+ pr_debug("LUN_RESET: %s for [%s] Complete\n",
(preempt_and_abort_list) ? "Preempt" : "TMR",
- TRANSPORT(dev)->name);
+ dev->transport->name);
return 0;
}
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index 5ec745fed93..4f1ba4c5ef1 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -44,6 +44,12 @@
#include <target/target_core_fabric_ops.h>
#include "target_core_hba.h"
+#include "target_core_stat.h"
+
+extern struct se_device *g_lun0_dev;
+
+static DEFINE_SPINLOCK(tpg_lock);
+static LIST_HEAD(tpg_list);
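This is part of retiring the global struct se_global: the TPG registry shrinks to a file-scope spinlock and list head, keeping the same _bh locking the old se_global code used. The add/remove pattern these protect (visible in the core_tpg_register/deregister hunks below) amounts to the following, with illustrative helper names:

    static void tpg_registry_add(struct se_portal_group *se_tpg)
    {
        spin_lock_bh(&tpg_lock);
        list_add_tail(&se_tpg->se_tpg_node, &tpg_list);
        spin_unlock_bh(&tpg_lock);
    }

    static void tpg_registry_del(struct se_portal_group *se_tpg)
    {
        spin_lock_bh(&tpg_lock);
        list_del(&se_tpg->se_tpg_node);
        spin_unlock_bh(&tpg_lock);
    }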
/* core_clear_initiator_node_from_tpg():
*
@@ -66,9 +72,9 @@ static void core_clear_initiator_node_from_tpg(
continue;
if (!deve->se_lun) {
- printk(KERN_ERR "%s device entries device pointer is"
+ pr_err("%s device entries device pointer is"
" NULL, but Initiator has access.\n",
- TPG_TFO(tpg)->get_fabric_name());
+ tpg->se_tpg_tfo->get_fabric_name());
continue;
}
@@ -80,14 +86,13 @@ static void core_clear_initiator_node_from_tpg(
spin_lock(&lun->lun_acl_lock);
list_for_each_entry_safe(acl, acl_tmp,
&lun->lun_acl_list, lacl_list) {
- if (!(strcmp(acl->initiatorname,
- nacl->initiatorname)) &&
- (acl->mapped_lun == deve->mapped_lun))
+ if (!strcmp(acl->initiatorname, nacl->initiatorname) &&
+ (acl->mapped_lun == deve->mapped_lun))
break;
}
if (!acl) {
- printk(KERN_ERR "Unable to locate struct se_lun_acl for %s,"
+ pr_err("Unable to locate struct se_lun_acl for %s,"
" mapped_lun: %u\n", nacl->initiatorname,
deve->mapped_lun);
spin_unlock(&lun->lun_acl_lock);
@@ -115,7 +120,7 @@ struct se_node_acl *__core_tpg_get_initiator_node_acl(
struct se_node_acl *acl;
list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
- if (!(strcmp(acl->initiatorname, initiatorname)))
+ if (!strcmp(acl->initiatorname, initiatorname))
return acl;
}
@@ -134,8 +139,8 @@ struct se_node_acl *core_tpg_get_initiator_node_acl(
spin_lock_bh(&tpg->acl_node_lock);
list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
- if (!(strcmp(acl->initiatorname, initiatorname)) &&
- (!(acl->dynamic_node_acl))) {
+ if (!strcmp(acl->initiatorname, initiatorname) &&
+ !acl->dynamic_node_acl) {
spin_unlock_bh(&tpg->acl_node_lock);
return acl;
}
@@ -171,7 +176,7 @@ void core_tpg_add_node_to_devs(
* By default in LIO-Target $FABRIC_MOD,
* demo_mode_write_protect is ON, or READ_ONLY;
*/
- if (!(TPG_TFO(tpg)->tpg_check_demo_mode_write_protect(tpg))) {
+ if (!tpg->se_tpg_tfo->tpg_check_demo_mode_write_protect(tpg)) {
if (dev->dev_flags & DF_READ_ONLY)
lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
else
@@ -181,16 +186,16 @@ void core_tpg_add_node_to_devs(
* Allow only optical drives to issue R/W in default RO
* demo mode.
*/
- if (TRANSPORT(dev)->get_device_type(dev) == TYPE_DISK)
+ if (dev->transport->get_device_type(dev) == TYPE_DISK)
lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
else
lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
}
- printk(KERN_INFO "TARGET_CORE[%s]->TPG[%u]_LUN[%u] - Adding %s"
+ pr_debug("TARGET_CORE[%s]->TPG[%u]_LUN[%u] - Adding %s"
" access for LUN in Demo Mode\n",
- TPG_TFO(tpg)->get_fabric_name(),
- TPG_TFO(tpg)->tpg_get_tag(tpg), lun->unpacked_lun,
+ tpg->se_tpg_tfo->get_fabric_name(),
+ tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
(lun_access == TRANSPORT_LUNFLAGS_READ_WRITE) ?
"READ-WRITE" : "READ-ONLY");
@@ -210,8 +215,8 @@ static int core_set_queue_depth_for_node(
struct se_node_acl *acl)
{
if (!acl->queue_depth) {
- printk(KERN_ERR "Queue depth for %s Initiator Node: %s is 0,"
- "defaulting to 1.\n", TPG_TFO(tpg)->get_fabric_name(),
+		pr_err("Queue depth for %s Initiator Node: %s is 0, "
+ "defaulting to 1.\n", tpg->se_tpg_tfo->get_fabric_name(),
acl->initiatorname);
acl->queue_depth = 1;
}
@@ -230,10 +235,10 @@ static int core_create_device_list_for_node(struct se_node_acl *nacl)
nacl->device_list = kzalloc(sizeof(struct se_dev_entry) *
TRANSPORT_MAX_LUNS_PER_TPG, GFP_KERNEL);
- if (!(nacl->device_list)) {
- printk(KERN_ERR "Unable to allocate memory for"
+ if (!nacl->device_list) {
+ pr_err("Unable to allocate memory for"
" struct se_node_acl->device_list\n");
- return -1;
+ return -ENOMEM;
}
for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
deve = &nacl->device_list[i];
@@ -259,14 +264,14 @@ struct se_node_acl *core_tpg_check_initiator_node_acl(
struct se_node_acl *acl;
acl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
- if ((acl))
+ if (acl)
return acl;
- if (!(TPG_TFO(tpg)->tpg_check_demo_mode(tpg)))
+ if (!tpg->se_tpg_tfo->tpg_check_demo_mode(tpg))
return NULL;
- acl = TPG_TFO(tpg)->tpg_alloc_fabric_acl(tpg);
- if (!(acl))
+ acl = tpg->se_tpg_tfo->tpg_alloc_fabric_acl(tpg);
+ if (!acl)
return NULL;
INIT_LIST_HEAD(&acl->acl_list);
@@ -274,23 +279,23 @@ struct se_node_acl *core_tpg_check_initiator_node_acl(
spin_lock_init(&acl->device_list_lock);
spin_lock_init(&acl->nacl_sess_lock);
atomic_set(&acl->acl_pr_ref_count, 0);
- acl->queue_depth = TPG_TFO(tpg)->tpg_get_default_depth(tpg);
+ acl->queue_depth = tpg->se_tpg_tfo->tpg_get_default_depth(tpg);
snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
acl->se_tpg = tpg;
acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
spin_lock_init(&acl->stats_lock);
acl->dynamic_node_acl = 1;
- TPG_TFO(tpg)->set_default_node_attributes(acl);
+ tpg->se_tpg_tfo->set_default_node_attributes(acl);
if (core_create_device_list_for_node(acl) < 0) {
- TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl);
+ tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
return NULL;
}
if (core_set_queue_depth_for_node(tpg, acl) < 0) {
core_free_device_list_for_node(acl, tpg);
- TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl);
+ tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
return NULL;
}
@@ -301,10 +306,10 @@ struct se_node_acl *core_tpg_check_initiator_node_acl(
tpg->num_node_acls++;
spin_unlock_bh(&tpg->acl_node_lock);
- printk("%s_TPG[%u] - Added DYNAMIC ACL with TCQ Depth: %d for %s"
- " Initiator Node: %s\n", TPG_TFO(tpg)->get_fabric_name(),
- TPG_TFO(tpg)->tpg_get_tag(tpg), acl->queue_depth,
- TPG_TFO(tpg)->get_fabric_name(), initiatorname);
+ pr_debug("%s_TPG[%u] - Added DYNAMIC ACL with TCQ Depth: %d for %s"
+ " Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
+ tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
+ tpg->se_tpg_tfo->get_fabric_name(), initiatorname);
return acl;
}
@@ -351,12 +356,12 @@ struct se_node_acl *core_tpg_add_initiator_node_acl(
spin_lock_bh(&tpg->acl_node_lock);
acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
- if ((acl)) {
+ if (acl) {
if (acl->dynamic_node_acl) {
acl->dynamic_node_acl = 0;
- printk(KERN_INFO "%s_TPG[%u] - Replacing dynamic ACL"
- " for %s\n", TPG_TFO(tpg)->get_fabric_name(),
- TPG_TFO(tpg)->tpg_get_tag(tpg), initiatorname);
+ pr_debug("%s_TPG[%u] - Replacing dynamic ACL"
+ " for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
+ tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname);
spin_unlock_bh(&tpg->acl_node_lock);
/*
* Release the locally allocated struct se_node_acl
@@ -364,22 +369,22 @@ struct se_node_acl *core_tpg_add_initiator_node_acl(
* a pointer to an existing demo mode node ACL.
*/
if (se_nacl)
- TPG_TFO(tpg)->tpg_release_fabric_acl(tpg,
+ tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg,
se_nacl);
goto done;
}
- printk(KERN_ERR "ACL entry for %s Initiator"
+ pr_err("ACL entry for %s Initiator"
" Node %s already exists for TPG %u, ignoring"
- " request.\n", TPG_TFO(tpg)->get_fabric_name(),
- initiatorname, TPG_TFO(tpg)->tpg_get_tag(tpg));
+ " request.\n", tpg->se_tpg_tfo->get_fabric_name(),
+ initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
spin_unlock_bh(&tpg->acl_node_lock);
return ERR_PTR(-EEXIST);
}
spin_unlock_bh(&tpg->acl_node_lock);
- if (!(se_nacl)) {
- printk("struct se_node_acl pointer is NULL\n");
+ if (!se_nacl) {
+ pr_err("struct se_node_acl pointer is NULL\n");
return ERR_PTR(-EINVAL);
}
/*
@@ -400,16 +405,16 @@ struct se_node_acl *core_tpg_add_initiator_node_acl(
acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
spin_lock_init(&acl->stats_lock);
- TPG_TFO(tpg)->set_default_node_attributes(acl);
+ tpg->se_tpg_tfo->set_default_node_attributes(acl);
if (core_create_device_list_for_node(acl) < 0) {
- TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl);
+ tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
return ERR_PTR(-ENOMEM);
}
if (core_set_queue_depth_for_node(tpg, acl) < 0) {
core_free_device_list_for_node(acl, tpg);
- TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl);
+ tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
return ERR_PTR(-EINVAL);
}
@@ -419,10 +424,10 @@ struct se_node_acl *core_tpg_add_initiator_node_acl(
spin_unlock_bh(&tpg->acl_node_lock);
done:
- printk(KERN_INFO "%s_TPG[%hu] - Added ACL with TCQ Depth: %d for %s"
- " Initiator Node: %s\n", TPG_TFO(tpg)->get_fabric_name(),
- TPG_TFO(tpg)->tpg_get_tag(tpg), acl->queue_depth,
- TPG_TFO(tpg)->get_fabric_name(), initiatorname);
+ pr_debug("%s_TPG[%hu] - Added ACL with TCQ Depth: %d for %s"
+ " Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
+ tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
+ tpg->se_tpg_tfo->get_fabric_name(), initiatorname);
return acl;
}
@@ -457,7 +462,7 @@ int core_tpg_del_initiator_node_acl(
/*
* Determine if the session needs to be closed by our context.
*/
- if (!(TPG_TFO(tpg)->shutdown_session(sess)))
+ if (!tpg->se_tpg_tfo->shutdown_session(sess))
continue;
spin_unlock_bh(&tpg->session_lock);
@@ -465,7 +470,7 @@ int core_tpg_del_initiator_node_acl(
* If the $FABRIC_MOD session for the Initiator Node ACL exists,
* forcefully shutdown the $FABRIC_MOD session/nexus.
*/
- TPG_TFO(tpg)->close_session(sess);
+ tpg->se_tpg_tfo->close_session(sess);
spin_lock_bh(&tpg->session_lock);
}
@@ -475,10 +480,10 @@ int core_tpg_del_initiator_node_acl(
core_clear_initiator_node_from_tpg(acl, tpg);
core_free_device_list_for_node(acl, tpg);
- printk(KERN_INFO "%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s"
- " Initiator Node: %s\n", TPG_TFO(tpg)->get_fabric_name(),
- TPG_TFO(tpg)->tpg_get_tag(tpg), acl->queue_depth,
- TPG_TFO(tpg)->get_fabric_name(), acl->initiatorname);
+ pr_debug("%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s"
+ " Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
+ tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
+ tpg->se_tpg_tfo->get_fabric_name(), acl->initiatorname);
return 0;
}
@@ -500,11 +505,11 @@ int core_tpg_set_initiator_node_queue_depth(
spin_lock_bh(&tpg->acl_node_lock);
acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
- if (!(acl)) {
- printk(KERN_ERR "Access Control List entry for %s Initiator"
+ if (!acl) {
+ pr_err("Access Control List entry for %s Initiator"
			" Node %s does not exist for TPG %hu, ignoring"
- " request.\n", TPG_TFO(tpg)->get_fabric_name(),
- initiatorname, TPG_TFO(tpg)->tpg_get_tag(tpg));
+ " request.\n", tpg->se_tpg_tfo->get_fabric_name(),
+ initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
spin_unlock_bh(&tpg->acl_node_lock);
return -ENODEV;
}
@@ -520,12 +525,12 @@ int core_tpg_set_initiator_node_queue_depth(
continue;
if (!force) {
- printk(KERN_ERR "Unable to change queue depth for %s"
+ pr_err("Unable to change queue depth for %s"
" Initiator Node: %s while session is"
" operational. To forcefully change the queue"
" depth and force session reinstatement"
" use the \"force=1\" parameter.\n",
- TPG_TFO(tpg)->get_fabric_name(), initiatorname);
+ tpg->se_tpg_tfo->get_fabric_name(), initiatorname);
spin_unlock_bh(&tpg->session_lock);
spin_lock_bh(&tpg->acl_node_lock);
@@ -537,7 +542,7 @@ int core_tpg_set_initiator_node_queue_depth(
/*
* Determine if the session needs to be closed by our context.
*/
- if (!(TPG_TFO(tpg)->shutdown_session(sess)))
+ if (!tpg->se_tpg_tfo->shutdown_session(sess))
continue;
init_sess = sess;
@@ -549,7 +554,7 @@ int core_tpg_set_initiator_node_queue_depth(
* Change the value in the Node's struct se_node_acl, and call
* core_set_queue_depth_for_node() to add the requested queue depth.
*
- * Finally call TPG_TFO(tpg)->close_session() to force session
+ * Finally call tpg->se_tpg_tfo->close_session() to force session
* reinstatement to occur if there is an active session for the
* $FABRIC_MOD Initiator Node in question.
*/
@@ -561,10 +566,10 @@ int core_tpg_set_initiator_node_queue_depth(
* Force session reinstatement if
* core_set_queue_depth_for_node() failed, because we assume
	 * the $FABRIC_MOD has already set the session reinstatement
- * bit from TPG_TFO(tpg)->shutdown_session() called above.
+ * bit from tpg->se_tpg_tfo->shutdown_session() called above.
*/
if (init_sess)
- TPG_TFO(tpg)->close_session(init_sess);
+ tpg->se_tpg_tfo->close_session(init_sess);
spin_lock_bh(&tpg->acl_node_lock);
if (dynamic_acl)
@@ -578,12 +583,12 @@ int core_tpg_set_initiator_node_queue_depth(
* forcefully shutdown the $FABRIC_MOD session/nexus.
*/
if (init_sess)
- TPG_TFO(tpg)->close_session(init_sess);
+ tpg->se_tpg_tfo->close_session(init_sess);
- printk(KERN_INFO "Successfuly changed queue depth to: %d for Initiator"
+	pr_debug("Successfully changed queue depth to: %d for Initiator"
" Node: %s on %s Target Portal Group: %u\n", queue_depth,
- initiatorname, TPG_TFO(tpg)->get_fabric_name(),
- TPG_TFO(tpg)->tpg_get_tag(tpg));
+ initiatorname, tpg->se_tpg_tfo->get_fabric_name(),
+ tpg->se_tpg_tfo->tpg_get_tag(tpg));
spin_lock_bh(&tpg->acl_node_lock);
if (dynamic_acl)
@@ -597,7 +602,7 @@ EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth);
static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg)
{
/* Set in core_dev_setup_virtual_lun0() */
- struct se_device *dev = se_global->g_lun0_dev;
+ struct se_device *dev = g_lun0_dev;
struct se_lun *lun = &se_tpg->tpg_virt_lun0;
u32 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
int ret;
@@ -614,7 +619,7 @@ static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg)
ret = core_tpg_post_addlun(se_tpg, lun, lun_access, dev);
if (ret < 0)
- return -1;
+ return ret;
return 0;
}
@@ -638,8 +643,8 @@ int core_tpg_register(
se_tpg->tpg_lun_list = kzalloc((sizeof(struct se_lun) *
TRANSPORT_MAX_LUNS_PER_TPG), GFP_KERNEL);
- if (!(se_tpg->tpg_lun_list)) {
- printk(KERN_ERR "Unable to allocate struct se_portal_group->"
+ if (!se_tpg->tpg_lun_list) {
+ pr_err("Unable to allocate struct se_portal_group->"
"tpg_lun_list\n");
return -ENOMEM;
}
@@ -663,7 +668,7 @@ int core_tpg_register(
se_tpg->se_tpg_wwn = se_wwn;
atomic_set(&se_tpg->tpg_pr_ref_count, 0);
INIT_LIST_HEAD(&se_tpg->acl_node_list);
- INIT_LIST_HEAD(&se_tpg->se_tpg_list);
+ INIT_LIST_HEAD(&se_tpg->se_tpg_node);
INIT_LIST_HEAD(&se_tpg->tpg_sess_list);
spin_lock_init(&se_tpg->acl_node_lock);
spin_lock_init(&se_tpg->session_lock);
@@ -676,11 +681,11 @@ int core_tpg_register(
}
}
- spin_lock_bh(&se_global->se_tpg_lock);
- list_add_tail(&se_tpg->se_tpg_list, &se_global->g_se_tpg_list);
- spin_unlock_bh(&se_global->se_tpg_lock);
+ spin_lock_bh(&tpg_lock);
+ list_add_tail(&se_tpg->se_tpg_node, &tpg_list);
+ spin_unlock_bh(&tpg_lock);
- printk(KERN_INFO "TARGET_CORE[%s]: Allocated %s struct se_portal_group for"
+ pr_debug("TARGET_CORE[%s]: Allocated %s struct se_portal_group for"
" endpoint: %s, Portal Tag: %u\n", tfo->get_fabric_name(),
(se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
"Normal" : "Discovery", (tfo->tpg_get_wwn(se_tpg) == NULL) ?
@@ -694,16 +699,16 @@ int core_tpg_deregister(struct se_portal_group *se_tpg)
{
struct se_node_acl *nacl, *nacl_tmp;
- printk(KERN_INFO "TARGET_CORE[%s]: Deallocating %s struct se_portal_group"
+ pr_debug("TARGET_CORE[%s]: Deallocating %s struct se_portal_group"
" for endpoint: %s Portal Tag %u\n",
(se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
- "Normal" : "Discovery", TPG_TFO(se_tpg)->get_fabric_name(),
- TPG_TFO(se_tpg)->tpg_get_wwn(se_tpg),
- TPG_TFO(se_tpg)->tpg_get_tag(se_tpg));
+ "Normal" : "Discovery", se_tpg->se_tpg_tfo->get_fabric_name(),
+ se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg),
+ se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));
- spin_lock_bh(&se_global->se_tpg_lock);
- list_del(&se_tpg->se_tpg_list);
- spin_unlock_bh(&se_global->se_tpg_lock);
+ spin_lock_bh(&tpg_lock);
+ list_del(&se_tpg->se_tpg_node);
+ spin_unlock_bh(&tpg_lock);
while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0)
cpu_relax();
@@ -721,7 +726,7 @@ int core_tpg_deregister(struct se_portal_group *se_tpg)
core_tpg_wait_for_nacl_pr_ref(nacl);
core_free_device_list_for_node(nacl, se_tpg);
- TPG_TFO(se_tpg)->tpg_release_fabric_acl(se_tpg, nacl);
+ se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg, nacl);
spin_lock_bh(&se_tpg->acl_node_lock);
}
@@ -743,21 +748,21 @@ struct se_lun *core_tpg_pre_addlun(
struct se_lun *lun;
if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
- printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
+ pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
"-1: %u for Target Portal Group: %u\n",
- TPG_TFO(tpg)->get_fabric_name(),
+ tpg->se_tpg_tfo->get_fabric_name(),
unpacked_lun, TRANSPORT_MAX_LUNS_PER_TPG-1,
- TPG_TFO(tpg)->tpg_get_tag(tpg));
+ tpg->se_tpg_tfo->tpg_get_tag(tpg));
return ERR_PTR(-EOVERFLOW);
}
spin_lock(&tpg->tpg_lun_lock);
lun = &tpg->tpg_lun_list[unpacked_lun];
if (lun->lun_status == TRANSPORT_LUN_STATUS_ACTIVE) {
- printk(KERN_ERR "TPG Logical Unit Number: %u is already active"
+ pr_err("TPG Logical Unit Number: %u is already active"
" on %s Target Portal Group: %u, ignoring request.\n",
- unpacked_lun, TPG_TFO(tpg)->get_fabric_name(),
- TPG_TFO(tpg)->tpg_get_tag(tpg));
+ unpacked_lun, tpg->se_tpg_tfo->get_fabric_name(),
+ tpg->se_tpg_tfo->tpg_get_tag(tpg));
spin_unlock(&tpg->tpg_lun_lock);
return ERR_PTR(-EINVAL);
}
@@ -772,8 +777,11 @@ int core_tpg_post_addlun(
u32 lun_access,
void *lun_ptr)
{
- if (core_dev_export(lun_ptr, tpg, lun) < 0)
- return -1;
+ int ret;
+
+ ret = core_dev_export(lun_ptr, tpg, lun);
+ if (ret < 0)
+ return ret;
spin_lock(&tpg->tpg_lun_lock);
lun->lun_access = lun_access;
@@ -799,21 +807,21 @@ struct se_lun *core_tpg_pre_dellun(
struct se_lun *lun;
if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
- printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
+ pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
"-1: %u for Target Portal Group: %u\n",
- TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
+ tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
TRANSPORT_MAX_LUNS_PER_TPG-1,
- TPG_TFO(tpg)->tpg_get_tag(tpg));
+ tpg->se_tpg_tfo->tpg_get_tag(tpg));
return ERR_PTR(-EOVERFLOW);
}
spin_lock(&tpg->tpg_lun_lock);
lun = &tpg->tpg_lun_list[unpacked_lun];
if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
- printk(KERN_ERR "%s Logical Unit Number: %u is not active on"
+ pr_err("%s Logical Unit Number: %u is not active on"
" Target Portal Group: %u, ignoring request.\n",
- TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
- TPG_TFO(tpg)->tpg_get_tag(tpg));
+ tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
+ tpg->se_tpg_tfo->tpg_get_tag(tpg));
spin_unlock(&tpg->tpg_lun_lock);
return ERR_PTR(-ENODEV);
}
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 4dafeb8b563..89760329d5d 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -58,139 +58,12 @@
#include "target_core_scdb.h"
#include "target_core_ua.h"
-/* #define DEBUG_CDB_HANDLER */
-#ifdef DEBUG_CDB_HANDLER
-#define DEBUG_CDB_H(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_CDB_H(x...)
-#endif
-
-/* #define DEBUG_CMD_MAP */
-#ifdef DEBUG_CMD_MAP
-#define DEBUG_CMD_M(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_CMD_M(x...)
-#endif
-
-/* #define DEBUG_MEM_ALLOC */
-#ifdef DEBUG_MEM_ALLOC
-#define DEBUG_MEM(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_MEM(x...)
-#endif
-
-/* #define DEBUG_MEM2_ALLOC */
-#ifdef DEBUG_MEM2_ALLOC
-#define DEBUG_MEM2(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_MEM2(x...)
-#endif
-
-/* #define DEBUG_SG_CALC */
-#ifdef DEBUG_SG_CALC
-#define DEBUG_SC(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_SC(x...)
-#endif
-
-/* #define DEBUG_SE_OBJ */
-#ifdef DEBUG_SE_OBJ
-#define DEBUG_SO(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_SO(x...)
-#endif
-
-/* #define DEBUG_CMD_VOL */
-#ifdef DEBUG_CMD_VOL
-#define DEBUG_VOL(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_VOL(x...)
-#endif
-
-/* #define DEBUG_CMD_STOP */
-#ifdef DEBUG_CMD_STOP
-#define DEBUG_CS(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_CS(x...)
-#endif
-
-/* #define DEBUG_PASSTHROUGH */
-#ifdef DEBUG_PASSTHROUGH
-#define DEBUG_PT(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_PT(x...)
-#endif
-
-/* #define DEBUG_TASK_STOP */
-#ifdef DEBUG_TASK_STOP
-#define DEBUG_TS(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_TS(x...)
-#endif
-
-/* #define DEBUG_TRANSPORT_STOP */
-#ifdef DEBUG_TRANSPORT_STOP
-#define DEBUG_TRANSPORT_S(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_TRANSPORT_S(x...)
-#endif
-
-/* #define DEBUG_TASK_FAILURE */
-#ifdef DEBUG_TASK_FAILURE
-#define DEBUG_TF(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_TF(x...)
-#endif
-
-/* #define DEBUG_DEV_OFFLINE */
-#ifdef DEBUG_DEV_OFFLINE
-#define DEBUG_DO(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_DO(x...)
-#endif
-
-/* #define DEBUG_TASK_STATE */
-#ifdef DEBUG_TASK_STATE
-#define DEBUG_TSTATE(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_TSTATE(x...)
-#endif
-
-/* #define DEBUG_STATUS_THR */
-#ifdef DEBUG_STATUS_THR
-#define DEBUG_ST(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_ST(x...)
-#endif
-
-/* #define DEBUG_TASK_TIMEOUT */
-#ifdef DEBUG_TASK_TIMEOUT
-#define DEBUG_TT(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_TT(x...)
-#endif
-
-/* #define DEBUG_GENERIC_REQUEST_FAILURE */
-#ifdef DEBUG_GENERIC_REQUEST_FAILURE
-#define DEBUG_GRF(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_GRF(x...)
-#endif
-
-/* #define DEBUG_SAM_TASK_ATTRS */
-#ifdef DEBUG_SAM_TASK_ATTRS
-#define DEBUG_STA(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_STA(x...)
-#endif
-
-struct se_global *se_global;
+static int sub_api_initialized;
static struct kmem_cache *se_cmd_cache;
static struct kmem_cache *se_sess_cache;
struct kmem_cache *se_tmr_req_cache;
struct kmem_cache *se_ua_cache;
-struct kmem_cache *se_mem_cache;
struct kmem_cache *t10_pr_reg_cache;
struct kmem_cache *t10_alua_lu_gp_cache;
struct kmem_cache *t10_alua_lu_gp_mem_cache;
@@ -201,116 +74,87 @@ struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;
typedef int (*map_func_t)(struct se_task *, u32);
static int transport_generic_write_pending(struct se_cmd *);
-static int transport_processing_thread(void *);
+static int transport_processing_thread(void *param);
static int __transport_execute_tasks(struct se_device *dev);
static void transport_complete_task_attr(struct se_cmd *cmd);
+static int transport_complete_qf(struct se_cmd *cmd);
+static void transport_handle_queue_full(struct se_cmd *cmd,
+ struct se_device *dev, int (*qf_callback)(struct se_cmd *));
static void transport_direct_request_timeout(struct se_cmd *cmd);
static void transport_free_dev_tasks(struct se_cmd *cmd);
-static u32 transport_generic_get_cdb_count(struct se_cmd *cmd,
- unsigned long long starting_lba, u32 sectors,
+static u32 transport_allocate_tasks(struct se_cmd *cmd,
+ unsigned long long starting_lba,
enum dma_data_direction data_direction,
- struct list_head *mem_list, int set_counts);
-static int transport_generic_get_mem(struct se_cmd *cmd, u32 length,
- u32 dma_size);
+ struct scatterlist *sgl, unsigned int nents);
+static int transport_generic_get_mem(struct se_cmd *cmd);
static int transport_generic_remove(struct se_cmd *cmd,
- int release_to_pool, int session_reinstatement);
-static int transport_get_sectors(struct se_cmd *cmd);
-static struct list_head *transport_init_se_mem_list(void);
-static int transport_map_sg_to_mem(struct se_cmd *cmd,
- struct list_head *se_mem_list, void *in_mem,
- u32 *se_mem_cnt);
-static void transport_memcpy_se_mem_read_contig(struct se_cmd *cmd,
- unsigned char *dst, struct list_head *se_mem_list);
+ int session_reinstatement);
static void transport_release_fe_cmd(struct se_cmd *cmd);
static void transport_remove_cmd_from_queue(struct se_cmd *cmd,
struct se_queue_obj *qobj);
static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq);
static void transport_stop_all_task_timers(struct se_cmd *cmd);
-int init_se_global(void)
+int init_se_kmem_caches(void)
{
- struct se_global *global;
-
- global = kzalloc(sizeof(struct se_global), GFP_KERNEL);
- if (!(global)) {
- printk(KERN_ERR "Unable to allocate memory for struct se_global\n");
- return -1;
- }
-
- INIT_LIST_HEAD(&global->g_lu_gps_list);
- INIT_LIST_HEAD(&global->g_se_tpg_list);
- INIT_LIST_HEAD(&global->g_hba_list);
- INIT_LIST_HEAD(&global->g_se_dev_list);
- spin_lock_init(&global->g_device_lock);
- spin_lock_init(&global->hba_lock);
- spin_lock_init(&global->se_tpg_lock);
- spin_lock_init(&global->lu_gps_lock);
- spin_lock_init(&global->plugin_class_lock);
-
se_cmd_cache = kmem_cache_create("se_cmd_cache",
sizeof(struct se_cmd), __alignof__(struct se_cmd), 0, NULL);
- if (!(se_cmd_cache)) {
- printk(KERN_ERR "kmem_cache_create for struct se_cmd failed\n");
+ if (!se_cmd_cache) {
+ pr_err("kmem_cache_create for struct se_cmd failed\n");
goto out;
}
se_tmr_req_cache = kmem_cache_create("se_tmr_cache",
sizeof(struct se_tmr_req), __alignof__(struct se_tmr_req),
0, NULL);
- if (!(se_tmr_req_cache)) {
- printk(KERN_ERR "kmem_cache_create() for struct se_tmr_req"
+ if (!se_tmr_req_cache) {
+ pr_err("kmem_cache_create() for struct se_tmr_req"
" failed\n");
goto out;
}
se_sess_cache = kmem_cache_create("se_sess_cache",
sizeof(struct se_session), __alignof__(struct se_session),
0, NULL);
- if (!(se_sess_cache)) {
- printk(KERN_ERR "kmem_cache_create() for struct se_session"
+ if (!se_sess_cache) {
+ pr_err("kmem_cache_create() for struct se_session"
" failed\n");
goto out;
}
se_ua_cache = kmem_cache_create("se_ua_cache",
sizeof(struct se_ua), __alignof__(struct se_ua),
0, NULL);
- if (!(se_ua_cache)) {
- printk(KERN_ERR "kmem_cache_create() for struct se_ua failed\n");
- goto out;
- }
- se_mem_cache = kmem_cache_create("se_mem_cache",
- sizeof(struct se_mem), __alignof__(struct se_mem), 0, NULL);
- if (!(se_mem_cache)) {
- printk(KERN_ERR "kmem_cache_create() for struct se_mem failed\n");
+ if (!se_ua_cache) {
+ pr_err("kmem_cache_create() for struct se_ua failed\n");
goto out;
}
t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache",
sizeof(struct t10_pr_registration),
__alignof__(struct t10_pr_registration), 0, NULL);
- if (!(t10_pr_reg_cache)) {
- printk(KERN_ERR "kmem_cache_create() for struct t10_pr_registration"
+ if (!t10_pr_reg_cache) {
+ pr_err("kmem_cache_create() for struct t10_pr_registration"
" failed\n");
goto out;
}
t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache",
sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp),
0, NULL);
- if (!(t10_alua_lu_gp_cache)) {
- printk(KERN_ERR "kmem_cache_create() for t10_alua_lu_gp_cache"
+ if (!t10_alua_lu_gp_cache) {
+ pr_err("kmem_cache_create() for t10_alua_lu_gp_cache"
" failed\n");
goto out;
}
t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache",
sizeof(struct t10_alua_lu_gp_member),
__alignof__(struct t10_alua_lu_gp_member), 0, NULL);
- if (!(t10_alua_lu_gp_mem_cache)) {
- printk(KERN_ERR "kmem_cache_create() for t10_alua_lu_gp_mem_"
+ if (!t10_alua_lu_gp_mem_cache) {
+ pr_err("kmem_cache_create() for t10_alua_lu_gp_mem_"
"cache failed\n");
goto out;
}
t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache",
sizeof(struct t10_alua_tg_pt_gp),
__alignof__(struct t10_alua_tg_pt_gp), 0, NULL);
- if (!(t10_alua_tg_pt_gp_cache)) {
- printk(KERN_ERR "kmem_cache_create() for t10_alua_tg_pt_gp_"
+ if (!t10_alua_tg_pt_gp_cache) {
+ pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_"
"cache failed\n");
goto out;
}
@@ -319,14 +163,12 @@ int init_se_global(void)
sizeof(struct t10_alua_tg_pt_gp_member),
__alignof__(struct t10_alua_tg_pt_gp_member),
0, NULL);
- if (!(t10_alua_tg_pt_gp_mem_cache)) {
- printk(KERN_ERR "kmem_cache_create() for t10_alua_tg_pt_gp_"
+ if (!t10_alua_tg_pt_gp_mem_cache) {
+ pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_"
"mem_t failed\n");
goto out;
}
- se_global = global;
-
return 0;
out:
if (se_cmd_cache)
@@ -337,8 +179,6 @@ out:
kmem_cache_destroy(se_sess_cache);
if (se_ua_cache)
kmem_cache_destroy(se_ua_cache);
- if (se_mem_cache)
- kmem_cache_destroy(se_mem_cache);
if (t10_pr_reg_cache)
kmem_cache_destroy(t10_pr_reg_cache);
if (t10_alua_lu_gp_cache)
@@ -349,45 +189,25 @@ out:
kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
if (t10_alua_tg_pt_gp_mem_cache)
kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
- kfree(global);
- return -1;
+ return -ENOMEM;
}
-void release_se_global(void)
+void release_se_kmem_caches(void)
{
- struct se_global *global;
-
- global = se_global;
- if (!(global))
- return;
-
kmem_cache_destroy(se_cmd_cache);
kmem_cache_destroy(se_tmr_req_cache);
kmem_cache_destroy(se_sess_cache);
kmem_cache_destroy(se_ua_cache);
- kmem_cache_destroy(se_mem_cache);
kmem_cache_destroy(t10_pr_reg_cache);
kmem_cache_destroy(t10_alua_lu_gp_cache);
kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
- kfree(global);
-
- se_global = NULL;
}
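
[Editor's sketch] The error path above follows a common pattern: create each cache in turn, and on any failure destroy only the caches that were actually created. A minimal standalone sketch of that create/teardown shape, with an invented foo_cache standing in for the caches in this patch (kernel-era detail preserved: kmem_cache_destroy() is only called on non-NULL pointers):

#include <linux/slab.h>
#include <linux/types.h>

struct foo_req {
	u32 tag;
};

static struct kmem_cache *foo_cache;

static int foo_caches_init(void)
{
	foo_cache = kmem_cache_create("foo_cache", sizeof(struct foo_req),
			__alignof__(struct foo_req), 0, NULL);
	if (!foo_cache)
		return -ENOMEM;	/* proper errno, as the patch now returns */
	return 0;
}

static void foo_caches_exit(void)
{
	if (foo_cache)		/* destroy only what was created */
		kmem_cache_destroy(foo_cache);
}
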
-/* SCSI statistics table index */
-static struct scsi_index_table scsi_index_table;
-
-/*
- * Initialize the index table for allocating unique row indexes to various mib
- * tables.
- */
-void init_scsi_index_table(void)
-{
- memset(&scsi_index_table, 0, sizeof(struct scsi_index_table));
- spin_lock_init(&scsi_index_table.lock);
-}
+/* This code ensures unique mib indexes are handed out. */
+static DEFINE_SPINLOCK(scsi_mib_index_lock);
+static u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX];
/*
* Allocate a new row index for the entry type specified
@@ -396,16 +216,11 @@ u32 scsi_get_new_index(scsi_index_t type)
{
u32 new_index;
- if ((type < 0) || (type >= SCSI_INDEX_TYPE_MAX)) {
- printk(KERN_ERR "Invalid index type %d\n", type);
- return -EINVAL;
- }
+ BUG_ON((type < 0) || (type >= SCSI_INDEX_TYPE_MAX));
- spin_lock(&scsi_index_table.lock);
- new_index = ++scsi_index_table.scsi_mib_index[type];
- if (new_index == 0)
- new_index = ++scsi_index_table.scsi_mib_index[type];
- spin_unlock(&scsi_index_table.lock);
+ spin_lock(&scsi_mib_index_lock);
+ new_index = ++scsi_mib_index[type];
+ spin_unlock(&scsi_mib_index_lock);
return new_index;
}
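
[Editor's sketch] With the se_global container gone, the index allocator reduces to a file-scope spinlock guarding an array of counters. The same idiom in miniature, with invented names:

#include <linux/spinlock.h>
#include <linux/types.h>

enum { IDX_TYPE_A, IDX_TYPE_B, IDX_TYPE_MAX };

static DEFINE_SPINLOCK(idx_lock);
static u32 idx_table[IDX_TYPE_MAX];

static u32 get_new_index(int type)
{
	u32 new_index;

	/* Caller guarantees a valid type; the patch enforces this with BUG_ON(). */
	spin_lock(&idx_lock);
	new_index = ++idx_table[type];
	spin_unlock(&idx_lock);
	return new_index;
}
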
@@ -425,34 +240,37 @@ static int transport_subsystem_reqmods(void)
ret = request_module("target_core_iblock");
if (ret != 0)
- printk(KERN_ERR "Unable to load target_core_iblock\n");
+ pr_err("Unable to load target_core_iblock\n");
ret = request_module("target_core_file");
if (ret != 0)
- printk(KERN_ERR "Unable to load target_core_file\n");
+ pr_err("Unable to load target_core_file\n");
ret = request_module("target_core_pscsi");
if (ret != 0)
- printk(KERN_ERR "Unable to load target_core_pscsi\n");
+ pr_err("Unable to load target_core_pscsi\n");
ret = request_module("target_core_stgt");
if (ret != 0)
- printk(KERN_ERR "Unable to load target_core_stgt\n");
+ pr_err("Unable to load target_core_stgt\n");
return 0;
}
int transport_subsystem_check_init(void)
{
- if (se_global->g_sub_api_initialized)
+ int ret;
+
+ if (sub_api_initialized)
return 0;
/*
 * Request the loading of known TCM subsystem plugins.
*/
- if (transport_subsystem_reqmods() < 0)
- return -1;
+ ret = transport_subsystem_reqmods();
+ if (ret < 0)
+ return ret;
- se_global->g_sub_api_initialized = 1;
+ sub_api_initialized = 1;
return 0;
}
@@ -461,8 +279,8 @@ struct se_session *transport_init_session(void)
struct se_session *se_sess;
se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL);
- if (!(se_sess)) {
- printk(KERN_ERR "Unable to allocate struct se_session from"
+ if (!se_sess) {
+ pr_err("Unable to allocate struct se_session from"
" se_sess_cache\n");
return ERR_PTR(-ENOMEM);
}
@@ -497,9 +315,9 @@ void __transport_register_session(
* If the fabric module supports an ISID based TransportID,
* save this value in binary from the fabric I_T Nexus now.
*/
- if (TPG_TFO(se_tpg)->sess_get_initiator_sid != NULL) {
+ if (se_tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) {
memset(&buf[0], 0, PR_REG_ISID_LEN);
- TPG_TFO(se_tpg)->sess_get_initiator_sid(se_sess,
+ se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess,
&buf[0], PR_REG_ISID_LEN);
se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]);
}
@@ -516,8 +334,8 @@ void __transport_register_session(
}
list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list);
- printk(KERN_INFO "TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n",
- TPG_TFO(se_tpg)->get_fabric_name(), se_sess->fabric_sess_ptr);
+ pr_debug("TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n",
+ se_tpg->se_tpg_tfo->get_fabric_name(), se_sess->fabric_sess_ptr);
}
EXPORT_SYMBOL(__transport_register_session);
@@ -536,13 +354,13 @@ EXPORT_SYMBOL(transport_register_session);
void transport_deregister_session_configfs(struct se_session *se_sess)
{
struct se_node_acl *se_nacl;
-
+ unsigned long flags;
/*
 * Used by struct se_node_acls under ConfigFS to locate active struct se_session
*/
se_nacl = se_sess->se_node_acl;
- if ((se_nacl)) {
- spin_lock_irq(&se_nacl->nacl_sess_lock);
+ if (se_nacl) {
+ spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
list_del(&se_sess->sess_acl_list);
/*
* If the session list is empty, then clear the pointer.
@@ -556,7 +374,7 @@ void transport_deregister_session_configfs(struct se_session *se_sess)
se_nacl->acl_sess_list.prev,
struct se_session, sess_acl_list);
}
- spin_unlock_irq(&se_nacl->nacl_sess_lock);
+ spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
}
}
EXPORT_SYMBOL(transport_deregister_session_configfs);
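
[Editor's sketch] The hunk above also swaps spin_lock_irq() for spin_lock_irqsave(), so the function no longer unconditionally re-enables interrupts and becomes safe to call with interrupts already disabled. The idiom as a minimal standalone sketch (names invented):

#include <linux/list.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);

/* Safe from any context: saves and restores the caller's IRQ state. */
static void demo_del_entry(struct list_head *entry)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);
	list_del(entry);
	spin_unlock_irqrestore(&demo_lock, flags);
}
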
@@ -572,7 +390,7 @@ void transport_deregister_session(struct se_session *se_sess)
struct se_portal_group *se_tpg = se_sess->se_tpg;
struct se_node_acl *se_nacl;
- if (!(se_tpg)) {
+ if (!se_tpg) {
transport_free_session(se_sess);
return;
}
@@ -588,18 +406,18 @@ void transport_deregister_session(struct se_session *se_sess)
* struct se_node_acl if it had been previously dynamically generated.
*/
se_nacl = se_sess->se_node_acl;
- if ((se_nacl)) {
+ if (se_nacl) {
spin_lock_bh(&se_tpg->acl_node_lock);
if (se_nacl->dynamic_node_acl) {
- if (!(TPG_TFO(se_tpg)->tpg_check_demo_mode_cache(
- se_tpg))) {
+ if (!se_tpg->se_tpg_tfo->tpg_check_demo_mode_cache(
+ se_tpg)) {
list_del(&se_nacl->acl_list);
se_tpg->num_node_acls--;
spin_unlock_bh(&se_tpg->acl_node_lock);
core_tpg_wait_for_nacl_pr_ref(se_nacl);
core_free_device_list_for_node(se_nacl, se_tpg);
- TPG_TFO(se_tpg)->tpg_release_fabric_acl(se_tpg,
+ se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg,
se_nacl);
spin_lock_bh(&se_tpg->acl_node_lock);
}
@@ -609,13 +427,13 @@ void transport_deregister_session(struct se_session *se_sess)
transport_free_session(se_sess);
- printk(KERN_INFO "TARGET_CORE[%s]: Deregistered fabric_sess\n",
- TPG_TFO(se_tpg)->get_fabric_name());
+ pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n",
+ se_tpg->se_tpg_tfo->get_fabric_name());
}
EXPORT_SYMBOL(transport_deregister_session);
/*
- * Called with T_TASK(cmd)->t_state_lock held.
+ * Called with cmd->t_state_lock held.
*/
static void transport_all_task_dev_remove_state(struct se_cmd *cmd)
{
@@ -623,28 +441,25 @@ static void transport_all_task_dev_remove_state(struct se_cmd *cmd)
struct se_task *task;
unsigned long flags;
- if (!T_TASK(cmd))
- return;
-
- list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) {
+ list_for_each_entry(task, &cmd->t_task_list, t_list) {
dev = task->se_dev;
- if (!(dev))
+ if (!dev)
continue;
if (atomic_read(&task->task_active))
continue;
- if (!(atomic_read(&task->task_state_active)))
+ if (!atomic_read(&task->task_state_active))
continue;
spin_lock_irqsave(&dev->execute_task_lock, flags);
list_del(&task->t_state_list);
- DEBUG_TSTATE("Removed ITT: 0x%08x dev: %p task[%p]\n",
- CMD_TFO(cmd)->tfo_get_task_tag(cmd), dev, task);
+ pr_debug("Removed ITT: 0x%08x dev: %p task[%p]\n",
+ cmd->se_tfo->get_task_tag(cmd), dev, task);
spin_unlock_irqrestore(&dev->execute_task_lock, flags);
atomic_set(&task->task_state_active, 0);
- atomic_dec(&T_TASK(cmd)->t_task_cdbs_ex_left);
+ atomic_dec(&cmd->t_task_cdbs_ex_left);
}
}
@@ -663,34 +478,34 @@ static int transport_cmd_check_stop(
{
unsigned long flags;
- spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
/*
* Determine if IOCTL context caller in requesting the stopping of this
* command for LUN shutdown purposes.
*/
- if (atomic_read(&T_TASK(cmd)->transport_lun_stop)) {
- DEBUG_CS("%s:%d atomic_read(&T_TASK(cmd)->transport_lun_stop)"
+ if (atomic_read(&cmd->transport_lun_stop)) {
+ pr_debug("%s:%d atomic_read(&cmd->transport_lun_stop)"
" == TRUE for ITT: 0x%08x\n", __func__, __LINE__,
- CMD_TFO(cmd)->get_task_tag(cmd));
+ cmd->se_tfo->get_task_tag(cmd));
cmd->deferred_t_state = cmd->t_state;
cmd->t_state = TRANSPORT_DEFERRED_CMD;
- atomic_set(&T_TASK(cmd)->t_transport_active, 0);
+ atomic_set(&cmd->t_transport_active, 0);
if (transport_off == 2)
transport_all_task_dev_remove_state(cmd);
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- complete(&T_TASK(cmd)->transport_lun_stop_comp);
+ complete(&cmd->transport_lun_stop_comp);
return 1;
}
/*
* Determine if frontend context caller is requesting the stopping of
- * this command for frontend excpections.
+ * this command for frontend exceptions.
*/
- if (atomic_read(&T_TASK(cmd)->t_transport_stop)) {
- DEBUG_CS("%s:%d atomic_read(&T_TASK(cmd)->t_transport_stop) =="
+ if (atomic_read(&cmd->t_transport_stop)) {
+ pr_debug("%s:%d atomic_read(&cmd->t_transport_stop) =="
" TRUE for ITT: 0x%08x\n", __func__, __LINE__,
- CMD_TFO(cmd)->get_task_tag(cmd));
+ cmd->se_tfo->get_task_tag(cmd));
cmd->deferred_t_state = cmd->t_state;
cmd->t_state = TRANSPORT_DEFERRED_CMD;
@@ -703,13 +518,13 @@ static int transport_cmd_check_stop(
*/
if (transport_off == 2)
cmd->se_lun = NULL;
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- complete(&T_TASK(cmd)->t_transport_stop_comp);
+ complete(&cmd->t_transport_stop_comp);
return 1;
}
if (transport_off) {
- atomic_set(&T_TASK(cmd)->t_transport_active, 0);
+ atomic_set(&cmd->t_transport_active, 0);
if (transport_off == 2) {
transport_all_task_dev_remove_state(cmd);
/*
@@ -722,20 +537,20 @@ static int transport_cmd_check_stop(
* their internally allocated I/O reference now and
* struct se_cmd now.
*/
- if (CMD_TFO(cmd)->check_stop_free != NULL) {
+ if (cmd->se_tfo->check_stop_free != NULL) {
spin_unlock_irqrestore(
- &T_TASK(cmd)->t_state_lock, flags);
+ &cmd->t_state_lock, flags);
- CMD_TFO(cmd)->check_stop_free(cmd);
+ cmd->se_tfo->check_stop_free(cmd);
return 1;
}
}
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
return 0;
} else if (t_state)
cmd->t_state = t_state;
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
return 0;
}
@@ -747,30 +562,30 @@ static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
static void transport_lun_remove_cmd(struct se_cmd *cmd)
{
- struct se_lun *lun = SE_LUN(cmd);
+ struct se_lun *lun = cmd->se_lun;
unsigned long flags;
if (!lun)
return;
- spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
- if (!(atomic_read(&T_TASK(cmd)->transport_dev_active))) {
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+ if (!atomic_read(&cmd->transport_dev_active)) {
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
goto check_lun;
}
- atomic_set(&T_TASK(cmd)->transport_dev_active, 0);
+ atomic_set(&cmd->transport_dev_active, 0);
transport_all_task_dev_remove_state(cmd);
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
check_lun:
spin_lock_irqsave(&lun->lun_cmd_lock, flags);
- if (atomic_read(&T_TASK(cmd)->transport_lun_active)) {
- list_del(&cmd->se_lun_list);
- atomic_set(&T_TASK(cmd)->transport_lun_active, 0);
+ if (atomic_read(&cmd->transport_lun_active)) {
+ list_del(&cmd->se_lun_node);
+ atomic_set(&cmd->transport_lun_active, 0);
#if 0
- printk(KERN_INFO "Removed ITT: 0x%08x from LUN LIST[%d]\n"
- CMD_TFO(cmd)->get_task_tag(cmd), lun->unpacked_lun);
+ pr_debug("Removed ITT: 0x%08x from LUN LIST[%d]\n"
+ cmd->se_tfo->get_task_tag(cmd), lun->unpacked_lun);
#endif
}
spin_unlock_irqrestore(&lun->lun_cmd_lock, flags);
@@ -778,92 +593,59 @@ check_lun:
void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
{
- transport_remove_cmd_from_queue(cmd, SE_DEV(cmd)->dev_queue_obj);
+ transport_remove_cmd_from_queue(cmd, &cmd->se_dev->dev_queue_obj);
transport_lun_remove_cmd(cmd);
if (transport_cmd_check_stop_to_fabric(cmd))
return;
if (remove)
- transport_generic_remove(cmd, 0, 0);
+ transport_generic_remove(cmd, 0);
}
void transport_cmd_finish_abort_tmr(struct se_cmd *cmd)
{
- transport_remove_cmd_from_queue(cmd, SE_DEV(cmd)->dev_queue_obj);
+ transport_remove_cmd_from_queue(cmd, &cmd->se_dev->dev_queue_obj);
if (transport_cmd_check_stop_to_fabric(cmd))
return;
- transport_generic_remove(cmd, 0, 0);
+ transport_generic_remove(cmd, 0);
}
-static int transport_add_cmd_to_queue(
+static void transport_add_cmd_to_queue(
struct se_cmd *cmd,
int t_state)
{
struct se_device *dev = cmd->se_dev;
- struct se_queue_obj *qobj = dev->dev_queue_obj;
- struct se_queue_req *qr;
+ struct se_queue_obj *qobj = &dev->dev_queue_obj;
unsigned long flags;
- qr = kzalloc(sizeof(struct se_queue_req), GFP_ATOMIC);
- if (!(qr)) {
- printk(KERN_ERR "Unable to allocate memory for"
- " struct se_queue_req\n");
- return -1;
- }
- INIT_LIST_HEAD(&qr->qr_list);
-
- qr->cmd = (void *)cmd;
- qr->state = t_state;
+ INIT_LIST_HEAD(&cmd->se_queue_node);
if (t_state) {
- spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
cmd->t_state = t_state;
- atomic_set(&T_TASK(cmd)->t_transport_active, 1);
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ atomic_set(&cmd->t_transport_active, 1);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
}
spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
- list_add_tail(&qr->qr_list, &qobj->qobj_list);
- atomic_inc(&T_TASK(cmd)->t_transport_queue_active);
+ if (cmd->se_cmd_flags & SCF_EMULATE_QUEUE_FULL) {
+ cmd->se_cmd_flags &= ~SCF_EMULATE_QUEUE_FULL;
+ list_add(&cmd->se_queue_node, &qobj->qobj_list);
+ } else
+ list_add_tail(&cmd->se_queue_node, &qobj->qobj_list);
+ atomic_inc(&cmd->t_transport_queue_active);
spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
atomic_inc(&qobj->queue_cnt);
wake_up_interruptible(&qobj->thread_wq);
- return 0;
-}
-
-/*
- * Called with struct se_queue_obj->cmd_queue_lock held.
- */
-static struct se_queue_req *
-__transport_get_qr_from_queue(struct se_queue_obj *qobj)
-{
- struct se_cmd *cmd;
- struct se_queue_req *qr = NULL;
-
- if (list_empty(&qobj->qobj_list))
- return NULL;
-
- list_for_each_entry(qr, &qobj->qobj_list, qr_list)
- break;
-
- if (qr->cmd) {
- cmd = (struct se_cmd *)qr->cmd;
- atomic_dec(&T_TASK(cmd)->t_transport_queue_active);
- }
- list_del(&qr->qr_list);
- atomic_dec(&qobj->queue_cnt);
-
- return qr;
}
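
[Editor's sketch] The SCF_EMULATE_QUEUE_FULL handling above re-adds flagged commands at the head of the list so they are retried before any newly submitted work. Reduced to the bare list operations (generic sketch, names invented):

#include <linux/list.h>
#include <linux/types.h>

static void demo_enqueue(struct list_head *queue, struct list_head *node,
			 bool requeue)
{
	if (requeue)
		list_add(node, queue);		/* head: retry before new work */
	else
		list_add_tail(node, queue);	/* tail: normal FIFO order */
}
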
-static struct se_queue_req *
-transport_get_qr_from_queue(struct se_queue_obj *qobj)
+static struct se_cmd *
+transport_get_cmd_from_queue(struct se_queue_obj *qobj)
{
struct se_cmd *cmd;
- struct se_queue_req *qr;
unsigned long flags;
spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
@@ -871,50 +653,42 @@ transport_get_qr_from_queue(struct se_queue_obj *qobj)
spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
return NULL;
}
+ cmd = list_first_entry(&qobj->qobj_list, struct se_cmd, se_queue_node);
- list_for_each_entry(qr, &qobj->qobj_list, qr_list)
- break;
+ atomic_dec(&cmd->t_transport_queue_active);
- if (qr->cmd) {
- cmd = (struct se_cmd *)qr->cmd;
- atomic_dec(&T_TASK(cmd)->t_transport_queue_active);
- }
- list_del(&qr->qr_list);
+ list_del(&cmd->se_queue_node);
atomic_dec(&qobj->queue_cnt);
spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
- return qr;
+ return cmd;
}
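
[Editor's sketch] Embedding se_queue_node directly in struct se_cmd removes the per-enqueue se_queue_req allocation, and the dequeue collapses to list_first_entry() plus list_del(). The underlying idiom as a standalone sketch (struct name invented; the caller holds the queue lock):

#include <linux/list.h>

struct demo_item {
	struct list_head node;
};

static struct demo_item *demo_dequeue(struct list_head *queue)
{
	struct demo_item *item;

	if (list_empty(queue))
		return NULL;
	item = list_first_entry(queue, struct demo_item, node);
	list_del(&item->node);
	return item;
}
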
static void transport_remove_cmd_from_queue(struct se_cmd *cmd,
struct se_queue_obj *qobj)
{
- struct se_cmd *q_cmd;
- struct se_queue_req *qr = NULL, *qr_p = NULL;
+ struct se_cmd *t;
unsigned long flags;
spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
- if (!(atomic_read(&T_TASK(cmd)->t_transport_queue_active))) {
+ if (!atomic_read(&cmd->t_transport_queue_active)) {
spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
return;
}
- list_for_each_entry_safe(qr, qr_p, &qobj->qobj_list, qr_list) {
- q_cmd = (struct se_cmd *)qr->cmd;
- if (q_cmd != cmd)
- continue;
-
- atomic_dec(&T_TASK(q_cmd)->t_transport_queue_active);
- atomic_dec(&qobj->queue_cnt);
- list_del(&qr->qr_list);
- kfree(qr);
- }
+ list_for_each_entry(t, &qobj->qobj_list, se_queue_node)
+ if (t == cmd) {
+ atomic_dec(&cmd->t_transport_queue_active);
+ atomic_dec(&qobj->queue_cnt);
+ list_del(&cmd->se_queue_node);
+ break;
+ }
spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
- if (atomic_read(&T_TASK(cmd)->t_transport_queue_active)) {
- printk(KERN_ERR "ITT: 0x%08x t_transport_queue_active: %d\n",
- CMD_TFO(cmd)->get_task_tag(cmd),
- atomic_read(&T_TASK(cmd)->t_transport_queue_active));
+ if (atomic_read(&cmd->t_transport_queue_active)) {
+ pr_err("ITT: 0x%08x t_transport_queue_active: %d\n",
+ cmd->se_tfo->get_task_tag(cmd),
+ atomic_read(&cmd->t_transport_queue_active));
}
}
@@ -924,7 +698,7 @@ static void transport_remove_cmd_from_queue(struct se_cmd *cmd,
*/
void transport_complete_sync_cache(struct se_cmd *cmd, int good)
{
- struct se_task *task = list_entry(T_TASK(cmd)->t_task_list.next,
+ struct se_task *task = list_entry(cmd->t_task_list.next,
struct se_task, t_list);
if (good) {
@@ -933,7 +707,7 @@ void transport_complete_sync_cache(struct se_cmd *cmd, int good)
} else {
task->task_scsi_status = SAM_STAT_CHECK_CONDITION;
task->task_error_status = PYX_TRANSPORT_ILLEGAL_REQUEST;
- TASK_CMD(task)->transport_error_status =
+ task->task_se_cmd->transport_error_status =
PYX_TRANSPORT_ILLEGAL_REQUEST;
}
@@ -948,22 +722,18 @@ EXPORT_SYMBOL(transport_complete_sync_cache);
*/
void transport_complete_task(struct se_task *task, int success)
{
- struct se_cmd *cmd = TASK_CMD(task);
+ struct se_cmd *cmd = task->task_se_cmd;
struct se_device *dev = task->se_dev;
int t_state;
unsigned long flags;
#if 0
- printk(KERN_INFO "task: %p CDB: 0x%02x obj_ptr: %p\n", task,
- T_TASK(cmd)->t_task_cdb[0], dev);
+ pr_debug("task: %p CDB: 0x%02x obj_ptr: %p\n", task,
+ cmd->t_task_cdb[0], dev);
#endif
- if (dev) {
- spin_lock_irqsave(&SE_HBA(dev)->hba_queue_lock, flags);
+ if (dev)
atomic_inc(&dev->depth_left);
- atomic_inc(&SE_HBA(dev)->left_queue_depth);
- spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags);
- }
- spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
atomic_set(&task->task_active, 0);
/*
@@ -985,14 +755,14 @@ void transport_complete_task(struct se_task *task, int success)
*/
if (atomic_read(&task->task_stop)) {
/*
- * Decrement T_TASK(cmd)->t_se_count if this task had
+ * Decrement cmd->t_se_count if this task had
* previously thrown its timeout exception handler.
*/
if (atomic_read(&task->task_timeout)) {
- atomic_dec(&T_TASK(cmd)->t_se_count);
+ atomic_dec(&cmd->t_se_count);
atomic_set(&task->task_timeout, 0);
}
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
complete(&task->task_stop_comp);
return;
@@ -1003,34 +773,34 @@ void transport_complete_task(struct se_task *task, int success)
* the processing thread.
*/
if (atomic_read(&task->task_timeout)) {
- if (!(atomic_dec_and_test(
- &T_TASK(cmd)->t_task_cdbs_timeout_left))) {
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
+ if (!atomic_dec_and_test(
+ &cmd->t_task_cdbs_timeout_left)) {
+ spin_unlock_irqrestore(&cmd->t_state_lock,
flags);
return;
}
t_state = TRANSPORT_COMPLETE_TIMEOUT;
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
transport_add_cmd_to_queue(cmd, t_state);
return;
}
- atomic_dec(&T_TASK(cmd)->t_task_cdbs_timeout_left);
+ atomic_dec(&cmd->t_task_cdbs_timeout_left);
/*
* Decrement the outstanding t_task_cdbs_left count. The last
* struct se_task from struct se_cmd will complete itself into the
* device queue depending upon int success.
*/
- if (!(atomic_dec_and_test(&T_TASK(cmd)->t_task_cdbs_left))) {
+ if (!atomic_dec_and_test(&cmd->t_task_cdbs_left)) {
if (!success)
- T_TASK(cmd)->t_tasks_failed = 1;
+ cmd->t_tasks_failed = 1;
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
return;
}
- if (!success || T_TASK(cmd)->t_tasks_failed) {
+ if (!success || cmd->t_tasks_failed) {
t_state = TRANSPORT_COMPLETE_FAILURE;
if (!task->task_error_status) {
task->task_error_status =
@@ -1039,10 +809,10 @@ void transport_complete_task(struct se_task *task, int success)
PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
}
} else {
- atomic_set(&T_TASK(cmd)->t_transport_complete, 1);
+ atomic_set(&cmd->t_transport_complete, 1);
t_state = TRANSPORT_COMPLETE_OK;
}
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
transport_add_cmd_to_queue(cmd, t_state);
}
@@ -1080,9 +850,9 @@ static inline int transport_add_task_check_sam_attr(
&task_prev->t_execute_list :
&dev->execute_task_list);
- DEBUG_STA("Set HEAD_OF_QUEUE for task CDB: 0x%02x"
+ pr_debug("Set HEAD_OF_QUEUE for task CDB: 0x%02x"
" in execution queue\n",
- T_TASK(task->task_se_cmd)->t_task_cdb[0]);
+ task->task_se_cmd->t_task_cdb[0]);
return 1;
}
/*
@@ -1124,8 +894,8 @@ static void __transport_add_task_to_execute_queue(
atomic_set(&task->task_state_active, 1);
- DEBUG_TSTATE("Added ITT: 0x%08x task[%p] to dev: %p\n",
- CMD_TFO(task->task_se_cmd)->get_task_tag(task->task_se_cmd),
+ pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n",
+ task->task_se_cmd->se_tfo->get_task_tag(task->task_se_cmd),
task, dev);
}
@@ -1135,8 +905,8 @@ static void transport_add_tasks_to_state_queue(struct se_cmd *cmd)
struct se_task *task;
unsigned long flags;
- spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
- list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) {
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+ list_for_each_entry(task, &cmd->t_task_list, t_list) {
dev = task->se_dev;
if (atomic_read(&task->task_state_active))
@@ -1146,23 +916,23 @@ static void transport_add_tasks_to_state_queue(struct se_cmd *cmd)
list_add_tail(&task->t_state_list, &dev->state_task_list);
atomic_set(&task->task_state_active, 1);
- DEBUG_TSTATE("Added ITT: 0x%08x task[%p] to dev: %p\n",
- CMD_TFO(task->task_se_cmd)->get_task_tag(
+ pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n",
+ task->task_se_cmd->se_tfo->get_task_tag(
task->task_se_cmd), task, dev);
spin_unlock(&dev->execute_task_lock);
}
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
}
static void transport_add_tasks_from_cmd(struct se_cmd *cmd)
{
- struct se_device *dev = SE_DEV(cmd);
+ struct se_device *dev = cmd->se_dev;
struct se_task *task, *task_prev = NULL;
unsigned long flags;
spin_lock_irqsave(&dev->execute_task_lock, flags);
- list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) {
+ list_for_each_entry(task, &cmd->t_task_list, t_list) {
if (atomic_read(&task->task_execute_queue))
continue;
/*
@@ -1174,30 +944,6 @@ static void transport_add_tasks_from_cmd(struct se_cmd *cmd)
task_prev = task;
}
spin_unlock_irqrestore(&dev->execute_task_lock, flags);
-
- return;
-}
-
-/* transport_get_task_from_execute_queue():
- *
- * Called with dev->execute_task_lock held.
- */
-static struct se_task *
-transport_get_task_from_execute_queue(struct se_device *dev)
-{
- struct se_task *task;
-
- if (list_empty(&dev->execute_task_list))
- return NULL;
-
- list_for_each_entry(task, &dev->execute_task_list, t_execute_list)
- break;
-
- list_del(&task->t_execute_list);
- atomic_set(&task->task_execute_queue, 0);
- atomic_dec(&dev->execute_tasks);
-
- return task;
}
/* transport_remove_task_from_execute_queue():
@@ -1222,6 +968,40 @@ void transport_remove_task_from_execute_queue(
spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}
+/*
+ * Handle QUEUE_FULL / -EAGAIN status
+ */
+
+static void target_qf_do_work(struct work_struct *work)
+{
+ struct se_device *dev = container_of(work, struct se_device,
+ qf_work_queue);
+ struct se_cmd *cmd, *cmd_tmp;
+
+ spin_lock_irq(&dev->qf_cmd_lock);
+ list_for_each_entry_safe(cmd, cmd_tmp, &dev->qf_cmd_list, se_qf_node) {
+
+ list_del(&cmd->se_qf_node);
+ atomic_dec(&dev->dev_qf_count);
+ smp_mb__after_atomic_dec();
+ spin_unlock_irq(&dev->qf_cmd_lock);
+
+ pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue"
+ " context: %s\n", cmd->se_tfo->get_fabric_name(), cmd,
+ (cmd->t_state == TRANSPORT_COMPLETE_OK) ? "COMPLETE_OK" :
+ (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING"
+ : "UNKNOWN");
+ /*
+ * The SCF_EMULATE_QUEUE_FULL flag will be cleared once se_cmd
+ * has been added to head of queue
+ */
+ transport_add_cmd_to_queue(cmd, cmd->t_state);
+
+ spin_lock_irq(&dev->qf_cmd_lock);
+ }
+ spin_unlock_irq(&dev->qf_cmd_lock);
+}
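
[Editor's sketch] target_qf_do_work() drops qf_cmd_lock while requeueing each command, which is why it walks the list with list_for_each_entry_safe() and re-takes the lock every iteration. The surrounding work-queue wiring, sketched with invented names (the patch itself does the INIT_WORK() at device setup, shown further below):

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct demo_dev {
	struct work_struct qf_work;
};

static void demo_qf_work(struct work_struct *work)
{
	struct demo_dev *dev = container_of(work, struct demo_dev, qf_work);

	pr_debug("draining queue-full list for %p\n", dev);
	/* drain dev's queue-full list here, dropping the lock per item */
}

static void demo_dev_init(struct demo_dev *dev)
{
	INIT_WORK(&dev->qf_work, demo_qf_work);
}

static void demo_dev_kick(struct demo_dev *dev)
{
	schedule_work(&dev->qf_work);	/* runs demo_qf_work() in process context */
}
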
+
unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd)
{
switch (cmd->data_direction) {
@@ -1269,7 +1049,7 @@ void transport_dump_dev_state(
atomic_read(&dev->execute_tasks), atomic_read(&dev->depth_left),
dev->queue_depth);
*bl += sprintf(b + *bl, " SectorSize: %u MaxSectors: %u\n",
- DEV_ATTRIB(dev)->block_size, DEV_ATTRIB(dev)->max_sectors);
+ dev->se_sub_dev->se_dev_attrib.block_size, dev->se_sub_dev->se_dev_attrib.max_sectors);
*bl += sprintf(b + *bl, " ");
}
@@ -1279,33 +1059,29 @@ void transport_dump_dev_state(
*/
static void transport_release_all_cmds(struct se_device *dev)
{
- struct se_cmd *cmd = NULL;
- struct se_queue_req *qr = NULL, *qr_p = NULL;
+ struct se_cmd *cmd, *tcmd;
int bug_out = 0, t_state;
unsigned long flags;
- spin_lock_irqsave(&dev->dev_queue_obj->cmd_queue_lock, flags);
- list_for_each_entry_safe(qr, qr_p, &dev->dev_queue_obj->qobj_list,
- qr_list) {
-
- cmd = (struct se_cmd *)qr->cmd;
- t_state = qr->state;
- list_del(&qr->qr_list);
- kfree(qr);
- spin_unlock_irqrestore(&dev->dev_queue_obj->cmd_queue_lock,
+ spin_lock_irqsave(&dev->dev_queue_obj.cmd_queue_lock, flags);
+ list_for_each_entry_safe(cmd, tcmd, &dev->dev_queue_obj.qobj_list,
+ se_queue_node) {
+ t_state = cmd->t_state;
+ list_del(&cmd->se_queue_node);
+ spin_unlock_irqrestore(&dev->dev_queue_obj.cmd_queue_lock,
flags);
- printk(KERN_ERR "Releasing ITT: 0x%08x, i_state: %u,"
+ pr_err("Releasing ITT: 0x%08x, i_state: %u,"
" t_state: %u directly\n",
- CMD_TFO(cmd)->get_task_tag(cmd),
- CMD_TFO(cmd)->get_cmd_state(cmd), t_state);
+ cmd->se_tfo->get_task_tag(cmd),
+ cmd->se_tfo->get_cmd_state(cmd), t_state);
transport_release_fe_cmd(cmd);
bug_out = 1;
- spin_lock_irqsave(&dev->dev_queue_obj->cmd_queue_lock, flags);
+ spin_lock_irqsave(&dev->dev_queue_obj.cmd_queue_lock, flags);
}
- spin_unlock_irqrestore(&dev->dev_queue_obj->cmd_queue_lock, flags);
+ spin_unlock_irqrestore(&dev->dev_queue_obj.cmd_queue_lock, flags);
#if 0
if (bug_out)
BUG();
@@ -1362,7 +1138,7 @@ void transport_dump_vpd_proto_id(
if (p_buf)
strncpy(p_buf, buf, p_buf_len);
else
- printk(KERN_INFO "%s", buf);
+ pr_debug("%s", buf);
}
void
@@ -1387,7 +1163,8 @@ int transport_dump_vpd_assoc(
int p_buf_len)
{
unsigned char buf[VPD_TMP_BUF_SIZE];
- int ret = 0, len;
+ int ret = 0;
+ int len;
memset(buf, 0, VPD_TMP_BUF_SIZE);
len = sprintf(buf, "T10 VPD Identifier Association: ");
@@ -1404,14 +1181,14 @@ int transport_dump_vpd_assoc(
break;
default:
sprintf(buf+len, "Unknown 0x%02x\n", vpd->association);
- ret = -1;
+ ret = -EINVAL;
break;
}
if (p_buf)
strncpy(p_buf, buf, p_buf_len);
else
- printk("%s", buf);
+ pr_debug("%s", buf);
return ret;
}
@@ -1434,7 +1211,8 @@ int transport_dump_vpd_ident_type(
int p_buf_len)
{
unsigned char buf[VPD_TMP_BUF_SIZE];
- int ret = 0, len;
+ int ret = 0;
+ int len;
memset(buf, 0, VPD_TMP_BUF_SIZE);
len = sprintf(buf, "T10 VPD Identifier Type: ");
@@ -1461,14 +1239,17 @@ int transport_dump_vpd_ident_type(
default:
sprintf(buf+len, "Unsupported: 0x%02x\n",
vpd->device_identifier_type);
- ret = -1;
+ ret = -EINVAL;
break;
}
- if (p_buf)
+ if (p_buf) {
+ if (p_buf_len < strlen(buf)+1)
+ return -EINVAL;
strncpy(p_buf, buf, p_buf_len);
- else
- printk("%s", buf);
+ } else {
+ pr_debug("%s", buf);
+ }
return ret;
}
@@ -1511,14 +1292,14 @@ int transport_dump_vpd_ident(
default:
sprintf(buf, "T10 VPD Device Identifier encoding unsupported:"
" 0x%02x", vpd->device_identifier_code_set);
- ret = -1;
+ ret = -EINVAL;
break;
}
if (p_buf)
strncpy(p_buf, buf, p_buf_len);
else
- printk("%s", buf);
+ pr_debug("%s", buf);
return ret;
}
@@ -1569,51 +1350,51 @@ static void core_setup_task_attr_emulation(struct se_device *dev)
 * This is currently not available in upstream Linux/SCSI Target
* mode code, and is assumed to be disabled while using TCM/pSCSI.
*/
- if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
+ if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
dev->dev_task_attr_type = SAM_TASK_ATTR_PASSTHROUGH;
return;
}
dev->dev_task_attr_type = SAM_TASK_ATTR_EMULATED;
- DEBUG_STA("%s: Using SAM_TASK_ATTR_EMULATED for SPC: 0x%02x"
- " device\n", TRANSPORT(dev)->name,
- TRANSPORT(dev)->get_device_rev(dev));
+ pr_debug("%s: Using SAM_TASK_ATTR_EMULATED for SPC: 0x%02x"
+ " device\n", dev->transport->name,
+ dev->transport->get_device_rev(dev));
}
static void scsi_dump_inquiry(struct se_device *dev)
{
- struct t10_wwn *wwn = DEV_T10_WWN(dev);
+ struct t10_wwn *wwn = &dev->se_sub_dev->t10_wwn;
int i, device_type;
/*
* Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
*/
- printk(" Vendor: ");
+ pr_debug(" Vendor: ");
for (i = 0; i < 8; i++)
if (wwn->vendor[i] >= 0x20)
- printk("%c", wwn->vendor[i]);
+ pr_debug("%c", wwn->vendor[i]);
else
- printk(" ");
+ pr_debug(" ");
- printk(" Model: ");
+ pr_debug(" Model: ");
for (i = 0; i < 16; i++)
if (wwn->model[i] >= 0x20)
- printk("%c", wwn->model[i]);
+ pr_debug("%c", wwn->model[i]);
else
- printk(" ");
+ pr_debug(" ");
- printk(" Revision: ");
+ pr_debug(" Revision: ");
for (i = 0; i < 4; i++)
if (wwn->revision[i] >= 0x20)
- printk("%c", wwn->revision[i]);
+ pr_debug("%c", wwn->revision[i]);
else
- printk(" ");
+ pr_debug(" ");
- printk("\n");
+ pr_debug("\n");
- device_type = TRANSPORT(dev)->get_device_type(dev);
- printk(" Type: %s ", scsi_device_type(device_type));
- printk(" ANSI SCSI revision: %02x\n",
- TRANSPORT(dev)->get_device_rev(dev));
+ device_type = dev->transport->get_device_type(dev);
+ pr_debug(" Type: %s ", scsi_device_type(device_type));
+ pr_debug(" ANSI SCSI revision: %02x\n",
+ dev->transport->get_device_rev(dev));
}
struct se_device *transport_add_device_to_core_hba(
@@ -1630,33 +1411,15 @@ struct se_device *transport_add_device_to_core_hba(
struct se_device *dev;
dev = kzalloc(sizeof(struct se_device), GFP_KERNEL);
- if (!(dev)) {
- printk(KERN_ERR "Unable to allocate memory for se_dev_t\n");
- return NULL;
- }
- dev->dev_queue_obj = kzalloc(sizeof(struct se_queue_obj), GFP_KERNEL);
- if (!(dev->dev_queue_obj)) {
- printk(KERN_ERR "Unable to allocate memory for"
- " dev->dev_queue_obj\n");
- kfree(dev);
+ if (!dev) {
+ pr_err("Unable to allocate memory for se_dev_t\n");
return NULL;
}
- transport_init_queue_obj(dev->dev_queue_obj);
-
- dev->dev_status_queue_obj = kzalloc(sizeof(struct se_queue_obj),
- GFP_KERNEL);
- if (!(dev->dev_status_queue_obj)) {
- printk(KERN_ERR "Unable to allocate memory for"
- " dev->dev_status_queue_obj\n");
- kfree(dev->dev_queue_obj);
- kfree(dev);
- return NULL;
- }
- transport_init_queue_obj(dev->dev_status_queue_obj);
+ transport_init_queue_obj(&dev->dev_queue_obj);
dev->dev_flags = device_flags;
dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED;
- dev->dev_ptr = (void *) transport_dev;
+ dev->dev_ptr = transport_dev;
dev->se_hba = hba;
dev->se_sub_dev = se_dev;
dev->transport = transport;
@@ -1668,6 +1431,7 @@ struct se_device *transport_add_device_to_core_hba(
INIT_LIST_HEAD(&dev->delayed_cmd_list);
INIT_LIST_HEAD(&dev->ordered_cmd_list);
INIT_LIST_HEAD(&dev->state_task_list);
+ INIT_LIST_HEAD(&dev->qf_cmd_list);
spin_lock_init(&dev->execute_task_lock);
spin_lock_init(&dev->delayed_cmd_lock);
spin_lock_init(&dev->ordered_cmd_lock);
@@ -1678,6 +1442,7 @@ struct se_device *transport_add_device_to_core_hba(
spin_lock_init(&dev->dev_status_thr_lock);
spin_lock_init(&dev->se_port_lock);
spin_lock_init(&dev->se_tmr_lock);
+ spin_lock_init(&dev->qf_cmd_lock);
dev->queue_depth = dev_limits->queue_depth;
atomic_set(&dev->depth_left, dev->queue_depth);
@@ -1715,13 +1480,16 @@ struct se_device *transport_add_device_to_core_hba(
* Startup the struct se_device processing thread
*/
dev->process_thread = kthread_run(transport_processing_thread, dev,
- "LIO_%s", TRANSPORT(dev)->name);
+ "LIO_%s", dev->transport->name);
if (IS_ERR(dev->process_thread)) {
- printk(KERN_ERR "Unable to create kthread: LIO_%s\n",
- TRANSPORT(dev)->name);
+ pr_err("Unable to create kthread: LIO_%s\n",
+ dev->transport->name);
goto out;
}
-
+ /*
+ * Setup work_queue for QUEUE_FULL
+ */
+ INIT_WORK(&dev->qf_work_queue, target_qf_do_work);
/*
* Preload the initial INQUIRY const values if we are doing
* anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI
@@ -1730,16 +1498,16 @@ struct se_device *transport_add_device_to_core_hba(
* originals once back into DEV_T10_WWN(dev) for the virtual device
* setup.
*/
- if (TRANSPORT(dev)->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
- if (!(inquiry_prod) || !(inquiry_prod)) {
- printk(KERN_ERR "All non TCM/pSCSI plugins require"
+ if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
+ if (!inquiry_prod || !inquiry_rev) {
+ pr_err("All non TCM/pSCSI plugins require"
" INQUIRY consts\n");
goto out;
}
- strncpy(&DEV_T10_WWN(dev)->vendor[0], "LIO-ORG", 8);
- strncpy(&DEV_T10_WWN(dev)->model[0], inquiry_prod, 16);
- strncpy(&DEV_T10_WWN(dev)->revision[0], inquiry_rev, 4);
+ strncpy(&dev->se_sub_dev->t10_wwn.vendor[0], "LIO-ORG", 8);
+ strncpy(&dev->se_sub_dev->t10_wwn.model[0], inquiry_prod, 16);
+ strncpy(&dev->se_sub_dev->t10_wwn.revision[0], inquiry_rev, 4);
}
scsi_dump_inquiry(dev);
@@ -1754,8 +1522,6 @@ out:
se_release_vpd_for_dev(dev);
- kfree(dev->dev_status_queue_obj);
- kfree(dev->dev_queue_obj);
kfree(dev);
return NULL;
@@ -1794,12 +1560,11 @@ transport_generic_get_task(struct se_cmd *cmd,
enum dma_data_direction data_direction)
{
struct se_task *task;
- struct se_device *dev = SE_DEV(cmd);
- unsigned long flags;
+ struct se_device *dev = cmd->se_dev;
- task = dev->transport->alloc_task(cmd);
+ task = dev->transport->alloc_task(cmd->t_task_cdb);
if (!task) {
- printk(KERN_ERR "Unable to allocate struct se_task\n");
+ pr_err("Unable to allocate struct se_task\n");
return NULL;
}
@@ -1807,26 +1572,15 @@ transport_generic_get_task(struct se_cmd *cmd,
INIT_LIST_HEAD(&task->t_execute_list);
INIT_LIST_HEAD(&task->t_state_list);
init_completion(&task->task_stop_comp);
- task->task_no = T_TASK(cmd)->t_tasks_no++;
task->task_se_cmd = cmd;
task->se_dev = dev;
task->task_data_direction = data_direction;
- spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
- list_add_tail(&task->t_list, &T_TASK(cmd)->t_task_list);
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
-
return task;
}
static int transport_generic_cmd_sequencer(struct se_cmd *, unsigned char *);
-void transport_device_setup_cmd(struct se_cmd *cmd)
-{
- cmd->se_dev = SE_LUN(cmd)->lun_se_dev;
-}
-EXPORT_SYMBOL(transport_device_setup_cmd);
-
/*
* Used by fabric modules containing a local struct se_cmd within their
* fabric dependent per I/O descriptor.
@@ -1840,20 +1594,17 @@ void transport_init_se_cmd(
int task_attr,
unsigned char *sense_buffer)
{
- INIT_LIST_HEAD(&cmd->se_lun_list);
- INIT_LIST_HEAD(&cmd->se_delayed_list);
- INIT_LIST_HEAD(&cmd->se_ordered_list);
- /*
- * Setup t_task pointer to t_task_backstore
- */
- cmd->t_task = &cmd->t_task_backstore;
+ INIT_LIST_HEAD(&cmd->se_lun_node);
+ INIT_LIST_HEAD(&cmd->se_delayed_node);
+ INIT_LIST_HEAD(&cmd->se_ordered_node);
+ INIT_LIST_HEAD(&cmd->se_qf_node);
- INIT_LIST_HEAD(&T_TASK(cmd)->t_task_list);
- init_completion(&T_TASK(cmd)->transport_lun_fe_stop_comp);
- init_completion(&T_TASK(cmd)->transport_lun_stop_comp);
- init_completion(&T_TASK(cmd)->t_transport_stop_comp);
- spin_lock_init(&T_TASK(cmd)->t_state_lock);
- atomic_set(&T_TASK(cmd)->transport_dev_active, 1);
+ INIT_LIST_HEAD(&cmd->t_task_list);
+ init_completion(&cmd->transport_lun_fe_stop_comp);
+ init_completion(&cmd->transport_lun_stop_comp);
+ init_completion(&cmd->t_transport_stop_comp);
+ spin_lock_init(&cmd->t_state_lock);
+ atomic_set(&cmd->transport_dev_active, 1);
cmd->se_tfo = tfo;
cmd->se_sess = se_sess;
@@ -1870,23 +1621,23 @@ static int transport_check_alloc_task_attr(struct se_cmd *cmd)
* Check if SAM Task Attribute emulation is enabled for this
* struct se_device storage object
*/
- if (SE_DEV(cmd)->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
+ if (cmd->se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
return 0;
if (cmd->sam_task_attr == MSG_ACA_TAG) {
- DEBUG_STA("SAM Task Attribute ACA"
+ pr_debug("SAM Task Attribute ACA"
" emulation is not supported\n");
- return -1;
+ return -EINVAL;
}
/*
* Used to determine when ORDERED commands should go from
* Dormant to Active status.
*/
- cmd->se_ordered_id = atomic_inc_return(&SE_DEV(cmd)->dev_ordered_id);
+ cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id);
smp_mb__after_atomic_inc();
- DEBUG_STA("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
+ pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
cmd->se_ordered_id, cmd->sam_task_attr,
- TRANSPORT(cmd->se_dev)->name);
+ cmd->se_dev->transport->name);
return 0;
}
@@ -1898,8 +1649,8 @@ void transport_free_se_cmd(
/*
* Check and free any extended CDB buffer that was allocated
*/
- if (T_TASK(se_cmd)->t_task_cdb != T_TASK(se_cmd)->__t_task_cdb)
- kfree(T_TASK(se_cmd)->t_task_cdb);
+ if (se_cmd->t_task_cdb != se_cmd->__t_task_cdb)
+ kfree(se_cmd->t_task_cdb);
}
EXPORT_SYMBOL(transport_free_se_cmd);
@@ -1922,42 +1673,41 @@ int transport_generic_allocate_tasks(
*/
cmd->transport_wait_for_tasks = &transport_generic_wait_for_tasks;
- transport_device_setup_cmd(cmd);
/*
* Ensure that the received CDB is less than the max (252 + 8) bytes
* for VARIABLE_LENGTH_CMD
*/
if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) {
- printk(KERN_ERR "Received SCSI CDB with command_size: %d that"
+ pr_err("Received SCSI CDB with command_size: %d that"
" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE);
- return -1;
+ return -EINVAL;
}
/*
* If the received CDB is larger than TCM_MAX_COMMAND_SIZE,
* allocate the additional extended CDB buffer now.. Otherwise
* setup the pointer from __t_task_cdb to t_task_cdb.
*/
- if (scsi_command_size(cdb) > sizeof(T_TASK(cmd)->__t_task_cdb)) {
- T_TASK(cmd)->t_task_cdb = kzalloc(scsi_command_size(cdb),
+ if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) {
+ cmd->t_task_cdb = kzalloc(scsi_command_size(cdb),
GFP_KERNEL);
- if (!(T_TASK(cmd)->t_task_cdb)) {
- printk(KERN_ERR "Unable to allocate T_TASK(cmd)->t_task_cdb"
- " %u > sizeof(T_TASK(cmd)->__t_task_cdb): %lu ops\n",
+ if (!cmd->t_task_cdb) {
+ pr_err("Unable to allocate cmd->t_task_cdb"
+ " %u > sizeof(cmd->__t_task_cdb): %lu ops\n",
scsi_command_size(cdb),
- (unsigned long)sizeof(T_TASK(cmd)->__t_task_cdb));
- return -1;
+ (unsigned long)sizeof(cmd->__t_task_cdb));
+ return -ENOMEM;
}
} else
- T_TASK(cmd)->t_task_cdb = &T_TASK(cmd)->__t_task_cdb[0];
+ cmd->t_task_cdb = &cmd->__t_task_cdb[0];
/*
- * Copy the original CDB into T_TASK(cmd).
+ * Copy the original CDB into cmd->
*/
- memcpy(T_TASK(cmd)->t_task_cdb, cdb, scsi_command_size(cdb));
+ memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb));
/*
* Setup the received CDB based on SCSI defined opcodes and
* perform unit attention, persistent reservations and ALUA
- * checks for virtual device backends. The T_TASK(cmd)->t_task_cdb
+ * checks for virtual device backends. The cmd->t_task_cdb
* pointer is expected to be setup before we reach this point.
*/
ret = transport_generic_cmd_sequencer(cmd, cdb);
@@ -1969,7 +1719,7 @@ int transport_generic_allocate_tasks(
if (transport_check_alloc_task_attr(cmd) < 0) {
cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
- return -2;
+ return -EINVAL;
}
spin_lock(&cmd->se_lun->lun_sep_lock);
if (cmd->se_lun->lun_sep)
@@ -1986,10 +1736,10 @@ EXPORT_SYMBOL(transport_generic_allocate_tasks);
int transport_generic_handle_cdb(
struct se_cmd *cmd)
{
- if (!SE_LUN(cmd)) {
+ if (!cmd->se_lun) {
dump_stack();
- printk(KERN_ERR "SE_LUN(cmd) is NULL\n");
- return -1;
+ pr_err("cmd->se_lun is NULL\n");
+ return -EINVAL;
}
transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD);
@@ -1997,6 +1747,56 @@ int transport_generic_handle_cdb(
}
EXPORT_SYMBOL(transport_generic_handle_cdb);
+static void transport_generic_request_failure(struct se_cmd *,
+ struct se_device *, int, int);
+/*
+ * Used by fabric module frontends to queue tasks directly.
+ * May only be used from process context.
+ */
+int transport_handle_cdb_direct(
+ struct se_cmd *cmd)
+{
+ int ret;
+
+ if (!cmd->se_lun) {
+ dump_stack();
+ pr_err("cmd->se_lun is NULL\n");
+ return -EINVAL;
+ }
+ if (in_interrupt()) {
+ dump_stack();
+ pr_err("transport_generic_handle_cdb cannot be called"
+ " from interrupt context\n");
+ return -EINVAL;
+ }
+ /*
+ * Set TRANSPORT_NEW_CMD state and cmd->t_transport_active=1 following
+ * transport_generic_handle_cdb*() -> transport_add_cmd_to_queue()
+ * in existing usage to ensure that outstanding descriptors are handled
+ * correctly during shutdown via transport_generic_wait_for_tasks()
+ *
+ * Also, we don't take cmd->t_state_lock here as we only expect
+ * this to be called for initial descriptor submission.
+ */
+ cmd->t_state = TRANSPORT_NEW_CMD;
+ atomic_set(&cmd->t_transport_active, 1);
+ /*
+ * transport_generic_new_cmd() is already handling QUEUE_FULL,
+ * so follow TRANSPORT_NEW_CMD processing thread context usage
+ * and call transport_generic_request_failure() if necessary.
+ */
+ ret = transport_generic_new_cmd(cmd);
+ if (ret == -EAGAIN)
+ return 0;
+ else if (ret < 0) {
+ cmd->transport_error_status = ret;
+ transport_generic_request_failure(cmd, NULL, 0,
+ (cmd->data_direction != DMA_TO_DEVICE));
+ }
+ return 0;
+}
+EXPORT_SYMBOL(transport_handle_cdb_direct);
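
[Editor's sketch] A caller of this new direct-submission entry point would be a fabric driver running in process context with cmd->se_lun already set. A hypothetical caller, not from this patch (demo_fabric names are invented):

/* Hypothetical fabric submission path. */
static void demo_fabric_submit(struct se_cmd *se_cmd)
{
	/* process context only; se_cmd fully initialized, se_lun set */
	if (transport_handle_cdb_direct(se_cmd) < 0)
		pr_err("demo_fabric: CDB submission failed\n");
}

Note that transport_handle_cdb_direct() already routes backend failures into transport_generic_request_failure(), so a negative return here covers only the sanity checks at the top of the function.
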
+
/*
* Used by fabric module frontends defining a TFO->new_cmd_map() caller
* to queue up a newly setup se_cmd w/ TRANSPORT_NEW_CMD_MAP in order to
@@ -2005,10 +1805,10 @@ EXPORT_SYMBOL(transport_generic_handle_cdb);
int transport_generic_handle_cdb_map(
struct se_cmd *cmd)
{
- if (!SE_LUN(cmd)) {
+ if (!cmd->se_lun) {
dump_stack();
- printk(KERN_ERR "SE_LUN(cmd) is NULL\n");
- return -1;
+ pr_err("cmd->se_lun is NULL\n");
+ return -EINVAL;
}
transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD_MAP);
@@ -2030,7 +1830,7 @@ int transport_generic_handle_data(
* in interrupt code, the signal_pending() check is skipped.
*/
if (!in_interrupt() && signal_pending(current))
- return -1;
+ return -EPERM;
/*
 * If the received CDB has already been ABORTED by the generic
* target engine, we now call transport_check_aborted_status()
@@ -2057,7 +1857,6 @@ int transport_generic_handle_tmr(
* This is needed for early exceptions.
*/
cmd->transport_wait_for_tasks = &transport_generic_wait_for_tasks;
- transport_device_setup_cmd(cmd);
transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_TMR);
return 0;
@@ -2077,16 +1876,16 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
unsigned long flags;
int ret = 0;
- DEBUG_TS("ITT[0x%08x] - Stopping tasks\n",
- CMD_TFO(cmd)->get_task_tag(cmd));
+ pr_debug("ITT[0x%08x] - Stopping tasks\n",
+ cmd->se_tfo->get_task_tag(cmd));
/*
* No tasks remain in the execution queue
*/
- spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
list_for_each_entry_safe(task, task_tmp,
- &T_TASK(cmd)->t_task_list, t_list) {
- DEBUG_TS("task_no[%d] - Processing task %p\n",
+ &cmd->t_task_list, t_list) {
+ pr_debug("task_no[%d] - Processing task %p\n",
task->task_no, task);
/*
* If the struct se_task has not been sent and is not active,
@@ -2094,14 +1893,14 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
*/
if (!atomic_read(&task->task_sent) &&
!atomic_read(&task->task_active)) {
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
+ spin_unlock_irqrestore(&cmd->t_state_lock,
flags);
transport_remove_task_from_execute_queue(task,
task->se_dev);
- DEBUG_TS("task_no[%d] - Removed from execute queue\n",
+ pr_debug("task_no[%d] - Removed from execute queue\n",
task->task_no);
- spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
continue;
}
@@ -2111,42 +1910,32 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
*/
if (atomic_read(&task->task_active)) {
atomic_set(&task->task_stop, 1);
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
+ spin_unlock_irqrestore(&cmd->t_state_lock,
flags);
- DEBUG_TS("task_no[%d] - Waiting to complete\n",
+ pr_debug("task_no[%d] - Waiting to complete\n",
task->task_no);
wait_for_completion(&task->task_stop_comp);
- DEBUG_TS("task_no[%d] - Stopped successfully\n",
+ pr_debug("task_no[%d] - Stopped successfully\n",
task->task_no);
- spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
- atomic_dec(&T_TASK(cmd)->t_task_cdbs_left);
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+ atomic_dec(&cmd->t_task_cdbs_left);
atomic_set(&task->task_active, 0);
atomic_set(&task->task_stop, 0);
} else {
- DEBUG_TS("task_no[%d] - Did nothing\n", task->task_no);
+ pr_debug("task_no[%d] - Did nothing\n", task->task_no);
ret++;
}
__transport_stop_task_timer(task, &flags);
}
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
return ret;
}
-static void transport_failure_reset_queue_depth(struct se_device *dev)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&SE_HBA(dev)->hba_queue_lock, flags);
- atomic_inc(&dev->depth_left);
- atomic_inc(&SE_HBA(dev)->left_queue_depth);
- spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags);
-}
-
/*
* Handle SAM-esque emulation for generic transport request failures.
*/
@@ -2156,29 +1945,31 @@ static void transport_generic_request_failure(
int complete,
int sc)
{
- DEBUG_GRF("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x"
- " CDB: 0x%02x\n", cmd, CMD_TFO(cmd)->get_task_tag(cmd),
- T_TASK(cmd)->t_task_cdb[0]);
- DEBUG_GRF("-----[ i_state: %d t_state/def_t_state:"
+ int ret = 0;
+
+ pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x"
+ " CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd),
+ cmd->t_task_cdb[0]);
+ pr_debug("-----[ i_state: %d t_state/def_t_state:"
" %d/%d transport_error_status: %d\n",
- CMD_TFO(cmd)->get_cmd_state(cmd),
+ cmd->se_tfo->get_cmd_state(cmd),
cmd->t_state, cmd->deferred_t_state,
cmd->transport_error_status);
- DEBUG_GRF("-----[ t_task_cdbs: %d t_task_cdbs_left: %d"
+ pr_debug("-----[ t_tasks: %d t_task_cdbs_left: %d"
" t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --"
" t_transport_active: %d t_transport_stop: %d"
- " t_transport_sent: %d\n", T_TASK(cmd)->t_task_cdbs,
- atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
- atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
- atomic_read(&T_TASK(cmd)->t_task_cdbs_ex_left),
- atomic_read(&T_TASK(cmd)->t_transport_active),
- atomic_read(&T_TASK(cmd)->t_transport_stop),
- atomic_read(&T_TASK(cmd)->t_transport_sent));
+ " t_transport_sent: %d\n", cmd->t_task_list_num,
+ atomic_read(&cmd->t_task_cdbs_left),
+ atomic_read(&cmd->t_task_cdbs_sent),
+ atomic_read(&cmd->t_task_cdbs_ex_left),
+ atomic_read(&cmd->t_transport_active),
+ atomic_read(&cmd->t_transport_stop),
+ atomic_read(&cmd->t_transport_sent));
transport_stop_all_task_timers(cmd);
if (dev)
- transport_failure_reset_queue_depth(dev);
+ atomic_inc(&dev->depth_left);
/*
* For SAM Task Attribute emulation for failed struct se_cmd
*/
@@ -2211,8 +2002,8 @@ static void transport_generic_request_failure(
* we force this session to fall back to session
* recovery.
*/
- CMD_TFO(cmd)->fall_back_to_erl0(cmd->se_sess);
- CMD_TFO(cmd)->stop_session(cmd->se_sess, 0, 0);
+ cmd->se_tfo->fall_back_to_erl0(cmd->se_sess);
+ cmd->se_tfo->stop_session(cmd->se_sess, 0, 0);
goto check_stop;
case PYX_TRANSPORT_LU_COMM_FAILURE:
@@ -2240,13 +2031,15 @@ static void transport_generic_request_failure(
*
* See spc4r17, section 7.4.6 Control Mode Page, Table 349
*/
- if (SE_SESS(cmd) &&
- DEV_ATTRIB(cmd->se_dev)->emulate_ua_intlck_ctrl == 2)
- core_scsi3_ua_allocate(SE_SESS(cmd)->se_node_acl,
+ if (cmd->se_sess &&
+ cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2)
+ core_scsi3_ua_allocate(cmd->se_sess->se_node_acl,
cmd->orig_fe_lun, 0x2C,
ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
- CMD_TFO(cmd)->queue_status(cmd);
+ ret = cmd->se_tfo->queue_status(cmd);
+ if (ret == -EAGAIN)
+ goto queue_full;
goto check_stop;
case PYX_TRANSPORT_USE_SENSE_REASON:
/*
@@ -2254,8 +2047,8 @@ static void transport_generic_request_failure(
*/
break;
default:
- printk(KERN_ERR "Unknown transport error for CDB 0x%02x: %d\n",
- T_TASK(cmd)->t_task_cdb[0],
+ pr_err("Unknown transport error for CDB 0x%02x: %d\n",
+ cmd->t_task_cdb[0],
cmd->transport_error_status);
cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
break;
@@ -2263,32 +2056,41 @@ static void transport_generic_request_failure(
if (!sc)
transport_new_cmd_failure(cmd);
- else
- transport_send_check_condition_and_sense(cmd,
- cmd->scsi_sense_reason, 0);
+ else {
+ ret = transport_send_check_condition_and_sense(cmd,
+ cmd->scsi_sense_reason, 0);
+ if (ret == -EAGAIN)
+ goto queue_full;
+ }
+
check_stop:
transport_lun_remove_cmd(cmd);
- if (!(transport_cmd_check_stop_to_fabric(cmd)))
+ if (!transport_cmd_check_stop_to_fabric(cmd))
;
+ return;
+
+queue_full:
+ cmd->t_state = TRANSPORT_COMPLETE_OK;
+ transport_handle_queue_full(cmd, cmd->se_dev, transport_complete_qf);
}
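
[Editor's sketch] The queue_full label above is the new -EAGAIN handling in miniature: instead of dropping a completion the fabric could not accept, the command's state is preserved and it is parked for the device's queue-full worker. The shape of the pattern, extracted as a sketch using only symbols declared in this patch:

static void demo_complete_status(struct se_cmd *cmd)
{
	int ret = cmd->se_tfo->queue_status(cmd);

	if (ret == -EAGAIN) {
		/* keep completion state and let target_qf_do_work() retry */
		cmd->t_state = TRANSPORT_COMPLETE_OK;
		transport_handle_queue_full(cmd, cmd->se_dev,
					    transport_complete_qf);
	}
}
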
static void transport_direct_request_timeout(struct se_cmd *cmd)
{
unsigned long flags;
- spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
- if (!(atomic_read(&T_TASK(cmd)->t_transport_timeout))) {
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+ if (!atomic_read(&cmd->t_transport_timeout)) {
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
return;
}
- if (atomic_read(&T_TASK(cmd)->t_task_cdbs_timeout_left)) {
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ if (atomic_read(&cmd->t_task_cdbs_timeout_left)) {
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
return;
}
- atomic_sub(atomic_read(&T_TASK(cmd)->t_transport_timeout),
- &T_TASK(cmd)->t_se_count);
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ atomic_sub(atomic_read(&cmd->t_transport_timeout),
+ &cmd->t_se_count);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
}
static void transport_generic_request_timeout(struct se_cmd *cmd)
@@ -2296,35 +2098,18 @@ static void transport_generic_request_timeout(struct se_cmd *cmd)
unsigned long flags;
/*
- * Reset T_TASK(cmd)->t_se_count to allow transport_generic_remove()
+ * Reset cmd->t_se_count to allow transport_generic_remove()
* to allow last call to free memory resources.
*/
- spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
- if (atomic_read(&T_TASK(cmd)->t_transport_timeout) > 1) {
- int tmp = (atomic_read(&T_TASK(cmd)->t_transport_timeout) - 1);
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+ if (atomic_read(&cmd->t_transport_timeout) > 1) {
+ int tmp = (atomic_read(&cmd->t_transport_timeout) - 1);
- atomic_sub(tmp, &T_TASK(cmd)->t_se_count);
+ atomic_sub(tmp, &cmd->t_se_count);
}
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- transport_generic_remove(cmd, 0, 0);
-}
-
-static int
-transport_generic_allocate_buf(struct se_cmd *cmd, u32 data_length)
-{
- unsigned char *buf;
-
- buf = kzalloc(data_length, GFP_KERNEL);
- if (!(buf)) {
- printk(KERN_ERR "Unable to allocate memory for buffer\n");
- return -1;
- }
-
- T_TASK(cmd)->t_tasks_se_num = 0;
- T_TASK(cmd)->t_task_buf = buf;
-
- return 0;
+ transport_generic_remove(cmd, 0);
}
static inline u32 transport_lba_21(unsigned char *cdb)
@@ -2364,9 +2149,9 @@ static void transport_set_supported_SAM_opcode(struct se_cmd *se_cmd)
{
unsigned long flags;
- spin_lock_irqsave(&T_TASK(se_cmd)->t_state_lock, flags);
+ spin_lock_irqsave(&se_cmd->t_state_lock, flags);
se_cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE;
- spin_unlock_irqrestore(&T_TASK(se_cmd)->t_state_lock, flags);
+ spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
}
/*
@@ -2375,14 +2160,14 @@ static void transport_set_supported_SAM_opcode(struct se_cmd *se_cmd)
static void transport_task_timeout_handler(unsigned long data)
{
struct se_task *task = (struct se_task *)data;
- struct se_cmd *cmd = TASK_CMD(task);
+ struct se_cmd *cmd = task->task_se_cmd;
unsigned long flags;
- DEBUG_TT("transport task timeout fired! task: %p cmd: %p\n", task, cmd);
+ pr_debug("transport task timeout fired! task: %p cmd: %p\n", task, cmd);
- spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
if (task->task_flags & TF_STOP) {
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
return;
}
task->task_flags &= ~TF_RUNNING;
@@ -2390,46 +2175,46 @@ static void transport_task_timeout_handler(unsigned long data)
/*
* Determine if transport_complete_task() has already been called.
*/
- if (!(atomic_read(&task->task_active))) {
- DEBUG_TT("transport task: %p cmd: %p timeout task_active"
+ if (!atomic_read(&task->task_active)) {
+ pr_debug("transport task: %p cmd: %p timeout task_active"
" == 0\n", task, cmd);
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
return;
}
- atomic_inc(&T_TASK(cmd)->t_se_count);
- atomic_inc(&T_TASK(cmd)->t_transport_timeout);
- T_TASK(cmd)->t_tasks_failed = 1;
+ atomic_inc(&cmd->t_se_count);
+ atomic_inc(&cmd->t_transport_timeout);
+ cmd->t_tasks_failed = 1;
atomic_set(&task->task_timeout, 1);
task->task_error_status = PYX_TRANSPORT_TASK_TIMEOUT;
task->task_scsi_status = 1;
if (atomic_read(&task->task_stop)) {
- DEBUG_TT("transport task: %p cmd: %p timeout task_stop"
+ pr_debug("transport task: %p cmd: %p timeout task_stop"
" == 1\n", task, cmd);
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
complete(&task->task_stop_comp);
return;
}
- if (!(atomic_dec_and_test(&T_TASK(cmd)->t_task_cdbs_left))) {
- DEBUG_TT("transport task: %p cmd: %p timeout non zero"
+ if (!atomic_dec_and_test(&cmd->t_task_cdbs_left)) {
+ pr_debug("transport task: %p cmd: %p timeout non zero"
" t_task_cdbs_left\n", task, cmd);
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
return;
}
- DEBUG_TT("transport task: %p cmd: %p timeout ZERO t_task_cdbs_left\n",
+ pr_debug("transport task: %p cmd: %p timeout ZERO t_task_cdbs_left\n",
task, cmd);
cmd->t_state = TRANSPORT_COMPLETE_FAILURE;
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
transport_add_cmd_to_queue(cmd, TRANSPORT_COMPLETE_FAILURE);
}
/*
- * Called with T_TASK(cmd)->t_state_lock held.
+ * Called with cmd->t_state_lock held.
*/
static void transport_start_task_timer(struct se_task *task)
{
@@ -2441,8 +2226,8 @@ static void transport_start_task_timer(struct se_task *task)
/*
* If the task_timeout is disabled, exit now.
*/
- timeout = DEV_ATTRIB(dev)->task_timeout;
- if (!(timeout))
+ timeout = dev->se_sub_dev->se_dev_attrib.task_timeout;
+ if (!timeout)
return;
init_timer(&task->task_timer);
@@ -2453,27 +2238,27 @@ static void transport_start_task_timer(struct se_task *task)
task->task_flags |= TF_RUNNING;
add_timer(&task->task_timer);
#if 0
- printk(KERN_INFO "Starting task timer for cmd: %p task: %p seconds:"
+ pr_debug("Starting task timer for cmd: %p task: %p seconds:"
" %d\n", task->task_se_cmd, task, timeout);
#endif
}
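
The timer setup elided between the two hunks above uses the classic pre-timer_setup() API, which is what the unsigned-long handler signature earlier implies; a sketch of the conventional fields (the expiry arithmetic here is an assumption, illustrative only):

        init_timer(&task->task_timer);
        task->task_timer.expires = jiffies + timeout * HZ;
        task->task_timer.data = (unsigned long)task;
        task->task_timer.function = transport_task_timeout_handler;
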
/*
- * Called with spin_lock_irq(&T_TASK(cmd)->t_state_lock) held.
+ * Called with spin_lock_irq(&cmd->t_state_lock) held.
*/
void __transport_stop_task_timer(struct se_task *task, unsigned long *flags)
{
- struct se_cmd *cmd = TASK_CMD(task);
+ struct se_cmd *cmd = task->task_se_cmd;
if (!(task->task_flags & TF_RUNNING))
return;
task->task_flags |= TF_STOP;
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, *flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, *flags);
del_timer_sync(&task->task_timer);
- spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, *flags);
+ spin_lock_irqsave(&cmd->t_state_lock, *flags);
task->task_flags &= ~TF_RUNNING;
task->task_flags &= ~TF_STOP;
}
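
The unlock/relock around del_timer_sync() above is load-bearing: the timeout handler takes cmd->t_state_lock itself, so waiting for it to finish while holding that lock could deadlock. Setting TF_STOP first makes the race benign, as a sketch of the two sides shows:

/*
 *  stopper (holds t_state_lock)          timer handler (other CPU)
 *
 *  task->task_flags |= TF_STOP;
 *  spin_unlock_irqrestore(lock);         spin_lock_irqsave(lock);
 *  del_timer_sync(&task->task_timer);    sees TF_STOP -> unlock, return
 *    ...waits for handler to return...
 *  spin_lock_irqsave(lock);
 */
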
@@ -2483,11 +2268,11 @@ static void transport_stop_all_task_timers(struct se_cmd *cmd)
struct se_task *task = NULL, *task_tmp;
unsigned long flags;
- spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
list_for_each_entry_safe(task, task_tmp,
- &T_TASK(cmd)->t_task_list, t_list)
+ &cmd->t_task_list, t_list)
__transport_stop_task_timer(task, &flags);
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
}
static inline int transport_tcq_window_closed(struct se_device *dev)
@@ -2498,7 +2283,7 @@ static inline int transport_tcq_window_closed(struct se_device *dev)
} else
msleep(PYX_TRANSPORT_WINDOW_CLOSED_WAIT_LONG);
- wake_up_interruptible(&dev->dev_queue_obj->thread_wq);
+ wake_up_interruptible(&dev->dev_queue_obj.thread_wq);
return 0;
}
@@ -2511,45 +2296,45 @@ static inline int transport_tcq_window_closed(struct se_device *dev)
*/
static inline int transport_execute_task_attr(struct se_cmd *cmd)
{
- if (SE_DEV(cmd)->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
+ if (cmd->se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
return 1;
/*
* Check for the existence of HEAD_OF_QUEUE, and if true return 1
* to add the passed struct se_cmd's list of tasks to the front of the list.
*/
if (cmd->sam_task_attr == MSG_HEAD_TAG) {
- atomic_inc(&SE_DEV(cmd)->dev_hoq_count);
+ atomic_inc(&cmd->se_dev->dev_hoq_count);
smp_mb__after_atomic_inc();
- DEBUG_STA("Added HEAD_OF_QUEUE for CDB:"
+ pr_debug("Added HEAD_OF_QUEUE for CDB:"
" 0x%02x, se_ordered_id: %u\n",
- T_TASK(cmd)->t_task_cdb[0],
+ cmd->t_task_cdb[0],
cmd->se_ordered_id);
return 1;
} else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
- spin_lock(&SE_DEV(cmd)->ordered_cmd_lock);
- list_add_tail(&cmd->se_ordered_list,
- &SE_DEV(cmd)->ordered_cmd_list);
- spin_unlock(&SE_DEV(cmd)->ordered_cmd_lock);
+ spin_lock(&cmd->se_dev->ordered_cmd_lock);
+ list_add_tail(&cmd->se_ordered_node,
+ &cmd->se_dev->ordered_cmd_list);
+ spin_unlock(&cmd->se_dev->ordered_cmd_lock);
- atomic_inc(&SE_DEV(cmd)->dev_ordered_sync);
+ atomic_inc(&cmd->se_dev->dev_ordered_sync);
smp_mb__after_atomic_inc();
- DEBUG_STA("Added ORDERED for CDB: 0x%02x to ordered"
+ pr_debug("Added ORDERED for CDB: 0x%02x to ordered"
" list, se_ordered_id: %u\n",
- T_TASK(cmd)->t_task_cdb[0],
+ cmd->t_task_cdb[0],
cmd->se_ordered_id);
/*
* Add ORDERED command to tail of execution queue if
* no other older commands exist that need to be
* completed first.
*/
- if (!(atomic_read(&SE_DEV(cmd)->simple_cmds)))
+ if (!atomic_read(&cmd->se_dev->simple_cmds))
return 1;
} else {
/*
* For SIMPLE and UNTAGGED Task Attribute commands
*/
- atomic_inc(&SE_DEV(cmd)->simple_cmds);
+ atomic_inc(&cmd->se_dev->simple_cmds);
smp_mb__after_atomic_inc();
}
/*
@@ -2557,20 +2342,20 @@ static inline int transport_execute_task_attr(struct se_cmd *cmd)
* add the dormant task(s) built for the passed struct se_cmd to the
* execution queue and become in Active state for this struct se_device.
*/
- if (atomic_read(&SE_DEV(cmd)->dev_ordered_sync) != 0) {
+ if (atomic_read(&cmd->se_dev->dev_ordered_sync) != 0) {
/*
* Otherwise, add cmd w/ tasks to delayed cmd queue that
* will be drained upon completion of HEAD_OF_QUEUE task.
*/
- spin_lock(&SE_DEV(cmd)->delayed_cmd_lock);
+ spin_lock(&cmd->se_dev->delayed_cmd_lock);
cmd->se_cmd_flags |= SCF_DELAYED_CMD_FROM_SAM_ATTR;
- list_add_tail(&cmd->se_delayed_list,
- &SE_DEV(cmd)->delayed_cmd_list);
- spin_unlock(&SE_DEV(cmd)->delayed_cmd_lock);
+ list_add_tail(&cmd->se_delayed_node,
+ &cmd->se_dev->delayed_cmd_list);
+ spin_unlock(&cmd->se_dev->delayed_cmd_lock);
- DEBUG_STA("Added CDB: 0x%02x Task Attr: 0x%02x to"
+ pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to"
" delayed CMD list, se_ordered_id: %u\n",
- T_TASK(cmd)->t_task_cdb[0], cmd->sam_task_attr,
+ cmd->t_task_cdb[0], cmd->sam_task_attr,
cmd->se_ordered_id);
/*
* Return zero to let transport_execute_tasks() know
@@ -2592,25 +2377,23 @@ static int transport_execute_tasks(struct se_cmd *cmd)
{
int add_tasks;
- if (!(cmd->se_cmd_flags & SCF_SE_DISABLE_ONLINE_CHECK)) {
- if (se_dev_check_online(cmd->se_orig_obj_ptr) != 0) {
- cmd->transport_error_status =
- PYX_TRANSPORT_LU_COMM_FAILURE;
- transport_generic_request_failure(cmd, NULL, 0, 1);
- return 0;
- }
+ if (se_dev_check_online(cmd->se_orig_obj_ptr) != 0) {
+ cmd->transport_error_status = PYX_TRANSPORT_LU_COMM_FAILURE;
+ transport_generic_request_failure(cmd, NULL, 0, 1);
+ return 0;
}
+
/*
* Call transport_cmd_check_stop() to see if a fabric exception
* has occurred that prevents execution.
*/
- if (!(transport_cmd_check_stop(cmd, 0, TRANSPORT_PROCESSING))) {
+ if (!transport_cmd_check_stop(cmd, 0, TRANSPORT_PROCESSING)) {
/*
* Check for SAM Task Attribute emulation and HEAD_OF_QUEUE
* attribute for the tasks of the received struct se_cmd CDB
*/
add_tasks = transport_execute_task_attr(cmd);
- if (add_tasks == 0)
+ if (!add_tasks)
goto execute_tasks;
/*
* This calls transport_add_tasks_from_cmd() to handle
@@ -2625,7 +2408,7 @@ static int transport_execute_tasks(struct se_cmd *cmd)
* storage object.
*/
execute_tasks:
- __transport_execute_tasks(SE_DEV(cmd));
+ __transport_execute_tasks(cmd->se_dev);
return 0;
}
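
Once the list bookkeeping is stripped away, transport_execute_task_attr() above reduces to a small gating rule; a standalone sketch, with plain counters standing in for the atomics:

#include <stdbool.h>

enum sam_attr { ATTR_SIMPLE, ATTR_HEAD_OF_QUEUE, ATTR_ORDERED };

/* true if the command may be queued for execution right away */
static bool may_execute_now(enum sam_attr attr, int simple_cmds,
                            int ordered_sync)
{
        if (attr == ATTR_HEAD_OF_QUEUE)
                return true;              /* always jumps the queue */
        if (attr == ATTR_ORDERED)
                return simple_cmds == 0;  /* barrier: wait out older I/O */
        /* SIMPLE: runs unless an ORDERED barrier is still outstanding */
        return ordered_sync == 0;
}
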
@@ -2639,51 +2422,49 @@ static int __transport_execute_tasks(struct se_device *dev)
{
int error;
struct se_cmd *cmd = NULL;
- struct se_task *task;
+ struct se_task *task = NULL;
unsigned long flags;
/*
* Check if there is enough room in the device and HBA queue to send
- * struct se_transport_task's to the selected transport.
+ * struct se_task's to the selected transport.
*/
check_depth:
- spin_lock_irqsave(&SE_HBA(dev)->hba_queue_lock, flags);
- if (!(atomic_read(&dev->depth_left)) ||
- !(atomic_read(&SE_HBA(dev)->left_queue_depth))) {
- spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags);
+ if (!atomic_read(&dev->depth_left))
return transport_tcq_window_closed(dev);
- }
- dev->dev_tcq_window_closed = 0;
- spin_lock(&dev->execute_task_lock);
- task = transport_get_task_from_execute_queue(dev);
- spin_unlock(&dev->execute_task_lock);
+ dev->dev_tcq_window_closed = 0;
- if (!task) {
- spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags);
+ spin_lock_irq(&dev->execute_task_lock);
+ if (list_empty(&dev->execute_task_list)) {
+ spin_unlock_irq(&dev->execute_task_lock);
return 0;
}
+ task = list_first_entry(&dev->execute_task_list,
+ struct se_task, t_execute_list);
+ list_del(&task->t_execute_list);
+ atomic_set(&task->task_execute_queue, 0);
+ atomic_dec(&dev->execute_tasks);
+ spin_unlock_irq(&dev->execute_task_lock);
atomic_dec(&dev->depth_left);
- atomic_dec(&SE_HBA(dev)->left_queue_depth);
- spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags);
- cmd = TASK_CMD(task);
+ cmd = task->task_se_cmd;
- spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
atomic_set(&task->task_active, 1);
atomic_set(&task->task_sent, 1);
- atomic_inc(&T_TASK(cmd)->t_task_cdbs_sent);
+ atomic_inc(&cmd->t_task_cdbs_sent);
- if (atomic_read(&T_TASK(cmd)->t_task_cdbs_sent) ==
- T_TASK(cmd)->t_task_cdbs)
+ if (atomic_read(&cmd->t_task_cdbs_sent) ==
+ cmd->t_task_list_num)
atomic_set(&cmd->transport_sent, 1);
transport_start_task_timer(task);
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
/*
* The struct se_cmd->transport_emulate_cdb() function pointer is used
- * to grab REPORT_LUNS CDBs before they hit the
+ * to grab REPORT_LUNS and other CDBs we want to handle before they hit the
* struct se_subsystem_api->do_task() caller below.
*/
if (cmd->transport_emulate_cdb) {
@@ -2718,11 +2499,11 @@ check_depth:
* call ->do_task() directly and let the underlying TCM subsystem plugin
* code handle the CDB emulation.
*/
- if ((TRANSPORT(dev)->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) &&
- (!(TASK_CMD(task)->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)))
+ if ((dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) &&
+ (!(task->task_se_cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)))
error = transport_emulate_control_cdb(task);
else
- error = TRANSPORT(dev)->do_task(task);
+ error = dev->transport->do_task(task);
if (error != 0) {
cmd->transport_error_status = error;
@@ -2745,12 +2526,10 @@ void transport_new_cmd_failure(struct se_cmd *se_cmd)
* Any unsolicited data will get dumped for failed command inside of
* the fabric plugin
*/
- spin_lock_irqsave(&T_TASK(se_cmd)->t_state_lock, flags);
+ spin_lock_irqsave(&se_cmd->t_state_lock, flags);
se_cmd->se_cmd_flags |= SCF_SE_CMD_FAILED;
se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- spin_unlock_irqrestore(&T_TASK(se_cmd)->t_state_lock, flags);
-
- CMD_TFO(se_cmd)->new_cmd_failure(se_cmd);
+ spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
}
static void transport_nop_wait_for_tasks(struct se_cmd *, int, int);
@@ -2760,7 +2539,7 @@ static inline u32 transport_get_sectors_6(
struct se_cmd *cmd,
int *ret)
{
- struct se_device *dev = SE_LUN(cmd)->lun_se_dev;
+ struct se_device *dev = cmd->se_dev;
/*
* Assume TYPE_DISK for non struct se_device objects.
@@ -2772,7 +2551,7 @@ static inline u32 transport_get_sectors_6(
/*
* Use 24-bit allocation length for TYPE_TAPE.
*/
- if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE)
+ if (dev->transport->get_device_type(dev) == TYPE_TAPE)
return (u32)(cdb[2] << 16) + (cdb[3] << 8) + cdb[4];
/*
@@ -2788,7 +2567,7 @@ static inline u32 transport_get_sectors_10(
struct se_cmd *cmd,
int *ret)
{
- struct se_device *dev = SE_LUN(cmd)->lun_se_dev;
+ struct se_device *dev = cmd->se_dev;
/*
* Assume TYPE_DISK for non struct se_device objects.
@@ -2800,8 +2579,8 @@ static inline u32 transport_get_sectors_10(
/*
* XXX_10 is not defined in SSC, throw an exception
*/
- if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE) {
- *ret = -1;
+ if (dev->transport->get_device_type(dev) == TYPE_TAPE) {
+ *ret = -EINVAL;
return 0;
}
@@ -2818,7 +2597,7 @@ static inline u32 transport_get_sectors_12(
struct se_cmd *cmd,
int *ret)
{
- struct se_device *dev = SE_LUN(cmd)->lun_se_dev;
+ struct se_device *dev = cmd->se_dev;
/*
* Assume TYPE_DISK for non struct se_device objects.
@@ -2830,8 +2609,8 @@ static inline u32 transport_get_sectors_12(
/*
* XXX_12 is not defined in SSC, throw an exception
*/
- if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE) {
- *ret = -1;
+ if (dev->transport->get_device_type(dev) == TYPE_TAPE) {
+ *ret = -EINVAL;
return 0;
}
@@ -2848,7 +2627,7 @@ static inline u32 transport_get_sectors_16(
struct se_cmd *cmd,
int *ret)
{
- struct se_device *dev = SE_LUN(cmd)->lun_se_dev;
+ struct se_device *dev = cmd->se_dev;
/*
* Assume TYPE_DISK for non struct se_device objects.
@@ -2860,7 +2639,7 @@ static inline u32 transport_get_sectors_16(
/*
* Use 24-bit allocation length for TYPE_TAPE.
*/
- if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE)
+ if (dev->transport->get_device_type(dev) == TYPE_TAPE)
return (u32)(cdb[12] << 16) + (cdb[13] << 8) + cdb[14];
type_disk:
@@ -2890,57 +2669,30 @@ static inline u32 transport_get_size(
unsigned char *cdb,
struct se_cmd *cmd)
{
- struct se_device *dev = SE_DEV(cmd);
+ struct se_device *dev = cmd->se_dev;
- if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE) {
+ if (dev->transport->get_device_type(dev) == TYPE_TAPE) {
if (cdb[1] & 1) { /* sectors */
- return DEV_ATTRIB(dev)->block_size * sectors;
+ return dev->se_sub_dev->se_dev_attrib.block_size * sectors;
} else /* bytes */
return sectors;
}
#if 0
- printk(KERN_INFO "Returning block_size: %u, sectors: %u == %u for"
- " %s object\n", DEV_ATTRIB(dev)->block_size, sectors,
- DEV_ATTRIB(dev)->block_size * sectors,
- TRANSPORT(dev)->name);
+ pr_debug("Returning block_size: %u, sectors: %u == %u for"
+ " %s object\n", dev->se_sub_dev->se_dev_attrib.block_size, sectors,
+ dev->se_sub_dev->se_dev_attrib.block_size * sectors,
+ dev->transport->name);
#endif
- return DEV_ATTRIB(dev)->block_size * sectors;
+ return dev->se_sub_dev->se_dev_attrib.block_size * sectors;
}
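
The tape special-casing above is easy to misread: for TYPE_TAPE the FIXED bit (cdb[1] & 1) decides whether the CDB count field is in blocks or in bytes, and the _6/_16 variants read a 24-bit length. In isolation:

#include <stdint.h>

/* transfer size in bytes for a sequential-access (tape) device */
static uint32_t tape_xfer_size(const uint8_t *cdb, uint32_t count,
                               uint32_t block_size)
{
        if (cdb[1] & 1)         /* FIXED=1: count is in blocks */
                return count * block_size;
        return count;           /* FIXED=0: count is in bytes */
}

/* 24-bit big-endian length, as transport_get_sectors_6() reads it */
static uint32_t len_24(const uint8_t *cdb)
{
        return ((uint32_t)cdb[2] << 16) | (cdb[3] << 8) | cdb[4];
}
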
-unsigned char transport_asciihex_to_binaryhex(unsigned char val[2])
-{
- unsigned char result = 0;
- /*
- * MSB
- */
- if ((val[0] >= 'a') && (val[0] <= 'f'))
- result = ((val[0] - 'a' + 10) & 0xf) << 4;
- else
- if ((val[0] >= 'A') && (val[0] <= 'F'))
- result = ((val[0] - 'A' + 10) & 0xf) << 4;
- else /* digit */
- result = ((val[0] - '0') & 0xf) << 4;
- /*
- * LSB
- */
- if ((val[1] >= 'a') && (val[1] <= 'f'))
- result |= ((val[1] - 'a' + 10) & 0xf);
- else
- if ((val[1] >= 'A') && (val[1] <= 'F'))
- result |= ((val[1] - 'A' + 10) & 0xf);
- else /* digit */
- result |= ((val[1] - '0') & 0xf);
-
- return result;
-}
-EXPORT_SYMBOL(transport_asciihex_to_binaryhex);
-
static void transport_xor_callback(struct se_cmd *cmd)
{
unsigned char *buf, *addr;
- struct se_mem *se_mem;
+ struct scatterlist *sg;
unsigned int offset;
int i;
+ int count;
/*
* From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command
*
@@ -2953,32 +2705,37 @@ static void transport_xor_callback(struct se_cmd *cmd)
* 5) transfer the resulting XOR data to the data-in buffer.
*/
buf = kmalloc(cmd->data_length, GFP_KERNEL);
- if (!(buf)) {
- printk(KERN_ERR "Unable to allocate xor_callback buf\n");
+ if (!buf) {
+ pr_err("Unable to allocate xor_callback buf\n");
return;
}
/*
- * Copy the scatterlist WRITE buffer located at T_TASK(cmd)->t_mem_list
+ * Copy the scatterlist WRITE buffer located at cmd->t_data_sg
* into the locally allocated *buf
*/
- transport_memcpy_se_mem_read_contig(cmd, buf, T_TASK(cmd)->t_mem_list);
+ sg_copy_to_buffer(cmd->t_data_sg,
+ cmd->t_data_nents,
+ buf,
+ cmd->data_length);
+
/*
* Now perform the XOR against the BIDI read memory located at
- * T_TASK(cmd)->t_mem_bidi_list
+ * cmd->t_bidi_data_sg
*/
offset = 0;
- list_for_each_entry(se_mem, T_TASK(cmd)->t_mem_bidi_list, se_list) {
- addr = (unsigned char *)kmap_atomic(se_mem->se_page, KM_USER0);
- if (!(addr))
+ for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) {
+ addr = kmap_atomic(sg_page(sg), KM_USER0);
+ if (!addr)
goto out;
- for (i = 0; i < se_mem->se_len; i++)
- *(addr + se_mem->se_off + i) ^= *(buf + offset + i);
+ for (i = 0; i < sg->length; i++)
+ *(addr + sg->offset + i) ^= *(buf + offset + i);
- offset += se_mem->se_len;
+ offset += sg->length;
kunmap_atomic(addr, KM_USER0);
}
+
out:
kfree(buf);
}
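
The SG walk above implements step 4 of the sbc3r22 sequence quoted in the comment: XOR the incoming write payload against the data previously read from the medium, leaving the result in the BIDI buffer for the data-in transfer. Stripped of scatterlist plumbing, the operation is simply:

#include <stddef.h>

/*
 * bidi[] holds the prior disk contents, wbuf[] the newly written data;
 * afterwards bidi[] carries the XOR result returned to the initiator.
 */
static void xdwriteread_xor(unsigned char *bidi, const unsigned char *wbuf,
                            size_t len)
{
        size_t i;

        for (i = 0; i < len; i++)
                bidi[i] ^= wbuf[i];
}
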
@@ -2994,75 +2751,60 @@ static int transport_get_sense_data(struct se_cmd *cmd)
unsigned long flags;
u32 offset = 0;
- if (!SE_LUN(cmd)) {
- printk(KERN_ERR "SE_LUN(cmd) is NULL\n");
- return -1;
- }
- spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ WARN_ON(!cmd->se_lun);
+
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
return 0;
}
list_for_each_entry_safe(task, task_tmp,
- &T_TASK(cmd)->t_task_list, t_list) {
+ &cmd->t_task_list, t_list) {
if (!task->task_sense)
continue;
dev = task->se_dev;
- if (!(dev))
+ if (!dev)
continue;
- if (!TRANSPORT(dev)->get_sense_buffer) {
- printk(KERN_ERR "TRANSPORT(dev)->get_sense_buffer"
+ if (!dev->transport->get_sense_buffer) {
+ pr_err("dev->transport->get_sense_buffer"
" is NULL\n");
continue;
}
- sense_buffer = TRANSPORT(dev)->get_sense_buffer(task);
- if (!(sense_buffer)) {
- printk(KERN_ERR "ITT[0x%08x]_TASK[%d]: Unable to locate"
+ sense_buffer = dev->transport->get_sense_buffer(task);
+ if (!sense_buffer) {
+ pr_err("ITT[0x%08x]_TASK[%d]: Unable to locate"
" sense buffer for task with sense\n",
- CMD_TFO(cmd)->get_task_tag(cmd), task->task_no);
+ cmd->se_tfo->get_task_tag(cmd), task->task_no);
continue;
}
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- offset = CMD_TFO(cmd)->set_fabric_sense_len(cmd,
+ offset = cmd->se_tfo->set_fabric_sense_len(cmd,
TRANSPORT_SENSE_BUFFER);
- memcpy((void *)&buffer[offset], (void *)sense_buffer,
+ memcpy(&buffer[offset], sense_buffer,
TRANSPORT_SENSE_BUFFER);
cmd->scsi_status = task->task_scsi_status;
/* Automatically padded */
cmd->scsi_sense_length =
(TRANSPORT_SENSE_BUFFER + offset);
- printk(KERN_INFO "HBA_[%u]_PLUG[%s]: Set SAM STATUS: 0x%02x"
+ pr_debug("HBA_[%u]_PLUG[%s]: Set SAM STATUS: 0x%02x"
" and sense\n",
- dev->se_hba->hba_id, TRANSPORT(dev)->name,
+ dev->se_hba->hba_id, dev->transport->name,
cmd->scsi_status);
return 0;
}
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
return -1;
}
-static int transport_allocate_resources(struct se_cmd *cmd)
-{
- u32 length = cmd->data_length;
-
- if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) ||
- (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB))
- return transport_generic_get_mem(cmd, length, PAGE_SIZE);
- else if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_NONSG_IO_CDB)
- return transport_generic_allocate_buf(cmd, length);
- else
- return 0;
-}
-
static int
transport_handle_reservation_conflict(struct se_cmd *cmd)
{
@@ -3077,12 +2819,40 @@ transport_handle_reservation_conflict(struct se_cmd *cmd)
*
* See spc4r17, section 7.4.6 Control Mode Page, Table 349
*/
- if (SE_SESS(cmd) &&
- DEV_ATTRIB(cmd->se_dev)->emulate_ua_intlck_ctrl == 2)
- core_scsi3_ua_allocate(SE_SESS(cmd)->se_node_acl,
+ if (cmd->se_sess &&
+ cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2)
+ core_scsi3_ua_allocate(cmd->se_sess->se_node_acl,
cmd->orig_fe_lun, 0x2C,
ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
- return -2;
+ return -EINVAL;
+}
+
+static inline long long transport_dev_end_lba(struct se_device *dev)
+{
+ return dev->transport->get_blocks(dev) + 1;
+}
+
+static int transport_cmd_get_valid_sectors(struct se_cmd *cmd)
+{
+ struct se_device *dev = cmd->se_dev;
+ u32 sectors;
+
+ if (dev->transport->get_device_type(dev) != TYPE_DISK)
+ return 0;
+
+ sectors = (cmd->data_length / dev->se_sub_dev->se_dev_attrib.block_size);
+
+ if ((cmd->t_task_lba + sectors) > transport_dev_end_lba(dev)) {
+ pr_err("LBA: %llu Sectors: %u exceeds"
+ " transport_dev_end_lba(): %llu\n",
+ cmd->t_task_lba, sectors,
+ transport_dev_end_lba(dev));
+ pr_err(" We should return CHECK_CONDITION"
+ " but we don't yet\n");
+ return 0;
+ }
+
+ return sectors;
}
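
transport_cmd_get_valid_sectors() derives the sector count back from the expected transfer length and bounds-checks it against the device size; the check in isolation (mirroring transport_dev_end_lba(), where a disk whose last addressable LBA is last_lba exposes last_lba + 1 blocks):

#include <stdint.h>

static int lba_range_ok(uint64_t lba, uint32_t data_length,
                        uint32_t block_size, uint64_t last_lba)
{
        uint32_t sectors = data_length / block_size;

        return lba + sectors <= last_lba + 1;
}
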
/* transport_generic_cmd_sequencer():
@@ -3099,7 +2869,7 @@ static int transport_generic_cmd_sequencer(
struct se_cmd *cmd,
unsigned char *cdb)
{
- struct se_device *dev = SE_DEV(cmd);
+ struct se_device *dev = cmd->se_dev;
struct se_subsystem_dev *su_dev = dev->se_sub_dev;
int ret = 0, sector_ret = 0, passthrough;
u32 sectors = 0, size = 0, pr_reg_type = 0;
@@ -3113,12 +2883,12 @@ static int transport_generic_cmd_sequencer(
&transport_nop_wait_for_tasks;
cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
cmd->scsi_sense_reason = TCM_CHECK_CONDITION_UNIT_ATTENTION;
- return -2;
+ return -EINVAL;
}
/*
* Check status of Asymmetric Logical Unit Assignment port
*/
- ret = T10_ALUA(su_dev)->alua_state_check(cmd, cdb, &alua_ascq);
+ ret = su_dev->t10_alua.alua_state_check(cmd, cdb, &alua_ascq);
if (ret != 0) {
cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks;
/*
@@ -3128,22 +2898,22 @@ static int transport_generic_cmd_sequencer(
*/
if (ret > 0) {
#if 0
- printk(KERN_INFO "[%s]: ALUA TG Port not available,"
+ pr_debug("[%s]: ALUA TG Port not available,"
" SenseKey: NOT_READY, ASC/ASCQ: 0x04/0x%02x\n",
- CMD_TFO(cmd)->get_fabric_name(), alua_ascq);
+ cmd->se_tfo->get_fabric_name(), alua_ascq);
#endif
transport_set_sense_codes(cmd, 0x04, alua_ascq);
cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
cmd->scsi_sense_reason = TCM_CHECK_CONDITION_NOT_READY;
- return -2;
+ return -EINVAL;
}
goto out_invalid_cdb_field;
}
/*
* Check status for SPC-3 Persistent Reservations
*/
- if (T10_PR_OPS(su_dev)->t10_reservation_check(cmd, &pr_reg_type) != 0) {
- if (T10_PR_OPS(su_dev)->t10_seq_non_holder(
+ if (su_dev->t10_pr.pr_ops.t10_reservation_check(cmd, &pr_reg_type) != 0) {
+ if (su_dev->t10_pr.pr_ops.t10_seq_non_holder(
cmd, cdb, pr_reg_type) != 0)
return transport_handle_reservation_conflict(cmd);
/*
@@ -3160,7 +2930,7 @@ static int transport_generic_cmd_sequencer(
goto out_unsupported_cdb;
size = transport_get_size(sectors, cdb, cmd);
cmd->transport_split_cdb = &split_cdb_XX_6;
- T_TASK(cmd)->t_task_lba = transport_lba_21(cdb);
+ cmd->t_task_lba = transport_lba_21(cdb);
cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
break;
case READ_10:
@@ -3169,7 +2939,7 @@ static int transport_generic_cmd_sequencer(
goto out_unsupported_cdb;
size = transport_get_size(sectors, cdb, cmd);
cmd->transport_split_cdb = &split_cdb_XX_10;
- T_TASK(cmd)->t_task_lba = transport_lba_32(cdb);
+ cmd->t_task_lba = transport_lba_32(cdb);
cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
break;
case READ_12:
@@ -3178,7 +2948,7 @@ static int transport_generic_cmd_sequencer(
goto out_unsupported_cdb;
size = transport_get_size(sectors, cdb, cmd);
cmd->transport_split_cdb = &split_cdb_XX_12;
- T_TASK(cmd)->t_task_lba = transport_lba_32(cdb);
+ cmd->t_task_lba = transport_lba_32(cdb);
cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
break;
case READ_16:
@@ -3187,7 +2957,7 @@ static int transport_generic_cmd_sequencer(
goto out_unsupported_cdb;
size = transport_get_size(sectors, cdb, cmd);
cmd->transport_split_cdb = &split_cdb_XX_16;
- T_TASK(cmd)->t_task_lba = transport_lba_64(cdb);
+ cmd->t_task_lba = transport_lba_64(cdb);
cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
break;
case WRITE_6:
@@ -3196,7 +2966,7 @@ static int transport_generic_cmd_sequencer(
goto out_unsupported_cdb;
size = transport_get_size(sectors, cdb, cmd);
cmd->transport_split_cdb = &split_cdb_XX_6;
- T_TASK(cmd)->t_task_lba = transport_lba_21(cdb);
+ cmd->t_task_lba = transport_lba_21(cdb);
cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
break;
case WRITE_10:
@@ -3205,8 +2975,8 @@ static int transport_generic_cmd_sequencer(
goto out_unsupported_cdb;
size = transport_get_size(sectors, cdb, cmd);
cmd->transport_split_cdb = &split_cdb_XX_10;
- T_TASK(cmd)->t_task_lba = transport_lba_32(cdb);
- T_TASK(cmd)->t_tasks_fua = (cdb[1] & 0x8);
+ cmd->t_task_lba = transport_lba_32(cdb);
+ cmd->t_tasks_fua = (cdb[1] & 0x8);
cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
break;
case WRITE_12:
@@ -3215,8 +2985,8 @@ static int transport_generic_cmd_sequencer(
goto out_unsupported_cdb;
size = transport_get_size(sectors, cdb, cmd);
cmd->transport_split_cdb = &split_cdb_XX_12;
- T_TASK(cmd)->t_task_lba = transport_lba_32(cdb);
- T_TASK(cmd)->t_tasks_fua = (cdb[1] & 0x8);
+ cmd->t_task_lba = transport_lba_32(cdb);
+ cmd->t_tasks_fua = (cdb[1] & 0x8);
cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
break;
case WRITE_16:
@@ -3225,22 +2995,22 @@ static int transport_generic_cmd_sequencer(
goto out_unsupported_cdb;
size = transport_get_size(sectors, cdb, cmd);
cmd->transport_split_cdb = &split_cdb_XX_16;
- T_TASK(cmd)->t_task_lba = transport_lba_64(cdb);
- T_TASK(cmd)->t_tasks_fua = (cdb[1] & 0x8);
+ cmd->t_task_lba = transport_lba_64(cdb);
+ cmd->t_tasks_fua = (cdb[1] & 0x8);
cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
break;
case XDWRITEREAD_10:
if ((cmd->data_direction != DMA_TO_DEVICE) ||
- !(T_TASK(cmd)->t_tasks_bidi))
+ !(cmd->t_tasks_bidi))
goto out_invalid_cdb_field;
sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
if (sector_ret)
goto out_unsupported_cdb;
size = transport_get_size(sectors, cdb, cmd);
cmd->transport_split_cdb = &split_cdb_XX_10;
- T_TASK(cmd)->t_task_lba = transport_lba_32(cdb);
+ cmd->t_task_lba = transport_lba_32(cdb);
cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
- passthrough = (TRANSPORT(dev)->transport_type ==
+ passthrough = (dev->transport->transport_type ==
TRANSPORT_PLUGIN_PHBA_PDEV);
/*
* Skip the remaining assignments for TCM/PSCSI passthrough
@@ -3251,7 +3021,7 @@ static int transport_generic_cmd_sequencer(
* Setup BIDI XOR callback to be run during transport_generic_complete_ok()
*/
cmd->transport_complete_callback = &transport_xor_callback;
- T_TASK(cmd)->t_tasks_fua = (cdb[1] & 0x8);
+ cmd->t_tasks_fua = (cdb[1] & 0x8);
break;
case VARIABLE_LENGTH_CMD:
service_action = get_unaligned_be16(&cdb[8]);
@@ -3259,7 +3029,7 @@ static int transport_generic_cmd_sequencer(
* Determine if this is TCM/PSCSI device and we should disable
* internal emulation for this CDB.
*/
- passthrough = (TRANSPORT(dev)->transport_type ==
+ passthrough = (dev->transport->transport_type ==
TRANSPORT_PLUGIN_PHBA_PDEV);
switch (service_action) {
@@ -3273,7 +3043,7 @@ static int transport_generic_cmd_sequencer(
* XDWRITE_READ_32 logic.
*/
cmd->transport_split_cdb = &split_cdb_XX_32;
- T_TASK(cmd)->t_task_lba = transport_lba_64_ext(cdb);
+ cmd->t_task_lba = transport_lba_64_ext(cdb);
cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
/*
@@ -3287,14 +3057,22 @@ static int transport_generic_cmd_sequencer(
* transport_generic_complete_ok()
*/
cmd->transport_complete_callback = &transport_xor_callback;
- T_TASK(cmd)->t_tasks_fua = (cdb[10] & 0x8);
+ cmd->t_tasks_fua = (cdb[10] & 0x8);
break;
case WRITE_SAME_32:
sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
if (sector_ret)
goto out_unsupported_cdb;
- size = transport_get_size(sectors, cdb, cmd);
- T_TASK(cmd)->t_task_lba = get_unaligned_be64(&cdb[12]);
+
+ if (sectors) {
+ size = transport_get_size(sectors, cdb, cmd);
+ } else {
+ pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not"
+ " supported\n");
+ goto out_invalid_cdb_field;
+ }
+
+ cmd->t_task_lba = get_unaligned_be64(&cdb[12]);
cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
/*
@@ -3304,7 +3082,7 @@ static int transport_generic_cmd_sequencer(
break;
if ((cdb[10] & 0x04) || (cdb[10] & 0x02)) {
- printk(KERN_ERR "WRITE_SAME PBDATA and LBDATA"
+ pr_err("WRITE_SAME PBDATA and LBDATA"
" bits not supported for Block Discard"
" Emulation\n");
goto out_invalid_cdb_field;
@@ -3314,28 +3092,28 @@ static int transport_generic_cmd_sequencer(
* tpws with the UNMAP=1 bit set.
*/
if (!(cdb[10] & 0x08)) {
- printk(KERN_ERR "WRITE_SAME w/o UNMAP bit not"
+ pr_err("WRITE_SAME w/o UNMAP bit not"
" supported for Block Discard Emulation\n");
goto out_invalid_cdb_field;
}
break;
default:
- printk(KERN_ERR "VARIABLE_LENGTH_CMD service action"
+ pr_err("VARIABLE_LENGTH_CMD service action"
" 0x%04x not supported\n", service_action);
goto out_unsupported_cdb;
}
break;
- case 0xa3:
- if (TRANSPORT(dev)->get_device_type(dev) != TYPE_ROM) {
+ case MAINTENANCE_IN:
+ if (dev->transport->get_device_type(dev) != TYPE_ROM) {
/* MAINTENANCE_IN from SCC-2 */
/*
* Check for emulated MI_REPORT_TARGET_PGS.
*/
if (cdb[1] == MI_REPORT_TARGET_PGS) {
cmd->transport_emulate_cdb =
- (T10_ALUA(su_dev)->alua_type ==
+ (su_dev->t10_alua.alua_type ==
SPC3_ALUA_EMULATED) ?
- &core_emulate_report_target_port_groups :
+ core_emulate_report_target_port_groups :
NULL;
}
size = (cdb[6] << 24) | (cdb[7] << 16) |
@@ -3344,7 +3122,7 @@ static int transport_generic_cmd_sequencer(
/* GPCMD_SEND_KEY from multi media commands */
size = (cdb[8] << 8) + cdb[9];
}
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
break;
case MODE_SELECT:
size = cdb[4];
@@ -3356,7 +3134,7 @@ static int transport_generic_cmd_sequencer(
break;
case MODE_SENSE:
size = cdb[4];
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
break;
case MODE_SENSE_10:
case GPCMD_READ_BUFFER_CAPACITY:
@@ -3364,11 +3142,11 @@ static int transport_generic_cmd_sequencer(
case LOG_SELECT:
case LOG_SENSE:
size = (cdb[7] << 8) + cdb[8];
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
break;
case READ_BLOCK_LIMITS:
size = READ_BLOCK_LEN;
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
break;
case GPCMD_GET_CONFIGURATION:
case GPCMD_READ_FORMAT_CAPACITIES:
@@ -3380,11 +3158,11 @@ static int transport_generic_cmd_sequencer(
case PERSISTENT_RESERVE_IN:
case PERSISTENT_RESERVE_OUT:
cmd->transport_emulate_cdb =
- (T10_RES(su_dev)->res_type ==
+ (su_dev->t10_pr.res_type ==
SPC3_PERSISTENT_RESERVATIONS) ?
- &core_scsi3_emulate_pr : NULL;
+ core_scsi3_emulate_pr : NULL;
size = (cdb[7] << 8) + cdb[8];
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
break;
case GPCMD_MECHANISM_STATUS:
case GPCMD_READ_DVD_STRUCTURE:
@@ -3393,19 +3171,19 @@ static int transport_generic_cmd_sequencer(
break;
case READ_POSITION:
size = READ_POSITION_LEN;
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
break;
- case 0xa4:
- if (TRANSPORT(dev)->get_device_type(dev) != TYPE_ROM) {
+ case MAINTENANCE_OUT:
+ if (dev->transport->get_device_type(dev) != TYPE_ROM) {
/* MAINTENANCE_OUT from SCC-2
*
* Check for emulated MO_SET_TARGET_PGS.
*/
if (cdb[1] == MO_SET_TARGET_PGS) {
cmd->transport_emulate_cdb =
- (T10_ALUA(su_dev)->alua_type ==
+ (su_dev->t10_alua.alua_type ==
SPC3_ALUA_EMULATED) ?
- &core_emulate_set_target_port_groups :
+ core_emulate_set_target_port_groups :
NULL;
}
@@ -3415,7 +3193,7 @@ static int transport_generic_cmd_sequencer(
/* GPCMD_REPORT_KEY from multi media commands */
size = (cdb[8] << 8) + cdb[9];
}
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
break;
case INQUIRY:
size = (cdb[3] << 8) + cdb[4];
@@ -3423,23 +3201,23 @@ static int transport_generic_cmd_sequencer(
* Do implicit HEAD_OF_QUEUE processing for INQUIRY.
* See spc4r17 section 5.3
*/
- if (SE_DEV(cmd)->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
+ if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
cmd->sam_task_attr = MSG_HEAD_TAG;
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
break;
case READ_BUFFER:
size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
break;
case READ_CAPACITY:
size = READ_CAP_LEN;
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
break;
case READ_MEDIA_SERIAL_NUMBER:
case SECURITY_PROTOCOL_IN:
case SECURITY_PROTOCOL_OUT:
size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
break;
case SERVICE_ACTION_IN:
case ACCESS_CONTROL_IN:
@@ -3450,36 +3228,36 @@ static int transport_generic_cmd_sequencer(
case WRITE_ATTRIBUTE:
size = (cdb[10] << 24) | (cdb[11] << 16) |
(cdb[12] << 8) | cdb[13];
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
break;
case RECEIVE_DIAGNOSTIC:
case SEND_DIAGNOSTIC:
size = (cdb[3] << 8) | cdb[4];
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
break;
/* #warning FIXME: Figure out correct GPCMD_READ_CD blocksize. */
#if 0
case GPCMD_READ_CD:
sectors = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
size = (2336 * sectors);
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
break;
#endif
case READ_TOC:
size = cdb[8];
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
break;
case REQUEST_SENSE:
size = cdb[4];
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
break;
case READ_ELEMENT_STATUS:
size = 65536 * cdb[7] + 256 * cdb[8] + cdb[9];
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
break;
case WRITE_BUFFER:
size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
break;
case RESERVE:
case RESERVE_10:
@@ -3500,9 +3278,9 @@ static int transport_generic_cmd_sequencer(
* emulation disabled.
*/
cmd->transport_emulate_cdb =
- (T10_RES(su_dev)->res_type !=
+ (su_dev->t10_pr.res_type !=
SPC_PASSTHROUGH) ?
- &core_scsi2_emulate_crh : NULL;
+ core_scsi2_emulate_crh : NULL;
cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
break;
case RELEASE:
@@ -3517,9 +3295,9 @@ static int transport_generic_cmd_sequencer(
size = cmd->data_length;
cmd->transport_emulate_cdb =
- (T10_RES(su_dev)->res_type !=
+ (su_dev->t10_pr.res_type !=
SPC_PASSTHROUGH) ?
- &core_scsi2_emulate_crh : NULL;
+ core_scsi2_emulate_crh : NULL;
cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
break;
case SYNCHRONIZE_CACHE:
@@ -3529,10 +3307,10 @@ static int transport_generic_cmd_sequencer(
*/
if (cdb[0] == SYNCHRONIZE_CACHE) {
sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
- T_TASK(cmd)->t_task_lba = transport_lba_32(cdb);
+ cmd->t_task_lba = transport_lba_32(cdb);
} else {
sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
- T_TASK(cmd)->t_task_lba = transport_lba_64(cdb);
+ cmd->t_task_lba = transport_lba_64(cdb);
}
if (sector_ret)
goto out_unsupported_cdb;
@@ -3543,7 +3321,7 @@ static int transport_generic_cmd_sequencer(
/*
* For TCM/pSCSI passthrough, skip cmd->transport_emulate_cdb()
*/
- if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
+ if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
break;
/*
* Set SCF_EMULATE_CDB_ASYNC to ensure asynchronous operation
@@ -3554,32 +3332,27 @@ static int transport_generic_cmd_sequencer(
* Check to ensure that LBA + Range does not exceed past end of
* device.
*/
- if (transport_get_sectors(cmd) < 0)
+ if (!transport_cmd_get_valid_sectors(cmd))
goto out_invalid_cdb_field;
break;
case UNMAP:
size = get_unaligned_be16(&cdb[7]);
- passthrough = (TRANSPORT(dev)->transport_type ==
- TRANSPORT_PLUGIN_PHBA_PDEV);
- /*
- * Determine if the received UNMAP used to for direct passthrough
- * into Linux/SCSI with struct request via TCM/pSCSI or we are
- * signaling the use of internal transport_generic_unmap() emulation
- * for UNMAP -> Linux/BLOCK disbard with TCM/IBLOCK and TCM/FILEIO
- * subsystem plugin backstores.
- */
- if (!(passthrough))
- cmd->se_cmd_flags |= SCF_EMULATE_SYNC_UNMAP;
-
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
break;
case WRITE_SAME_16:
sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
if (sector_ret)
goto out_unsupported_cdb;
- size = transport_get_size(sectors, cdb, cmd);
- T_TASK(cmd)->t_task_lba = get_unaligned_be16(&cdb[2]);
- passthrough = (TRANSPORT(dev)->transport_type ==
+
+ if (sectors) {
+ size = transport_get_size(sectors, cdb, cmd);
+ } else {
+ pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
+ goto out_invalid_cdb_field;
+ }
+
+ cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
+ passthrough = (dev->transport->transport_type ==
TRANSPORT_PLUGIN_PHBA_PDEV);
/*
* Determine if the received WRITE_SAME_16 is used for direct
* passthrough into Linux/SCSI with struct request via TCM/pSCSI, or
* whether we are signaling internal emulation for Linux/BLOCK discard
* with TCM/IBLOCK and TCM/FILEIO subsystem plugin backstores.
*/
- if (!(passthrough)) {
+ if (!passthrough) {
if ((cdb[1] & 0x04) || (cdb[1] & 0x02)) {
- printk(KERN_ERR "WRITE_SAME PBDATA and LBDATA"
+ pr_err("WRITE_SAME PBDATA and LBDATA"
" bits not supported for Block Discard"
" Emulation\n");
goto out_invalid_cdb_field;
@@ -3600,7 +3373,7 @@ static int transport_generic_cmd_sequencer(
* tpws with the UNMAP=1 bit set.
*/
if (!(cdb[1] & 0x08)) {
- printk(KERN_ERR "WRITE_SAME w/o UNMAP bit not "
+ pr_err("WRITE_SAME w/o UNMAP bit not "
" supported for Block Discard Emulation\n");
goto out_invalid_cdb_field;
}
@@ -3625,34 +3398,34 @@ static int transport_generic_cmd_sequencer(
break;
case REPORT_LUNS:
cmd->transport_emulate_cdb =
- &transport_core_report_lun_response;
+ transport_core_report_lun_response;
size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
/*
* Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS
* See spc4r17 section 5.3
*/
- if (SE_DEV(cmd)->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
+ if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
cmd->sam_task_attr = MSG_HEAD_TAG;
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
break;
default:
- printk(KERN_WARNING "TARGET_CORE[%s]: Unsupported SCSI Opcode"
+ pr_warn("TARGET_CORE[%s]: Unsupported SCSI Opcode"
" 0x%02x, sending CHECK_CONDITION.\n",
- CMD_TFO(cmd)->get_fabric_name(), cdb[0]);
+ cmd->se_tfo->get_fabric_name(), cdb[0]);
cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks;
goto out_unsupported_cdb;
}
if (size != cmd->data_length) {
- printk(KERN_WARNING "TARGET_CORE[%s]: Expected Transfer Length:"
+ pr_warn("TARGET_CORE[%s]: Expected Transfer Length:"
" %u does not match SCSI CDB Length: %u for SAM Opcode:"
- " 0x%02x\n", CMD_TFO(cmd)->get_fabric_name(),
+ " 0x%02x\n", cmd->se_tfo->get_fabric_name(),
cmd->data_length, size, cdb[0]);
cmd->cmd_spdtl = size;
if (cmd->data_direction == DMA_TO_DEVICE) {
- printk(KERN_ERR "Rejecting underflow/overflow"
+ pr_err("Rejecting underflow/overflow"
" WRITE data\n");
goto out_invalid_cdb_field;
}
@@ -3660,10 +3433,10 @@ static int transport_generic_cmd_sequencer(
* Reject READ_* or WRITE_* with overflow/underflow for
* type SCF_SCSI_DATA_SG_IO_CDB.
*/
- if (!(ret) && (DEV_ATTRIB(dev)->block_size != 512)) {
- printk(KERN_ERR "Failing OVERFLOW/UNDERFLOW for LBA op"
+ if (!ret && (dev->se_sub_dev->se_dev_attrib.block_size != 512)) {
+ pr_err("Failing OVERFLOW/UNDERFLOW for LBA op"
" CDB on non 512-byte sector setup subsystem"
- " plugin: %s\n", TRANSPORT(dev)->name);
+ " plugin: %s\n", dev->transport->name);
/* Returns CHECK_CONDITION + INVALID_CDB_FIELD */
goto out_invalid_cdb_field;
}
@@ -3678,105 +3451,22 @@ static int transport_generic_cmd_sequencer(
cmd->data_length = size;
}
+ /* Let's limit control cdbs to a page, for simplicity's sake. */
+ if ((cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) &&
+ size > PAGE_SIZE)
+ goto out_invalid_cdb_field;
+
transport_set_supported_SAM_opcode(cmd);
return ret;
out_unsupported_cdb:
cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
- return -2;
+ return -EINVAL;
out_invalid_cdb_field:
cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
- return -2;
-}
-
-static inline void transport_release_tasks(struct se_cmd *);
-
-/*
- * This function will copy a contiguous *src buffer into a destination
- * struct scatterlist array.
- */
-static void transport_memcpy_write_contig(
- struct se_cmd *cmd,
- struct scatterlist *sg_d,
- unsigned char *src)
-{
- u32 i = 0, length = 0, total_length = cmd->data_length;
- void *dst;
-
- while (total_length) {
- length = sg_d[i].length;
-
- if (length > total_length)
- length = total_length;
-
- dst = sg_virt(&sg_d[i]);
-
- memcpy(dst, src, length);
-
- if (!(total_length -= length))
- return;
-
- src += length;
- i++;
- }
-}
-
-/*
- * This function will copy a struct scatterlist array *sg_s into a destination
- * contiguous *dst buffer.
- */
-static void transport_memcpy_read_contig(
- struct se_cmd *cmd,
- unsigned char *dst,
- struct scatterlist *sg_s)
-{
- u32 i = 0, length = 0, total_length = cmd->data_length;
- void *src;
-
- while (total_length) {
- length = sg_s[i].length;
-
- if (length > total_length)
- length = total_length;
-
- src = sg_virt(&sg_s[i]);
-
- memcpy(dst, src, length);
-
- if (!(total_length -= length))
- return;
-
- dst += length;
- i++;
- }
-}
-
-static void transport_memcpy_se_mem_read_contig(
- struct se_cmd *cmd,
- unsigned char *dst,
- struct list_head *se_mem_list)
-{
- struct se_mem *se_mem;
- void *src;
- u32 length = 0, total_length = cmd->data_length;
-
- list_for_each_entry(se_mem, se_mem_list, se_list) {
- length = se_mem->se_len;
-
- if (length > total_length)
- length = total_length;
-
- src = page_address(se_mem->se_page) + se_mem->se_off;
-
- memcpy(dst, src, length);
-
- if (!(total_length -= length))
- return;
-
- dst += length;
- }
+ return -EINVAL;
}
/*
@@ -3786,7 +3476,7 @@ static void transport_memcpy_se_mem_read_contig(
*/
static void transport_complete_task_attr(struct se_cmd *cmd)
{
- struct se_device *dev = SE_DEV(cmd);
+ struct se_device *dev = cmd->se_dev;
struct se_cmd *cmd_p, *cmd_tmp;
int new_active_tasks = 0;
@@ -3794,25 +3484,25 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
atomic_dec(&dev->simple_cmds);
smp_mb__after_atomic_dec();
dev->dev_cur_ordered_id++;
- DEBUG_STA("Incremented dev->dev_cur_ordered_id: %u for"
+ pr_debug("Incremented dev->dev_cur_ordered_id: %u for"
" SIMPLE: %u\n", dev->dev_cur_ordered_id,
cmd->se_ordered_id);
} else if (cmd->sam_task_attr == MSG_HEAD_TAG) {
atomic_dec(&dev->dev_hoq_count);
smp_mb__after_atomic_dec();
dev->dev_cur_ordered_id++;
- DEBUG_STA("Incremented dev_cur_ordered_id: %u for"
+ pr_debug("Incremented dev_cur_ordered_id: %u for"
" HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id,
cmd->se_ordered_id);
} else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
spin_lock(&dev->ordered_cmd_lock);
- list_del(&cmd->se_ordered_list);
+ list_del(&cmd->se_ordered_node);
atomic_dec(&dev->dev_ordered_sync);
smp_mb__after_atomic_dec();
spin_unlock(&dev->ordered_cmd_lock);
dev->dev_cur_ordered_id++;
- DEBUG_STA("Incremented dev_cur_ordered_id: %u for ORDERED:"
+ pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:"
" %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id);
}
/*
@@ -3822,15 +3512,15 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
*/
spin_lock(&dev->delayed_cmd_lock);
list_for_each_entry_safe(cmd_p, cmd_tmp,
- &dev->delayed_cmd_list, se_delayed_list) {
+ &dev->delayed_cmd_list, se_delayed_node) {
- list_del(&cmd_p->se_delayed_list);
+ list_del(&cmd_p->se_delayed_node);
spin_unlock(&dev->delayed_cmd_lock);
- DEBUG_STA("Calling add_tasks() for"
+ pr_debug("Calling add_tasks() for"
" cmd_p: 0x%02x Task Attr: 0x%02x"
" Dormant -> Active, se_ordered_id: %u\n",
- T_TASK(cmd_p)->t_task_cdb[0],
+ cmd_p->t_task_cdb[0],
cmd_p->sam_task_attr, cmd_p->se_ordered_id);
transport_add_tasks_from_cmd(cmd_p);
@@ -3846,20 +3536,79 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
* to do the processing of the Active tasks.
*/
if (new_active_tasks != 0)
- wake_up_interruptible(&dev->dev_queue_obj->thread_wq);
+ wake_up_interruptible(&dev->dev_queue_obj.thread_wq);
+}
+
+static int transport_complete_qf(struct se_cmd *cmd)
+{
+ int ret = 0;
+
+ if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE)
+ return cmd->se_tfo->queue_status(cmd);
+
+ switch (cmd->data_direction) {
+ case DMA_FROM_DEVICE:
+ ret = cmd->se_tfo->queue_data_in(cmd);
+ break;
+ case DMA_TO_DEVICE:
+ if (cmd->t_bidi_data_sg) {
+ ret = cmd->se_tfo->queue_data_in(cmd);
+ if (ret < 0)
+ return ret;
+ }
+ /* Fall through for DMA_TO_DEVICE */
+ case DMA_NONE:
+ ret = cmd->se_tfo->queue_status(cmd);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static void transport_handle_queue_full(
+ struct se_cmd *cmd,
+ struct se_device *dev,
+ int (*qf_callback)(struct se_cmd *))
+{
+ spin_lock_irq(&dev->qf_cmd_lock);
+ cmd->se_cmd_flags |= SCF_EMULATE_QUEUE_FULL;
+ cmd->transport_qf_callback = qf_callback;
+ list_add_tail(&cmd->se_qf_node, &dev->qf_cmd_list);
+ atomic_inc(&dev->dev_qf_count);
+ smp_mb__after_atomic_inc();
+ spin_unlock_irq(&dev->qf_cmd_lock);
+
+ schedule_work(&dev->qf_work_queue);
}
static void transport_generic_complete_ok(struct se_cmd *cmd)
{
- int reason = 0;
+ int reason = 0, ret;
/*
* Check if we need to move delayed/dormant tasks from cmds on the
* delayed execution list after a HEAD_OF_QUEUE or ORDERED Task
* Attribute.
*/
- if (SE_DEV(cmd)->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
+ if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
transport_complete_task_attr(cmd);
/*
+ * Check to schedule QUEUE_FULL work, or execute an existing
+ * cmd->transport_qf_callback()
+ */
+ if (atomic_read(&cmd->se_dev->dev_qf_count) != 0)
+ schedule_work(&cmd->se_dev->qf_work_queue);
+
+ if (cmd->transport_qf_callback) {
+ ret = cmd->transport_qf_callback(cmd);
+ if (ret < 0)
+ goto queue_full;
+
+ cmd->transport_qf_callback = NULL;
+ goto done;
+ }
+ /*
* Check if we need to retrieve a sense buffer from
* the struct se_cmd in question.
*/
@@ -3872,8 +3621,11 @@ static void transport_generic_complete_ok(struct se_cmd *cmd)
* a non GOOD status.
*/
if (cmd->scsi_status) {
- transport_send_check_condition_and_sense(
+ ret = transport_send_check_condition_and_sense(
cmd, reason, 1);
+ if (ret == -EAGAIN)
+ goto queue_full;
+
transport_lun_remove_cmd(cmd);
transport_cmd_check_stop_to_fabric(cmd);
return;
@@ -3889,53 +3641,57 @@ static void transport_generic_complete_ok(struct se_cmd *cmd)
switch (cmd->data_direction) {
case DMA_FROM_DEVICE:
spin_lock(&cmd->se_lun->lun_sep_lock);
- if (SE_LUN(cmd)->lun_sep) {
- SE_LUN(cmd)->lun_sep->sep_stats.tx_data_octets +=
+ if (cmd->se_lun->lun_sep) {
+ cmd->se_lun->lun_sep->sep_stats.tx_data_octets +=
cmd->data_length;
}
spin_unlock(&cmd->se_lun->lun_sep_lock);
- /*
- * If enabled by TCM fabirc module pre-registered SGL
- * memory, perform the memcpy() from the TCM internal
- * contigious buffer back to the original SGL.
- */
- if (cmd->se_cmd_flags & SCF_PASSTHROUGH_CONTIG_TO_SG)
- transport_memcpy_write_contig(cmd,
- T_TASK(cmd)->t_task_pt_sgl,
- T_TASK(cmd)->t_task_buf);
- CMD_TFO(cmd)->queue_data_in(cmd);
+ ret = cmd->se_tfo->queue_data_in(cmd);
+ if (ret == -EAGAIN)
+ goto queue_full;
break;
case DMA_TO_DEVICE:
spin_lock(&cmd->se_lun->lun_sep_lock);
- if (SE_LUN(cmd)->lun_sep) {
- SE_LUN(cmd)->lun_sep->sep_stats.rx_data_octets +=
+ if (cmd->se_lun->lun_sep) {
+ cmd->se_lun->lun_sep->sep_stats.rx_data_octets +=
cmd->data_length;
}
spin_unlock(&cmd->se_lun->lun_sep_lock);
/*
* Check if we need to send READ payload for BIDI-COMMAND
*/
- if (T_TASK(cmd)->t_mem_bidi_list != NULL) {
+ if (cmd->t_bidi_data_sg) {
spin_lock(&cmd->se_lun->lun_sep_lock);
- if (SE_LUN(cmd)->lun_sep) {
- SE_LUN(cmd)->lun_sep->sep_stats.tx_data_octets +=
+ if (cmd->se_lun->lun_sep) {
+ cmd->se_lun->lun_sep->sep_stats.tx_data_octets +=
cmd->data_length;
}
spin_unlock(&cmd->se_lun->lun_sep_lock);
- CMD_TFO(cmd)->queue_data_in(cmd);
+ ret = cmd->se_tfo->queue_data_in(cmd);
+ if (ret == -EAGAIN)
+ goto queue_full;
break;
}
/* Fall through for DMA_TO_DEVICE */
case DMA_NONE:
- CMD_TFO(cmd)->queue_status(cmd);
+ ret = cmd->se_tfo->queue_status(cmd);
+ if (ret == -EAGAIN)
+ goto queue_full;
break;
default:
break;
}
+done:
transport_lun_remove_cmd(cmd);
transport_cmd_check_stop_to_fabric(cmd);
+ return;
+
+queue_full:
+ pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p,"
+ " data_direction: %d\n", cmd, cmd->data_direction);
+ transport_handle_queue_full(cmd, cmd->se_dev, transport_complete_qf);
}
static void transport_free_dev_tasks(struct se_cmd *cmd)
@@ -3943,9 +3699,9 @@ static void transport_free_dev_tasks(struct se_cmd *cmd)
struct se_task *task, *task_tmp;
unsigned long flags;
- spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
list_for_each_entry_safe(task, task_tmp,
- &T_TASK(cmd)->t_task_list, t_list) {
+ &cmd->t_task_list, t_list) {
if (atomic_read(&task->task_active))
continue;
@@ -3954,75 +3710,40 @@ static void transport_free_dev_tasks(struct se_cmd *cmd)
list_del(&task->t_list);
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
if (task->se_dev)
- TRANSPORT(task->se_dev)->free_task(task);
+ task->se_dev->transport->free_task(task);
else
- printk(KERN_ERR "task[%u] - task->se_dev is NULL\n",
+ pr_err("task[%u] - task->se_dev is NULL\n",
task->task_no);
- spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
}
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
}
-static inline void transport_free_pages(struct se_cmd *cmd)
+static inline void transport_free_sgl(struct scatterlist *sgl, int nents)
{
- struct se_mem *se_mem, *se_mem_tmp;
- int free_page = 1;
-
- if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)
- free_page = 0;
- if (cmd->se_dev->transport->do_se_mem_map)
- free_page = 0;
+ struct scatterlist *sg;
+ int count;
- if (T_TASK(cmd)->t_task_buf) {
- kfree(T_TASK(cmd)->t_task_buf);
- T_TASK(cmd)->t_task_buf = NULL;
- return;
- }
+ for_each_sg(sgl, sg, nents, count)
+ __free_page(sg_page(sg));
- /*
- * Caller will handle releasing of struct se_mem.
- */
- if (cmd->se_cmd_flags & SCF_CMD_PASSTHROUGH_NOALLOC)
- return;
+ kfree(sgl);
+}
- if (!(T_TASK(cmd)->t_tasks_se_num))
+static inline void transport_free_pages(struct se_cmd *cmd)
+{
+ if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)
return;
- list_for_each_entry_safe(se_mem, se_mem_tmp,
- T_TASK(cmd)->t_mem_list, se_list) {
- /*
- * We only release call __free_page(struct se_mem->se_page) when
- * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is NOT in use,
- */
- if (free_page)
- __free_page(se_mem->se_page);
+ transport_free_sgl(cmd->t_data_sg, cmd->t_data_nents);
+ cmd->t_data_sg = NULL;
+ cmd->t_data_nents = 0;
- list_del(&se_mem->se_list);
- kmem_cache_free(se_mem_cache, se_mem);
- }
-
- if (T_TASK(cmd)->t_mem_bidi_list && T_TASK(cmd)->t_tasks_se_bidi_num) {
- list_for_each_entry_safe(se_mem, se_mem_tmp,
- T_TASK(cmd)->t_mem_bidi_list, se_list) {
- /*
- * We only release call __free_page(struct se_mem->se_page) when
- * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is NOT in use,
- */
- if (free_page)
- __free_page(se_mem->se_page);
-
- list_del(&se_mem->se_list);
- kmem_cache_free(se_mem_cache, se_mem);
- }
- }
-
- kfree(T_TASK(cmd)->t_mem_bidi_list);
- T_TASK(cmd)->t_mem_bidi_list = NULL;
- kfree(T_TASK(cmd)->t_mem_list);
- T_TASK(cmd)->t_mem_list = NULL;
- T_TASK(cmd)->t_tasks_se_num = 0;
+ transport_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents);
+ cmd->t_bidi_data_sg = NULL;
+ cmd->t_bidi_data_nents = 0;
}
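
transport_free_sgl() above is the teardown half of a per-page allocation done elsewhere in this series; the allocating side follows the usual scatterlist pattern. A sketch assuming full-page segments (name and unwind are illustrative):

static struct scatterlist *alloc_data_sgl(u32 length, unsigned int *nents_out)
{
        struct scatterlist *sgl, *sg;
        unsigned int nents = DIV_ROUND_UP(length, PAGE_SIZE);
        int i;

        sgl = kmalloc(sizeof(struct scatterlist) * nents, GFP_KERNEL);
        if (!sgl)
                return NULL;
        sg_init_table(sgl, nents);

        for_each_sg(sgl, sg, nents, i) {
                u32 seg = min_t(u32, length, PAGE_SIZE);
                struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);

                if (!page) {
                        transport_free_sgl(sgl, i); /* frees pages + sgl */
                        return NULL;
                }
                sg_set_page(sg, page, seg, 0);
                length -= seg;
        }
        *nents_out = nents;
        return sgl;
}
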
static inline void transport_release_tasks(struct se_cmd *cmd)
@@ -4034,23 +3755,23 @@ static inline int transport_dec_and_check(struct se_cmd *cmd)
{
unsigned long flags;
- spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
- if (atomic_read(&T_TASK(cmd)->t_fe_count)) {
- if (!(atomic_dec_and_test(&T_TASK(cmd)->t_fe_count))) {
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+ if (atomic_read(&cmd->t_fe_count)) {
+ if (!atomic_dec_and_test(&cmd->t_fe_count)) {
+ spin_unlock_irqrestore(&cmd->t_state_lock,
flags);
return 1;
}
}
- if (atomic_read(&T_TASK(cmd)->t_se_count)) {
- if (!(atomic_dec_and_test(&T_TASK(cmd)->t_se_count))) {
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
+ if (atomic_read(&cmd->t_se_count)) {
+ if (!atomic_dec_and_test(&cmd->t_se_count)) {
+ spin_unlock_irqrestore(&cmd->t_state_lock,
flags);
return 1;
}
}
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
return 0;
}
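
transport_dec_and_check() gates teardown on two reference counts, t_fe_count (held by the fabric) and t_se_count (held by the core); release may proceed only once both are gone. The bare semantics, in portable C11 atomics:

#include <stdatomic.h>

/* returns 1 while a front-end or core reference is still outstanding */
static int dec_and_check(atomic_int *fe_count, atomic_int *se_count)
{
        /* atomic_fetch_sub() returns the old value, so old != 1 means
         * the count did not drop to zero, i.e. still referenced */
        if (atomic_load(fe_count) && atomic_fetch_sub(fe_count, 1) != 1)
                return 1;
        if (atomic_load(se_count) && atomic_fetch_sub(se_count, 1) != 1)
                return 1;
        return 0;
}
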
@@ -4062,68 +3783,57 @@ static void transport_release_fe_cmd(struct se_cmd *cmd)
if (transport_dec_and_check(cmd))
return;
- spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
- if (!(atomic_read(&T_TASK(cmd)->transport_dev_active))) {
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+ if (!atomic_read(&cmd->transport_dev_active)) {
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
goto free_pages;
}
- atomic_set(&T_TASK(cmd)->transport_dev_active, 0);
+ atomic_set(&cmd->transport_dev_active, 0);
transport_all_task_dev_remove_state(cmd);
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
transport_release_tasks(cmd);
free_pages:
transport_free_pages(cmd);
transport_free_se_cmd(cmd);
- CMD_TFO(cmd)->release_cmd_direct(cmd);
+ cmd->se_tfo->release_cmd(cmd);
}
-static int transport_generic_remove(
- struct se_cmd *cmd,
- int release_to_pool,
- int session_reinstatement)
+static int
+transport_generic_remove(struct se_cmd *cmd, int session_reinstatement)
{
unsigned long flags;
- if (!(T_TASK(cmd)))
- goto release_cmd;
-
if (transport_dec_and_check(cmd)) {
if (session_reinstatement) {
- spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
transport_all_task_dev_remove_state(cmd);
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
+ spin_unlock_irqrestore(&cmd->t_state_lock,
flags);
}
return 1;
}
- spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
- if (!(atomic_read(&T_TASK(cmd)->transport_dev_active))) {
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+ if (!atomic_read(&cmd->transport_dev_active)) {
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
goto free_pages;
}
- atomic_set(&T_TASK(cmd)->transport_dev_active, 0);
+ atomic_set(&cmd->transport_dev_active, 0);
transport_all_task_dev_remove_state(cmd);
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
transport_release_tasks(cmd);
+
free_pages:
transport_free_pages(cmd);
-
-release_cmd:
- if (release_to_pool) {
- transport_release_cmd_to_pool(cmd);
- } else {
- transport_free_se_cmd(cmd);
- CMD_TFO(cmd)->release_cmd_direct(cmd);
- }
-
+ transport_release_cmd(cmd);
return 0;
}
/*
- * transport_generic_map_mem_to_cmd - Perform SGL -> struct se_mem map
+ * transport_generic_map_mem_to_cmd - Use fabric-alloced pages instead of
+ * allocating in the core.
* @cmd: Associated se_cmd descriptor
- * @mem: SGL style memory for TCM WRITE / READ
- * @sg_mem_num: Number of SGL elements
+ * @sgl: SGL style memory for TCM WRITE / READ
+ * @sgl_count: Number of SGL elements
@@ -4135,614 +3845,163 @@ release_cmd:
*/
int transport_generic_map_mem_to_cmd(
struct se_cmd *cmd,
- struct scatterlist *mem,
- u32 sg_mem_num,
- struct scatterlist *mem_bidi_in,
- u32 sg_mem_bidi_num)
+ struct scatterlist *sgl,
+ u32 sgl_count,
+ struct scatterlist *sgl_bidi,
+ u32 sgl_bidi_count)
{
- u32 se_mem_cnt_out = 0;
- int ret;
-
- if (!(mem) || !(sg_mem_num))
+ if (!sgl || !sgl_count)
return 0;
- /*
- * Passed *mem will contain a list_head containing preformatted
- * struct se_mem elements...
- */
- if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM)) {
- if ((mem_bidi_in) || (sg_mem_bidi_num)) {
- printk(KERN_ERR "SCF_CMD_PASSTHROUGH_NOALLOC not supported"
- " with BIDI-COMMAND\n");
- return -ENOSYS;
- }
- T_TASK(cmd)->t_mem_list = (struct list_head *)mem;
- T_TASK(cmd)->t_tasks_se_num = sg_mem_num;
- cmd->se_cmd_flags |= SCF_CMD_PASSTHROUGH_NOALLOC;
- return 0;
- }
- /*
- * Otherwise, assume the caller is passing a struct scatterlist
- * array from include/linux/scatterlist.h
- */
if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) ||
(cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB)) {
- /*
- * For CDB using TCM struct se_mem linked list scatterlist memory
- * processed into a TCM struct se_subsystem_dev, we do the mapping
- * from the passed physical memory to struct se_mem->se_page here.
- */
- T_TASK(cmd)->t_mem_list = transport_init_se_mem_list();
- if (!(T_TASK(cmd)->t_mem_list))
- return -ENOMEM;
- ret = transport_map_sg_to_mem(cmd,
- T_TASK(cmd)->t_mem_list, mem, &se_mem_cnt_out);
- if (ret < 0)
- return -ENOMEM;
-
- T_TASK(cmd)->t_tasks_se_num = se_mem_cnt_out;
- /*
- * Setup BIDI READ list of struct se_mem elements
- */
- if ((mem_bidi_in) && (sg_mem_bidi_num)) {
- T_TASK(cmd)->t_mem_bidi_list = transport_init_se_mem_list();
- if (!(T_TASK(cmd)->t_mem_bidi_list)) {
- kfree(T_TASK(cmd)->t_mem_list);
- return -ENOMEM;
- }
- se_mem_cnt_out = 0;
+ cmd->t_data_sg = sgl;
+ cmd->t_data_nents = sgl_count;
- ret = transport_map_sg_to_mem(cmd,
- T_TASK(cmd)->t_mem_bidi_list, mem_bidi_in,
- &se_mem_cnt_out);
- if (ret < 0) {
- kfree(T_TASK(cmd)->t_mem_list);
- return -ENOMEM;
- }
-
- T_TASK(cmd)->t_tasks_se_bidi_num = se_mem_cnt_out;
+ if (sgl_bidi && sgl_bidi_count) {
+ cmd->t_bidi_data_sg = sgl_bidi;
+ cmd->t_bidi_data_nents = sgl_bidi_count;
}
cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
-
- } else if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_NONSG_IO_CDB) {
- if (mem_bidi_in || sg_mem_bidi_num) {
- printk(KERN_ERR "BIDI-Commands not supported using "
- "SCF_SCSI_CONTROL_NONSG_IO_CDB\n");
- return -ENOSYS;
- }
- /*
- * For incoming CDBs using a contiguous buffer internall with TCM,
- * save the passed struct scatterlist memory. After TCM storage object
- * processing has completed for this struct se_cmd, TCM core will call
- * transport_memcpy_[write,read]_contig() as necessary from
- * transport_generic_complete_ok() and transport_write_pending() in order
- * to copy the TCM buffer to/from the original passed *mem in SGL ->
- * struct scatterlist format.
- */
- cmd->se_cmd_flags |= SCF_PASSTHROUGH_CONTIG_TO_SG;
- T_TASK(cmd)->t_task_pt_sgl = mem;
}
return 0;
}
EXPORT_SYMBOL(transport_generic_map_mem_to_cmd);
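The simplified contract is easiest to see from the caller's side. A minimal fabric-side sketch (function name hypothetical, not part of this patch) for a fabric that already owns a scatterlist and carries no BIDI payload:

    static int example_fabric_setup_data(struct se_cmd *se_cmd,
                                         struct scatterlist *sgl, u32 sgl_count)
    {
        /*
         * Hand the fabric-owned pages to the core; once
         * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is set, transport_generic_new_cmd()
         * skips its own transport_generic_get_mem() allocation.
         */
        return transport_generic_map_mem_to_cmd(se_cmd, sgl, sgl_count,
                                                NULL, 0);
    }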
-
-static inline long long transport_dev_end_lba(struct se_device *dev)
-{
- return dev->transport->get_blocks(dev) + 1;
-}
-
-static int transport_get_sectors(struct se_cmd *cmd)
-{
- struct se_device *dev = SE_DEV(cmd);
-
- T_TASK(cmd)->t_tasks_sectors =
- (cmd->data_length / DEV_ATTRIB(dev)->block_size);
- if (!(T_TASK(cmd)->t_tasks_sectors))
- T_TASK(cmd)->t_tasks_sectors = 1;
-
- if (TRANSPORT(dev)->get_device_type(dev) != TYPE_DISK)
- return 0;
-
- if ((T_TASK(cmd)->t_task_lba + T_TASK(cmd)->t_tasks_sectors) >
- transport_dev_end_lba(dev)) {
- printk(KERN_ERR "LBA: %llu Sectors: %u exceeds"
- " transport_dev_end_lba(): %llu\n",
- T_TASK(cmd)->t_task_lba, T_TASK(cmd)->t_tasks_sectors,
- transport_dev_end_lba(dev));
- cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- cmd->scsi_sense_reason = TCM_SECTOR_COUNT_TOO_MANY;
- return PYX_TRANSPORT_REQ_TOO_MANY_SECTORS;
- }
-
- return 0;
-}
-
static int transport_new_cmd_obj(struct se_cmd *cmd)
{
- struct se_device *dev = SE_DEV(cmd);
- u32 task_cdbs = 0, rc;
-
- if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)) {
- task_cdbs++;
- T_TASK(cmd)->t_task_cdbs++;
- } else {
- int set_counts = 1;
+ struct se_device *dev = cmd->se_dev;
+ int task_cdbs;
+ int rc;
+ int set_counts = 1;
- /*
- * Setup any BIDI READ tasks and memory from
- * T_TASK(cmd)->t_mem_bidi_list so the READ struct se_tasks
- * are queued first for the non pSCSI passthrough case.
- */
- if ((T_TASK(cmd)->t_mem_bidi_list != NULL) &&
- (TRANSPORT(dev)->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV)) {
- rc = transport_generic_get_cdb_count(cmd,
- T_TASK(cmd)->t_task_lba,
- T_TASK(cmd)->t_tasks_sectors,
- DMA_FROM_DEVICE, T_TASK(cmd)->t_mem_bidi_list,
- set_counts);
- if (!(rc)) {
- cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- cmd->scsi_sense_reason =
- TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- return PYX_TRANSPORT_LU_COMM_FAILURE;
- }
- set_counts = 0;
- }
- /*
- * Setup the tasks and memory from T_TASK(cmd)->t_mem_list
- * Note for BIDI transfers this will contain the WRITE payload
- */
- task_cdbs = transport_generic_get_cdb_count(cmd,
- T_TASK(cmd)->t_task_lba,
- T_TASK(cmd)->t_tasks_sectors,
- cmd->data_direction, T_TASK(cmd)->t_mem_list,
- set_counts);
- if (!(task_cdbs)) {
+ /*
+ * Setup any BIDI READ tasks and memory from
+ * cmd->t_mem_bidi_list so the READ struct se_tasks
+ * are queued first for the non pSCSI passthrough case.
+ */
+ if (cmd->t_bidi_data_sg &&
+ (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV)) {
+ rc = transport_allocate_tasks(cmd,
+ cmd->t_task_lba,
+ DMA_FROM_DEVICE,
+ cmd->t_bidi_data_sg,
+ cmd->t_bidi_data_nents);
+ if (rc <= 0) {
cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
cmd->scsi_sense_reason =
- TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
return PYX_TRANSPORT_LU_COMM_FAILURE;
}
- T_TASK(cmd)->t_task_cdbs += task_cdbs;
-
-#if 0
- printk(KERN_INFO "data_length: %u, LBA: %llu t_tasks_sectors:"
- " %u, t_task_cdbs: %u\n", obj_ptr, cmd->data_length,
- T_TASK(cmd)->t_task_lba, T_TASK(cmd)->t_tasks_sectors,
- T_TASK(cmd)->t_task_cdbs);
-#endif
+ atomic_inc(&cmd->t_fe_count);
+ atomic_inc(&cmd->t_se_count);
+ set_counts = 0;
}
-
- atomic_set(&T_TASK(cmd)->t_task_cdbs_left, task_cdbs);
- atomic_set(&T_TASK(cmd)->t_task_cdbs_ex_left, task_cdbs);
- atomic_set(&T_TASK(cmd)->t_task_cdbs_timeout_left, task_cdbs);
- return 0;
-}
-
-static struct list_head *transport_init_se_mem_list(void)
-{
- struct list_head *se_mem_list;
-
- se_mem_list = kzalloc(sizeof(struct list_head), GFP_KERNEL);
- if (!(se_mem_list)) {
- printk(KERN_ERR "Unable to allocate memory for se_mem_list\n");
- return NULL;
- }
- INIT_LIST_HEAD(se_mem_list);
-
- return se_mem_list;
-}
-
-static int
-transport_generic_get_mem(struct se_cmd *cmd, u32 length, u32 dma_size)
-{
- unsigned char *buf;
- struct se_mem *se_mem;
-
- T_TASK(cmd)->t_mem_list = transport_init_se_mem_list();
- if (!(T_TASK(cmd)->t_mem_list))
- return -ENOMEM;
-
/*
- * If the device uses memory mapping this is enough.
+ * Setup the tasks and memory from cmd->t_mem_list
+ * Note for BIDI transfers this will contain the WRITE payload
*/
- if (cmd->se_dev->transport->do_se_mem_map)
- return 0;
-
- /*
- * Setup BIDI-COMMAND READ list of struct se_mem elements
- */
- if (T_TASK(cmd)->t_tasks_bidi) {
- T_TASK(cmd)->t_mem_bidi_list = transport_init_se_mem_list();
- if (!(T_TASK(cmd)->t_mem_bidi_list)) {
- kfree(T_TASK(cmd)->t_mem_list);
- return -ENOMEM;
- }
+ task_cdbs = transport_allocate_tasks(cmd,
+ cmd->t_task_lba,
+ cmd->data_direction,
+ cmd->t_data_sg,
+ cmd->t_data_nents);
+ if (task_cdbs <= 0) {
+ cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ cmd->scsi_sense_reason =
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return PYX_TRANSPORT_LU_COMM_FAILURE;
}
- while (length) {
- se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL);
- if (!(se_mem)) {
- printk(KERN_ERR "Unable to allocate struct se_mem\n");
- goto out;
- }
-
-/* #warning FIXME Allocate contigous pages for struct se_mem elements */
- se_mem->se_page = alloc_pages(GFP_KERNEL, 0);
- if (!(se_mem->se_page)) {
- printk(KERN_ERR "alloc_pages() failed\n");
- goto out;
- }
-
- buf = kmap_atomic(se_mem->se_page, KM_IRQ0);
- if (!(buf)) {
- printk(KERN_ERR "kmap_atomic() failed\n");
- goto out;
- }
- INIT_LIST_HEAD(&se_mem->se_list);
- se_mem->se_len = (length > dma_size) ? dma_size : length;
- memset(buf, 0, se_mem->se_len);
- kunmap_atomic(buf, KM_IRQ0);
-
- list_add_tail(&se_mem->se_list, T_TASK(cmd)->t_mem_list);
- T_TASK(cmd)->t_tasks_se_num++;
-
- DEBUG_MEM("Allocated struct se_mem page(%p) Length(%u)"
- " Offset(%u)\n", se_mem->se_page, se_mem->se_len,
- se_mem->se_off);
-
- length -= se_mem->se_len;
+ if (set_counts) {
+ atomic_inc(&cmd->t_fe_count);
+ atomic_inc(&cmd->t_se_count);
}
- DEBUG_MEM("Allocated total struct se_mem elements(%u)\n",
- T_TASK(cmd)->t_tasks_se_num);
+ cmd->t_task_list_num = task_cdbs;
+ atomic_set(&cmd->t_task_cdbs_left, task_cdbs);
+ atomic_set(&cmd->t_task_cdbs_ex_left, task_cdbs);
+ atomic_set(&cmd->t_task_cdbs_timeout_left, task_cdbs);
return 0;
-out:
- if (se_mem)
- __free_pages(se_mem->se_page, 0);
- kmem_cache_free(se_mem_cache, se_mem);
- return -1;
}
-u32 transport_calc_sg_num(
- struct se_task *task,
- struct se_mem *in_se_mem,
- u32 task_offset)
+void *transport_kmap_first_data_page(struct se_cmd *cmd)
{
- struct se_cmd *se_cmd = task->task_se_cmd;
- struct se_device *se_dev = SE_DEV(se_cmd);
- struct se_mem *se_mem = in_se_mem;
- struct target_core_fabric_ops *tfo = CMD_TFO(se_cmd);
- u32 sg_length, task_size = task->task_size, task_sg_num_padded;
-
- while (task_size != 0) {
- DEBUG_SC("se_mem->se_page(%p) se_mem->se_len(%u)"
- " se_mem->se_off(%u) task_offset(%u)\n",
- se_mem->se_page, se_mem->se_len,
- se_mem->se_off, task_offset);
-
- if (task_offset == 0) {
- if (task_size >= se_mem->se_len) {
- sg_length = se_mem->se_len;
-
- if (!(list_is_last(&se_mem->se_list,
- T_TASK(se_cmd)->t_mem_list)))
- se_mem = list_entry(se_mem->se_list.next,
- struct se_mem, se_list);
- } else {
- sg_length = task_size;
- task_size -= sg_length;
- goto next;
- }
+ struct scatterlist *sg = cmd->t_data_sg;
- DEBUG_SC("sg_length(%u) task_size(%u)\n",
- sg_length, task_size);
- } else {
- if ((se_mem->se_len - task_offset) > task_size) {
- sg_length = task_size;
- task_size -= sg_length;
- goto next;
- } else {
- sg_length = (se_mem->se_len - task_offset);
-
- if (!(list_is_last(&se_mem->se_list,
- T_TASK(se_cmd)->t_mem_list)))
- se_mem = list_entry(se_mem->se_list.next,
- struct se_mem, se_list);
- }
-
- DEBUG_SC("sg_length(%u) task_size(%u)\n",
- sg_length, task_size);
-
- task_offset = 0;
- }
- task_size -= sg_length;
-next:
- DEBUG_SC("task[%u] - Reducing task_size to(%u)\n",
- task->task_no, task_size);
-
- task->task_sg_num++;
- }
+ BUG_ON(!sg);
/*
- * Check if the fabric module driver is requesting that all
- * struct se_task->task_sg[] be chained together.. If so,
- * then allocate an extra padding SG entry for linking and
- * marking the end of the chained SGL.
+ * We need to take into account a possible offset here for fabrics like
+ * tcm_loop, which may use a contiguous buffer from the SCSI midlayer for
+ * control CDBs passed as SGLs via transport_generic_map_mem_to_cmd()
*/
- if (tfo->task_sg_chaining) {
- task_sg_num_padded = (task->task_sg_num + 1);
- task->task_padded_sg = 1;
- } else
- task_sg_num_padded = task->task_sg_num;
-
- task->task_sg = kzalloc(task_sg_num_padded *
- sizeof(struct scatterlist), GFP_KERNEL);
- if (!(task->task_sg)) {
- printk(KERN_ERR "Unable to allocate memory for"
- " task->task_sg\n");
- return 0;
- }
- sg_init_table(&task->task_sg[0], task_sg_num_padded);
- /*
- * Setup task->task_sg_bidi for SCSI READ payload for
- * TCM/pSCSI passthrough if present for BIDI-COMMAND
- */
- if ((T_TASK(se_cmd)->t_mem_bidi_list != NULL) &&
- (TRANSPORT(se_dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)) {
- task->task_sg_bidi = kzalloc(task_sg_num_padded *
- sizeof(struct scatterlist), GFP_KERNEL);
- if (!(task->task_sg_bidi)) {
- printk(KERN_ERR "Unable to allocate memory for"
- " task->task_sg_bidi\n");
- return 0;
- }
- sg_init_table(&task->task_sg_bidi[0], task_sg_num_padded);
- }
- /*
- * For the chaining case, setup the proper end of SGL for the
- * initial submission struct task into struct se_subsystem_api.
- * This will be cleared later by transport_do_task_sg_chain()
- */
- if (task->task_padded_sg) {
- sg_mark_end(&task->task_sg[task->task_sg_num - 1]);
- /*
- * Added the 'if' check before marking end of bi-directional
- * scatterlist (which gets created only in case of request
- * (RD + WR).
- */
- if (task->task_sg_bidi)
- sg_mark_end(&task->task_sg_bidi[task->task_sg_num - 1]);
- }
-
- DEBUG_SC("Successfully allocated task->task_sg_num(%u),"
- " task_sg_num_padded(%u)\n", task->task_sg_num,
- task_sg_num_padded);
-
- return task->task_sg_num;
+ return kmap(sg_page(sg)) + sg->offset;
}
+EXPORT_SYMBOL(transport_kmap_first_data_page);
-static inline int transport_set_tasks_sectors_disk(
- struct se_task *task,
- struct se_device *dev,
- unsigned long long lba,
- u32 sectors,
- int *max_sectors_set)
+void transport_kunmap_first_data_page(struct se_cmd *cmd)
{
- if ((lba + sectors) > transport_dev_end_lba(dev)) {
- task->task_sectors = ((transport_dev_end_lba(dev) - lba) + 1);
-
- if (task->task_sectors > DEV_ATTRIB(dev)->max_sectors) {
- task->task_sectors = DEV_ATTRIB(dev)->max_sectors;
- *max_sectors_set = 1;
- }
- } else {
- if (sectors > DEV_ATTRIB(dev)->max_sectors) {
- task->task_sectors = DEV_ATTRIB(dev)->max_sectors;
- *max_sectors_set = 1;
- } else
- task->task_sectors = sectors;
- }
-
- return 0;
+ kunmap(sg_page(cmd->t_data_sg));
}
+EXPORT_SYMBOL(transport_kunmap_first_data_page);
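Typical use of this pair is a short map/unmap window around a small control payload. A sketch, assuming the bytes of interest live in the first SG entry (the only case the helper supports):

    static void example_poke_first_byte(struct se_cmd *cmd, unsigned char val)
    {
        unsigned char *buf = transport_kmap_first_data_page(cmd);

        buf[0] = val;                           /* touch the mapped payload */
        transport_kunmap_first_data_page(cmd);  /* drop the kmap immediately */
    }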
-static inline int transport_set_tasks_sectors_non_disk(
- struct se_task *task,
- struct se_device *dev,
- unsigned long long lba,
- u32 sectors,
- int *max_sectors_set)
+static int
+transport_generic_get_mem(struct se_cmd *cmd)
{
- if (sectors > DEV_ATTRIB(dev)->max_sectors) {
- task->task_sectors = DEV_ATTRIB(dev)->max_sectors;
- *max_sectors_set = 1;
- } else
- task->task_sectors = sectors;
+ u32 length = cmd->data_length;
+ unsigned int nents;
+ struct page *page;
+ int i = 0;
- return 0;
-}
+ nents = DIV_ROUND_UP(length, PAGE_SIZE);
+ cmd->t_data_sg = kmalloc(sizeof(struct scatterlist) * nents, GFP_KERNEL);
+ if (!cmd->t_data_sg)
+ return -ENOMEM;
-static inline int transport_set_tasks_sectors(
- struct se_task *task,
- struct se_device *dev,
- unsigned long long lba,
- u32 sectors,
- int *max_sectors_set)
-{
- return (TRANSPORT(dev)->get_device_type(dev) == TYPE_DISK) ?
- transport_set_tasks_sectors_disk(task, dev, lba, sectors,
- max_sectors_set) :
- transport_set_tasks_sectors_non_disk(task, dev, lba, sectors,
- max_sectors_set);
-}
+ cmd->t_data_nents = nents;
+ sg_init_table(cmd->t_data_sg, nents);
-static int transport_map_sg_to_mem(
- struct se_cmd *cmd,
- struct list_head *se_mem_list,
- void *in_mem,
- u32 *se_mem_cnt)
-{
- struct se_mem *se_mem;
- struct scatterlist *sg;
- u32 sg_count = 1, cmd_size = cmd->data_length;
+ while (length) {
+ u32 page_len = min_t(u32, length, PAGE_SIZE);
+ page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (!page)
+ goto out;
- if (!in_mem) {
- printk(KERN_ERR "No source scatterlist\n");
- return -1;
+ sg_set_page(&cmd->t_data_sg[i], page, page_len, 0);
+ length -= page_len;
+ i++;
}
- sg = (struct scatterlist *)in_mem;
-
- while (cmd_size) {
- se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL);
- if (!(se_mem)) {
- printk(KERN_ERR "Unable to allocate struct se_mem\n");
- return -1;
- }
- INIT_LIST_HEAD(&se_mem->se_list);
- DEBUG_MEM("sg_to_mem: Starting loop with cmd_size: %u"
- " sg_page: %p offset: %d length: %d\n", cmd_size,
- sg_page(sg), sg->offset, sg->length);
-
- se_mem->se_page = sg_page(sg);
- se_mem->se_off = sg->offset;
-
- if (cmd_size > sg->length) {
- se_mem->se_len = sg->length;
- sg = sg_next(sg);
- sg_count++;
- } else
- se_mem->se_len = cmd_size;
-
- cmd_size -= se_mem->se_len;
-
- DEBUG_MEM("sg_to_mem: *se_mem_cnt: %u cmd_size: %u\n",
- *se_mem_cnt, cmd_size);
- DEBUG_MEM("sg_to_mem: Final se_page: %p se_off: %d se_len: %d\n",
- se_mem->se_page, se_mem->se_off, se_mem->se_len);
+ return 0;
- list_add_tail(&se_mem->se_list, se_mem_list);
- (*se_mem_cnt)++;
+out:
+ while (i > 0) {
+ i--;
+ __free_page(sg_page(&cmd->t_data_sg[i]));
+ }
-
- DEBUG_MEM("task[0] - Mapped(%u) struct scatterlist segments to(%u)"
- " struct se_mem\n", sg_count, *se_mem_cnt);
-
- if (sg_count != *se_mem_cnt)
- BUG();
-
- return 0;
+ kfree(cmd->t_data_sg);
+ cmd->t_data_sg = NULL;
+ return -ENOMEM;
}
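The allocation is page-granular: with data_length = 9000 and PAGE_SIZE = 4096 (values assumed for illustration), nents = DIV_ROUND_UP(9000, 4096) = 3, and the loop sets page_len to 4096, 4096 and 808 across its three iterations.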
-/* transport_map_mem_to_sg():
- *
- *
- */
-int transport_map_mem_to_sg(
- struct se_task *task,
- struct list_head *se_mem_list,
- void *in_mem,
- struct se_mem *in_se_mem,
- struct se_mem **out_se_mem,
- u32 *se_mem_cnt,
- u32 *task_offset)
+/* Reduce sectors if they are too long for the device */
+static inline sector_t transport_limit_task_sectors(
+ struct se_device *dev,
+ unsigned long long lba,
+ sector_t sectors)
{
- struct se_cmd *se_cmd = task->task_se_cmd;
- struct se_mem *se_mem = in_se_mem;
- struct scatterlist *sg = (struct scatterlist *)in_mem;
- u32 task_size = task->task_size, sg_no = 0;
-
- if (!sg) {
- printk(KERN_ERR "Unable to locate valid struct"
- " scatterlist pointer\n");
- return -1;
- }
+ sectors = min_t(sector_t, sectors, dev->se_sub_dev->se_dev_attrib.max_sectors);
- while (task_size != 0) {
- /*
- * Setup the contigious array of scatterlists for
- * this struct se_task.
- */
- sg_assign_page(sg, se_mem->se_page);
-
- if (*task_offset == 0) {
- sg->offset = se_mem->se_off;
+ if (dev->transport->get_device_type(dev) == TYPE_DISK)
+ if ((lba + sectors) > transport_dev_end_lba(dev))
+ sectors = ((transport_dev_end_lba(dev) - lba) + 1);
- if (task_size >= se_mem->se_len) {
- sg->length = se_mem->se_len;
-
- if (!(list_is_last(&se_mem->se_list,
- T_TASK(se_cmd)->t_mem_list))) {
- se_mem = list_entry(se_mem->se_list.next,
- struct se_mem, se_list);
- (*se_mem_cnt)++;
- }
- } else {
- sg->length = task_size;
- /*
- * Determine if we need to calculate an offset
- * into the struct se_mem on the next go around..
- */
- task_size -= sg->length;
- if (!(task_size))
- *task_offset = sg->length;
-
- goto next;
- }
-
- } else {
- sg->offset = (*task_offset + se_mem->se_off);
-
- if ((se_mem->se_len - *task_offset) > task_size) {
- sg->length = task_size;
- /*
- * Determine if we need to calculate an offset
- * into the struct se_mem on the next go around..
- */
- task_size -= sg->length;
- if (!(task_size))
- *task_offset += sg->length;
-
- goto next;
- } else {
- sg->length = (se_mem->se_len - *task_offset);
-
- if (!(list_is_last(&se_mem->se_list,
- T_TASK(se_cmd)->t_mem_list))) {
- se_mem = list_entry(se_mem->se_list.next,
- struct se_mem, se_list);
- (*se_mem_cnt)++;
- }
- }
-
- *task_offset = 0;
- }
- task_size -= sg->length;
-next:
- DEBUG_MEM("task[%u] mem_to_sg - sg[%u](%p)(%u)(%u) - Reducing"
- " task_size to(%u), task_offset: %u\n", task->task_no, sg_no,
- sg_page(sg), sg->length, sg->offset, task_size, *task_offset);
-
- sg_no++;
- if (!(task_size))
- break;
-
- sg = sg_next(sg);
-
- if (task_size > se_cmd->data_length)
- BUG();
- }
- *out_se_mem = se_mem;
-
- DEBUG_MEM("task[%u] - Mapped(%u) struct se_mem segments to total(%u)"
- " SGs\n", task->task_no, *se_mem_cnt, sg_no);
-
- return 0;
+ return sectors;
}
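Worked example (values assumed): a 2048-sector request against max_sectors = 1024 comes back from the min_t() clamp as 1024; transport_allocate_data_tasks() below then loops, so the remaining 1024 sectors land in a second task. For TYPE_DISK devices the end-of-device check can trim the count further, so no task runs past the last LBA.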
+
/*
* This function can be used by HW target mode drivers to create a linked
* scatterlist from all contiguously allocated struct se_task->task_sg[].
@@ -4751,334 +4010,235 @@ next:
*/
void transport_do_task_sg_chain(struct se_cmd *cmd)
{
- struct scatterlist *sg_head = NULL, *sg_link = NULL, *sg_first = NULL;
- struct scatterlist *sg_head_cur = NULL, *sg_link_cur = NULL;
- struct scatterlist *sg, *sg_end = NULL, *sg_end_cur = NULL;
+ struct scatterlist *sg_first = NULL;
+ struct scatterlist *sg_prev = NULL;
+ int sg_prev_nents = 0;
+ struct scatterlist *sg;
struct se_task *task;
- struct target_core_fabric_ops *tfo = CMD_TFO(cmd);
- u32 task_sg_num = 0, sg_count = 0;
+ u32 chained_nents = 0;
int i;
- if (tfo->task_sg_chaining == 0) {
- printk(KERN_ERR "task_sg_chaining is diabled for fabric module:"
- " %s\n", tfo->get_fabric_name());
- dump_stack();
- return;
- }
+ BUG_ON(!cmd->se_tfo->task_sg_chaining);
+
/*
* Walk the struct se_task list and setup scatterlist chains
- * for each contiguosly allocated struct se_task->task_sg[].
+ * for each contiguously allocated struct se_task->task_sg[].
*/
- list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) {
- if (!(task->task_sg) || !(task->task_padded_sg))
+ list_for_each_entry(task, &cmd->t_task_list, t_list) {
+ if (!task->task_sg)
continue;
- if (sg_head && sg_link) {
- sg_head_cur = &task->task_sg[0];
- sg_link_cur = &task->task_sg[task->task_sg_num];
- /*
- * Either add chain or mark end of scatterlist
- */
- if (!(list_is_last(&task->t_list,
- &T_TASK(cmd)->t_task_list))) {
- /*
- * Clear existing SGL termination bit set in
- * transport_calc_sg_num(), see sg_mark_end()
- */
- sg_end_cur = &task->task_sg[task->task_sg_num - 1];
- sg_end_cur->page_link &= ~0x02;
-
- sg_chain(sg_head, task_sg_num, sg_head_cur);
- sg_count += task->task_sg_num;
- task_sg_num = (task->task_sg_num + 1);
- } else {
- sg_chain(sg_head, task_sg_num, sg_head_cur);
- sg_count += task->task_sg_num;
- task_sg_num = task->task_sg_num;
- }
+ BUG_ON(!task->task_padded_sg);
- sg_head = sg_head_cur;
- sg_link = sg_link_cur;
- continue;
- }
- sg_head = sg_first = &task->task_sg[0];
- sg_link = &task->task_sg[task->task_sg_num];
- /*
- * Check for single task..
- */
- if (!(list_is_last(&task->t_list, &T_TASK(cmd)->t_task_list))) {
- /*
- * Clear existing SGL termination bit set in
- * transport_calc_sg_num(), see sg_mark_end()
- */
- sg_end = &task->task_sg[task->task_sg_num - 1];
- sg_end->page_link &= ~0x02;
- sg_count += task->task_sg_num;
- task_sg_num = (task->task_sg_num + 1);
+ if (!sg_first) {
+ sg_first = task->task_sg;
+ chained_nents = task->task_sg_nents;
} else {
- sg_count += task->task_sg_num;
- task_sg_num = task->task_sg_num;
+ sg_chain(sg_prev, sg_prev_nents, task->task_sg);
+ chained_nents += task->task_sg_nents;
}
+
+ sg_prev = task->task_sg;
+ sg_prev_nents = task->task_sg_nents;
}
/*
* Setup the starting pointer and total t_tasks_sg_linked_no including
* padding SGs for linking and to mark the end.
*/
- T_TASK(cmd)->t_tasks_sg_chained = sg_first;
- T_TASK(cmd)->t_tasks_sg_chained_no = sg_count;
+ cmd->t_tasks_sg_chained = sg_first;
+ cmd->t_tasks_sg_chained_no = chained_nents;
- DEBUG_CMD_M("Setup cmd: %p T_TASK(cmd)->t_tasks_sg_chained: %p and"
- " t_tasks_sg_chained_no: %u\n", cmd, T_TASK(cmd)->t_tasks_sg_chained,
- T_TASK(cmd)->t_tasks_sg_chained_no);
+ pr_debug("Setup cmd: %p cmd->t_tasks_sg_chained: %p and"
+ " t_tasks_sg_chained_no: %u\n", cmd, cmd->t_tasks_sg_chained,
+ cmd->t_tasks_sg_chained_no);
- for_each_sg(T_TASK(cmd)->t_tasks_sg_chained, sg,
- T_TASK(cmd)->t_tasks_sg_chained_no, i) {
+ for_each_sg(cmd->t_tasks_sg_chained, sg,
+ cmd->t_tasks_sg_chained_no, i) {
- DEBUG_CMD_M("SG[%d]: %p page: %p length: %d offset: %d, magic: 0x%08x\n",
- i, sg, sg_page(sg), sg->length, sg->offset, sg->sg_magic);
+ pr_debug("SG[%d]: %p page: %p length: %d offset: %d\n",
+ i, sg, sg_page(sg), sg->length, sg->offset);
if (sg_is_chain(sg))
- DEBUG_CMD_M("SG: %p sg_is_chain=1\n", sg);
+ pr_debug("SG: %p sg_is_chain=1\n", sg);
if (sg_is_last(sg))
- DEBUG_CMD_M("SG: %p sg_is_last=1\n", sg);
+ pr_debug("SG: %p sg_is_last=1\n", sg);
}
}
EXPORT_SYMBOL(transport_do_task_sg_chain);
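The chaining leans on each task's table having been over-allocated by one slot (task_padded_sg). A minimal sketch of the underlying primitive, assuming two tables where 'a' holds a_nents data entries plus the spare slot:

    static void example_chain_two_tables(struct scatterlist *a, int a_nents,
                                         struct scatterlist *b)
    {
        /*
         * The spare (a_nents + 1)-th slot of 'a' becomes a chain link,
         * so an sg walk starting at 'a' now traverses 'a' and then 'b'
         * as one logical scatterlist.
         */
        sg_chain(a, a_nents + 1, b);
    }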
-static int transport_do_se_mem_map(
- struct se_device *dev,
- struct se_task *task,
- struct list_head *se_mem_list,
- void *in_mem,
- struct se_mem *in_se_mem,
- struct se_mem **out_se_mem,
- u32 *se_mem_cnt,
- u32 *task_offset_in)
-{
- u32 task_offset = *task_offset_in;
- int ret = 0;
- /*
- * se_subsystem_api_t->do_se_mem_map is used when internal allocation
- * has been done by the transport plugin.
- */
- if (TRANSPORT(dev)->do_se_mem_map) {
- ret = TRANSPORT(dev)->do_se_mem_map(task, se_mem_list,
- in_mem, in_se_mem, out_se_mem, se_mem_cnt,
- task_offset_in);
- if (ret == 0)
- T_TASK(task->task_se_cmd)->t_tasks_se_num += *se_mem_cnt;
-
- return ret;
- }
-
- BUG_ON(list_empty(se_mem_list));
- /*
- * This is the normal path for all normal non BIDI and BIDI-COMMAND
- * WRITE payloads.. If we need to do BIDI READ passthrough for
- * TCM/pSCSI the first call to transport_do_se_mem_map ->
- * transport_calc_sg_num() -> transport_map_mem_to_sg() will do the
- * allocation for task->task_sg_bidi, and the subsequent call to
- * transport_do_se_mem_map() from transport_generic_get_cdb_count()
- */
- if (!(task->task_sg_bidi)) {
- /*
- * Assume default that transport plugin speaks preallocated
- * scatterlists.
- */
- if (!(transport_calc_sg_num(task, in_se_mem, task_offset)))
- return -1;
- /*
- * struct se_task->task_sg now contains the struct scatterlist array.
- */
- return transport_map_mem_to_sg(task, se_mem_list, task->task_sg,
- in_se_mem, out_se_mem, se_mem_cnt,
- task_offset_in);
- }
- /*
- * Handle the se_mem_list -> struct task->task_sg_bidi
- * memory map for the extra BIDI READ payload
- */
- return transport_map_mem_to_sg(task, se_mem_list, task->task_sg_bidi,
- in_se_mem, out_se_mem, se_mem_cnt,
- task_offset_in);
-}
-
-static u32 transport_generic_get_cdb_count(
+/*
+ * Break up cmd into chunks transport can handle
+ */
+static int transport_allocate_data_tasks(
struct se_cmd *cmd,
unsigned long long lba,
- u32 sectors,
enum dma_data_direction data_direction,
- struct list_head *mem_list,
- int set_counts)
+ struct scatterlist *sgl,
+ unsigned int sgl_nents)
{
unsigned char *cdb = NULL;
struct se_task *task;
- struct se_mem *se_mem = NULL, *se_mem_lout = NULL;
- struct se_mem *se_mem_bidi = NULL, *se_mem_bidi_lout = NULL;
- struct se_device *dev = SE_DEV(cmd);
- int max_sectors_set = 0, ret;
- u32 task_offset_in = 0, se_mem_cnt = 0, se_mem_bidi_cnt = 0, task_cdbs = 0;
-
- if (!mem_list) {
- printk(KERN_ERR "mem_list is NULL in transport_generic_get"
- "_cdb_count()\n");
- return 0;
- }
- /*
- * While using RAMDISK_DR backstores is the only case where
- * mem_list will ever be empty at this point.
- */
- if (!(list_empty(mem_list)))
- se_mem = list_entry(mem_list->next, struct se_mem, se_list);
- /*
- * Check for extra se_mem_bidi mapping for BIDI-COMMANDs to
- * struct se_task->task_sg_bidi for TCM/pSCSI passthrough operation
- */
- if ((T_TASK(cmd)->t_mem_bidi_list != NULL) &&
- !(list_empty(T_TASK(cmd)->t_mem_bidi_list)) &&
- (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV))
- se_mem_bidi = list_entry(T_TASK(cmd)->t_mem_bidi_list->next,
- struct se_mem, se_list);
-
- while (sectors) {
- DEBUG_VOL("ITT[0x%08x] LBA(%llu) SectorsLeft(%u) EOBJ(%llu)\n",
- CMD_TFO(cmd)->get_task_tag(cmd), lba, sectors,
- transport_dev_end_lba(dev));
+ struct se_device *dev = cmd->se_dev;
+ unsigned long flags;
+ int task_count, i, ret;
+ sector_t sectors, dev_max_sectors = dev->se_sub_dev->se_dev_attrib.max_sectors;
+ u32 sector_size = dev->se_sub_dev->se_dev_attrib.block_size;
+ struct scatterlist *sg;
+ struct scatterlist *cmd_sg;
- task = transport_generic_get_task(cmd, data_direction);
- if (!(task))
- goto out;
+ WARN_ON(cmd->data_length % sector_size);
+ sectors = DIV_ROUND_UP(cmd->data_length, sector_size);
+ task_count = DIV_ROUND_UP_SECTOR_T(sectors, dev_max_sectors);
+
+ cmd_sg = sgl;
+ for (i = 0; i < task_count; i++) {
+ unsigned int task_size;
+ int count;
- transport_set_tasks_sectors(task, dev, lba, sectors,
- &max_sectors_set);
+ task = transport_generic_get_task(cmd, data_direction);
+ if (!task)
+ return -ENOMEM;
task->task_lba = lba;
- lba += task->task_sectors;
- sectors -= task->task_sectors;
- task->task_size = (task->task_sectors *
- DEV_ATTRIB(dev)->block_size);
-
- cdb = TRANSPORT(dev)->get_cdb(task);
- if ((cdb)) {
- memcpy(cdb, T_TASK(cmd)->t_task_cdb,
- scsi_command_size(T_TASK(cmd)->t_task_cdb));
- cmd->transport_split_cdb(task->task_lba,
- &task->task_sectors, cdb);
- }
+ task->task_sectors = min(sectors, dev_max_sectors);
+ task->task_size = task->task_sectors * sector_size;
- /*
- * Perform the SE OBJ plugin and/or Transport plugin specific
- * mapping for T_TASK(cmd)->t_mem_list. And setup the
- * task->task_sg and if necessary task->task_sg_bidi
- */
- ret = transport_do_se_mem_map(dev, task, mem_list,
- NULL, se_mem, &se_mem_lout, &se_mem_cnt,
- &task_offset_in);
- if (ret < 0)
- goto out;
+ cdb = dev->transport->get_cdb(task);
+ BUG_ON(!cdb);
+
+ memcpy(cdb, cmd->t_task_cdb,
+ scsi_command_size(cmd->t_task_cdb));
+
+ /* Update new cdb with updated lba/sectors */
+ cmd->transport_split_cdb(task->task_lba, task->task_sectors, cdb);
- se_mem = se_mem_lout;
/*
- * Setup the T_TASK(cmd)->t_mem_bidi_list -> task->task_sg_bidi
- * mapping for SCSI READ for BIDI-COMMAND passthrough with TCM/pSCSI
- *
- * Note that the first call to transport_do_se_mem_map() above will
- * allocate struct se_task->task_sg_bidi in transport_do_se_mem_map()
- * -> transport_calc_sg_num(), and the second here will do the
- * mapping for SCSI READ for BIDI-COMMAND passthrough with TCM/pSCSI.
+ * Check if the fabric module driver is requesting that all
+ * struct se_task->task_sg[] be chained together.. If so,
+ * then allocate an extra padding SG entry for linking and
+ * marking the end of the chained SGL.
+ * Possibly over-allocate task sgl size by using cmd sgl size.
+ * It's so much easier and only a waste when task_count > 1.
+ * That is extremely rare.
*/
- if (task->task_sg_bidi != NULL) {
- ret = transport_do_se_mem_map(dev, task,
- T_TASK(cmd)->t_mem_bidi_list, NULL,
- se_mem_bidi, &se_mem_bidi_lout, &se_mem_bidi_cnt,
- &task_offset_in);
- if (ret < 0)
- goto out;
+ task->task_sg_nents = sgl_nents;
+ if (cmd->se_tfo->task_sg_chaining) {
+ task->task_sg_nents++;
+ task->task_padded_sg = 1;
+ }
- se_mem_bidi = se_mem_bidi_lout;
+ task->task_sg = kmalloc(sizeof(struct scatterlist) *
+ task->task_sg_nents, GFP_KERNEL);
+ if (!task->task_sg) {
+ cmd->se_dev->transport->free_task(task);
+ return -ENOMEM;
}
- task_cdbs++;
- DEBUG_VOL("Incremented task_cdbs(%u) task->task_sg_num(%u)\n",
- task_cdbs, task->task_sg_num);
+ sg_init_table(task->task_sg, task->task_sg_nents);
- if (max_sectors_set) {
- max_sectors_set = 0;
- continue;
+ task_size = task->task_size;
+
+ /* Build new sgl, only up to task_size */
+ for_each_sg(task->task_sg, sg, task->task_sg_nents, count) {
+ if (cmd_sg->length > task_size)
+ break;
+
+ *sg = *cmd_sg;
+ task_size -= cmd_sg->length;
+ cmd_sg = sg_next(cmd_sg);
}
- if (!sectors)
- break;
- }
+ lba += task->task_sectors;
+ sectors -= task->task_sectors;
- if (set_counts) {
- atomic_inc(&T_TASK(cmd)->t_fe_count);
- atomic_inc(&T_TASK(cmd)->t_se_count);
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+ list_add_tail(&task->t_list, &cmd->t_task_list);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
}
+ /*
+ * Now perform the memory map of task->task_sg[] into backend
+ * subsystem memory..
+ */
+ list_for_each_entry(task, &cmd->t_task_list, t_list) {
+ if (atomic_read(&task->task_sent))
+ continue;
+ if (!dev->transport->map_data_SG)
+ continue;
- DEBUG_VOL("ITT[0x%08x] total %s cdbs(%u)\n",
- CMD_TFO(cmd)->get_task_tag(cmd), (data_direction == DMA_TO_DEVICE)
- ? "DMA_TO_DEVICE" : "DMA_FROM_DEVICE", task_cdbs);
+ ret = dev->transport->map_data_SG(task);
+ if (ret < 0)
+ return 0;
+ }
- return task_cdbs;
-out:
- return 0;
+ return task_count;
}
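Worked example (values assumed): a 1 MiB WRITE with block_size = 512 gives sectors = 2048; with dev_max_sectors = 1024 that makes task_count = DIV_ROUND_UP_SECTOR_T(2048, 1024) = 2, and each se_task carries 1024 sectors (512 KiB) sliced from the same command SGL.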
static int
-transport_map_control_cmd_to_task(struct se_cmd *cmd)
+transport_allocate_control_task(struct se_cmd *cmd)
{
- struct se_device *dev = SE_DEV(cmd);
+ struct se_device *dev = cmd->se_dev;
unsigned char *cdb;
struct se_task *task;
- int ret;
+ unsigned long flags;
+ int ret = 0;
task = transport_generic_get_task(cmd, cmd->data_direction);
if (!task)
- return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
+ return -ENOMEM;
- cdb = TRANSPORT(dev)->get_cdb(task);
- if (cdb)
- memcpy(cdb, cmd->t_task->t_task_cdb,
- scsi_command_size(cmd->t_task->t_task_cdb));
+ cdb = dev->transport->get_cdb(task);
+ BUG_ON(!cdb);
+ memcpy(cdb, cmd->t_task_cdb,
+ scsi_command_size(cmd->t_task_cdb));
+
+ task->task_sg = kmalloc(sizeof(struct scatterlist) * cmd->t_data_nents,
+ GFP_KERNEL);
+ if (!task->task_sg) {
+ cmd->se_dev->transport->free_task(task);
+ return -ENOMEM;
+ }
+ memcpy(task->task_sg, cmd->t_data_sg,
+ sizeof(struct scatterlist) * cmd->t_data_nents);
task->task_size = cmd->data_length;
- task->task_sg_num =
- (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) ? 1 : 0;
+ task->task_sg_nents = cmd->t_data_nents;
- atomic_inc(&cmd->t_task->t_fe_count);
- atomic_inc(&cmd->t_task->t_se_count);
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+ list_add_tail(&task->t_list, &cmd->t_task_list);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) {
- struct se_mem *se_mem = NULL, *se_mem_lout = NULL;
- u32 se_mem_cnt = 0, task_offset = 0;
-
- if (!list_empty(T_TASK(cmd)->t_mem_list))
- se_mem = list_entry(T_TASK(cmd)->t_mem_list->next,
- struct se_mem, se_list);
-
- ret = transport_do_se_mem_map(dev, task,
- cmd->t_task->t_mem_list, NULL, se_mem,
- &se_mem_lout, &se_mem_cnt, &task_offset);
- if (ret < 0)
- return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
-
- if (dev->transport->map_task_SG)
- return dev->transport->map_task_SG(task);
- return 0;
- } else if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_NONSG_IO_CDB) {
- if (dev->transport->map_task_non_SG)
- return dev->transport->map_task_non_SG(task);
- return 0;
+ if (dev->transport->map_control_SG)
+ ret = dev->transport->map_control_SG(task);
} else if (cmd->se_cmd_flags & SCF_SCSI_NON_DATA_CDB) {
if (dev->transport->cdb_none)
- return dev->transport->cdb_none(task);
- return 0;
+ ret = dev->transport->cdb_none(task);
} else {
+ pr_err("target: Unknown control cmd type!\n");
BUG();
- return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
}
+
+ /* Success! Return number of tasks allocated */
+ if (ret == 0)
+ return 1;
+ return ret;
+}
+
+static int transport_allocate_tasks(
+ struct se_cmd *cmd,
+ unsigned long long lba,
+ enum dma_data_direction data_direction,
+ struct scatterlist *sgl,
+ unsigned int sgl_nents)
+{
+ if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)
+ return transport_allocate_data_tasks(cmd, lba, data_direction,
+ sgl, sgl_nents);
+ else
+ return transport_allocate_control_task(cmd);
}
+
/* transport_generic_new_cmd(): Called from transport_processing_thread()
*
* Allocate storage transport resources from a set of values predefined
@@ -5088,64 +4248,33 @@ transport_map_control_cmd_to_task(struct se_cmd *cmd)
/*
* Generate struct se_task(s) and/or their payloads for this CDB.
*/
-static int transport_generic_new_cmd(struct se_cmd *cmd)
+int transport_generic_new_cmd(struct se_cmd *cmd)
{
- struct se_portal_group *se_tpg;
- struct se_task *task;
- struct se_device *dev = SE_DEV(cmd);
int ret = 0;
/*
* Determine is the TCM fabric module has already allocated physical
* memory, and is directly calling transport_generic_map_mem_to_cmd()
- * to setup beforehand the linked list of physical memory at
- * T_TASK(cmd)->t_mem_list of struct se_mem->se_page
+ * beforehand.
*/
- if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)) {
- ret = transport_allocate_resources(cmd);
+ if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) &&
+ cmd->data_length) {
+ ret = transport_generic_get_mem(cmd);
if (ret < 0)
return ret;
}
-
- ret = transport_get_sectors(cmd);
- if (ret < 0)
- return ret;
-
+ /*
+ * Call transport_new_cmd_obj() to invoke transport_allocate_tasks() for
+ * control or data CDB types, and perform the map to backend subsystem
+ * code from SGL memory allocated here by transport_generic_get_mem(), or
+ * via pre-existing SGL memory set up explicitly by fabric module code with
+ * transport_generic_map_mem_to_cmd().
+ */
ret = transport_new_cmd_obj(cmd);
if (ret < 0)
return ret;
-
/*
- * Determine if the calling TCM fabric module is talking to
- * Linux/NET via kernel sockets and needs to allocate a
- * struct iovec array to complete the struct se_cmd
- */
- se_tpg = SE_LUN(cmd)->lun_sep->sep_tpg;
- if (TPG_TFO(se_tpg)->alloc_cmd_iovecs != NULL) {
- ret = TPG_TFO(se_tpg)->alloc_cmd_iovecs(cmd);
- if (ret < 0)
- return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
- }
-
- if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
- list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) {
- if (atomic_read(&task->task_sent))
- continue;
- if (!dev->transport->map_task_SG)
- continue;
-
- ret = dev->transport->map_task_SG(task);
- if (ret < 0)
- return ret;
- }
- } else {
- ret = transport_map_control_cmd_to_task(cmd);
- if (ret < 0)
- return ret;
- }
-
- /*
- * For WRITEs, let the iSCSI Target RX Thread know its buffer is ready..
+ * For WRITEs, let the fabric know its buffer is ready..
* This WRITE struct se_cmd (and all of its associated struct se_task's)
* will be added to the struct se_device execution queue after its WRITE
* data has arrived. (ie: It gets handled by the transport processing
@@ -5162,6 +4291,7 @@ static int transport_generic_new_cmd(struct se_cmd *cmd)
transport_execute_tasks(cmd);
return 0;
}
+EXPORT_SYMBOL(transport_generic_new_cmd);
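With the symbol exported, a fabric can drive the whole allocation path with one call. A hedged sketch (fabric function name hypothetical):

    static int example_fabric_queue_cmd(struct se_cmd *se_cmd)
    {
        /*
         * Allocates the payload SGL (unless the fabric mapped its own via
         * transport_generic_map_mem_to_cmd()) plus the se_task list; READs
         * are queued for execution at once, WRITEs come back through the
         * fabric's ->write_pending() first.
         */
        return transport_generic_new_cmd(se_cmd);
    }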
/* transport_generic_process_write():
*
@@ -5169,68 +4299,15 @@ static int transport_generic_new_cmd(struct se_cmd *cmd)
*/
void transport_generic_process_write(struct se_cmd *cmd)
{
-#if 0
- /*
- * Copy SCSI Presented DTL sector(s) from received buffers allocated to
- * original EDTL
- */
- if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
- if (!T_TASK(cmd)->t_tasks_se_num) {
- unsigned char *dst, *buf =
- (unsigned char *)T_TASK(cmd)->t_task_buf;
-
- dst = kzalloc(cmd->cmd_spdtl), GFP_KERNEL);
- if (!(dst)) {
- printk(KERN_ERR "Unable to allocate memory for"
- " WRITE underflow\n");
- transport_generic_request_failure(cmd, NULL,
- PYX_TRANSPORT_REQ_TOO_MANY_SECTORS, 1);
- return;
- }
- memcpy(dst, buf, cmd->cmd_spdtl);
-
- kfree(T_TASK(cmd)->t_task_buf);
- T_TASK(cmd)->t_task_buf = dst;
- } else {
- struct scatterlist *sg =
- (struct scatterlist *sg)T_TASK(cmd)->t_task_buf;
- struct scatterlist *orig_sg;
-
- orig_sg = kzalloc(sizeof(struct scatterlist) *
- T_TASK(cmd)->t_tasks_se_num,
- GFP_KERNEL))) {
- if (!(orig_sg)) {
- printk(KERN_ERR "Unable to allocate memory"
- " for WRITE underflow\n");
- transport_generic_request_failure(cmd, NULL,
- PYX_TRANSPORT_REQ_TOO_MANY_SECTORS, 1);
- return;
- }
-
- memcpy(orig_sg, T_TASK(cmd)->t_task_buf,
- sizeof(struct scatterlist) *
- T_TASK(cmd)->t_tasks_se_num);
-
- cmd->data_length = cmd->cmd_spdtl;
- /*
- * FIXME, clear out original struct se_task and state
- * information.
- */
- if (transport_generic_new_cmd(cmd) < 0) {
- transport_generic_request_failure(cmd, NULL,
- PYX_TRANSPORT_REQ_TOO_MANY_SECTORS, 1);
- kfree(orig_sg);
- return;
- }
-
- transport_memcpy_write_sg(cmd, orig_sg);
- }
- }
-#endif
transport_execute_tasks(cmd);
}
EXPORT_SYMBOL(transport_generic_process_write);
+static int transport_write_pending_qf(struct se_cmd *cmd)
+{
+ return cmd->se_tfo->write_pending(cmd);
+}
+
/* transport_generic_write_pending():
*
*
@@ -5240,24 +4317,26 @@ static int transport_generic_write_pending(struct se_cmd *cmd)
unsigned long flags;
int ret;
- spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
cmd->t_state = TRANSPORT_WRITE_PENDING;
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
- /*
- * For the TCM control CDBs using a contiguous buffer, do the memcpy
- * from the passed Linux/SCSI struct scatterlist located at
- * T_TASK(se_cmd)->t_task_pt_buf to the contiguous buffer at
- * T_TASK(se_cmd)->t_task_buf.
- */
- if (cmd->se_cmd_flags & SCF_PASSTHROUGH_CONTIG_TO_SG)
- transport_memcpy_read_contig(cmd,
- T_TASK(cmd)->t_task_buf,
- T_TASK(cmd)->t_task_pt_sgl);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+
+ if (cmd->transport_qf_callback) {
+ ret = cmd->transport_qf_callback(cmd);
+ if (ret == -EAGAIN)
+ goto queue_full;
+ else if (ret < 0)
+ return ret;
+
+ cmd->transport_qf_callback = NULL;
+ return 0;
+ }
+
/*
* Clear the se_cmd for WRITE_PENDING status in order to set
- * T_TASK(cmd)->t_transport_active=0 so that transport_generic_handle_data
+ * cmd->t_transport_active=0 so that transport_generic_handle_data
* can be called from HW target mode interrupt code. This is safe
- * to be called with transport_off=1 before the CMD_TFO(cmd)->write_pending
+ * to be called with transport_off=1 before the cmd->se_tfo->write_pending
* because the se_cmd->se_lun pointer is not being cleared.
*/
transport_cmd_check_stop(cmd, 1, 0);
@@ -5266,26 +4345,30 @@ static int transport_generic_write_pending(struct se_cmd *cmd)
* Call the fabric write_pending function here to let the
* frontend know that WRITE buffers are ready.
*/
- ret = CMD_TFO(cmd)->write_pending(cmd);
- if (ret < 0)
+ ret = cmd->se_tfo->write_pending(cmd);
+ if (ret == -EAGAIN)
+ goto queue_full;
+ else if (ret < 0)
return ret;
return PYX_TRANSPORT_WRITE_PENDING;
+
+queue_full:
+ pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd);
+ cmd->t_state = TRANSPORT_COMPLETE_QF_WP;
+ transport_handle_queue_full(cmd, cmd->se_dev,
+ transport_write_pending_qf);
+ return ret;
}
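The -EAGAIN convention gives fabrics a transient-failure path: instead of erroring out the command, the core parks it via transport_handle_queue_full() and retries it later through transport_write_pending_qf(). A sketch of the fabric side, with the ring-full flag standing in for real hardware state (assumed, not from this patch):

    static bool example_tx_ring_full;   /* stand-in for real HW state */

    static int example_write_pending(struct se_cmd *se_cmd)
    {
        if (example_tx_ring_full)
            return -EAGAIN;     /* core re-queues and calls back later */

        return 0;               /* WRITE buffers posted, data may flow */
    }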
-/* transport_release_cmd_to_pool():
- *
- *
- */
-void transport_release_cmd_to_pool(struct se_cmd *cmd)
+void transport_release_cmd(struct se_cmd *cmd)
{
- BUG_ON(!T_TASK(cmd));
- BUG_ON(!CMD_TFO(cmd));
+ BUG_ON(!cmd->se_tfo);
transport_free_se_cmd(cmd);
- CMD_TFO(cmd)->release_cmd_to_pool(cmd);
+ cmd->se_tfo->release_cmd(cmd);
}
-EXPORT_SYMBOL(transport_release_cmd_to_pool);
+EXPORT_SYMBOL(transport_release_cmd);
/* transport_generic_free_cmd():
*
@@ -5294,19 +4377,18 @@ EXPORT_SYMBOL(transport_release_cmd_to_pool);
void transport_generic_free_cmd(
struct se_cmd *cmd,
int wait_for_tasks,
- int release_to_pool,
int session_reinstatement)
{
- if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) || !T_TASK(cmd))
- transport_release_cmd_to_pool(cmd);
+ if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD))
+ transport_release_cmd(cmd);
else {
core_dec_lacl_count(cmd->se_sess->se_node_acl, cmd);
- if (SE_LUN(cmd)) {
+ if (cmd->se_lun) {
#if 0
- printk(KERN_INFO "cmd: %p ITT: 0x%08x contains"
- " SE_LUN(cmd)\n", cmd,
- CMD_TFO(cmd)->get_task_tag(cmd));
+ pr_debug("cmd: %p ITT: 0x%08x contains"
+ " cmd->se_lun\n", cmd,
+ cmd->se_tfo->get_task_tag(cmd));
#endif
transport_lun_remove_cmd(cmd);
}
@@ -5316,8 +4398,7 @@ void transport_generic_free_cmd(
transport_free_dev_tasks(cmd);
- transport_generic_remove(cmd, release_to_pool,
- session_reinstatement);
+ transport_generic_remove(cmd, session_reinstatement);
}
}
EXPORT_SYMBOL(transport_generic_free_cmd);
@@ -5343,43 +4424,36 @@ static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun)
* If the frontend has already requested this struct se_cmd to
* be stopped, we can safely ignore this struct se_cmd.
*/
- spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
- if (atomic_read(&T_TASK(cmd)->t_transport_stop)) {
- atomic_set(&T_TASK(cmd)->transport_lun_stop, 0);
- DEBUG_TRANSPORT_S("ConfigFS ITT[0x%08x] - t_transport_stop =="
- " TRUE, skipping\n", CMD_TFO(cmd)->get_task_tag(cmd));
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+ if (atomic_read(&cmd->t_transport_stop)) {
+ atomic_set(&cmd->transport_lun_stop, 0);
+ pr_debug("ConfigFS ITT[0x%08x] - t_transport_stop =="
+ " TRUE, skipping\n", cmd->se_tfo->get_task_tag(cmd));
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
transport_cmd_check_stop(cmd, 1, 0);
- return -1;
+ return -EPERM;
}
- atomic_set(&T_TASK(cmd)->transport_lun_fe_stop, 1);
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ atomic_set(&cmd->transport_lun_fe_stop, 1);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- wake_up_interruptible(&SE_DEV(cmd)->dev_queue_obj->thread_wq);
+ wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq);
ret = transport_stop_tasks_for_cmd(cmd);
- DEBUG_TRANSPORT_S("ConfigFS: cmd: %p t_task_cdbs: %d stop tasks ret:"
- " %d\n", cmd, T_TASK(cmd)->t_task_cdbs, ret);
+ pr_debug("ConfigFS: cmd: %p t_tasks: %d stop tasks ret:"
+ " %d\n", cmd, cmd->t_task_list_num, ret);
if (!ret) {
- DEBUG_TRANSPORT_S("ConfigFS: ITT[0x%08x] - stopping cmd....\n",
- CMD_TFO(cmd)->get_task_tag(cmd));
- wait_for_completion(&T_TASK(cmd)->transport_lun_stop_comp);
- DEBUG_TRANSPORT_S("ConfigFS: ITT[0x%08x] - stopped cmd....\n",
- CMD_TFO(cmd)->get_task_tag(cmd));
+ pr_debug("ConfigFS: ITT[0x%08x] - stopping cmd....\n",
+ cmd->se_tfo->get_task_tag(cmd));
+ wait_for_completion(&cmd->transport_lun_stop_comp);
+ pr_debug("ConfigFS: ITT[0x%08x] - stopped cmd....\n",
+ cmd->se_tfo->get_task_tag(cmd));
}
- transport_remove_cmd_from_queue(cmd, SE_DEV(cmd)->dev_queue_obj);
+ transport_remove_cmd_from_queue(cmd, &cmd->se_dev->dev_queue_obj);
return 0;
}
-/* #define DEBUG_CLEAR_LUN */
-#ifdef DEBUG_CLEAR_LUN
-#define DEBUG_CLEAR_L(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_CLEAR_L(x...)
-#endif
-
static void __transport_clear_lun_from_sessions(struct se_lun *lun)
{
struct se_cmd *cmd = NULL;
@@ -5389,66 +4463,59 @@ static void __transport_clear_lun_from_sessions(struct se_lun *lun)
* Initiator Port.
*/
spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
- while (!list_empty_careful(&lun->lun_cmd_list)) {
- cmd = list_entry(lun->lun_cmd_list.next,
- struct se_cmd, se_lun_list);
- list_del(&cmd->se_lun_list);
-
- if (!(T_TASK(cmd))) {
- printk(KERN_ERR "ITT: 0x%08x, T_TASK(cmd) = NULL"
- "[i,t]_state: %u/%u\n",
- CMD_TFO(cmd)->get_task_tag(cmd),
- CMD_TFO(cmd)->get_cmd_state(cmd), cmd->t_state);
- BUG();
- }
- atomic_set(&T_TASK(cmd)->transport_lun_active, 0);
+ while (!list_empty(&lun->lun_cmd_list)) {
+ cmd = list_first_entry(&lun->lun_cmd_list,
+ struct se_cmd, se_lun_node);
+ list_del(&cmd->se_lun_node);
+
+ atomic_set(&cmd->transport_lun_active, 0);
/*
* This will notify iscsi_target_transport.c:
* transport_cmd_check_stop() that a LUN shutdown is in
* progress for the iscsi_cmd_t.
*/
- spin_lock(&T_TASK(cmd)->t_state_lock);
- DEBUG_CLEAR_L("SE_LUN[%d] - Setting T_TASK(cmd)->transport"
+ spin_lock(&cmd->t_state_lock);
+ pr_debug("SE_LUN[%d] - Setting cmd->transport"
"_lun_stop for ITT: 0x%08x\n",
- SE_LUN(cmd)->unpacked_lun,
- CMD_TFO(cmd)->get_task_tag(cmd));
- atomic_set(&T_TASK(cmd)->transport_lun_stop, 1);
- spin_unlock(&T_TASK(cmd)->t_state_lock);
+ cmd->se_lun->unpacked_lun,
+ cmd->se_tfo->get_task_tag(cmd));
+ atomic_set(&cmd->transport_lun_stop, 1);
+ spin_unlock(&cmd->t_state_lock);
spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
- if (!(SE_LUN(cmd))) {
- printk(KERN_ERR "ITT: 0x%08x, [i,t]_state: %u/%u\n",
- CMD_TFO(cmd)->get_task_tag(cmd),
- CMD_TFO(cmd)->get_cmd_state(cmd), cmd->t_state);
+ if (!cmd->se_lun) {
+ pr_err("ITT: 0x%08x, [i,t]_state: %u/%u\n",
+ cmd->se_tfo->get_task_tag(cmd),
+ cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
BUG();
}
/*
* If the Storage engine still owns the iscsi_cmd_t, determine
* and/or stop its context.
*/
- DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x before transport"
- "_lun_wait_for_tasks()\n", SE_LUN(cmd)->unpacked_lun,
- CMD_TFO(cmd)->get_task_tag(cmd));
+ pr_debug("SE_LUN[%d] - ITT: 0x%08x before transport"
+ "_lun_wait_for_tasks()\n", cmd->se_lun->unpacked_lun,
+ cmd->se_tfo->get_task_tag(cmd));
- if (transport_lun_wait_for_tasks(cmd, SE_LUN(cmd)) < 0) {
+ if (transport_lun_wait_for_tasks(cmd, cmd->se_lun) < 0) {
spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
continue;
}
- DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x after transport_lun"
+ pr_debug("SE_LUN[%d] - ITT: 0x%08x after transport_lun"
"_wait_for_tasks(): SUCCESS\n",
- SE_LUN(cmd)->unpacked_lun,
- CMD_TFO(cmd)->get_task_tag(cmd));
+ cmd->se_lun->unpacked_lun,
+ cmd->se_tfo->get_task_tag(cmd));
- spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, cmd_flags);
- if (!(atomic_read(&T_TASK(cmd)->transport_dev_active))) {
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, cmd_flags);
+ spin_lock_irqsave(&cmd->t_state_lock, cmd_flags);
+ if (!atomic_read(&cmd->transport_dev_active)) {
+ spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
goto check_cond;
}
- atomic_set(&T_TASK(cmd)->transport_dev_active, 0);
+ atomic_set(&cmd->transport_dev_active, 0);
transport_all_task_dev_remove_state(cmd);
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, cmd_flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
transport_free_dev_tasks(cmd);
/*
@@ -5465,24 +4532,24 @@ check_cond:
* be released, notify the waiting thread now that LU has
* finished accessing it.
*/
- spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, cmd_flags);
- if (atomic_read(&T_TASK(cmd)->transport_lun_fe_stop)) {
- DEBUG_CLEAR_L("SE_LUN[%d] - Detected FE stop for"
+ spin_lock_irqsave(&cmd->t_state_lock, cmd_flags);
+ if (atomic_read(&cmd->transport_lun_fe_stop)) {
+ pr_debug("SE_LUN[%d] - Detected FE stop for"
" struct se_cmd: %p ITT: 0x%08x\n",
lun->unpacked_lun,
- cmd, CMD_TFO(cmd)->get_task_tag(cmd));
+ cmd, cmd->se_tfo->get_task_tag(cmd));
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
+ spin_unlock_irqrestore(&cmd->t_state_lock,
cmd_flags);
transport_cmd_check_stop(cmd, 1, 0);
- complete(&T_TASK(cmd)->transport_lun_fe_stop_comp);
+ complete(&cmd->transport_lun_fe_stop_comp);
spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
continue;
}
- DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x finished processing\n",
- lun->unpacked_lun, CMD_TFO(cmd)->get_task_tag(cmd));
+ pr_debug("SE_LUN[%d] - ITT: 0x%08x finished processing\n",
+ lun->unpacked_lun, cmd->se_tfo->get_task_tag(cmd));
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, cmd_flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
}
spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
@@ -5502,11 +4569,11 @@ int transport_clear_lun_from_sessions(struct se_lun *lun)
{
struct task_struct *kt;
- kt = kthread_run(transport_clear_lun_thread, (void *)lun,
+ kt = kthread_run(transport_clear_lun_thread, lun,
"tcm_cl_%u", lun->unpacked_lun);
if (IS_ERR(kt)) {
- printk(KERN_ERR "Unable to start clear_lun thread\n");
- return -1;
+ pr_err("Unable to start clear_lun thread\n");
+ return PTR_ERR(kt);
}
wait_for_completion(&lun->lun_shutdown_comp);
@@ -5528,20 +4595,20 @@ static void transport_generic_wait_for_tasks(
if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && !(cmd->se_tmr_req))
return;
- spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
/*
* If we are already stopped due to an external event (ie: LUN shutdown)
* sleep until the connection can have the passed struct se_cmd back.
- * The T_TASK(cmd)->transport_lun_stopped_sem will be upped by
+ * The cmd->transport_lun_stopped_sem will be upped by
* transport_clear_lun_from_sessions() once the ConfigFS context caller
* has completed its operation on the struct se_cmd.
*/
- if (atomic_read(&T_TASK(cmd)->transport_lun_stop)) {
+ if (atomic_read(&cmd->transport_lun_stop)) {
- DEBUG_TRANSPORT_S("wait_for_tasks: Stopping"
- " wait_for_completion(&T_TASK(cmd)transport_lun_fe"
+ pr_debug("wait_for_tasks: Stopping"
+ " wait_for_completion(&cmd->t_tasktransport_lun_fe"
"_stop_comp); for ITT: 0x%08x\n",
- CMD_TFO(cmd)->get_task_tag(cmd));
+ cmd->se_tfo->get_task_tag(cmd));
/*
* There is a special case for WRITES where a FE exception +
* LUN shutdown means ConfigFS context is still sleeping on
@@ -5549,10 +4616,10 @@ static void transport_generic_wait_for_tasks(
* We go ahead and up transport_lun_stop_comp just to be sure
* here.
*/
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
- complete(&T_TASK(cmd)->transport_lun_stop_comp);
- wait_for_completion(&T_TASK(cmd)->transport_lun_fe_stop_comp);
- spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+ complete(&cmd->transport_lun_stop_comp);
+ wait_for_completion(&cmd->transport_lun_fe_stop_comp);
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
transport_all_task_dev_remove_state(cmd);
/*
@@ -5560,44 +4627,44 @@ static void transport_generic_wait_for_tasks(
* struct se_cmd, now owns the structure and can be released through
* normal means below.
*/
- DEBUG_TRANSPORT_S("wait_for_tasks: Stopped"
- " wait_for_completion(&T_TASK(cmd)transport_lun_fe_"
+ pr_debug("wait_for_tasks: Stopped"
+ " wait_for_completion(&cmd->t_tasktransport_lun_fe_"
"stop_comp); for ITT: 0x%08x\n",
- CMD_TFO(cmd)->get_task_tag(cmd));
+ cmd->se_tfo->get_task_tag(cmd));
- atomic_set(&T_TASK(cmd)->transport_lun_stop, 0);
+ atomic_set(&cmd->transport_lun_stop, 0);
}
- if (!atomic_read(&T_TASK(cmd)->t_transport_active) ||
- atomic_read(&T_TASK(cmd)->t_transport_aborted))
+ if (!atomic_read(&cmd->t_transport_active) ||
+ atomic_read(&cmd->t_transport_aborted))
goto remove;
- atomic_set(&T_TASK(cmd)->t_transport_stop, 1);
+ atomic_set(&cmd->t_transport_stop, 1);
- DEBUG_TRANSPORT_S("wait_for_tasks: Stopping %p ITT: 0x%08x"
+ pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08x"
" i_state: %d, t_state/def_t_state: %d/%d, t_transport_stop"
- " = TRUE\n", cmd, CMD_TFO(cmd)->get_task_tag(cmd),
- CMD_TFO(cmd)->get_cmd_state(cmd), cmd->t_state,
+ " = TRUE\n", cmd, cmd->se_tfo->get_task_tag(cmd),
+ cmd->se_tfo->get_cmd_state(cmd), cmd->t_state,
cmd->deferred_t_state);
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- wake_up_interruptible(&SE_DEV(cmd)->dev_queue_obj->thread_wq);
+ wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq);
- wait_for_completion(&T_TASK(cmd)->t_transport_stop_comp);
+ wait_for_completion(&cmd->t_transport_stop_comp);
- spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
- atomic_set(&T_TASK(cmd)->t_transport_active, 0);
- atomic_set(&T_TASK(cmd)->t_transport_stop, 0);
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+ atomic_set(&cmd->t_transport_active, 0);
+ atomic_set(&cmd->t_transport_stop, 0);
- DEBUG_TRANSPORT_S("wait_for_tasks: Stopped wait_for_compltion("
- "&T_TASK(cmd)->t_transport_stop_comp) for ITT: 0x%08x\n",
- CMD_TFO(cmd)->get_task_tag(cmd));
+ pr_debug("wait_for_tasks: Stopped wait_for_compltion("
+ "&cmd->t_transport_stop_comp) for ITT: 0x%08x\n",
+ cmd->se_tfo->get_task_tag(cmd));
remove:
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
if (!remove_cmd)
return;
- transport_generic_free_cmd(cmd, 0, 0, session_reinstatement);
+ transport_generic_free_cmd(cmd, 0, session_reinstatement);
}
static int transport_get_sense_codes(
@@ -5632,13 +4699,13 @@ int transport_send_check_condition_and_sense(
int offset;
u8 asc = 0, ascq = 0;
- spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
return 0;
}
cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION;
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
if (!reason && from_transport)
goto after_reason;
@@ -5651,7 +4718,7 @@ int transport_send_check_condition_and_sense(
* TRANSPORT_SENSE_BUFFER is now set to SCSI_SENSE_BUFFERSIZE
* from include/scsi/scsi_cmnd.h
*/
- offset = CMD_TFO(cmd)->set_fabric_sense_len(cmd,
+ offset = cmd->se_tfo->set_fabric_sense_len(cmd,
TRANSPORT_SENSE_BUFFER);
/*
* Actual SENSE DATA, see SPC-3 7.23.2 SPC_SENSE_KEY_OFFSET uses
@@ -5788,8 +4855,7 @@ int transport_send_check_condition_and_sense(
cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER + offset;
after_reason:
- CMD_TFO(cmd)->queue_status(cmd);
- return 0;
+ return cmd->se_tfo->queue_status(cmd);
}
EXPORT_SYMBOL(transport_send_check_condition_and_sense);
@@ -5797,18 +4863,18 @@ int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
{
int ret = 0;
- if (atomic_read(&T_TASK(cmd)->t_transport_aborted) != 0) {
- if (!(send_status) ||
+ if (atomic_read(&cmd->t_transport_aborted) != 0) {
+ if (!send_status ||
(cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
return 1;
#if 0
- printk(KERN_INFO "Sending delayed SAM_STAT_TASK_ABORTED"
+ pr_debug("Sending delayed SAM_STAT_TASK_ABORTED"
" status for CDB: 0x%02x ITT: 0x%08x\n",
- T_TASK(cmd)->t_task_cdb[0],
- CMD_TFO(cmd)->get_task_tag(cmd));
+ cmd->t_task_cdb[0],
+ cmd->se_tfo->get_task_tag(cmd));
#endif
cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS;
- CMD_TFO(cmd)->queue_status(cmd);
+ cmd->se_tfo->queue_status(cmd);
ret = 1;
}
return ret;
@@ -5824,8 +4890,8 @@ void transport_send_task_abort(struct se_cmd *cmd)
* queued back to fabric module by transport_check_aborted_status().
*/
if (cmd->data_direction == DMA_TO_DEVICE) {
- if (CMD_TFO(cmd)->write_pending_status(cmd) != 0) {
- atomic_inc(&T_TASK(cmd)->t_transport_aborted);
+ if (cmd->se_tfo->write_pending_status(cmd) != 0) {
+ atomic_inc(&cmd->t_transport_aborted);
smp_mb__after_atomic_inc();
cmd->scsi_status = SAM_STAT_TASK_ABORTED;
transport_new_cmd_failure(cmd);
@@ -5834,11 +4900,11 @@ void transport_send_task_abort(struct se_cmd *cmd)
}
cmd->scsi_status = SAM_STAT_TASK_ABORTED;
#if 0
- printk(KERN_INFO "Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x,"
- " ITT: 0x%08x\n", T_TASK(cmd)->t_task_cdb[0],
- CMD_TFO(cmd)->get_task_tag(cmd));
+ pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x,"
+ " ITT: 0x%08x\n", cmd->t_task_cdb[0],
+ cmd->se_tfo->get_task_tag(cmd));
#endif
- CMD_TFO(cmd)->queue_status(cmd);
+ cmd->se_tfo->queue_status(cmd);
}
/* transport_generic_do_tmr():
@@ -5847,14 +4913,12 @@ void transport_send_task_abort(struct se_cmd *cmd)
*/
int transport_generic_do_tmr(struct se_cmd *cmd)
{
- struct se_cmd *ref_cmd;
- struct se_device *dev = SE_DEV(cmd);
+ struct se_device *dev = cmd->se_dev;
struct se_tmr_req *tmr = cmd->se_tmr_req;
int ret;
switch (tmr->function) {
case TMR_ABORT_TASK:
- ref_cmd = tmr->ref_cmd;
tmr->response = TMR_FUNCTION_REJECTED;
break;
case TMR_ABORT_TASK_SET:
@@ -5874,14 +4938,14 @@ int transport_generic_do_tmr(struct se_cmd *cmd)
tmr->response = TMR_FUNCTION_REJECTED;
break;
default:
- printk(KERN_ERR "Uknown TMR function: 0x%02x.\n",
+ pr_err("Uknown TMR function: 0x%02x.\n",
tmr->function);
tmr->response = TMR_FUNCTION_REJECTED;
break;
}
cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
- CMD_TFO(cmd)->queue_tm_rsp(cmd);
+ cmd->se_tfo->queue_tm_rsp(cmd);
transport_cmd_check_stop(cmd, 2, 0);
return 0;
@@ -5911,62 +4975,54 @@ transport_get_task_from_state_list(struct se_device *dev)
static void transport_processing_shutdown(struct se_device *dev)
{
struct se_cmd *cmd;
- struct se_queue_req *qr;
struct se_task *task;
- u8 state;
unsigned long flags;
/*
* Empty the struct se_device's struct se_task state list.
*/
spin_lock_irqsave(&dev->execute_task_lock, flags);
while ((task = transport_get_task_from_state_list(dev))) {
- if (!(TASK_CMD(task))) {
- printk(KERN_ERR "TASK_CMD(task) is NULL!\n");
+ if (!task->task_se_cmd) {
+ pr_err("task->task_se_cmd is NULL!\n");
continue;
}
- cmd = TASK_CMD(task);
+ cmd = task->task_se_cmd;
- if (!T_TASK(cmd)) {
- printk(KERN_ERR "T_TASK(cmd) is NULL for task: %p cmd:"
- " %p ITT: 0x%08x\n", task, cmd,
- CMD_TFO(cmd)->get_task_tag(cmd));
- continue;
- }
spin_unlock_irqrestore(&dev->execute_task_lock, flags);
- spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
- DEBUG_DO("PT: cmd: %p task: %p ITT/CmdSN: 0x%08x/0x%08x,"
- " i_state/def_i_state: %d/%d, t_state/def_t_state:"
+ pr_debug("PT: cmd: %p task: %p ITT: 0x%08x,"
+ " i_state: %d, t_state/def_t_state:"
" %d/%d cdb: 0x%02x\n", cmd, task,
- CMD_TFO(cmd)->get_task_tag(cmd), cmd->cmd_sn,
- CMD_TFO(cmd)->get_cmd_state(cmd), cmd->deferred_i_state,
+ cmd->se_tfo->get_task_tag(cmd),
+ cmd->se_tfo->get_cmd_state(cmd),
cmd->t_state, cmd->deferred_t_state,
- T_TASK(cmd)->t_task_cdb[0]);
- DEBUG_DO("PT: ITT[0x%08x] - t_task_cdbs: %d t_task_cdbs_left:"
+ cmd->t_task_cdb[0]);
+ pr_debug("PT: ITT[0x%08x] - t_tasks: %d t_task_cdbs_left:"
" %d t_task_cdbs_sent: %d -- t_transport_active: %d"
" t_transport_stop: %d t_transport_sent: %d\n",
- CMD_TFO(cmd)->get_task_tag(cmd),
- T_TASK(cmd)->t_task_cdbs,
- atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
- atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
- atomic_read(&T_TASK(cmd)->t_transport_active),
- atomic_read(&T_TASK(cmd)->t_transport_stop),
- atomic_read(&T_TASK(cmd)->t_transport_sent));
+ cmd->se_tfo->get_task_tag(cmd),
+ cmd->t_task_list_num,
+ atomic_read(&cmd->t_task_cdbs_left),
+ atomic_read(&cmd->t_task_cdbs_sent),
+ atomic_read(&cmd->t_transport_active),
+ atomic_read(&cmd->t_transport_stop),
+ atomic_read(&cmd->t_transport_sent));
if (atomic_read(&task->task_active)) {
atomic_set(&task->task_stop, 1);
spin_unlock_irqrestore(
- &T_TASK(cmd)->t_state_lock, flags);
+ &cmd->t_state_lock, flags);
- DEBUG_DO("Waiting for task: %p to shutdown for dev:"
+ pr_debug("Waiting for task: %p to shutdown for dev:"
" %p\n", task, dev);
wait_for_completion(&task->task_stop_comp);
- DEBUG_DO("Completed task: %p shutdown for dev: %p\n",
+ pr_debug("Completed task: %p shutdown for dev: %p\n",
task, dev);
- spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
- atomic_dec(&T_TASK(cmd)->t_task_cdbs_left);
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+ atomic_dec(&cmd->t_task_cdbs_left);
atomic_set(&task->task_active, 0);
atomic_set(&task->task_stop, 0);
@@ -5976,72 +5032,72 @@ static void transport_processing_shutdown(struct se_device *dev)
}
__transport_stop_task_timer(task, &flags);
- if (!(atomic_dec_and_test(&T_TASK(cmd)->t_task_cdbs_ex_left))) {
+ if (!atomic_dec_and_test(&cmd->t_task_cdbs_ex_left)) {
spin_unlock_irqrestore(
- &T_TASK(cmd)->t_state_lock, flags);
+ &cmd->t_state_lock, flags);
- DEBUG_DO("Skipping task: %p, dev: %p for"
+ pr_debug("Skipping task: %p, dev: %p for"
" t_task_cdbs_ex_left: %d\n", task, dev,
- atomic_read(&T_TASK(cmd)->t_task_cdbs_ex_left));
+ atomic_read(&cmd->t_task_cdbs_ex_left));
spin_lock_irqsave(&dev->execute_task_lock, flags);
continue;
}
- if (atomic_read(&T_TASK(cmd)->t_transport_active)) {
- DEBUG_DO("got t_transport_active = 1 for task: %p, dev:"
+ if (atomic_read(&cmd->t_transport_active)) {
+ pr_debug("got t_transport_active = 1 for task: %p, dev:"
" %p\n", task, dev);
- if (atomic_read(&T_TASK(cmd)->t_fe_count)) {
+ if (atomic_read(&cmd->t_fe_count)) {
spin_unlock_irqrestore(
- &T_TASK(cmd)->t_state_lock, flags);
+ &cmd->t_state_lock, flags);
transport_send_check_condition_and_sense(
cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE,
0);
transport_remove_cmd_from_queue(cmd,
- SE_DEV(cmd)->dev_queue_obj);
+ &cmd->se_dev->dev_queue_obj);
transport_lun_remove_cmd(cmd);
transport_cmd_check_stop(cmd, 1, 0);
} else {
spin_unlock_irqrestore(
- &T_TASK(cmd)->t_state_lock, flags);
+ &cmd->t_state_lock, flags);
transport_remove_cmd_from_queue(cmd,
- SE_DEV(cmd)->dev_queue_obj);
+ &cmd->se_dev->dev_queue_obj);
transport_lun_remove_cmd(cmd);
if (transport_cmd_check_stop(cmd, 1, 0))
- transport_generic_remove(cmd, 0, 0);
+ transport_generic_remove(cmd, 0);
}
spin_lock_irqsave(&dev->execute_task_lock, flags);
continue;
}
- DEBUG_DO("Got t_transport_active = 0 for task: %p, dev: %p\n",
+ pr_debug("Got t_transport_active = 0 for task: %p, dev: %p\n",
task, dev);
- if (atomic_read(&T_TASK(cmd)->t_fe_count)) {
+ if (atomic_read(&cmd->t_fe_count)) {
spin_unlock_irqrestore(
- &T_TASK(cmd)->t_state_lock, flags);
+ &cmd->t_state_lock, flags);
transport_send_check_condition_and_sense(cmd,
TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
transport_remove_cmd_from_queue(cmd,
- SE_DEV(cmd)->dev_queue_obj);
+ &cmd->se_dev->dev_queue_obj);
transport_lun_remove_cmd(cmd);
transport_cmd_check_stop(cmd, 1, 0);
} else {
spin_unlock_irqrestore(
- &T_TASK(cmd)->t_state_lock, flags);
+ &cmd->t_state_lock, flags);
transport_remove_cmd_from_queue(cmd,
- SE_DEV(cmd)->dev_queue_obj);
+ &cmd->se_dev->dev_queue_obj);
transport_lun_remove_cmd(cmd);
if (transport_cmd_check_stop(cmd, 1, 0))
- transport_generic_remove(cmd, 0, 0);
+ transport_generic_remove(cmd, 0);
}
spin_lock_irqsave(&dev->execute_task_lock, flags);
@@ -6050,18 +5106,12 @@ static void transport_processing_shutdown(struct se_device *dev)
/*
* Empty the struct se_device's struct se_cmd list.
*/
- spin_lock_irqsave(&dev->dev_queue_obj->cmd_queue_lock, flags);
- while ((qr = __transport_get_qr_from_queue(dev->dev_queue_obj))) {
- spin_unlock_irqrestore(
- &dev->dev_queue_obj->cmd_queue_lock, flags);
- cmd = (struct se_cmd *)qr->cmd;
- state = qr->state;
- kfree(qr);
-
- DEBUG_DO("From Device Queue: cmd: %p t_state: %d\n",
- cmd, state);
-
- if (atomic_read(&T_TASK(cmd)->t_fe_count)) {
+ while ((cmd = transport_get_cmd_from_queue(&dev->dev_queue_obj))) {
+
+ pr_debug("From Device Queue: cmd: %p t_state: %d\n",
+ cmd, cmd->t_state);
+
+ if (atomic_read(&cmd->t_fe_count)) {
transport_send_check_condition_and_sense(cmd,
TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
@@ -6070,11 +5120,9 @@ static void transport_processing_shutdown(struct se_device *dev)
} else {
transport_lun_remove_cmd(cmd);
if (transport_cmd_check_stop(cmd, 1, 0))
- transport_generic_remove(cmd, 0, 0);
+ transport_generic_remove(cmd, 0);
}
- spin_lock_irqsave(&dev->dev_queue_obj->cmd_queue_lock, flags);
}
- spin_unlock_irqrestore(&dev->dev_queue_obj->cmd_queue_lock, flags);
}
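The shutdown path above now drains through transport_get_cmd_from_queue(). A plausible sketch of that helper, assuming the cmd_queue_lock/qobj_list fields visible in this series and an se_queue_node list linkage on struct se_cmd:

	/*
	 * Sketch only: pop the oldest queued command under the queue lock,
	 * or return NULL when the queue is empty. Field names are
	 * assumptions based on this diff, not a verified kernel API.
	 */
	static struct se_cmd *sketch_get_cmd_from_queue(struct se_queue_obj *qobj)
	{
		struct se_cmd *cmd = NULL;
		unsigned long flags;

		spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
		if (!list_empty(&qobj->qobj_list)) {
			cmd = list_first_entry(&qobj->qobj_list,
					       struct se_cmd, se_queue_node);
			list_del_init(&cmd->se_queue_node);
			atomic_dec(&qobj->queue_cnt);
		}
		spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
		return cmd;
	}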
/* transport_processing_thread():
@@ -6083,16 +5131,15 @@ static void transport_processing_shutdown(struct se_device *dev)
*/
static int transport_processing_thread(void *param)
{
- int ret, t_state;
+ int ret;
struct se_cmd *cmd;
struct se_device *dev = (struct se_device *) param;
- struct se_queue_req *qr;
set_user_nice(current, -20);
while (!kthread_should_stop()) {
- ret = wait_event_interruptible(dev->dev_queue_obj->thread_wq,
- atomic_read(&dev->dev_queue_obj->queue_cnt) ||
+ ret = wait_event_interruptible(dev->dev_queue_obj.thread_wq,
+ atomic_read(&dev->dev_queue_obj.queue_cnt) ||
kthread_should_stop());
if (ret < 0)
goto out;
@@ -6108,22 +5155,18 @@ static int transport_processing_thread(void *param)
get_cmd:
__transport_execute_tasks(dev);
- qr = transport_get_qr_from_queue(dev->dev_queue_obj);
- if (!(qr))
+ cmd = transport_get_cmd_from_queue(&dev->dev_queue_obj);
+ if (!cmd)
continue;
- cmd = (struct se_cmd *)qr->cmd;
- t_state = qr->state;
- kfree(qr);
-
- switch (t_state) {
+ switch (cmd->t_state) {
case TRANSPORT_NEW_CMD_MAP:
- if (!(CMD_TFO(cmd)->new_cmd_map)) {
- printk(KERN_ERR "CMD_TFO(cmd)->new_cmd_map is"
+ if (!cmd->se_tfo->new_cmd_map) {
+ pr_err("cmd->se_tfo->new_cmd_map is"
" NULL for TRANSPORT_NEW_CMD_MAP\n");
BUG();
}
- ret = CMD_TFO(cmd)->new_cmd_map(cmd);
+ ret = cmd->se_tfo->new_cmd_map(cmd);
if (ret < 0) {
cmd->transport_error_status = ret;
transport_generic_request_failure(cmd, NULL,
@@ -6134,7 +5177,9 @@ get_cmd:
/* Fall through */
case TRANSPORT_NEW_CMD:
ret = transport_generic_new_cmd(cmd);
- if (ret < 0) {
+ if (ret == -EAGAIN)
+ break;
+ else if (ret < 0) {
cmd->transport_error_status = ret;
transport_generic_request_failure(cmd, NULL,
0, (cmd->data_direction !=
@@ -6149,10 +5194,10 @@ get_cmd:
transport_generic_complete_ok(cmd);
break;
case TRANSPORT_REMOVE:
- transport_generic_remove(cmd, 1, 0);
+ transport_generic_remove(cmd, 0);
break;
case TRANSPORT_FREE_CMD_INTR:
- transport_generic_free_cmd(cmd, 0, 1, 0);
+ transport_generic_free_cmd(cmd, 0, 0);
break;
case TRANSPORT_PROCESS_TMR:
transport_generic_do_tmr(cmd);
@@ -6164,13 +5209,16 @@ get_cmd:
transport_stop_all_task_timers(cmd);
transport_generic_request_timeout(cmd);
break;
+ case TRANSPORT_COMPLETE_QF_WP:
+ transport_generic_write_pending(cmd);
+ break;
default:
- printk(KERN_ERR "Unknown t_state: %d deferred_t_state:"
+ pr_err("Unknown t_state: %d deferred_t_state:"
" %d for ITT: 0x%08x i_state: %d on SE LUN:"
- " %u\n", t_state, cmd->deferred_t_state,
- CMD_TFO(cmd)->get_task_tag(cmd),
- CMD_TFO(cmd)->get_cmd_state(cmd),
- SE_LUN(cmd)->unpacked_lun);
+ " %u\n", cmd->t_state, cmd->deferred_t_state,
+ cmd->se_tfo->get_task_tag(cmd),
+ cmd->se_tfo->get_cmd_state(cmd),
+ cmd->se_lun->unpacked_lun);
BUG();
}
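A minimal sketch of the kthread wait idiom the processing thread uses: kthread_should_stop() sits inside the wait condition, so a concurrent kthread_stop() wakes the thread instead of leaving it parked. Field names follow this series; the function itself is illustrative:

	#include <linux/kthread.h>
	#include <linux/wait.h>

	static int sketch_processing_thread(void *param)
	{
		struct se_device *dev = param;

		while (!kthread_should_stop()) {
			/* Sleep until work is queued or we are asked to stop. */
			int ret = wait_event_interruptible(
					dev->dev_queue_obj.thread_wq,
					atomic_read(&dev->dev_queue_obj.queue_cnt) ||
					kthread_should_stop());
			if (ret < 0)
				break;
			/* ...pop one cmd and dispatch on cmd->t_state, as above... */
		}
		return 0;
	}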
diff --git a/drivers/target/target_core_ua.c b/drivers/target/target_core_ua.c
index df355176a37..31e3c652527 100644
--- a/drivers/target/target_core_ua.c
+++ b/drivers/target/target_core_ua.c
@@ -49,15 +49,15 @@ int core_scsi3_ua_check(
struct se_session *sess = cmd->se_sess;
struct se_node_acl *nacl;
- if (!(sess))
+ if (!sess)
return 0;
nacl = sess->se_node_acl;
- if (!(nacl))
+ if (!nacl)
return 0;
deve = &nacl->device_list[cmd->orig_fe_lun];
- if (!(atomic_read(&deve->ua_count)))
+ if (!atomic_read(&deve->ua_count))
return 0;
/*
* From sam4r14, section 5.14 Unit attention condition:
@@ -80,10 +80,10 @@ int core_scsi3_ua_check(
case REQUEST_SENSE:
return 0;
default:
- return -1;
+ return -EINVAL;
}
- return -1;
+ return -EINVAL;
}
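With real errno values in place of bare -1, callers can propagate a meaningful error instead of a generic failure; a hedged caller-side sketch (the wrapper is hypothetical, and the (cmd, cdb) signature is assumed from this file's usage):

	static int sketch_cdb_entry(struct se_cmd *cmd, unsigned char *cdb)
	{
		int ret;

		/* -EINVAL now means a UA is pending and this CDB may not run. */
		ret = core_scsi3_ua_check(cmd, cdb);
		if (ret < 0)
			return ret;	/* propagate a real errno upward */

		return 0;
	}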
int core_scsi3_ua_allocate(
@@ -97,13 +97,13 @@ int core_scsi3_ua_allocate(
/*
* PASSTHROUGH OPS
*/
- if (!(nacl))
- return -1;
+ if (!nacl)
+ return -EINVAL;
ua = kmem_cache_zalloc(se_ua_cache, GFP_ATOMIC);
- if (!(ua)) {
- printk(KERN_ERR "Unable to allocate struct se_ua\n");
- return -1;
+ if (!ua) {
+ pr_err("Unable to allocate struct se_ua\n");
+ return -ENOMEM;
}
INIT_LIST_HEAD(&ua->ua_dev_list);
INIT_LIST_HEAD(&ua->ua_nacl_list);
@@ -177,9 +177,9 @@ int core_scsi3_ua_allocate(
spin_unlock(&deve->ua_lock);
spin_unlock_irq(&nacl->device_list_lock);
- printk(KERN_INFO "[%s]: Allocated UNIT ATTENTION, mapped LUN: %u, ASC:"
+ pr_debug("[%s]: Allocated UNIT ATTENTION, mapped LUN: %u, ASC:"
" 0x%02x, ASCQ: 0x%02x\n",
- TPG_TFO(nacl->se_tpg)->get_fabric_name(), unpacked_lun,
+ nacl->se_tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
asc, ascq);
atomic_inc(&deve->ua_count);
@@ -208,23 +208,23 @@ void core_scsi3_ua_for_check_condition(
u8 *asc,
u8 *ascq)
{
- struct se_device *dev = SE_DEV(cmd);
+ struct se_device *dev = cmd->se_dev;
struct se_dev_entry *deve;
struct se_session *sess = cmd->se_sess;
struct se_node_acl *nacl;
struct se_ua *ua = NULL, *ua_p;
int head = 1;
- if (!(sess))
+ if (!sess)
return;
nacl = sess->se_node_acl;
- if (!(nacl))
+ if (!nacl)
return;
spin_lock_irq(&nacl->device_list_lock);
deve = &nacl->device_list[cmd->orig_fe_lun];
- if (!(atomic_read(&deve->ua_count))) {
+ if (!atomic_read(&deve->ua_count)) {
spin_unlock_irq(&nacl->device_list_lock);
return;
}
@@ -240,7 +240,7 @@ void core_scsi3_ua_for_check_condition(
* highest priority UNIT_ATTENTION and ASC/ASCQ without
* clearing it.
*/
- if (DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl != 0) {
+ if (dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl != 0) {
*asc = ua->ua_asc;
*ascq = ua->ua_ascq;
break;
@@ -264,13 +264,13 @@ void core_scsi3_ua_for_check_condition(
spin_unlock(&deve->ua_lock);
spin_unlock_irq(&nacl->device_list_lock);
- printk(KERN_INFO "[%s]: %s UNIT ATTENTION condition with"
+ pr_debug("[%s]: %s UNIT ATTENTION condition with"
" INTLCK_CTRL: %d, mapped LUN: %u, got CDB: 0x%02x"
" reported ASC: 0x%02x, ASCQ: 0x%02x\n",
- TPG_TFO(nacl->se_tpg)->get_fabric_name(),
- (DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl != 0) ? "Reporting" :
- "Releasing", DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl,
- cmd->orig_fe_lun, T_TASK(cmd)->t_task_cdb[0], *asc, *ascq);
+ nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
+ (dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl != 0) ? "Reporting" :
+ "Releasing", dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl,
+ cmd->orig_fe_lun, cmd->t_task_cdb[0], *asc, *ascq);
}
int core_scsi3_ua_clear_for_request_sense(
@@ -284,18 +284,18 @@ int core_scsi3_ua_clear_for_request_sense(
struct se_ua *ua = NULL, *ua_p;
int head = 1;
- if (!(sess))
- return -1;
+ if (!sess)
+ return -EINVAL;
nacl = sess->se_node_acl;
- if (!(nacl))
- return -1;
+ if (!nacl)
+ return -EINVAL;
spin_lock_irq(&nacl->device_list_lock);
deve = &nacl->device_list[cmd->orig_fe_lun];
- if (!(atomic_read(&deve->ua_count))) {
+ if (!atomic_read(&deve->ua_count)) {
spin_unlock_irq(&nacl->device_list_lock);
- return -1;
+ return -EPERM;
}
/*
* The highest priority Unit Attentions are placed at the head of the
@@ -323,10 +323,10 @@ int core_scsi3_ua_clear_for_request_sense(
spin_unlock(&deve->ua_lock);
spin_unlock_irq(&nacl->device_list_lock);
- printk(KERN_INFO "[%s]: Released UNIT ATTENTION condition, mapped"
+ pr_debug("[%s]: Released UNIT ATTENTION condition, mapped"
" LUN: %u, got REQUEST_SENSE reported ASC: 0x%02x,"
- " ASCQ: 0x%02x\n", TPG_TFO(nacl->se_tpg)->get_fabric_name(),
+ " ASCQ: 0x%02x\n", nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
cmd->orig_fe_lun, *asc, *ascq);
- return (head) ? -1 : 0;
+ return (head) ? -EPERM : 0;
}
diff --git a/drivers/target/tcm_fc/Makefile b/drivers/target/tcm_fc/Makefile
index 7a5c2b64cf6..20b14bb087c 100644
--- a/drivers/target/tcm_fc/Makefile
+++ b/drivers/target/tcm_fc/Makefile
@@ -1,15 +1,6 @@
-EXTRA_CFLAGS += -I$(srctree)/drivers/target/ \
- -I$(srctree)/drivers/scsi/ \
- -I$(srctree)/include/scsi/ \
- -I$(srctree)/drivers/target/tcm_fc/
-
-tcm_fc-y += tfc_cmd.o \
- tfc_conf.o \
- tfc_io.o \
- tfc_sess.o
+tcm_fc-y += tfc_cmd.o \
+ tfc_conf.o \
+ tfc_io.o \
+ tfc_sess.o
obj-$(CONFIG_TCM_FC) += tcm_fc.o
-
-ifdef CONFIGFS_TCM_FC_DEBUG
-EXTRA_CFLAGS += -DTCM_FC_DEBUG
-endif
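The compile-time TCM_FC_DEBUG plumbing removed here (and the FT_DEBUG mask macros removed from tcm_fc.h below) is superseded by pr_debug(), which dynamic debug can enable per call site at runtime; a minimal sketch of the replacement pattern:

	/* Prefix every message; must be defined before the printk include. */
	#define pr_fmt(fmt) "tcm_fc: " fmt

	#include <linux/printk.h>
	#include <linux/types.h>

	static void sketch_debug(u32 port_id)
	{
		/*
		 * Off by default. With CONFIG_DYNAMIC_DEBUG, enable at runtime:
		 *   echo 'module tcm_fc +p' > /sys/kernel/debug/dynamic_debug/control
		 */
		pr_debug("port_id %x\n", port_id);
	}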
diff --git a/drivers/target/tcm_fc/tcm_fc.h b/drivers/target/tcm_fc/tcm_fc.h
index defff32b788..bd4fe21a23b 100644
--- a/drivers/target/tcm_fc/tcm_fc.h
+++ b/drivers/target/tcm_fc/tcm_fc.h
@@ -23,30 +23,6 @@
#define FT_TPG_NAMELEN 32 /* max length of TPG name */
#define FT_LUN_NAMELEN 32 /* max length of LUN name */
-/*
- * Debug options.
- */
-#define FT_DEBUG_CONF 0x01 /* configuration messages */
-#define FT_DEBUG_SESS 0x02 /* session messages */
-#define FT_DEBUG_TM 0x04 /* TM operations */
-#define FT_DEBUG_IO 0x08 /* I/O commands */
-#define FT_DEBUG_DATA 0x10 /* Data transfer */
-
-extern unsigned int ft_debug_logging; /* debug options */
-
-#define FT_DEBUG(mask, fmt, args...) \
- do { \
- if (ft_debug_logging & (mask)) \
- printk(KERN_INFO "tcm_fc: %s: " fmt, \
- __func__, ##args); \
- } while (0)
-
-#define FT_CONF_DBG(fmt, args...) FT_DEBUG(FT_DEBUG_CONF, fmt, ##args)
-#define FT_SESS_DBG(fmt, args...) FT_DEBUG(FT_DEBUG_SESS, fmt, ##args)
-#define FT_TM_DBG(fmt, args...) FT_DEBUG(FT_DEBUG_TM, fmt, ##args)
-#define FT_IO_DBG(fmt, args...) FT_DEBUG(FT_DEBUG_IO, fmt, ##args)
-#define FT_DATA_DBG(fmt, args...) FT_DEBUG(FT_DEBUG_DATA, fmt, ##args)
-
struct ft_transport_id {
__u8 format;
__u8 __resvd1[7];
@@ -144,7 +120,7 @@ enum ft_cmd_state {
*/
struct ft_cmd {
enum ft_cmd_state state;
- u16 lun; /* LUN from request */
+ u32 lun; /* LUN from request */
struct ft_sess *sess; /* session held for cmd */
struct fc_seq *seq; /* sequence in exchange mgr */
struct se_cmd se_cmd; /* Local TCM I/O descriptor */
@@ -195,7 +171,6 @@ int ft_write_pending(struct se_cmd *);
int ft_write_pending_status(struct se_cmd *);
u32 ft_get_task_tag(struct se_cmd *);
int ft_get_cmd_state(struct se_cmd *);
-void ft_new_cmd_failure(struct se_cmd *);
int ft_queue_tm_resp(struct se_cmd *);
int ft_is_state_remove(struct se_cmd *);
@@ -212,4 +187,9 @@ void ft_dump_cmd(struct ft_cmd *, const char *caller);
ssize_t ft_format_wwn(char *, size_t, u64);
+/*
+ * Underlying HW specific helper function
+ */
+void ft_invl_hw_context(struct ft_cmd *);
+
#endif /* __TCM_FC_H__ */
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index c056a1132ae..5654dc22f7a 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -45,7 +45,6 @@
#include <target/target_core_device.h>
#include <target/target_core_tpg.h>
#include <target/target_core_configfs.h>
-#include <target/target_core_base.h>
#include <target/target_core_tmr.h>
#include <target/configfs_macros.h>
@@ -59,33 +58,30 @@ void ft_dump_cmd(struct ft_cmd *cmd, const char *caller)
struct fc_exch *ep;
struct fc_seq *sp;
struct se_cmd *se_cmd;
- struct se_mem *mem;
- struct se_transport_task *task;
-
- if (!(ft_debug_logging & FT_DEBUG_IO))
- return;
+ struct scatterlist *sg;
+ int count;
se_cmd = &cmd->se_cmd;
- printk(KERN_INFO "%s: cmd %p state %d sess %p seq %p se_cmd %p\n",
+ pr_debug("%s: cmd %p state %d sess %p seq %p se_cmd %p\n",
caller, cmd, cmd->state, cmd->sess, cmd->seq, se_cmd);
- printk(KERN_INFO "%s: cmd %p cdb %p\n",
+ pr_debug("%s: cmd %p cdb %p\n",
caller, cmd, cmd->cdb);
- printk(KERN_INFO "%s: cmd %p lun %d\n", caller, cmd, cmd->lun);
-
- task = T_TASK(se_cmd);
- printk(KERN_INFO "%s: cmd %p task %p se_num %u buf %p len %u se_cmd_flags <0x%x>\n",
- caller, cmd, task, task->t_tasks_se_num,
- task->t_task_buf, se_cmd->data_length, se_cmd->se_cmd_flags);
- if (task->t_mem_list)
- list_for_each_entry(mem, task->t_mem_list, se_list)
- printk(KERN_INFO "%s: cmd %p mem %p page %p "
- "len 0x%x off 0x%x\n",
- caller, cmd, mem,
- mem->se_page, mem->se_len, mem->se_off);
+ pr_debug("%s: cmd %p lun %d\n", caller, cmd, cmd->lun);
+
+ pr_debug("%s: cmd %p data_nents %u len %u se_cmd_flags <0x%x>\n",
+ caller, cmd, se_cmd->t_data_nents,
+ se_cmd->data_length, se_cmd->se_cmd_flags);
+
+ for_each_sg(se_cmd->t_data_sg, sg, se_cmd->t_data_nents, count)
+ pr_debug("%s: cmd %p sg %p page %p "
+ "len 0x%x off 0x%x\n",
+ caller, cmd, sg,
+ sg_page(sg), sg->length, sg->offset);
+
sp = cmd->seq;
if (sp) {
ep = fc_seq_exch(sp);
- printk(KERN_INFO "%s: cmd %p sid %x did %x "
+ pr_debug("%s: cmd %p sid %x did %x "
"ox_id %x rx_id %x seq_id %x e_stat %x\n",
caller, cmd, ep->sid, ep->did, ep->oxid, ep->rxid,
sp->id, ep->esb_stat);
@@ -94,40 +90,19 @@ void ft_dump_cmd(struct ft_cmd *cmd, const char *caller)
16, 4, cmd->cdb, MAX_COMMAND_SIZE, 0);
}
-/*
- * Get LUN from CDB.
- */
-static int ft_get_lun_for_cmd(struct ft_cmd *cmd, u8 *lunp)
-{
- u64 lun;
-
- lun = lunp[1];
- switch (lunp[0] >> 6) {
- case 0:
- break;
- case 1:
- lun |= (lunp[0] & 0x3f) << 8;
- break;
- default:
- return -1;
- }
- if (lun >= TRANSPORT_MAX_LUNS_PER_TPG)
- return -1;
- cmd->lun = lun;
- return transport_get_lun_for_cmd(&cmd->se_cmd, NULL, lun);
-}
-
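The hand-rolled peripheral/flat decode removed above is replaced by the SCSI midlayer's scsilun_to_int() later in this file; roughly, that helper folds each 2-byte level of the 8-byte SCSI LUN into one integer. A sketch of the idea, not the midlayer's exact code:

	#include <linux/types.h>
	#include <scsi/scsi.h>	/* struct scsi_lun */

	static u64 sketch_scsilun_to_int(const struct scsi_lun *slun)
	{
		u64 lun = 0;
		int i;

		/*
		 * Fold each 2-byte addressing level; level 0 lands in the
		 * low 16 bits, which covers the single-level peripheral and
		 * flat formats the removed helper handled by hand.
		 */
		for (i = 0; i < 8; i += 2)
			lun |= (u64)((slun->scsi_lun[i] << 8) |
				     slun->scsi_lun[i + 1]) << (i * 8);
		return lun;
	}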
static void ft_queue_cmd(struct ft_sess *sess, struct ft_cmd *cmd)
{
- struct se_queue_obj *qobj;
+ struct ft_tpg *tpg = sess->tport->tpg;
+ struct se_queue_obj *qobj = &tpg->qobj;
unsigned long flags;
qobj = &sess->tport->tpg->qobj;
spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
list_add_tail(&cmd->se_req.qr_list, &qobj->qobj_list);
- spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
atomic_inc(&qobj->queue_cnt);
- wake_up_interruptible(&qobj->thread_wq);
+ spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
+
+ wake_up_process(tpg->thread);
}
static struct ft_cmd *ft_dequeue_cmd(struct se_queue_obj *qobj)
@@ -172,7 +147,7 @@ void ft_release_cmd(struct se_cmd *se_cmd)
void ft_check_stop_free(struct se_cmd *se_cmd)
{
- transport_generic_free_cmd(se_cmd, 0, 1, 0);
+ transport_generic_free_cmd(se_cmd, 0, 0);
}
/*
@@ -279,18 +254,18 @@ int ft_write_pending(struct se_cmd *se_cmd)
(fh->fh_r_ctl == FC_RCTL_DD_DATA_DESC)) {
if (se_cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
/*
- * Map se_mem list to scatterlist, so that
- * DDP can be setup. DDP setup function require
- * scatterlist. se_mem_list is internal to
- * TCM/LIO target
+ * cmd may have been broken up into multiple
+ * tasks. Link their sgs together so we can
+ * operate on them all at once.
*/
transport_do_task_sg_chain(se_cmd);
- cmd->sg = T_TASK(se_cmd)->t_tasks_sg_chained;
+ cmd->sg = se_cmd->t_tasks_sg_chained;
cmd->sg_cnt =
- T_TASK(se_cmd)->t_tasks_sg_chained_no;
+ se_cmd->t_tasks_sg_chained_no;
}
- if (cmd->sg && lport->tt.ddp_setup(lport, ep->xid,
- cmd->sg, cmd->sg_cnt))
+ if (cmd->sg && lport->tt.ddp_target(lport, ep->xid,
+ cmd->sg,
+ cmd->sg_cnt))
cmd->was_ddp_setup = 1;
}
}
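As the new comment notes, transport_do_task_sg_chain() links the per-task tables into one chained scatterlist; sg_next() (and therefore for_each_sg()) hops chain entries transparently, so DDP setup sees the whole command at once. A small sketch:

	#include <linux/scatterlist.h>

	static unsigned int sketch_chained_len(struct scatterlist *sgl, int nents)
	{
		struct scatterlist *sg;
		unsigned int total = 0;
		int i;

		/* for_each_sg() advances via sg_next(), following chain links. */
		for_each_sg(sgl, sg, nents, i)
			total += sg->length;
		return total;
	}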
@@ -317,12 +292,6 @@ int ft_is_state_remove(struct se_cmd *se_cmd)
return 0; /* XXX TBD */
}
-void ft_new_cmd_failure(struct se_cmd *se_cmd)
-{
- /* XXX TBD */
- printk(KERN_INFO "%s: se_cmd %p\n", __func__, se_cmd);
-}
-
/*
* FC sequence response handler for follow-on sequences (data) and aborts.
*/
@@ -335,7 +304,7 @@ static void ft_recv_seq(struct fc_seq *sp, struct fc_frame *fp, void *arg)
/* XXX need to find cmd if queued */
cmd->se_cmd.t_state = TRANSPORT_REMOVE;
cmd->seq = NULL;
- transport_generic_free_cmd(&cmd->se_cmd, 0, 1, 0);
+ transport_generic_free_cmd(&cmd->se_cmd, 0, 0);
return;
}
@@ -349,10 +318,11 @@ static void ft_recv_seq(struct fc_seq *sp, struct fc_frame *fp, void *arg)
case FC_RCTL_DD_SOL_CTL: /* transfer ready */
case FC_RCTL_DD_DATA_DESC: /* transfer ready */
default:
- printk(KERN_INFO "%s: unhandled frame r_ctl %x\n",
+ pr_debug("%s: unhandled frame r_ctl %x\n",
__func__, fh->fh_r_ctl);
+ ft_invl_hw_context(cmd);
fc_frame_free(fp);
- transport_generic_free_cmd(&cmd->se_cmd, 0, 1, 0);
+ transport_generic_free_cmd(&cmd->se_cmd, 0, 0);
break;
}
}
@@ -374,7 +344,7 @@ static void ft_send_resp_status(struct fc_lport *lport,
struct fcp_resp_rsp_info *info;
fh = fc_frame_header_get(rx_fp);
- FT_IO_DBG("FCP error response: did %x oxid %x status %x code %x\n",
+ pr_debug("FCP error response: did %x oxid %x status %x code %x\n",
ntoh24(fh->fh_s_id), ntohs(fh->fh_ox_id), status, code);
len = sizeof(*fcp);
if (status == SAM_STAT_GOOD)
@@ -402,12 +372,23 @@ static void ft_send_resp_status(struct fc_lport *lport,
/*
* Send error or task management response.
- * Always frees the cmd and associated state.
*/
-static void ft_send_resp_code(struct ft_cmd *cmd, enum fcp_resp_rsp_codes code)
+static void ft_send_resp_code(struct ft_cmd *cmd,
+ enum fcp_resp_rsp_codes code)
{
ft_send_resp_status(cmd->sess->tport->lport,
cmd->req_frame, SAM_STAT_GOOD, code);
+}
+
+
+/*
+ * Send error or task management response.
+ * Always frees the cmd and associated state.
+ */
+static void ft_send_resp_code_and_free(struct ft_cmd *cmd,
+ enum fcp_resp_rsp_codes code)
+{
+ ft_send_resp_code(cmd, code);
ft_free_cmd(cmd);
}
@@ -418,6 +399,7 @@ static void ft_send_tm(struct ft_cmd *cmd)
{
struct se_tmr_req *tmr;
struct fcp_cmnd *fcp;
+ struct ft_sess *sess;
u8 tm_func;
fcp = fc_frame_payload_get(cmd->req_frame, sizeof(*fcp));
@@ -425,13 +407,6 @@ static void ft_send_tm(struct ft_cmd *cmd)
switch (fcp->fc_tm_flags) {
case FCP_TMF_LUN_RESET:
tm_func = TMR_LUN_RESET;
- if (ft_get_lun_for_cmd(cmd, fcp->fc_lun) < 0) {
- ft_dump_cmd(cmd, __func__);
- transport_send_check_condition_and_sense(&cmd->se_cmd,
- cmd->se_cmd.scsi_sense_reason, 0);
- ft_sess_put(cmd->sess);
- return;
- }
break;
case FCP_TMF_TGT_RESET:
tm_func = TMR_TARGET_WARM_RESET;
@@ -450,19 +425,49 @@ static void ft_send_tm(struct ft_cmd *cmd)
* FCP4r01 indicates having a combination of
* tm_flags set is invalid.
*/
- FT_TM_DBG("invalid FCP tm_flags %x\n", fcp->fc_tm_flags);
- ft_send_resp_code(cmd, FCP_CMND_FIELDS_INVALID);
+ pr_debug("invalid FCP tm_flags %x\n", fcp->fc_tm_flags);
+ ft_send_resp_code_and_free(cmd, FCP_CMND_FIELDS_INVALID);
return;
}
- FT_TM_DBG("alloc tm cmd fn %d\n", tm_func);
+ pr_debug("alloc tm cmd fn %d\n", tm_func);
tmr = core_tmr_alloc_req(&cmd->se_cmd, cmd, tm_func);
if (!tmr) {
- FT_TM_DBG("alloc failed\n");
- ft_send_resp_code(cmd, FCP_TMF_FAILED);
+ pr_debug("alloc failed\n");
+ ft_send_resp_code_and_free(cmd, FCP_TMF_FAILED);
return;
}
cmd->se_cmd.se_tmr_req = tmr;
+
+ switch (fcp->fc_tm_flags) {
+ case FCP_TMF_LUN_RESET:
+ cmd->lun = scsilun_to_int((struct scsi_lun *)fcp->fc_lun);
+ if (transport_lookup_tmr_lun(&cmd->se_cmd, cmd->lun) < 0) {
+ /*
+ * Make sure to clean up the newly allocated TMR request,
+ * since we cannot handle a TMR request whose LUN lookup
+ * failed.
+ */
+ pr_debug("Failed to get LUN for TMR func %d, "
+ "se_cmd %p, unpacked_lun %d\n",
+ tm_func, &cmd->se_cmd, cmd->lun);
+ ft_dump_cmd(cmd, __func__);
+ sess = cmd->sess;
+ transport_send_check_condition_and_sense(&cmd->se_cmd,
+ cmd->se_cmd.scsi_sense_reason, 0);
+ transport_generic_free_cmd(&cmd->se_cmd, 0, 0);
+ ft_sess_put(sess);
+ return;
+ }
+ break;
+ case FCP_TMF_TGT_RESET:
+ case FCP_TMF_CLR_TASK_SET:
+ case FCP_TMF_ABT_TASK_SET:
+ case FCP_TMF_CLR_ACA:
+ break;
+ default:
+ return;
+ }
transport_generic_handle_tmr(&cmd->se_cmd);
}
@@ -494,7 +499,7 @@ int ft_queue_tm_resp(struct se_cmd *se_cmd)
code = FCP_TMF_FAILED;
break;
}
- FT_TM_DBG("tmr fn %d resp %d fcp code %d\n",
+ pr_debug("tmr fn %d resp %d fcp code %d\n",
tmr->function, tmr->response, code);
ft_send_resp_code(cmd, code);
return 0;
@@ -522,7 +527,7 @@ static void ft_recv_cmd(struct ft_sess *sess, struct fc_frame *fp)
return;
busy:
- FT_IO_DBG("cmd or seq allocation failure - sending BUSY\n");
+ pr_debug("cmd or seq allocation failure - sending BUSY\n");
ft_send_resp_status(lport, fp, SAM_STAT_BUSY, 0);
fc_frame_free(fp);
ft_sess_put(sess); /* undo get from lookup */
@@ -547,7 +552,7 @@ void ft_recv_req(struct ft_sess *sess, struct fc_frame *fp)
case FC_RCTL_DD_DATA_DESC: /* transfer ready */
case FC_RCTL_ELS4_REQ: /* SRR, perhaps */
default:
- printk(KERN_INFO "%s: unhandled frame r_ctl %x\n",
+ pr_debug("%s: unhandled frame r_ctl %x\n",
__func__, fh->fh_r_ctl);
fc_frame_free(fp);
ft_sess_put(sess); /* undo get from lookup */
@@ -635,7 +640,8 @@ static void ft_send_cmd(struct ft_cmd *cmd)
fc_seq_exch(cmd->seq)->lp->tt.seq_set_resp(cmd->seq, ft_recv_seq, cmd);
- ret = ft_get_lun_for_cmd(cmd, fcp->fc_lun);
+ cmd->lun = scsilun_to_int((struct scsi_lun *)fcp->fc_lun);
+ ret = transport_lookup_cmd_lun(&cmd->se_cmd, cmd->lun);
if (ret < 0) {
ft_dump_cmd(cmd, __func__);
transport_send_check_condition_and_sense(&cmd->se_cmd,
@@ -645,30 +651,29 @@ static void ft_send_cmd(struct ft_cmd *cmd)
ret = transport_generic_allocate_tasks(se_cmd, cmd->cdb);
- FT_IO_DBG("r_ctl %x alloc task ret %d\n", fh->fh_r_ctl, ret);
+ pr_debug("r_ctl %x alloc task ret %d\n", fh->fh_r_ctl, ret);
ft_dump_cmd(cmd, __func__);
- if (ret == -1) {
+ if (ret == -ENOMEM) {
transport_send_check_condition_and_sense(se_cmd,
TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
- transport_generic_free_cmd(se_cmd, 0, 1, 0);
+ transport_generic_free_cmd(se_cmd, 0, 0);
return;
}
- if (ret == -2) {
+ if (ret == -EINVAL) {
if (se_cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT)
ft_queue_status(se_cmd);
else
transport_send_check_condition_and_sense(se_cmd,
se_cmd->scsi_sense_reason, 0);
- transport_generic_free_cmd(se_cmd, 0, 1, 0);
+ transport_generic_free_cmd(se_cmd, 0, 0);
return;
}
transport_generic_handle_cdb(se_cmd);
return;
err:
- ft_send_resp_code(cmd, FCP_CMND_FIELDS_INVALID);
- return;
+ ft_send_resp_code_and_free(cmd, FCP_CMND_FIELDS_INVALID);
}
/*
@@ -676,7 +681,7 @@ err:
*/
static void ft_exec_req(struct ft_cmd *cmd)
{
- FT_IO_DBG("cmd state %x\n", cmd->state);
+ pr_debug("cmd state %x\n", cmd->state);
switch (cmd->state) {
case FC_CMD_ST_NEW:
ft_send_cmd(cmd);
@@ -695,15 +700,12 @@ int ft_thread(void *arg)
struct ft_tpg *tpg = arg;
struct se_queue_obj *qobj = &tpg->qobj;
struct ft_cmd *cmd;
- int ret;
-
- set_user_nice(current, -20);
while (!kthread_should_stop()) {
- ret = wait_event_interruptible(qobj->thread_wq,
- atomic_read(&qobj->queue_cnt) || kthread_should_stop());
- if (ret < 0 || kthread_should_stop())
+ schedule_timeout_interruptible(MAX_SCHEDULE_TIMEOUT);
+ if (kthread_should_stop())
goto out;
+
cmd = ft_dequeue_cmd(qobj);
if (cmd)
ft_exec_req(cmd);
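Note the handoff these two hunks create: ft_queue_cmd() publishes the command under the queue lock and only then calls wake_up_process(), while ft_thread() parks in schedule_timeout_interruptible() until woken or stopped. A sketch of the producer side with the publish-then-wake order called out (field names follow this series):

	#include <linux/sched.h>
	#include <linux/spinlock.h>

	static void sketch_queue_and_wake(struct se_queue_obj *qobj,
					  struct ft_cmd *cmd,
					  struct task_struct *thread)
	{
		unsigned long flags;

		/* Publish the work first, under the lock... */
		spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
		list_add_tail(&cmd->se_req.qr_list, &qobj->qobj_list);
		atomic_inc(&qobj->queue_cnt);
		spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);

		/* ...then wake, so the kthread finds it when it re-checks. */
		wake_up_process(thread);
	}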
diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c
index 84e868c255d..8781d1e423d 100644
--- a/drivers/target/tcm_fc/tfc_conf.c
+++ b/drivers/target/tcm_fc/tfc_conf.c
@@ -48,7 +48,6 @@
#include <target/target_core_device.h>
#include <target/target_core_tpg.h>
#include <target/target_core_configfs.h>
-#include <target/target_core_base.h>
#include <target/configfs_macros.h>
#include "tcm_fc.h"
@@ -106,7 +105,7 @@ static ssize_t ft_parse_wwn(const char *name, u64 *wwn, int strict)
}
err = 4;
fail:
- FT_CONF_DBG("err %u len %zu pos %u byte %u\n",
+ pr_debug("err %u len %zu pos %u byte %u\n",
err, cp - name, pos, byte);
return -1;
}
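For context, ft_parse_wwn() consumes the colon-separated WWN names used for lport and ACL configfs entries; a usage sketch with a made-up WWN value:

	static int sketch_parse_wwn_name(void)
	{
		u64 wwpn;

		if (ft_parse_wwn("20:00:00:11:22:33:44:55", &wwpn, 1) < 0)
			return -EINVAL;
		/* wwpn now holds 0x2000001122334455 */
		return 0;
	}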
@@ -216,14 +215,14 @@ static struct se_node_acl *ft_add_acl(
u64 wwpn;
u32 q_depth;
- FT_CONF_DBG("add acl %s\n", name);
+ pr_debug("add acl %s\n", name);
tpg = container_of(se_tpg, struct ft_tpg, se_tpg);
if (ft_parse_wwn(name, &wwpn, 1) < 0)
return ERR_PTR(-EINVAL);
acl = kzalloc(sizeof(struct ft_node_acl), GFP_KERNEL);
- if (!(acl))
+ if (!acl)
return ERR_PTR(-ENOMEM);
acl->node_auth.port_name = wwpn;
@@ -239,11 +238,11 @@ static void ft_del_acl(struct se_node_acl *se_acl)
struct ft_node_acl *acl = container_of(se_acl,
struct ft_node_acl, se_node_acl);
- FT_CONF_DBG("del acl %s\n",
+ pr_debug("del acl %s\n",
config_item_name(&se_acl->acl_group.cg_item));
tpg = container_of(se_tpg, struct ft_tpg, se_tpg);
- FT_CONF_DBG("del acl %p se_acl %p tpg %p se_tpg %p\n",
+ pr_debug("del acl %p se_acl %p tpg %p se_tpg %p\n",
acl, se_acl, tpg, &tpg->se_tpg);
core_tpg_del_initiator_node_acl(&tpg->se_tpg, se_acl, 1);
@@ -260,11 +259,11 @@ struct ft_node_acl *ft_acl_get(struct ft_tpg *tpg, struct fc_rport_priv *rdata)
spin_lock_bh(&se_tpg->acl_node_lock);
list_for_each_entry(se_acl, &se_tpg->acl_node_list, acl_list) {
acl = container_of(se_acl, struct ft_node_acl, se_node_acl);
- FT_CONF_DBG("acl %p port_name %llx\n",
+ pr_debug("acl %p port_name %llx\n",
acl, (unsigned long long)acl->node_auth.port_name);
if (acl->node_auth.port_name == rdata->ids.port_name ||
acl->node_auth.node_name == rdata->ids.node_name) {
- FT_CONF_DBG("acl %p port_name %llx matched\n", acl,
+ pr_debug("acl %p port_name %llx matched\n", acl,
(unsigned long long)rdata->ids.port_name);
found = acl;
/* XXX need to hold onto ACL */
@@ -280,11 +279,11 @@ struct se_node_acl *ft_tpg_alloc_fabric_acl(struct se_portal_group *se_tpg)
struct ft_node_acl *acl;
acl = kzalloc(sizeof(*acl), GFP_KERNEL);
- if (!(acl)) {
- printk(KERN_ERR "Unable to allocate struct ft_node_acl\n");
+ if (!acl) {
+ pr_err("Unable to allocate struct ft_node_acl\n");
return NULL;
}
- FT_CONF_DBG("acl %p\n", acl);
+ pr_debug("acl %p\n", acl);
return &acl->se_node_acl;
}
@@ -294,7 +293,7 @@ static void ft_tpg_release_fabric_acl(struct se_portal_group *se_tpg,
struct ft_node_acl *acl = container_of(se_acl,
struct ft_node_acl, se_node_acl);
- FT_CONF_DBG(KERN_INFO "acl %p\n", acl);
+ pr_debug("acl %p\n", acl);
kfree(acl);
}
@@ -311,7 +310,7 @@ static struct se_portal_group *ft_add_tpg(
unsigned long index;
int ret;
- FT_CONF_DBG("tcm_fc: add tpg %s\n", name);
+ pr_debug("tcm_fc: add tpg %s\n", name);
/*
* Name must be "tpgt_" followed by the index.
@@ -331,7 +330,7 @@ static struct se_portal_group *ft_add_tpg(
transport_init_queue_obj(&tpg->qobj);
ret = core_tpg_register(&ft_configfs->tf_ops, wwn, &tpg->se_tpg,
- (void *)tpg, TRANSPORT_TPG_TYPE_NORMAL);
+ tpg, TRANSPORT_TPG_TYPE_NORMAL);
if (ret < 0) {
kfree(tpg);
return NULL;
@@ -354,7 +353,7 @@ static void ft_del_tpg(struct se_portal_group *se_tpg)
{
struct ft_tpg *tpg = container_of(se_tpg, struct ft_tpg, se_tpg);
- FT_CONF_DBG("del tpg %s\n",
+ pr_debug("del tpg %s\n",
config_item_name(&tpg->se_tpg.tpg_group.cg_item));
kthread_stop(tpg->thread);
@@ -412,7 +411,7 @@ static struct se_wwn *ft_add_lport(
struct ft_lport_acl *old_lacl;
u64 wwpn;
- FT_CONF_DBG("add lport %s\n", name);
+ pr_debug("add lport %s\n", name);
if (ft_parse_wwn(name, &wwpn, 1) < 0)
return NULL;
lacl = kzalloc(sizeof(*lacl), GFP_KERNEL);
@@ -441,7 +440,7 @@ static void ft_del_lport(struct se_wwn *wwn)
struct ft_lport_acl *lacl = container_of(wwn,
struct ft_lport_acl, fc_lport_wwn);
- FT_CONF_DBG("del lport %s\n",
+ pr_debug("del lport %s\n",
config_item_name(&wwn->wwn_group.cg_item));
mutex_lock(&ft_lport_lock);
list_del(&lacl->list);
@@ -536,8 +535,7 @@ static struct target_core_fabric_ops ft_fabric_ops = {
.tpg_release_fabric_acl = ft_tpg_release_fabric_acl,
.tpg_get_inst_index = ft_tpg_get_inst_index,
.check_stop_free = ft_check_stop_free,
- .release_cmd_to_pool = ft_release_cmd,
- .release_cmd_direct = ft_release_cmd,
+ .release_cmd = ft_release_cmd,
.shutdown_session = ft_sess_shutdown,
.close_session = ft_sess_close,
.stop_session = ft_sess_stop,
@@ -550,7 +548,6 @@ static struct target_core_fabric_ops ft_fabric_ops = {
.set_default_node_attributes = ft_set_default_node_attr,
.get_task_tag = ft_get_task_tag,
.get_cmd_state = ft_get_cmd_state,
- .new_cmd_failure = ft_new_cmd_failure,
.queue_data_in = ft_queue_data_in,
.queue_status = ft_queue_status,
.queue_tm_rsp = ft_queue_tm_resp,
@@ -582,10 +579,10 @@ int ft_register_configfs(void)
* Register the top level struct config_item_type with TCM core
*/
fabric = target_fabric_configfs_init(THIS_MODULE, "fc");
- if (!fabric) {
- printk(KERN_INFO "%s: target_fabric_configfs_init() failed!\n",
+ if (IS_ERR(fabric)) {
+ pr_err("%s: target_fabric_configfs_init() failed!\n",
__func__);
- return -1;
+ return PTR_ERR(fabric);
}
fabric->tf_ops = ft_fabric_ops;
@@ -610,11 +607,8 @@ int ft_register_configfs(void)
*/
ret = target_fabric_configfs_register(fabric);
if (ret < 0) {
- FT_CONF_DBG("target_fabric_configfs_register() for"
+ pr_debug("target_fabric_configfs_register() for"
" FC Target failed!\n");
- printk(KERN_INFO
- "%s: target_fabric_configfs_register() failed!\n",
- __func__);
target_fabric_configfs_free(fabric);
return -1;
}
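The check above moves from a NULL test to the ERR_PTR convention of <linux/err.h>, since target_fabric_configfs_init() encodes its errno in the returned pointer; a generic sketch of both sides (struct foo is hypothetical):

	#include <linux/err.h>
	#include <linux/slab.h>

	struct foo { int x; };

	static struct foo *sketch_alloc(void)
	{
		struct foo *p = kzalloc(sizeof(*p), GFP_KERNEL);

		if (!p)
			return ERR_PTR(-ENOMEM);  /* errno rides in the pointer */
		return p;
	}

	static int sketch_use(void)
	{
		struct foo *p = sketch_alloc();

		if (IS_ERR(p))
			return PTR_ERR(p);	/* recover -ENOMEM, etc. */
		kfree(p);
		return 0;
	}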
diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c
index 4c3c0efbe13..c37f4cd9645 100644
--- a/drivers/target/tcm_fc/tfc_io.c
+++ b/drivers/target/tcm_fc/tfc_io.c
@@ -39,6 +39,7 @@
#include <linux/configfs.h>
#include <linux/ctype.h>
#include <linux/hash.h>
+#include <linux/ratelimit.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
@@ -53,7 +54,6 @@
#include <target/target_core_device.h>
#include <target/target_core_tpg.h>
#include <target/target_core_configfs.h>
-#include <target/target_core_base.h>
#include <target/configfs_macros.h>
#include "tcm_fc.h"
@@ -65,21 +65,20 @@
int ft_queue_data_in(struct se_cmd *se_cmd)
{
struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
- struct se_transport_task *task;
struct fc_frame *fp = NULL;
struct fc_exch *ep;
struct fc_lport *lport;
- struct se_mem *mem;
+ struct scatterlist *sg = NULL;
size_t remaining;
u32 f_ctl = FC_FC_EX_CTX | FC_FC_REL_OFF;
- u32 mem_off;
+ u32 mem_off = 0;
u32 fh_off = 0;
u32 frame_off = 0;
size_t frame_len = 0;
- size_t mem_len;
+ size_t mem_len = 0;
size_t tlen;
size_t off_in_page;
- struct page *page;
+ struct page *page = NULL;
int use_sg;
int error;
void *page_addr;
@@ -90,24 +89,17 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
lport = ep->lp;
cmd->seq = lport->tt.seq_start_next(cmd->seq);
- task = T_TASK(se_cmd);
- BUG_ON(!task);
remaining = se_cmd->data_length;
/*
- * Setup to use first mem list entry if any.
+ * Setup to use first mem list entry, unless no data.
*/
- if (task->t_tasks_se_num) {
- mem = list_first_entry(task->t_mem_list,
- struct se_mem, se_list);
- mem_len = mem->se_len;
- mem_off = mem->se_off;
- page = mem->se_page;
- } else {
- mem = NULL;
- mem_len = remaining;
- mem_off = 0;
- page = NULL;
+ BUG_ON(remaining && !se_cmd->t_data_sg);
+ if (remaining) {
+ sg = se_cmd->t_data_sg;
+ mem_len = sg->length;
+ mem_off = sg->offset;
+ page = sg_page(sg);
}
/* no scatter/gather in skb for odd word length due to fc_seq_send() */
@@ -115,12 +107,10 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
while (remaining) {
if (!mem_len) {
- BUG_ON(!mem);
- mem = list_entry(mem->se_list.next,
- struct se_mem, se_list);
- mem_len = min((size_t)mem->se_len, remaining);
- mem_off = mem->se_off;
- page = mem->se_page;
+ sg = sg_next(sg);
+ mem_len = min((size_t)sg->length, remaining);
+ mem_off = sg->offset;
+ page = sg_page(sg);
}
if (!frame_len) {
/*
@@ -148,18 +138,7 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
tlen = min(mem_len, frame_len);
if (use_sg) {
- if (!mem) {
- BUG_ON(!task->t_task_buf);
- page_addr = task->t_task_buf + mem_off;
- /*
- * In this case, offset is 'offset_in_page' of
- * (t_task_buf + mem_off) instead of 'mem_off'.
- */
- off_in_page = offset_in_page(page_addr);
- page = virt_to_page(page_addr);
- tlen = min(tlen, PAGE_SIZE - off_in_page);
- } else
- off_in_page = mem_off;
+ off_in_page = mem_off;
BUG_ON(!page);
get_page(page);
skb_fill_page_desc(fp_skb(fp),
@@ -169,7 +148,7 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
fp_skb(fp)->data_len += tlen;
fp_skb(fp)->truesize +=
PAGE_SIZE << compound_order(page);
- } else if (mem) {
+ } else {
BUG_ON(!page);
from = kmap_atomic(page + (mem_off >> PAGE_SHIFT),
KM_SOFTIRQ0);
@@ -180,10 +159,6 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
memcpy(to, from, tlen);
kunmap_atomic(page_addr, KM_SOFTIRQ0);
to += tlen;
- } else {
- from = task->t_task_buf + mem_off;
- memcpy(to, from, tlen);
- to += tlen;
}
mem_off += tlen;
@@ -201,9 +176,8 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
error = lport->tt.seq_send(lport, cmd->seq, fp);
if (error) {
/* XXX For now, initiator will retry */
- if (printk_ratelimit())
- printk(KERN_ERR "%s: Failed to send frame %p, "
- "xid <0x%x>, remaining <0x%x>, "
+ pr_err_ratelimited("%s: Failed to send frame %p, "
+ "xid <0x%x>, remaining %zu, "
"lso_max <0x%x>\n",
__func__, fp, ep->xid,
remaining, lport->lso_max);
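In the non-DDP copy path above, a scatterlist entry may start mid-page and span pages, so each copy is clamped to one kmap_atomic() mapping at a time (the KM_SOFTIRQ0 slot matches this era's two-argument API). A sketch of a single step:

	#include <linux/highmem.h>

	static size_t sketch_copy_one_page(struct page *page, u32 mem_off,
					   void *to, size_t tlen)
	{
		void *page_addr, *from;

		/* Map the page containing mem_off, then offset into it. */
		from = kmap_atomic(page + (mem_off >> PAGE_SHIFT), KM_SOFTIRQ0);
		page_addr = from;
		from += mem_off & ~PAGE_MASK;
		/* Never copy past the end of the mapped page. */
		tlen = min(tlen, (size_t)(PAGE_SIZE - (mem_off & ~PAGE_MASK)));
		memcpy(to, from, tlen);
		kunmap_atomic(page_addr, KM_SOFTIRQ0);
		return tlen;	/* caller advances offsets by the bytes copied */
	}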
@@ -221,84 +195,67 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp)
struct fc_seq *seq = cmd->seq;
struct fc_exch *ep;
struct fc_lport *lport;
- struct se_transport_task *task;
struct fc_frame_header *fh;
- struct se_mem *mem;
- u32 mem_off;
+ struct scatterlist *sg = NULL;
+ u32 mem_off = 0;
u32 rel_off;
size_t frame_len;
- size_t mem_len;
+ size_t mem_len = 0;
size_t tlen;
- struct page *page;
+ struct page *page = NULL;
void *page_addr;
void *from;
void *to;
u32 f_ctl;
void *buf;
- task = T_TASK(se_cmd);
- BUG_ON(!task);
-
fh = fc_frame_header_get(fp);
if (!(ntoh24(fh->fh_f_ctl) & FC_FC_REL_OFF))
goto drop;
+ f_ctl = ntoh24(fh->fh_f_ctl);
+ ep = fc_seq_exch(seq);
+ lport = ep->lp;
+ if (cmd->was_ddp_setup) {
+ BUG_ON(!ep);
+ BUG_ON(!lport);
+ }
+
/*
- * Doesn't expect even single byte of payload. Payload
+ * Doesn't expect payload if DDP is setup. Payload
* is expected to be copied directly to user buffers
- * due to DDP (Large Rx offload) feature, hence
- * BUG_ON if BUF is non-NULL
+ * due to DDP (Large Rx offload).
*/
buf = fc_frame_payload_get(fp, 1);
- if (cmd->was_ddp_setup && buf) {
- printk(KERN_INFO "%s: When DDP was setup, not expected to"
- "receive frame with payload, Payload shall be"
- "copied directly to buffer instead of coming "
- "via. legacy receive queues\n", __func__);
- BUG_ON(buf);
- }
+ if (buf)
+ pr_err("%s: xid 0x%x, f_ctl 0x%x, cmd->sg %p, "
+ "cmd->sg_cnt 0x%x. DDP was setup"
+ " hence not expected to receive frame with "
+ "payload, Frame will be dropped if "
+ "'Sequence Initiative' bit in f_ctl is "
+ "not set\n", __func__, ep->xid, f_ctl,
+ cmd->sg, cmd->sg_cnt);
+ /*
+ * Invalidate the HW DDP context if it was set up for this
+ * command. Invalidation is required in both the success and
+ * error cases.
+ */
+ ft_invl_hw_context(cmd);
/*
- * If ft_cmd indicated 'ddp_setup', in that case only the last frame
- * should come with 'TSI bit being set'. If 'TSI bit is not set and if
- * data frame appears here, means error condition. In both the cases
- * release the DDP context (ddp_put) and in error case, as well
- * initiate error recovery mechanism.
+ * If "Sequence Initiative (TSI)" bit set in f_ctl, means last
+ * write data frame is received successfully where payload is
+ * posted directly to user buffer and only the last frame's
+ * header is posted in receive queue.
+ *
+ * If "Sequence Initiative (TSI)" bit is not set, means error
+ * condition w.r.t. DDP, hence drop the packet and let explict
+ * ABORTS from other end of exchange timer trigger the recovery.
*/
- ep = fc_seq_exch(seq);
- if (cmd->was_ddp_setup) {
- BUG_ON(!ep);
- lport = ep->lp;
- BUG_ON(!lport);
- }
- if (cmd->was_ddp_setup && ep->xid != FC_XID_UNKNOWN) {
- f_ctl = ntoh24(fh->fh_f_ctl);
- /*
- * If TSI bit set in f_ctl, means last write data frame is
- * received successfully where payload is posted directly
- * to user buffer and only the last frame's header is posted
- * in legacy receive queue
- */
- if (f_ctl & FC_FC_SEQ_INIT) { /* TSI bit set in FC frame */
- cmd->write_data_len = lport->tt.ddp_done(lport,
- ep->xid);
- goto last_frame;
- } else {
- /*
- * Updating the write_data_len may be meaningless at
- * this point, but just in case if required in future
- * for debugging or any other purpose
- */
- printk(KERN_ERR "%s: Received frame with TSI bit not"
- " being SET, dropping the frame, "
- "cmd->sg <%p>, cmd->sg_cnt <0x%x>\n",
- __func__, cmd->sg, cmd->sg_cnt);
- cmd->write_data_len = lport->tt.ddp_done(lport,
- ep->xid);
- lport->tt.seq_exch_abort(cmd->seq, 0);
- goto drop;
- }
- }
+ if (f_ctl & FC_FC_SEQ_INIT)
+ goto last_frame;
+ else
+ goto drop;
rel_off = ntohl(fh->fh_parm_offset);
frame_len = fr_len(fp);
@@ -312,29 +269,22 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp)
frame_len = se_cmd->data_length - rel_off;
/*
- * Setup to use first mem list entry if any.
+ * Setup to use first mem list entry, unless no data.
*/
- if (task->t_tasks_se_num) {
- mem = list_first_entry(task->t_mem_list,
- struct se_mem, se_list);
- mem_len = mem->se_len;
- mem_off = mem->se_off;
- page = mem->se_page;
- } else {
- mem = NULL;
- page = NULL;
- mem_off = 0;
- mem_len = frame_len;
+ BUG_ON(frame_len && !se_cmd->t_data_sg);
+ if (frame_len) {
+ sg = se_cmd->t_data_sg;
+ mem_len = sg->length;
+ mem_off = sg->offset;
+ page = sg_page(sg);
}
while (frame_len) {
if (!mem_len) {
- BUG_ON(!mem);
- mem = list_entry(mem->se_list.next,
- struct se_mem, se_list);
- mem_len = mem->se_len;
- mem_off = mem->se_off;
- page = mem->se_page;
+ sg = sg_next(sg);
+ mem_len = sg->length;
+ mem_off = sg->offset;
+ page = sg_page(sg);
}
if (rel_off >= mem_len) {
rel_off -= mem_len;
@@ -347,19 +297,15 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp)
tlen = min(mem_len, frame_len);
- if (mem) {
- to = kmap_atomic(page + (mem_off >> PAGE_SHIFT),
- KM_SOFTIRQ0);
- page_addr = to;
- to += mem_off & ~PAGE_MASK;
- tlen = min(tlen, (size_t)(PAGE_SIZE -
- (mem_off & ~PAGE_MASK)));
- memcpy(to, from, tlen);
- kunmap_atomic(page_addr, KM_SOFTIRQ0);
- } else {
- to = task->t_task_buf + mem_off;
- memcpy(to, from, tlen);
- }
+ to = kmap_atomic(page + (mem_off >> PAGE_SHIFT),
+ KM_SOFTIRQ0);
+ page_addr = to;
+ to += mem_off & ~PAGE_MASK;
+ tlen = min(tlen, (size_t)(PAGE_SIZE -
+ (mem_off & ~PAGE_MASK)));
+ memcpy(to, from, tlen);
+ kunmap_atomic(page_addr, KM_SOFTIRQ0);
+
from += tlen;
frame_len -= tlen;
mem_off += tlen;
@@ -372,3 +318,39 @@ last_frame:
drop:
fc_frame_free(fp);
}
+
+/*
+ * Handle and cleanup any HW specific resources if
+ * received ABORTS, errors, timeouts.
+ */
+void ft_invl_hw_context(struct ft_cmd *cmd)
+{
+ struct fc_seq *seq = cmd->seq;
+ struct fc_exch *ep = NULL;
+ struct fc_lport *lport = NULL;
+
+ BUG_ON(!cmd);
+
+ /* Clean up the HW DDP context if DDP was set up */
+ if (cmd->was_ddp_setup && seq) {
+ ep = fc_seq_exch(seq);
+ if (ep) {
+ lport = ep->lp;
+ if (lport && (ep->xid <= lport->lro_xid))
+ /*
+ * "ddp_done" trigger invalidation of HW
+ * specific DDP context
+ */
+ cmd->write_data_len = lport->tt.ddp_done(lport,
+ ep->xid);
+
+ /*
+ * Reset the flag to record that the HW DDP
+ * context has been invalidated, so the same
+ * context (identified by ep->xid) is not
+ * invalidated twice.
+ */
+ cmd->was_ddp_setup = 0;
+ }
+ }
+}
diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c
index a3bd57f2ea3..dbb5eaeee39 100644
--- a/drivers/target/tcm_fc/tfc_sess.c
+++ b/drivers/target/tcm_fc/tfc_sess.c
@@ -46,10 +46,8 @@
#include <target/target_core_device.h>
#include <target/target_core_tpg.h>
#include <target/target_core_configfs.h>
-#include <target/target_core_base.h>
#include <target/configfs_macros.h>
-#include <scsi/libfc.h>
#include "tcm_fc.h"
static void ft_sess_delete_all(struct ft_tport *);
@@ -198,13 +196,13 @@ static struct ft_sess *ft_sess_get(struct fc_lport *lport, u32 port_id)
if (sess->port_id == port_id) {
kref_get(&sess->kref);
rcu_read_unlock();
- FT_SESS_DBG("port_id %x found %p\n", port_id, sess);
+ pr_debug("port_id %x found %p\n", port_id, sess);
return sess;
}
}
out:
rcu_read_unlock();
- FT_SESS_DBG("port_id %x not found\n", port_id);
+ pr_debug("port_id %x not found\n", port_id);
return NULL;
}
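ft_sess_get() follows the standard RCU hash-lookup idiom: walk the list under rcu_read_lock() and take the kref before leaving the read-side critical section, so a concurrent unhash-and-free cannot race the caller. A sketch using this era's four-argument hlist_for_each_entry_rcu(), with member names assumed from this diff:

	#include <linux/rculist.h>
	#include <linux/kref.h>

	static struct ft_sess *sketch_sess_lookup(struct hlist_head *head,
						  u32 port_id)
	{
		struct ft_sess *sess;
		struct hlist_node *pos;

		rcu_read_lock();
		hlist_for_each_entry_rcu(sess, pos, head, hash) {
			if (sess->port_id == port_id) {
				kref_get(&sess->kref);	/* pin before unlocking */
				rcu_read_unlock();
				return sess;
			}
		}
		rcu_read_unlock();
		return NULL;
	}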
@@ -229,7 +227,7 @@ static struct ft_sess *ft_sess_create(struct ft_tport *tport, u32 port_id,
return NULL;
sess->se_sess = transport_init_session();
- if (!sess->se_sess) {
+ if (IS_ERR(sess->se_sess)) {
kfree(sess);
return NULL;
}
@@ -240,7 +238,7 @@ static struct ft_sess *ft_sess_create(struct ft_tport *tport, u32 port_id,
hlist_add_head_rcu(&sess->hash, head);
tport->sess_count++;
- FT_SESS_DBG("port_id %x sess %p\n", port_id, sess);
+ pr_debug("port_id %x sess %p\n", port_id, sess);
transport_register_session(&tport->tpg->se_tpg, &acl->se_node_acl,
sess->se_sess, sess);
@@ -314,7 +312,7 @@ int ft_sess_shutdown(struct se_session *se_sess)
{
struct ft_sess *sess = se_sess->fabric_sess_ptr;
- FT_SESS_DBG("port_id %x\n", sess->port_id);
+ pr_debug("port_id %x\n", sess->port_id);
return 1;
}
@@ -332,10 +330,10 @@ void ft_sess_close(struct se_session *se_sess)
lport = sess->tport->lport;
port_id = sess->port_id;
if (port_id == -1) {
- mutex_lock(&ft_lport_lock);
+ mutex_unlock(&ft_lport_lock);
return;
}
- FT_SESS_DBG("port_id %x\n", port_id);
+ pr_debug("port_id %x\n", port_id);
ft_sess_unhash(sess);
mutex_unlock(&ft_lport_lock);
transport_deregister_session_configfs(se_sess);
@@ -348,7 +346,7 @@ void ft_sess_stop(struct se_session *se_sess, int sess_sleep, int conn_sleep)
{
struct ft_sess *sess = se_sess->fabric_sess_ptr;
- FT_SESS_DBG("port_id %x\n", sess->port_id);
+ pr_debug("port_id %x\n", sess->port_id);
}
int ft_sess_logged_in(struct se_session *se_sess)
@@ -458,7 +456,7 @@ static int ft_prli(struct fc_rport_priv *rdata, u32 spp_len,
mutex_lock(&ft_lport_lock);
ret = ft_prli_locked(rdata, spp_len, rspp, spp);
mutex_unlock(&ft_lport_lock);
- FT_SESS_DBG("port_id %x flags %x ret %x\n",
+ pr_debug("port_id %x flags %x ret %x\n",
rdata->ids.port_id, rspp ? rspp->spp_flags : 0, ret);
return ret;
}
@@ -518,11 +516,11 @@ static void ft_recv(struct fc_lport *lport, struct fc_frame *fp)
struct ft_sess *sess;
u32 sid = fc_frame_sid(fp);
- FT_SESS_DBG("sid %x\n", sid);
+ pr_debug("sid %x\n", sid);
sess = ft_sess_get(lport, sid);
if (!sess) {
- FT_SESS_DBG("sid %x sess lookup failed\n", sid);
+ pr_debug("sid %x sess lookup failed\n", sid);
/* TBD XXX - if FCP_CMND, send PRLO */
fc_frame_free(fp);
return;