Diffstat (limited to 'drivers/scsi')
-rw-r--r--  drivers/scsi/arcmsr/arcmsr_attr.c | 3
-rw-r--r--  drivers/scsi/ch.c | 6
-rw-r--r--  drivers/scsi/constants.c | 3
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_alua.c | 3
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_emc.c | 3
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_hp_sw.c | 6
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_rdac.c | 3
-rw-r--r--  drivers/scsi/dpt_i2o.c | 2
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvfc.c | 2
-rw-r--r--  drivers/scsi/ide-scsi.c | 26
-rw-r--r--  drivers/scsi/iscsi_tcp.c | 44
-rw-r--r--  drivers/scsi/libiscsi.c | 183
-rw-r--r--  drivers/scsi/lpfc/lpfc.h | 96
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c | 1375
-rw-r--r--  drivers/scsi/lpfc/lpfc_crtn.h | 51
-rw-r--r--  drivers/scsi/lpfc/lpfc_ct.c | 20
-rw-r--r--  drivers/scsi/lpfc/lpfc_debugfs.c | 400
-rw-r--r--  drivers/scsi/lpfc/lpfc_disc.h | 23
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c | 1712
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c | 241
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw.h | 183
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c | 924
-rw-r--r--  drivers/scsi/lpfc/lpfc_mbox.c | 624
-rw-r--r--  drivers/scsi/lpfc/lpfc_mem.c | 116
-rw-r--r--  drivers/scsi/lpfc/lpfc_nl.h | 163
-rw-r--r--  drivers/scsi/lpfc/lpfc_nportdisc.c | 24
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c | 514
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.h | 5
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c | 1715
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.h | 1
-rw-r--r--  drivers/scsi/lpfc/lpfc_version.h | 6
-rw-r--r--  drivers/scsi/lpfc/lpfc_vport.c | 168
-rw-r--r--  drivers/scsi/lpfc/lpfc_vport.h | 4
-rw-r--r--  drivers/scsi/osst.c | 3
-rw-r--r--  drivers/scsi/qla2xxx/qla_attr.c | 1
-rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c | 14
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 26
-rw-r--r--  drivers/scsi/qla4xxx/ql4_isr.c | 4
-rw-r--r--  drivers/scsi/qla4xxx/ql4_os.c | 4
-rw-r--r--  drivers/scsi/scsi.c | 10
-rw-r--r--  drivers/scsi/scsi_error.c | 72
-rw-r--r--  drivers/scsi/scsi_lib.c | 106
-rw-r--r--  drivers/scsi/scsi_priv.h | 1
-rw-r--r--  drivers/scsi/scsi_scan.c | 1
-rw-r--r--  drivers/scsi/scsi_transport_fc.c | 47
-rw-r--r--  drivers/scsi/scsi_transport_iscsi.c | 19
-rw-r--r--  drivers/scsi/scsi_transport_spi.c | 4
-rw-r--r--  drivers/scsi/sd.c | 17
-rw-r--r--  drivers/scsi/sd.h | 21
-rw-r--r--  drivers/scsi/sd_dif.c | 42
-rw-r--r--  drivers/scsi/sg.c | 10
-rw-r--r--  drivers/scsi/sr_vendor.c | 12
-rw-r--r--  drivers/scsi/st.c | 11
53 files changed, 7953 insertions, 1121 deletions
diff --git a/drivers/scsi/arcmsr/arcmsr_attr.c b/drivers/scsi/arcmsr/arcmsr_attr.c
index 69f8346aa28..5877f29a600 100644
--- a/drivers/scsi/arcmsr/arcmsr_attr.c
+++ b/drivers/scsi/arcmsr/arcmsr_attr.c
@@ -189,7 +189,6 @@ static struct bin_attribute arcmsr_sysfs_message_read_attr = {
.attr = {
.name = "mu_read",
.mode = S_IRUSR ,
- .owner = THIS_MODULE,
},
.size = 1032,
.read = arcmsr_sysfs_iop_message_read,
@@ -199,7 +198,6 @@ static struct bin_attribute arcmsr_sysfs_message_write_attr = {
.attr = {
.name = "mu_write",
.mode = S_IWUSR,
- .owner = THIS_MODULE,
},
.size = 1032,
.write = arcmsr_sysfs_iop_message_write,
@@ -209,7 +207,6 @@ static struct bin_attribute arcmsr_sysfs_message_clear_attr = {
.attr = {
.name = "mu_clear",
.mode = S_IWUSR,
- .owner = THIS_MODULE,
},
.size = 1,
.write = arcmsr_sysfs_iop_message_clear,
diff --git a/drivers/scsi/ch.c b/drivers/scsi/ch.c
index 3c257fe0893..88ecf94ad97 100644
--- a/drivers/scsi/ch.c
+++ b/drivers/scsi/ch.c
@@ -914,9 +914,9 @@ static int ch_probe(struct device *dev)
ch->minor = minor;
sprintf(ch->name,"ch%d",ch->minor);
- class_dev = device_create_drvdata(ch_sysfs_class, dev,
- MKDEV(SCSI_CHANGER_MAJOR, ch->minor),
- ch, "s%s", ch->name);
+ class_dev = device_create(ch_sysfs_class, dev,
+ MKDEV(SCSI_CHANGER_MAJOR, ch->minor), ch,
+ "s%s", ch->name);
if (IS_ERR(class_dev)) {
printk(KERN_WARNING "ch%d: device_create failed\n",
ch->minor);
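
The ch.c hunk above (and the dpt_i2o.c one later in this diff) replaces device_create_drvdata() with device_create(), which in this kernel series takes the driver-data pointer directly, ahead of the name format string. A minimal sketch of the create/destroy pairing under that signature; the class, major number and data pointer below are placeholders, not taken from this patch:

#include <linux/device.h>
#include <linux/kdev_t.h>
#include <linux/err.h>

#define EXAMPLE_MAJOR	240	/* placeholder major number */

static struct class *example_class;	/* placeholder, created elsewhere */

static int example_add_node(struct device *parent, int minor, void *data)
{
	struct device *dev;

	/* drvdata ("data") is now passed straight to device_create() */
	dev = device_create(example_class, parent,
			    MKDEV(EXAMPLE_MAJOR, minor), data,
			    "example%d", minor);
	if (IS_ERR(dev))
		return PTR_ERR(dev);
	return 0;
}

static void example_remove_node(int minor)
{
	device_destroy(example_class, MKDEV(EXAMPLE_MAJOR, minor));
}
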
diff --git a/drivers/scsi/constants.c b/drivers/scsi/constants.c
index 9785d738419..4003deefb7d 100644
--- a/drivers/scsi/constants.c
+++ b/drivers/scsi/constants.c
@@ -1364,7 +1364,8 @@ EXPORT_SYMBOL(scsi_print_sense);
static const char * const hostbyte_table[]={
"DID_OK", "DID_NO_CONNECT", "DID_BUS_BUSY", "DID_TIME_OUT", "DID_BAD_TARGET",
"DID_ABORT", "DID_PARITY", "DID_ERROR", "DID_RESET", "DID_BAD_INTR",
-"DID_PASSTHROUGH", "DID_SOFT_ERROR", "DID_IMM_RETRY", "DID_REQUEUE"};
+"DID_PASSTHROUGH", "DID_SOFT_ERROR", "DID_IMM_RETRY", "DID_REQUEUE",
+"DID_TRANSPORT_DISRUPTED", "DID_TRANSPORT_FAILFAST" };
#define NUM_HOSTBYTE_STRS ARRAY_SIZE(hostbyte_table)
static const char * const driverbyte_table[]={
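
The two strings appended above name the new DID_TRANSPORT_DISRUPTED and DID_TRANSPORT_FAILFAST host bytes used elsewhere in this series. For orientation, the host byte sits in bits 16-23 of a SCSI result value, so a table lookup amounts to roughly the sketch below (illustrative only; the actual printing is handled by scsi_print_result() in this file):

/* Illustrative sketch, not part of the patch. */
static const char *example_hostbyte_name(int result)
{
	unsigned int hb = (result >> 16) & 0xff;	/* i.e. host_byte(result) */

	if (hb < NUM_HOSTBYTE_STRS)
		return hostbyte_table[hb];
	return "invalid";
}
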
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index 708e475896b..e356b43753f 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -109,7 +109,8 @@ static struct request *get_alua_req(struct scsi_device *sdev,
}
rq->cmd_type = REQ_TYPE_BLOCK_PC;
- rq->cmd_flags |= REQ_FAILFAST | REQ_NOMERGE;
+ rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
+ REQ_FAILFAST_DRIVER;
rq->retries = ALUA_FAILOVER_RETRIES;
rq->timeout = ALUA_FAILOVER_TIMEOUT;
diff --git a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c
index 8f45570a8a0..0e572d2c5b0 100644
--- a/drivers/scsi/device_handler/scsi_dh_emc.c
+++ b/drivers/scsi/device_handler/scsi_dh_emc.c
@@ -303,7 +303,8 @@ static struct request *get_req(struct scsi_device *sdev, int cmd,
rq->cmd[4] = len;
rq->cmd_type = REQ_TYPE_BLOCK_PC;
- rq->cmd_flags |= REQ_FAILFAST;
+ rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
+ REQ_FAILFAST_DRIVER;
rq->timeout = CLARIION_TIMEOUT;
rq->retries = CLARIION_RETRIES;
diff --git a/drivers/scsi/device_handler/scsi_dh_hp_sw.c b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
index 5e93c88ad66..9aec4ca64e5 100644
--- a/drivers/scsi/device_handler/scsi_dh_hp_sw.c
+++ b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
@@ -112,7 +112,8 @@ static int hp_sw_tur(struct scsi_device *sdev, struct hp_sw_dh_data *h)
return SCSI_DH_RES_TEMP_UNAVAIL;
req->cmd_type = REQ_TYPE_BLOCK_PC;
- req->cmd_flags |= REQ_FAILFAST;
+ req->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
+ REQ_FAILFAST_DRIVER;
req->cmd_len = COMMAND_SIZE(TEST_UNIT_READY);
req->cmd[0] = TEST_UNIT_READY;
req->timeout = HP_SW_TIMEOUT;
@@ -204,7 +205,8 @@ static int hp_sw_start_stop(struct scsi_device *sdev, struct hp_sw_dh_data *h)
return SCSI_DH_RES_TEMP_UNAVAIL;
req->cmd_type = REQ_TYPE_BLOCK_PC;
- req->cmd_flags |= REQ_FAILFAST;
+ req->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
+ REQ_FAILFAST_DRIVER;
req->cmd_len = COMMAND_SIZE(START_STOP);
req->cmd[0] = START_STOP;
req->cmd[4] = 1; /* Start spin cycle */
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index 50bf95f3b5c..a43c3ed4df2 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -226,7 +226,8 @@ static struct request *get_rdac_req(struct scsi_device *sdev,
}
rq->cmd_type = REQ_TYPE_BLOCK_PC;
- rq->cmd_flags |= REQ_FAILFAST | REQ_NOMERGE;
+ rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
+ REQ_FAILFAST_DRIVER;
rq->retries = RDAC_RETRIES;
rq->timeout = RDAC_TIMEOUT;
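
The four device-handler hunks above make the same substitution: the old catch-all REQ_FAILFAST flag becomes the split REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER set, so the handlers' internal path-checking commands keep failing fast at every level instead of being retried by the midlayer. A condensed sketch of the shared request-building pattern, with placeholder timeout and retry values:

#include <linux/blkdev.h>
#include <scsi/scsi_device.h>

#define EXAMPLE_TIMEOUT	(60 * HZ)	/* placeholder */
#define EXAMPLE_RETRIES	5		/* placeholder */

static struct request *example_get_req(struct scsi_device *sdev)
{
	struct request *rq;

	rq = blk_get_request(sdev->request_queue, READ, GFP_NOIO);
	if (!rq)
		return NULL;

	rq->cmd_type = REQ_TYPE_BLOCK_PC;
	/* fail fast at the device, transport and driver levels */
	rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
			 REQ_FAILFAST_DRIVER;
	rq->timeout = EXAMPLE_TIMEOUT;
	rq->retries = EXAMPLE_RETRIES;
	return rq;
}
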
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index 1fe0901e811..8aba4fdfb52 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -271,7 +271,7 @@ rebuild_sys_tab:
pHba->initialized = TRUE;
pHba->state &= ~DPTI_STATE_RESET;
if (adpt_sysfs_class) {
- struct device *dev = device_create_drvdata(adpt_sysfs_class,
+ struct device *dev = device_create(adpt_sysfs_class,
NULL, MKDEV(DPTI_I2O_MAJOR, pHba->unit), NULL,
"dpti%d", pHba->unit);
if (IS_ERR(dev)) {
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index 4e0b7c8eb32..7650707a40d 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -2031,8 +2031,6 @@ static void ibmvfc_terminate_rport_io(struct fc_rport *rport)
spin_unlock_irqrestore(shost->host_lock, flags);
} else
ibmvfc_issue_fc_host_lip(shost);
-
- scsi_target_unblock(&rport->dev);
LEAVE;
}
diff --git a/drivers/scsi/ide-scsi.c b/drivers/scsi/ide-scsi.c
index 740bad43599..afc96e844a2 100644
--- a/drivers/scsi/ide-scsi.c
+++ b/drivers/scsi/ide-scsi.c
@@ -343,6 +343,11 @@ static ide_startstop_t idescsi_do_request (ide_drive_t *drive, struct request *r
}
#ifdef CONFIG_IDE_PROC_FS
+static ide_proc_entry_t idescsi_proc[] = {
+ { "capacity", S_IFREG|S_IRUGO, proc_ide_read_capacity, NULL },
+ { NULL, 0, NULL, NULL }
+};
+
#define ide_scsi_devset_get(name, field) \
static int get_##name(ide_drive_t *drive) \
{ \
@@ -378,6 +383,16 @@ static const struct ide_proc_devset idescsi_settings[] = {
IDE_PROC_DEVSET(transform, 0, 3),
{ 0 },
};
+
+static ide_proc_entry_t *ide_scsi_proc_entries(ide_drive_t *drive)
+{
+ return idescsi_proc;
+}
+
+static const struct ide_proc_devset *ide_scsi_proc_devsets(ide_drive_t *drive)
+{
+ return idescsi_settings;
+}
#endif
/*
@@ -419,13 +434,6 @@ static void ide_scsi_remove(ide_drive_t *drive)
static int ide_scsi_probe(ide_drive_t *);
-#ifdef CONFIG_IDE_PROC_FS
-static ide_proc_entry_t idescsi_proc[] = {
- { "capacity", S_IFREG|S_IRUGO, proc_ide_read_capacity, NULL },
- { NULL, 0, NULL, NULL }
-};
-#endif
-
static ide_driver_t idescsi_driver = {
.gen_driver = {
.owner = THIS_MODULE,
@@ -439,8 +447,8 @@ static ide_driver_t idescsi_driver = {
.end_request = idescsi_end_request,
.error = idescsi_atapi_error,
#ifdef CONFIG_IDE_PROC_FS
- .proc = idescsi_proc,
- .settings = idescsi_settings,
+ .proc_entries = ide_scsi_proc_entries,
+ .proc_devsets = ide_scsi_proc_devsets,
#endif
};
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 2a2f0094570..ed6c54cae7b 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -523,22 +523,20 @@ iscsi_tcp_cleanup_task(struct iscsi_conn *conn, struct iscsi_task *task)
}
/**
- * iscsi_data_rsp - SCSI Data-In Response processing
+ * iscsi_data_in - SCSI Data-In Response processing
* @conn: iscsi connection
* @task: scsi command task
**/
static int
-iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
+iscsi_data_in(struct iscsi_conn *conn, struct iscsi_task *task)
{
struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
struct iscsi_tcp_task *tcp_task = task->dd_data;
struct iscsi_data_rsp *rhdr = (struct iscsi_data_rsp *)tcp_conn->in.hdr;
- struct iscsi_session *session = conn->session;
- struct scsi_cmnd *sc = task->sc;
int datasn = be32_to_cpu(rhdr->datasn);
- unsigned total_in_length = scsi_in(sc)->length;
+ unsigned total_in_length = scsi_in(task->sc)->length;
- iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);
+ iscsi_update_cmdsn(conn->session, (struct iscsi_nopin*)rhdr);
if (tcp_conn->in.datalen == 0)
return 0;
@@ -558,23 +556,6 @@ iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
return ISCSI_ERR_DATA_OFFSET;
}
- if (rhdr->flags & ISCSI_FLAG_DATA_STATUS) {
- sc->result = (DID_OK << 16) | rhdr->cmd_status;
- conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1;
- if (rhdr->flags & (ISCSI_FLAG_DATA_UNDERFLOW |
- ISCSI_FLAG_DATA_OVERFLOW)) {
- int res_count = be32_to_cpu(rhdr->residual_count);
-
- if (res_count > 0 &&
- (rhdr->flags & ISCSI_FLAG_CMD_OVERFLOW ||
- res_count <= total_in_length))
- scsi_in(sc)->resid = res_count;
- else
- sc->result = (DID_BAD_TARGET << 16) |
- rhdr->cmd_status;
- }
- }
-
conn->datain_pdus_cnt++;
return 0;
}
@@ -774,7 +755,7 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
if (!task)
rc = ISCSI_ERR_BAD_ITT;
else
- rc = iscsi_data_rsp(conn, task);
+ rc = iscsi_data_in(conn, task);
if (rc) {
spin_unlock(&conn->session->lock);
break;
@@ -998,7 +979,7 @@ iscsi_tcp_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
error:
debug_tcp("Error receiving PDU, errno=%d\n", rc);
- iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
+ iscsi_conn_failure(conn, rc);
return 0;
}
@@ -1117,8 +1098,10 @@ iscsi_xmit(struct iscsi_conn *conn)
while (1) {
rc = iscsi_tcp_xmit_segment(tcp_conn, segment);
- if (rc < 0)
+ if (rc < 0) {
+ rc = ISCSI_ERR_XMIT_FAILED;
goto error;
+ }
if (rc == 0)
break;
@@ -1127,7 +1110,7 @@ iscsi_xmit(struct iscsi_conn *conn)
if (segment->total_copied >= segment->total_size) {
if (segment->done != NULL) {
rc = segment->done(tcp_conn, segment);
- if (rc < 0)
+ if (rc != 0)
goto error;
}
}
@@ -1142,8 +1125,8 @@ error:
/* Transmit error. We could initiate error recovery
* here. */
debug_tcp("Error sending PDU, errno=%d\n", rc);
- iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
- return rc;
+ iscsi_conn_failure(conn, rc);
+ return -EIO;
}
/**
@@ -1904,6 +1887,7 @@ static void iscsi_tcp_session_destroy(struct iscsi_cls_session *cls_session)
struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
iscsi_r2tpool_free(cls_session->dd_data);
+ iscsi_session_teardown(cls_session);
iscsi_host_remove(shost);
iscsi_host_free(shost);
@@ -1927,7 +1911,7 @@ static struct scsi_host_template iscsi_sht = {
.cmd_per_lun = ISCSI_DEF_CMD_PER_LUN,
.eh_abort_handler = iscsi_eh_abort,
.eh_device_reset_handler= iscsi_eh_device_reset,
- .eh_host_reset_handler = iscsi_eh_host_reset,
+ .eh_target_reset_handler= iscsi_eh_target_reset,
.use_clustering = DISABLE_CLUSTERING,
.slave_configure = iscsi_tcp_slave_configure,
.proc_name = "iscsi_tcp",
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index da7b67d30d9..801c7cf54d2 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -404,11 +404,6 @@ static void fail_command(struct iscsi_conn *conn, struct iscsi_task *task,
conn->session->queued_cmdsn--;
else
conn->session->tt->cleanup_task(conn, task);
- /*
- * Check if cleanup_task dropped the lock and the command completed,
- */
- if (!task->sc)
- return;
sc->result = err;
if (!scsi_bidi_cmnd(sc))
@@ -633,6 +628,40 @@ out:
__iscsi_put_task(task);
}
+/**
+ * iscsi_data_in_rsp - SCSI Data-In Response processing
+ * @conn: iscsi connection
+ * @hdr: iscsi pdu
+ * @task: scsi command task
+ **/
+static void
+iscsi_data_in_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+ struct iscsi_task *task)
+{
+ struct iscsi_data_rsp *rhdr = (struct iscsi_data_rsp *)hdr;
+ struct scsi_cmnd *sc = task->sc;
+
+ if (!(rhdr->flags & ISCSI_FLAG_DATA_STATUS))
+ return;
+
+ sc->result = (DID_OK << 16) | rhdr->cmd_status;
+ conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1;
+ if (rhdr->flags & (ISCSI_FLAG_DATA_UNDERFLOW |
+ ISCSI_FLAG_DATA_OVERFLOW)) {
+ int res_count = be32_to_cpu(rhdr->residual_count);
+
+ if (res_count > 0 &&
+ (rhdr->flags & ISCSI_FLAG_CMD_OVERFLOW ||
+ res_count <= scsi_in(sc)->length))
+ scsi_in(sc)->resid = res_count;
+ else
+ sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
+ }
+
+ conn->scsirsp_pdus_cnt++;
+ __iscsi_put_task(task);
+}
+
static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
{
struct iscsi_tm_rsp *tmf = (struct iscsi_tm_rsp *)hdr;
@@ -818,12 +847,7 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
iscsi_scsi_cmd_rsp(conn, hdr, task, data, datalen);
break;
case ISCSI_OP_SCSI_DATA_IN:
- if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
- conn->scsirsp_pdus_cnt++;
- iscsi_update_cmdsn(session,
- (struct iscsi_nopin*) hdr);
- __iscsi_put_task(task);
- }
+ iscsi_data_in_rsp(conn, hdr, task);
break;
case ISCSI_OP_LOGOUT_RSP:
iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
@@ -954,6 +978,38 @@ struct iscsi_task *iscsi_itt_to_ctask(struct iscsi_conn *conn, itt_t itt)
}
EXPORT_SYMBOL_GPL(iscsi_itt_to_ctask);
+void iscsi_session_failure(struct iscsi_cls_session *cls_session,
+ enum iscsi_err err)
+{
+ struct iscsi_session *session = cls_session->dd_data;
+ struct iscsi_conn *conn;
+ struct device *dev;
+ unsigned long flags;
+
+ spin_lock_irqsave(&session->lock, flags);
+ conn = session->leadconn;
+ if (session->state == ISCSI_STATE_TERMINATE || !conn) {
+ spin_unlock_irqrestore(&session->lock, flags);
+ return;
+ }
+
+ dev = get_device(&conn->cls_conn->dev);
+ spin_unlock_irqrestore(&session->lock, flags);
+ if (!dev)
+ return;
+ /*
+ * if the host is being removed bypass the connection
+ * recovery initialization because we are going to kill
+ * the session.
+ */
+ if (err == ISCSI_ERR_INVALID_HOST)
+ iscsi_conn_error_event(conn->cls_conn, err);
+ else
+ iscsi_conn_failure(conn, err);
+ put_device(dev);
+}
+EXPORT_SYMBOL_GPL(iscsi_session_failure);
+
void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err)
{
struct iscsi_session *session = conn->session;
@@ -968,9 +1024,10 @@ void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err)
if (conn->stop_stage == 0)
session->state = ISCSI_STATE_FAILED;
spin_unlock_irqrestore(&session->lock, flags);
+
set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
- iscsi_conn_error(conn->cls_conn, err);
+ iscsi_conn_error_event(conn->cls_conn, err);
}
EXPORT_SYMBOL_GPL(iscsi_conn_failure);
@@ -1194,15 +1251,13 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
switch (session->state) {
case ISCSI_STATE_IN_RECOVERY:
reason = FAILURE_SESSION_IN_RECOVERY;
- sc->result = DID_IMM_RETRY << 16;
- break;
+ goto reject;
case ISCSI_STATE_LOGGING_OUT:
reason = FAILURE_SESSION_LOGGING_OUT;
- sc->result = DID_IMM_RETRY << 16;
- break;
+ goto reject;
case ISCSI_STATE_RECOVERY_FAILED:
reason = FAILURE_SESSION_RECOVERY_TIMEOUT;
- sc->result = DID_NO_CONNECT << 16;
+ sc->result = DID_TRANSPORT_FAILFAST << 16;
break;
case ISCSI_STATE_TERMINATE:
reason = FAILURE_SESSION_TERMINATE;
@@ -1267,7 +1322,7 @@ reject:
spin_unlock(&session->lock);
debug_scsi("cmd 0x%x rejected (%d)\n", sc->cmnd[0], reason);
spin_lock(host->host_lock);
- return SCSI_MLQUEUE_HOST_BUSY;
+ return SCSI_MLQUEUE_TARGET_BUSY;
fault:
spin_unlock(&session->lock);
@@ -1307,7 +1362,7 @@ void iscsi_session_recovery_timedout(struct iscsi_cls_session *cls_session)
}
EXPORT_SYMBOL_GPL(iscsi_session_recovery_timedout);
-int iscsi_eh_host_reset(struct scsi_cmnd *sc)
+int iscsi_eh_target_reset(struct scsi_cmnd *sc)
{
struct iscsi_cls_session *cls_session;
struct iscsi_session *session;
@@ -1321,7 +1376,7 @@ int iscsi_eh_host_reset(struct scsi_cmnd *sc)
spin_lock_bh(&session->lock);
if (session->state == ISCSI_STATE_TERMINATE) {
failed:
- debug_scsi("failing host reset: session terminated "
+ debug_scsi("failing target reset: session terminated "
"[CID %d age %d]\n", conn->id, session->age);
spin_unlock_bh(&session->lock);
mutex_unlock(&session->eh_mutex);
@@ -1336,7 +1391,7 @@ failed:
*/
iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
- debug_scsi("iscsi_eh_host_reset wait for relogin\n");
+ debug_scsi("iscsi_eh_target_reset wait for relogin\n");
wait_event_interruptible(conn->ehwait,
session->state == ISCSI_STATE_TERMINATE ||
session->state == ISCSI_STATE_LOGGED_IN ||
@@ -1348,14 +1403,14 @@ failed:
spin_lock_bh(&session->lock);
if (session->state == ISCSI_STATE_LOGGED_IN)
iscsi_session_printk(KERN_INFO, session,
- "host reset succeeded\n");
+ "target reset succeeded\n");
else
goto failed;
spin_unlock_bh(&session->lock);
mutex_unlock(&session->eh_mutex);
return SUCCESS;
}
-EXPORT_SYMBOL_GPL(iscsi_eh_host_reset);
+EXPORT_SYMBOL_GPL(iscsi_eh_target_reset);
static void iscsi_tmf_timedout(unsigned long data)
{
@@ -1769,10 +1824,10 @@ int iscsi_eh_device_reset(struct scsi_cmnd *sc)
iscsi_suspend_tx(conn);
- spin_lock(&session->lock);
+ spin_lock_bh(&session->lock);
fail_all_commands(conn, sc->device->lun, DID_ERROR);
conn->tmf_state = TMF_INITIAL;
- spin_unlock(&session->lock);
+ spin_unlock_bh(&session->lock);
iscsi_start_tx(conn);
goto done;
@@ -1878,6 +1933,7 @@ struct Scsi_Host *iscsi_host_alloc(struct scsi_host_template *sht,
int dd_data_size, uint16_t qdepth)
{
struct Scsi_Host *shost;
+ struct iscsi_host *ihost;
shost = scsi_host_alloc(sht, sizeof(struct iscsi_host) + dd_data_size);
if (!shost)
@@ -1892,22 +1948,43 @@ struct Scsi_Host *iscsi_host_alloc(struct scsi_host_template *sht,
qdepth = ISCSI_DEF_CMD_PER_LUN;
}
shost->cmd_per_lun = qdepth;
+
+ ihost = shost_priv(shost);
+ spin_lock_init(&ihost->lock);
+ ihost->state = ISCSI_HOST_SETUP;
+ ihost->num_sessions = 0;
+ init_waitqueue_head(&ihost->session_removal_wq);
return shost;
}
EXPORT_SYMBOL_GPL(iscsi_host_alloc);
+static void iscsi_notify_host_removed(struct iscsi_cls_session *cls_session)
+{
+ iscsi_session_failure(cls_session, ISCSI_ERR_INVALID_HOST);
+}
+
/**
* iscsi_host_remove - remove host and sessions
* @shost: scsi host
*
- * This will also remove any sessions attached to the host, but if userspace
- * is managing the session at the same time this will break. TODO: add
- * refcounting to the netlink iscsi interface so a rmmod or host hot unplug
- * does not remove the memory from under us.
+ * If there are any sessions left, this will initiate the removal and wait
+ * for the completion.
*/
void iscsi_host_remove(struct Scsi_Host *shost)
{
- iscsi_host_for_each_session(shost, iscsi_session_teardown);
+ struct iscsi_host *ihost = shost_priv(shost);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ihost->lock, flags);
+ ihost->state = ISCSI_HOST_REMOVED;
+ spin_unlock_irqrestore(&ihost->lock, flags);
+
+ iscsi_host_for_each_session(shost, iscsi_notify_host_removed);
+ wait_event_interruptible(ihost->session_removal_wq,
+ ihost->num_sessions == 0);
+ if (signal_pending(current))
+ flush_signals(current);
+
scsi_remove_host(shost);
}
EXPORT_SYMBOL_GPL(iscsi_host_remove);
@@ -1923,6 +2000,27 @@ void iscsi_host_free(struct Scsi_Host *shost)
}
EXPORT_SYMBOL_GPL(iscsi_host_free);
+static void iscsi_host_dec_session_cnt(struct Scsi_Host *shost)
+{
+ struct iscsi_host *ihost = shost_priv(shost);
+ unsigned long flags;
+
+ shost = scsi_host_get(shost);
+ if (!shost) {
+ printk(KERN_ERR "Invalid state. Cannot notify host removal "
+ "of session teardown event because host already "
+ "removed.\n");
+ return;
+ }
+
+ spin_lock_irqsave(&ihost->lock, flags);
+ ihost->num_sessions--;
+ if (ihost->num_sessions == 0)
+ wake_up(&ihost->session_removal_wq);
+ spin_unlock_irqrestore(&ihost->lock, flags);
+ scsi_host_put(shost);
+}
+
/**
* iscsi_session_setup - create iscsi cls session and host and session
* @iscsit: iscsi transport template
@@ -1943,9 +2041,19 @@ iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost,
uint16_t cmds_max, int cmd_task_size,
uint32_t initial_cmdsn, unsigned int id)
{
+ struct iscsi_host *ihost = shost_priv(shost);
struct iscsi_session *session;
struct iscsi_cls_session *cls_session;
int cmd_i, scsi_cmds, total_cmds = cmds_max;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ihost->lock, flags);
+ if (ihost->state == ISCSI_HOST_REMOVED) {
+ spin_unlock_irqrestore(&ihost->lock, flags);
+ return NULL;
+ }
+ ihost->num_sessions++;
+ spin_unlock_irqrestore(&ihost->lock, flags);
if (!total_cmds)
total_cmds = ISCSI_DEF_XMIT_CMDS_MAX;
@@ -1958,7 +2066,7 @@ iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost,
printk(KERN_ERR "iscsi: invalid can_queue of %d. can_queue "
"must be a power of two that is at least %d.\n",
total_cmds, ISCSI_TOTAL_CMDS_MIN);
- return NULL;
+ goto dec_session_count;
}
if (total_cmds > ISCSI_TOTAL_CMDS_MAX) {
@@ -1982,7 +2090,7 @@ iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost,
cls_session = iscsi_alloc_session(shost, iscsit,
sizeof(struct iscsi_session));
if (!cls_session)
- return NULL;
+ goto dec_session_count;
session = cls_session->dd_data;
session->cls_session = cls_session;
session->host = shost;
@@ -2021,6 +2129,7 @@ iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost,
if (iscsi_add_session(cls_session, id))
goto cls_session_fail;
+
return cls_session;
cls_session_fail:
@@ -2029,6 +2138,8 @@ module_get_fail:
iscsi_pool_free(&session->cmdpool);
cmdpool_alloc_fail:
iscsi_free_session(cls_session);
+dec_session_count:
+ iscsi_host_dec_session_cnt(shost);
return NULL;
}
EXPORT_SYMBOL_GPL(iscsi_session_setup);
@@ -2044,6 +2155,7 @@ void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
{
struct iscsi_session *session = cls_session->dd_data;
struct module *owner = cls_session->transport->owner;
+ struct Scsi_Host *shost = session->host;
iscsi_pool_free(&session->cmdpool);
@@ -2056,6 +2168,7 @@ void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
kfree(session->ifacename);
iscsi_destroy_session(cls_session);
+ iscsi_host_dec_session_cnt(shost);
module_put(owner);
}
EXPORT_SYMBOL_GPL(iscsi_session_teardown);
@@ -2335,8 +2448,10 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
* flush queues.
*/
spin_lock_bh(&session->lock);
- fail_all_commands(conn, -1,
- STOP_CONN_RECOVER ? DID_BUS_BUSY : DID_ERROR);
+ if (flag == STOP_CONN_RECOVER)
+ fail_all_commands(conn, -1, DID_TRANSPORT_DISRUPTED);
+ else
+ fail_all_commands(conn, -1, DID_ERROR);
flush_control_queues(session, conn);
spin_unlock_bh(&session->lock);
mutex_unlock(&session->eh_mutex);
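
With the libiscsi changes above, the iscsi host now tracks its sessions: iscsi_session_setup() refuses new sessions once the host is marked removed, iscsi_session_teardown() drops the count, and iscsi_host_remove() notifies the remaining sessions via iscsi_session_failure() and waits for the count to reach zero. A minimal sketch of the teardown order an LLD follows under this scheme, patterned on the iscsi_tcp_session_destroy() hunk earlier in this diff; the function name is illustrative:

#include <scsi/libiscsi.h>

/* Sketch only; mirrors the ordering used by iscsi_tcp above. */
static void example_session_destroy(struct iscsi_cls_session *cls_session)
{
	struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);

	/* free any per-session LLD resources first (e.g. the R2T pool) ... */
	iscsi_session_teardown(cls_session);	/* drops the host session count */
	iscsi_host_remove(shost);		/* waits for num_sessions == 0 */
	iscsi_host_free(shost);
}
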
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index e0e018d1265..60a9e6e9384 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -34,7 +34,14 @@ struct lpfc_sli2_slim;
#define LPFC_IOCB_LIST_CNT 2250 /* list of IOCBs for fast-path usage. */
#define LPFC_Q_RAMP_UP_INTERVAL 120 /* lun q_depth ramp up interval */
#define LPFC_VNAME_LEN 100 /* vport symbolic name length */
-
+#define LPFC_TGTQ_INTERVAL 40000 /* Min amount of time between tgt
+ queue depth change in millisecs */
+#define LPFC_TGTQ_RAMPUP_PCENT 5 /* Target queue rampup in percentage */
+#define LPFC_MIN_TGT_QDEPTH 100
+#define LPFC_MAX_TGT_QDEPTH 0xFFFF
+
+#define LPFC_MAX_BUCKET_COUNT 20 /* Maximum no. of buckets for stat data
+ collection. */
/*
* Following time intervals are used of adjusting SCSI device
* queue depths when there are driver resource error or Firmware
@@ -49,6 +56,9 @@ struct lpfc_sli2_slim;
#define LPFC_HB_MBOX_INTERVAL 5 /* Heart beat interval in seconds. */
#define LPFC_HB_MBOX_TIMEOUT 30 /* Heart beat timeout in seconds. */
+/* Error Attention event polling interval */
+#define LPFC_ERATT_POLL_INTERVAL 5 /* EATT poll interval in seconds */
+
/* Define macros for 64 bit support */
#define putPaddrLow(addr) ((uint32_t) (0xffffffff & (u64)(addr)))
#define putPaddrHigh(addr) ((uint32_t) (0xffffffff & (((u64)(addr))>>32)))
@@ -60,6 +70,9 @@ struct lpfc_sli2_slim;
#define MAX_HBAEVT 32
+/* Number of MSI-X vectors the driver uses */
+#define LPFC_MSIX_VECTORS 2
+
/* lpfc wait event data ready flag */
#define LPFC_DATA_READY (1<<0)
@@ -357,6 +370,7 @@ struct lpfc_vport {
uint32_t cfg_log_verbose;
uint32_t cfg_max_luns;
uint32_t cfg_enable_da_id;
+ uint32_t cfg_max_scsicmpl_time;
uint32_t dev_loss_tmo_changed;
@@ -369,6 +383,8 @@ struct lpfc_vport {
struct lpfc_debugfs_trc *disc_trc;
atomic_t disc_trc_cnt;
#endif
+ uint8_t stat_data_enabled;
+ uint8_t stat_data_blocked;
};
struct hbq_s {
@@ -407,10 +423,11 @@ struct lpfc_hba {
struct lpfc_sli sli;
uint32_t sli_rev; /* SLI2 or SLI3 */
uint32_t sli3_options; /* Mask of enabled SLI3 options */
-#define LPFC_SLI3_ENABLED 0x01
-#define LPFC_SLI3_HBQ_ENABLED 0x02
-#define LPFC_SLI3_NPIV_ENABLED 0x04
-#define LPFC_SLI3_VPORT_TEARDOWN 0x08
+#define LPFC_SLI3_HBQ_ENABLED 0x01
+#define LPFC_SLI3_NPIV_ENABLED 0x02
+#define LPFC_SLI3_VPORT_TEARDOWN 0x04
+#define LPFC_SLI3_CRP_ENABLED 0x08
+#define LPFC_SLI3_INB_ENABLED 0x10
uint32_t iocb_cmd_size;
uint32_t iocb_rsp_size;
@@ -422,10 +439,20 @@ struct lpfc_hba {
#define LS_NPIV_FAB_SUPPORTED 0x2 /* Fabric supports NPIV */
#define LS_IGNORE_ERATT 0x4 /* intr handler should ignore ERATT */
- struct lpfc_sli2_slim *slim2p;
- struct lpfc_dmabuf hbqslimp;
+ uint32_t hba_flag; /* hba generic flags */
+#define HBA_ERATT_HANDLED 0x1 /* This flag is set when eratt handled */
+
+ struct lpfc_dmabuf slim2p;
- dma_addr_t slim2p_mapping;
+ MAILBOX_t *mbox;
+ uint32_t *inb_ha_copy;
+ uint32_t *inb_counter;
+ uint32_t inb_last_counter;
+ uint32_t ha_copy;
+ struct _PCB *pcb;
+ struct _IOCB *IOCBs;
+
+ struct lpfc_dmabuf hbqslimp;
uint16_t pci_cfg_value;
@@ -492,7 +519,7 @@ struct lpfc_hba {
wait_queue_head_t work_waitq;
struct task_struct *worker_thread;
- long data_flags;
+ unsigned long data_flags;
uint32_t hbq_in_use; /* HBQs in use flag */
struct list_head hbqbuf_in_list; /* in-fly hbq buffer list */
@@ -514,6 +541,7 @@ struct lpfc_hba {
void __iomem *HCregaddr; /* virtual address for host ctl reg */
struct lpfc_hgp __iomem *host_gp; /* Host side get/put pointers */
+ struct lpfc_pgp *port_gp;
uint32_t __iomem *hbq_put; /* Address in SLIM to HBQ put ptrs */
uint32_t *hbq_get; /* Host mem address of HBQ get ptrs */
@@ -536,6 +564,7 @@ struct lpfc_hba {
uint8_t soft_wwn_enable;
struct timer_list fcp_poll_timer;
+ struct timer_list eratt_poll;
/*
* stat counters
@@ -565,7 +594,7 @@ struct lpfc_hba {
struct fc_host_statistics link_stats;
enum intr_type_t intr_type;
- struct msix_entry msix_entries[1];
+ struct msix_entry msix_entries[LPFC_MSIX_VECTORS];
struct list_head port_list;
struct lpfc_vport *pport; /* physical lpfc_vport pointer */
@@ -605,6 +634,7 @@ struct lpfc_hba {
unsigned long last_completion_time;
struct timer_list hb_tmofunc;
uint8_t hb_outstanding;
+ enum hba_temp_state over_temp_state;
/* ndlp reference management */
spinlock_t ndlp_lock;
/*
@@ -613,7 +643,19 @@ struct lpfc_hba {
*/
#define QUE_BUFTAG_BIT (1<<31)
uint32_t buffer_tag_count;
- enum hba_temp_state over_temp_state;
+ int wait_4_mlo_maint_flg;
+ wait_queue_head_t wait_4_mlo_m_q;
+ /* data structure used for latency data collection */
+#define LPFC_NO_BUCKET 0
+#define LPFC_LINEAR_BUCKET 1
+#define LPFC_POWER2_BUCKET 2
+ uint8_t bucket_type;
+ uint32_t bucket_base;
+ uint32_t bucket_step;
+
+/* Maximum number of events that can be outstanding at any time*/
+#define LPFC_MAX_EVT_COUNT 512
+ atomic_t fast_event_count;
};
static inline struct Scsi_Host *
@@ -650,15 +692,25 @@ lpfc_worker_wake_up(struct lpfc_hba *phba)
return;
}
-#define FC_REG_DUMP_EVENT 0x10 /* Register for Dump events */
-#define FC_REG_TEMPERATURE_EVENT 0x20 /* Register for temperature
- event */
+static inline void
+lpfc_sli_read_hs(struct lpfc_hba *phba)
+{
+ /*
+ * There was a link/board error. Read the status register to retrieve
+ * the error event and process it.
+ */
+ phba->sli.slistat.err_attn_event++;
+
+ /* Save status info */
+ phba->work_hs = readl(phba->HSregaddr);
+ phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
+ phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
+
+ /* Clear chip Host Attention error bit */
+ writel(HA_ERATT, phba->HAregaddr);
+ readl(phba->HAregaddr); /* flush */
+ phba->pport->stopped = 1;
+
+ return;
+}
-struct temp_event {
- uint32_t event_type;
- uint32_t event_code;
- uint32_t data;
-};
-#define LPFC_CRIT_TEMP 0x1
-#define LPFC_THRESHOLD_TEMP 0x2
-#define LPFC_NORMAL_TEMP 0x3
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 37bfa0bd1da..aa3d6277581 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -32,6 +32,7 @@
#include "lpfc_hw.h"
#include "lpfc_sli.h"
+#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
@@ -49,6 +50,21 @@
#define LPFC_LINK_SPEED_BITMAP 0x00000117
#define LPFC_LINK_SPEED_STRING "0, 1, 2, 4, 8"
+/**
+ * lpfc_jedec_to_ascii: Hex to ascii convertor according to JEDEC rules.
+ * @incr: integer to convert.
+ * @hdw: ascii string holding converted integer plus a string terminator.
+ *
+ * Description:
+ * JEDEC Joint Electron Device Engineering Council.
+ * Convert a 32 bit integer composed of 8 nibbles into an 8 byte ascii
+ * character string. The string is then terminated with a NULL in byte 9.
+ * Hex 0-9 becomes ascii '0' to '9'.
+ * Hex a-f becomes ascii '=' to 'B' capital B.
+ *
+ * Notes:
+ * Coded for 32 bit integers only.
+ **/
static void
lpfc_jedec_to_ascii(int incr, char hdw[])
{
@@ -65,6 +81,14 @@ lpfc_jedec_to_ascii(int incr, char hdw[])
return;
}
+/**
+ * lpfc_drvr_version_show: Return the Emulex driver string with version number.
+ * @dev: class unused variable.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the module description text.
+ *
+ * Returns: size of formatted string.
+ **/
static ssize_t
lpfc_drvr_version_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -72,6 +96,14 @@ lpfc_drvr_version_show(struct device *dev, struct device_attribute *attr,
return snprintf(buf, PAGE_SIZE, LPFC_MODULE_DESC "\n");
}
+/**
+ * lpfc_info_show: Return some pci info about the host in ascii.
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the formatted text from lpfc_info().
+ *
+ * Returns: size of formatted string.
+ **/
static ssize_t
lpfc_info_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -81,6 +113,14 @@ lpfc_info_show(struct device *dev, struct device_attribute *attr,
return snprintf(buf, PAGE_SIZE, "%s\n",lpfc_info(host));
}
+/**
+ * lpfc_serialnum_show: Return the hba serial number in ascii.
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the formatted text serial number.
+ *
+ * Returns: size of formatted string.
+ **/
static ssize_t
lpfc_serialnum_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -92,6 +132,18 @@ lpfc_serialnum_show(struct device *dev, struct device_attribute *attr,
return snprintf(buf, PAGE_SIZE, "%s\n",phba->SerialNumber);
}
+/**
+ * lpfc_temp_sensor_show: Return the temperature sensor level.
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the formatted support level.
+ *
+ * Description:
+ * Returns a number indicating the temperature sensor level currently
+ * supported, zero or one in ascii.
+ *
+ * Returns: size of formatted string.
+ **/
static ssize_t
lpfc_temp_sensor_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -102,6 +154,14 @@ lpfc_temp_sensor_show(struct device *dev, struct device_attribute *attr,
return snprintf(buf, PAGE_SIZE, "%d\n",phba->temp_sensor_support);
}
+/**
+ * lpfc_modeldesc_show: Return the model description of the hba.
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the scsi vpd model description.
+ *
+ * Returns: size of formatted string.
+ **/
static ssize_t
lpfc_modeldesc_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -113,6 +173,14 @@ lpfc_modeldesc_show(struct device *dev, struct device_attribute *attr,
return snprintf(buf, PAGE_SIZE, "%s\n",phba->ModelDesc);
}
+/**
+ * lpfc_modelname_show: Return the model name of the hba.
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the scsi vpd model name.
+ *
+ * Returns: size of formatted string.
+ **/
static ssize_t
lpfc_modelname_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -124,6 +192,14 @@ lpfc_modelname_show(struct device *dev, struct device_attribute *attr,
return snprintf(buf, PAGE_SIZE, "%s\n",phba->ModelName);
}
+/**
+ * lpfc_programtype_show: Return the program type of the hba.
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the scsi vpd program type.
+ *
+ * Returns: size of formatted string.
+ **/
static ssize_t
lpfc_programtype_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -135,6 +211,33 @@ lpfc_programtype_show(struct device *dev, struct device_attribute *attr,
return snprintf(buf, PAGE_SIZE, "%s\n",phba->ProgramType);
}
+/**
+ * lpfc_mlomgmt_show: Return the Menlo Maintenance sli flag.
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the Menlo Maintenance sli flag.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_mlomgmt_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ (phba->sli.sli_flag & LPFC_MENLO_MAINT));
+}
+
+/**
+ * lpfc_vportnum_show: Return the port number in ascii of the hba.
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains scsi vpd program type.
+ *
+ * Returns: size of formatted string.
+ **/
static ssize_t
lpfc_vportnum_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -146,6 +249,14 @@ lpfc_vportnum_show(struct device *dev, struct device_attribute *attr,
return snprintf(buf, PAGE_SIZE, "%s\n",phba->Port);
}
+/**
+ * lpfc_fwrev_show: Return the firmware rev running in the hba.
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the scsi vpd program type.
+ *
+ * Returns: size of formatted string.
+ **/
static ssize_t
lpfc_fwrev_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -159,6 +270,14 @@ lpfc_fwrev_show(struct device *dev, struct device_attribute *attr,
return snprintf(buf, PAGE_SIZE, "%s, sli-%d\n", fwrev, phba->sli_rev);
}
+/**
+ * lpfc_hdw_show: Return the jedec information about the hba.
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the scsi vpd program type.
+ *
+ * Returns: size of formatted string.
+ **/
static ssize_t
lpfc_hdw_show(struct device *dev, struct device_attribute *attr, char *buf)
{
@@ -171,6 +290,15 @@ lpfc_hdw_show(struct device *dev, struct device_attribute *attr, char *buf)
lpfc_jedec_to_ascii(vp->rev.biuRev, hdw);
return snprintf(buf, PAGE_SIZE, "%s\n", hdw);
}
+
+/**
+ * lpfc_option_rom_version_show: Return the adapter ROM FCode version.
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the ROM and FCode ascii strings.
+ *
+ * Returns: size of formatted string.
+ **/
static ssize_t
lpfc_option_rom_version_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -181,6 +309,18 @@ lpfc_option_rom_version_show(struct device *dev, struct device_attribute *attr,
return snprintf(buf, PAGE_SIZE, "%s\n", phba->OptionROMVersion);
}
+
+/**
+ * lpfc_state_show: Return the link state of the port.
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains text describing the state of the link.
+ *
+ * Notes:
+ * The switch statement has no default so zero will be returned.
+ *
+ * Returns: size of formatted string.
+ **/
static ssize_t
lpfc_link_state_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -232,8 +372,10 @@ lpfc_link_state_show(struct device *dev, struct device_attribute *attr,
"Unknown\n");
break;
}
-
- if (phba->fc_topology == TOPOLOGY_LOOP) {
+ if (phba->sli.sli_flag & LPFC_MENLO_MAINT)
+ len += snprintf(buf + len, PAGE_SIZE-len,
+ " Menlo Maint Mode\n");
+ else if (phba->fc_topology == TOPOLOGY_LOOP) {
if (vport->fc_flag & FC_PUBLIC_LOOP)
len += snprintf(buf + len, PAGE_SIZE-len,
" Public Loop\n");
@@ -253,6 +395,18 @@ lpfc_link_state_show(struct device *dev, struct device_attribute *attr,
return len;
}
+/**
+ * lpfc_num_discovered_ports_show: Return sum of mapped and unmapped vports.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the sum of fc mapped and unmapped.
+ *
+ * Description:
+ * Returns the ascii text number of the sum of the fc mapped and unmapped
+ * vport counts.
+ *
+ * Returns: size of formatted string.
+ **/
static ssize_t
lpfc_num_discovered_ports_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -264,7 +418,20 @@ lpfc_num_discovered_ports_show(struct device *dev,
vport->fc_map_cnt + vport->fc_unmap_cnt);
}
-
+/**
+ * lpfc_issue_lip: Misnomer, name carried over from long ago.
+ * @shost: Scsi_Host pointer.
+ *
+ * Description:
+ * Bring the link down gracefully then re-init the link. The firmware will
+ * re-init the fiber channel interface as required. Does not issue a LIP.
+ *
+ * Returns:
+ * -EPERM port offline or management commands are being blocked
+ * -ENOMEM cannot allocate memory for the mailbox command
+ * -EIO error sending the mailbox command
+ * zero for success
+ **/
static int
lpfc_issue_lip(struct Scsi_Host *shost)
{
@@ -306,6 +473,21 @@ lpfc_issue_lip(struct Scsi_Host *shost)
return 0;
}
+/**
+ * lpfc_do_offline: Issues a mailbox command to bring the link down.
+ * @phba: lpfc_hba pointer.
+ * @type: LPFC_EVT_OFFLINE, LPFC_EVT_WARM_START, LPFC_EVT_KILL.
+ *
+ * Notes:
+ * Assumes any error from lpfc_do_offline() will be negative.
+ * Can wait up to 5 seconds for the port ring buffers count
+ * to reach zero, prints a warning if it is not zero and continues.
+ * lpfc_workq_post_event() returns a non-zero return code if the call fails.
+ *
+ * Returns:
+ * -EIO error posting the event
+ * zero for success
+ **/
static int
lpfc_do_offline(struct lpfc_hba *phba, uint32_t type)
{
@@ -353,6 +535,22 @@ lpfc_do_offline(struct lpfc_hba *phba, uint32_t type)
return 0;
}
+/**
+ * lpfc_selective_reset: Offline then onlines the port.
+ * @phba: lpfc_hba pointer.
+ *
+ * Description:
+ * If the port is configured to allow a reset then the hba is brought
+ * offline then online.
+ *
+ * Notes:
+ * Assumes any error from lpfc_do_offline() will be negative.
+ *
+ * Returns:
+ * lpfc_do_offline() return code if not zero
+ * -EIO reset not configured or error posting the event
+ * zero for success
+ **/
static int
lpfc_selective_reset(struct lpfc_hba *phba)
{
@@ -378,6 +576,27 @@ lpfc_selective_reset(struct lpfc_hba *phba)
return 0;
}
+/**
+ * lpfc_issue_reset: Selectively resets an adapter.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: containing the string "selective".
+ * @count: unused variable.
+ *
+ * Description:
+ * If the buf contains the string "selective" then lpfc_selective_reset()
+ * is called to perform the reset.
+ *
+ * Notes:
+ * Assumes any error from lpfc_selective_reset() will be negative.
+ * If lpfc_selective_reset() returns zero then the length of the buffer
+ * is returned, which indicates success.
+ *
+ * Returns:
+ * -EINVAL if the buffer does not contain the string "selective"
+ * length of buf if lpfc_selective_reset() succeeds
+ * return value of lpfc_selective_reset() if the call fails
+**/
static ssize_t
lpfc_issue_reset(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
@@ -397,6 +616,14 @@ lpfc_issue_reset(struct device *dev, struct device_attribute *attr,
return status;
}
+/**
+ * lpfc_nport_evt_cnt_show: Return the number of nport events.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the ascii number of nport events.
+ *
+ * Returns: size of formatted string.
+ **/
static ssize_t
lpfc_nport_evt_cnt_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -408,6 +635,14 @@ lpfc_nport_evt_cnt_show(struct device *dev, struct device_attribute *attr,
return snprintf(buf, PAGE_SIZE, "%d\n", phba->nport_event_cnt);
}
+/**
+ * lpfc_board_mode_show: Return the state of the board.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the state of the adapter.
+ *
+ * Returns: size of formatted string.
+ **/
static ssize_t
lpfc_board_mode_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -429,6 +664,19 @@ lpfc_board_mode_show(struct device *dev, struct device_attribute *attr,
return snprintf(buf, PAGE_SIZE, "%s\n", state);
}
+/**
+ * lpfc_board_mode_store: Puts the hba in online, offline, warm or error state.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: containing one of the strings "online", "offline", "warm" or "error".
+ * @count: unused variable.
+ *
+ * Returns:
+ * -EACCES if enable hba reset not enabled
+ * -EINVAL if the buffer does not contain a valid string (see above)
+ * -EIO if lpfc_workq_post_event() or lpfc_do_offline() fails
+ * buf length greater than zero indicates success
+ **/
static ssize_t
lpfc_board_mode_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
@@ -462,6 +710,24 @@ lpfc_board_mode_store(struct device *dev, struct device_attribute *attr,
return -EIO;
}
+/**
+ * lpfc_get_hba_info: Return various bits of information about the adapter.
+ * @phba: pointer to the adapter structure.
+ * @mxri: max xri count.
+ * @axri: available xri count.
+ * @mrpi: max rpi count.
+ * @arpi: available rpi count.
+ * @mvpi: max vpi count.
+ * @avpi: available vpi count.
+ *
+ * Description:
+ * If an integer pointer for a count is not null then the value for the
+ * count is returned.
+ *
+ * Returns:
+ * zero on error
+ * one for success
+ **/
static int
lpfc_get_hba_info(struct lpfc_hba *phba,
uint32_t *mxri, uint32_t *axri,
@@ -524,6 +790,20 @@ lpfc_get_hba_info(struct lpfc_hba *phba,
return 1;
}
+/**
+ * lpfc_max_rpi_show: Return maximum rpi.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the maximum rpi count in decimal or "Unknown".
+ *
+ * Description:
+ * Calls lpfc_get_hba_info() asking for just the mrpi count.
+ * If lpfc_get_hba_info() returns zero (failure) the buffer text is set
+ * to "Unknown" and the buffer length is returned, therefore the caller
+ * must check for "Unknown" in the buffer to detect a failure.
+ *
+ * Returns: size of formatted string.
+ **/
static ssize_t
lpfc_max_rpi_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -538,6 +818,20 @@ lpfc_max_rpi_show(struct device *dev, struct device_attribute *attr,
return snprintf(buf, PAGE_SIZE, "Unknown\n");
}
+/**
+ * lpfc_used_rpi_show: Return maximum rpi minus available rpi.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: containing the used rpi count in decimal or "Unknown".
+ *
+ * Description:
+ * Calls lpfc_get_hba_info() asking for just the mrpi and arpi counts.
+ * If lpfc_get_hba_info() returns zero (failure) the buffer text is set
+ * to "Unknown" and the buffer length is returned, therefore the caller
+ * must check for "Unknown" in the buffer to detect a failure.
+ *
+ * Returns: size of formatted string.
+ **/
static ssize_t
lpfc_used_rpi_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -552,6 +846,20 @@ lpfc_used_rpi_show(struct device *dev, struct device_attribute *attr,
return snprintf(buf, PAGE_SIZE, "Unknown\n");
}
+/**
+ * lpfc_max_xri_show: Return maximum xri.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the maximum xri count in decimal or "Unknown".
+ *
+ * Description:
+ * Calls lpfc_get_hba_info() asking for just the mrpi count.
+ * If lpfc_get_hba_info() returns zero (failure) the buffer text is set
+ * to "Unknown" and the buffer length is returned, therefore the caller
+ * must check for "Unknown" in the buffer to detect a failure.
+ *
+ * Returns: size of formatted string.
+ **/
static ssize_t
lpfc_max_xri_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -566,6 +874,20 @@ lpfc_max_xri_show(struct device *dev, struct device_attribute *attr,
return snprintf(buf, PAGE_SIZE, "Unknown\n");
}
+/**
+ * lpfc_used_xri_show: Return maximum xpi minus the available xpi.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the used xri count in decimal or "Unknown".
+ *
+ * Description:
+ * Calls lpfc_get_hba_info() asking for just the mxri and axri counts.
+ * If lpfc_get_hba_info() returns zero (failure) the buffer text is set
+ * to "Unknown" and the buffer length is returned, therefore the caller
+ * must check for "Unknown" in the buffer to detect a failure.
+ *
+ * Returns: size of formatted string.
+ **/
static ssize_t
lpfc_used_xri_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -580,6 +902,20 @@ lpfc_used_xri_show(struct device *dev, struct device_attribute *attr,
return snprintf(buf, PAGE_SIZE, "Unknown\n");
}
+/**
+ * lpfc_max_vpi_show: Return maximum vpi.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the maximum vpi count in decimal or "Unknown".
+ *
+ * Description:
+ * Calls lpfc_get_hba_info() asking for just the mvpi count.
+ * If lpfc_get_hba_info() returns zero (failure) the buffer text is set
+ * to "Unknown" and the buffer length is returned, therefore the caller
+ * must check for "Unknown" in the buffer to detect a failure.
+ *
+ * Returns: size of formatted string.
+ **/
static ssize_t
lpfc_max_vpi_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -594,6 +930,20 @@ lpfc_max_vpi_show(struct device *dev, struct device_attribute *attr,
return snprintf(buf, PAGE_SIZE, "Unknown\n");
}
+/**
+ * lpfc_used_vpi_show: Return maximum vpi minus the available vpi.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the used vpi count in decimal or "Unknown".
+ *
+ * Description:
+ * Calls lpfc_get_hba_info() asking for just the mvpi and avpi counts.
+ * If lpfc_get_hba_info() returns zero (failure) the buffer text is set
+ * to "Unknown" and the buffer length is returned, therefore the caller
+ * must check for "Unknown" in the buffer to detect a failure.
+ *
+ * Returns: size of formatted string.
+ **/
static ssize_t
lpfc_used_vpi_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -608,6 +958,19 @@ lpfc_used_vpi_show(struct device *dev, struct device_attribute *attr,
return snprintf(buf, PAGE_SIZE, "Unknown\n");
}
+/**
+ * lpfc_npiv_info_show: Return text about NPIV support for the adapter.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: text that must be interpreted to determine if npiv is supported.
+ *
+ * Description:
+ * Buffer will contain text indicating npiv is not supported on the port,
+ * the port is an NPIV physical port, or it is an npiv virtual port with
+ * the id of the vport.
+ *
+ * Returns: size of formatted string.
+ **/
static ssize_t
lpfc_npiv_info_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -623,6 +986,17 @@ lpfc_npiv_info_show(struct device *dev, struct device_attribute *attr,
return snprintf(buf, PAGE_SIZE, "NPIV Virtual (VPI %d)\n", vport->vpi);
}
+/**
+ * lpfc_poll_show: Return text about poll support for the adapter.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the cfg_poll in hex.
+ *
+ * Notes:
+ * cfg_poll should be a lpfc_polling_flags type.
+ *
+ * Returns: size of formatted string.
+ **/
static ssize_t
lpfc_poll_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -634,6 +1008,20 @@ lpfc_poll_show(struct device *dev, struct device_attribute *attr,
return snprintf(buf, PAGE_SIZE, "%#x\n", phba->cfg_poll);
}
+/**
+ * lpfc_poll_store: Set the value of cfg_poll for the adapter.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: one or more lpfc_polling_flags values.
+ * @count: not used.
+ *
+ * Notes:
+ * buf contents converted to integer and checked for a valid value.
+ *
+ * Returns:
+ * -EINVAL if the buffer cannot be converted or is out of range
+ * length of the buf on success
+ **/
static ssize_t
lpfc_poll_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
@@ -692,6 +1080,20 @@ lpfc_poll_store(struct device *dev, struct device_attribute *attr,
return strlen(buf);
}
+/**
+ * lpfc_param_show: Return a cfg attribute value in decimal.
+ *
+ * Description:
+ * Macro that given an attr e.g. hba_queue_depth expands
+ * into a function with the name lpfc_hba_queue_depth_show.
+ *
+ * lpfc_##attr##_show: Return the decimal value of an adapters cfg_xxx field.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the attribute value in decimal.
+ *
+ * Returns: size of formatted string.
+ **/
#define lpfc_param_show(attr) \
static ssize_t \
lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
@@ -706,6 +1108,20 @@ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
phba->cfg_##attr);\
}
+/**
+ * lpfc_param_hex_show: Return a cfg attribute value in hex.
+ *
+ * Description:
+ * Macro that given an attr e.g. hba_queue_depth expands
+ * into a function with the name lpfc_hba_queue_depth_show
+ *
+ * lpfc_##attr##_show: Return the hex value of an adapters cfg_xxx field.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the attribute value in hexadecimal.
+ *
+ * Returns: size of formatted string.
+ **/
#define lpfc_param_hex_show(attr) \
static ssize_t \
lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
@@ -720,6 +1136,25 @@ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
phba->cfg_##attr);\
}
+/**
+ * lpfc_param_init: Initializes a cfg attribute.
+ *
+ * Description:
+ * Macro that given an attr e.g. hba_queue_depth expands
+ * into a function with the name lpfc_hba_queue_depth_init. The macro also
+ * takes a default argument, a minimum and maximum argument.
+ *
+ * lpfc_##attr##_init: Initializes an attribute.
+ * @phba: pointer to the adapter structure.
+ * @val: integer attribute value.
+ *
+ * Validates the min and max values then sets the adapter config field
+ * accordingly, or uses the default if out of range and prints an error message.
+ *
+ * Returns:
+ * zero on success
+ * -EINVAL if default used
+ **/
#define lpfc_param_init(attr, default, minval, maxval) \
static int \
lpfc_##attr##_init(struct lpfc_hba *phba, int val) \
@@ -735,6 +1170,26 @@ lpfc_##attr##_init(struct lpfc_hba *phba, int val) \
return -EINVAL;\
}
+/**
+ * lpfc_param_set: Set a cfg attribute value.
+ *
+ * Description:
+ * Macro that given an attr e.g. hba_queue_depth expands
+ * into a function with the name lpfc_hba_queue_depth_set
+ *
+ * lpfc_##attr##_set: Sets an attribute value.
+ * @phba: pointer to the adapter structure.
+ * @val: integer attribute value.
+ *
+ * Description:
+ * Validates the min and max values then sets the
+ * adapter config field if in the valid range. Prints an error message
+ * and does not set the parameter if invalid.
+ *
+ * Returns:
+ * zero on success
+ * -EINVAL if val is invalid
+ **/
#define lpfc_param_set(attr, default, minval, maxval) \
static int \
lpfc_##attr##_set(struct lpfc_hba *phba, int val) \
@@ -749,6 +1204,27 @@ lpfc_##attr##_set(struct lpfc_hba *phba, int val) \
return -EINVAL;\
}
+/**
+ * lpfc_param_store: Set a vport attribute value.
+ *
+ * Description:
+ * Macro that given an attr e.g. hba_queue_depth expands
+ * into a function with the name lpfc_hba_queue_depth_store.
+ *
+ * lpfc_##attr##_store: Set an attribute value.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: contains the attribute value in ascii.
+ * @count: not used.
+ *
+ * Description:
+ * Convert the ascii text number to an integer, then
+ * use the lpfc_##attr##_set function to set the value.
+ *
+ * Returns:
+ * -EINVAL if val is invalid or lpfc_##attr##_set() fails
+ * length of buffer upon success.
+ **/
#define lpfc_param_store(attr) \
static ssize_t \
lpfc_##attr##_store(struct device *dev, struct device_attribute *attr, \
@@ -768,6 +1244,20 @@ lpfc_##attr##_store(struct device *dev, struct device_attribute *attr, \
return -EINVAL;\
}
+/**
+ * lpfc_vport_param_show: Return decimal formatted cfg attribute value.
+ *
+ * Description:
+ * Macro that given an attr e.g. hba_queue_depth expands
+ * into a function with the name lpfc_hba_queue_depth_show
+ *
+ * lpfc_##attr##_show: prints the attribute value in decimal.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the attribute value in decimal.
+ *
+ * Returns: length of formatted string.
+ **/
#define lpfc_vport_param_show(attr) \
static ssize_t \
lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
@@ -780,6 +1270,21 @@ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
return snprintf(buf, PAGE_SIZE, "%d\n", vport->cfg_##attr);\
}
+/**
+ * lpfc_vport_param_hex_show: Return hex formatted attribute value.
+ *
+ * Description:
+ * Macro that given an attr e.g.
+ * hba_queue_depth expands into a function with the name
+ * lpfc_hba_queue_depth_show
+ *
+ * lpfc_##attr##_show: prints the attribute value in hexadecimal.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the attribute value in hexadecimal.
+ *
+ * Returns: length of formatted string.
+ **/
#define lpfc_vport_param_hex_show(attr) \
static ssize_t \
lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
@@ -792,6 +1297,24 @@ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
return snprintf(buf, PAGE_SIZE, "%#x\n", vport->cfg_##attr);\
}
+/**
+ * lpfc_vport_param_init: Initialize a vport cfg attribute.
+ *
+ * Description:
+ * Macro that given an attr e.g. hba_queue_depth expands
+ * into a function with the name lpfc_hba_queue_depth_init. The macro also
+ * takes a default argument, a minimum and maximum argument.
+ *
+ * lpfc_##attr##_init: validates the min and max values then sets the
+ * adapter config field accordingly, or uses the default if out of range
+ * and prints an error message.
+ * @vport: pointer to the vport structure.
+ * @val: integer attribute value.
+ *
+ * Returns:
+ * zero on success
+ * -EINVAL if default used
+ **/
#define lpfc_vport_param_init(attr, default, minval, maxval) \
static int \
lpfc_##attr##_init(struct lpfc_vport *vport, int val) \
@@ -801,12 +1324,29 @@ lpfc_##attr##_init(struct lpfc_vport *vport, int val) \
return 0;\
}\
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \
- "0449 lpfc_"#attr" attribute cannot be set to %d, "\
+ "0423 lpfc_"#attr" attribute cannot be set to %d, "\
"allowed range is ["#minval", "#maxval"]\n", val); \
vport->cfg_##attr = default;\
return -EINVAL;\
}
+/**
+ * lpfc_vport_param_set: Set a vport cfg attribute.
+ *
+ * Description:
+ * Macro that given an attr e.g. hba_queue_depth expands
+ * into a function with the name lpfc_hba_queue_depth_set
+ *
+ * lpfc_##attr##_set: validates the min and max values then sets the
+ * adapter config field if in the valid range. Prints an error message
+ * and does not set the parameter if invalid.
+ * @vport: pointer to the vport structure.
+ * @val: integer attribute value.
+ *
+ * Returns:
+ * zero on success
+ * -EINVAL if val is invalid
+ **/
#define lpfc_vport_param_set(attr, default, minval, maxval) \
static int \
lpfc_##attr##_set(struct lpfc_vport *vport, int val) \
@@ -816,11 +1356,28 @@ lpfc_##attr##_set(struct lpfc_vport *vport, int val) \
return 0;\
}\
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \
- "0450 lpfc_"#attr" attribute cannot be set to %d, "\
+ "0424 lpfc_"#attr" attribute cannot be set to %d, "\
"allowed range is ["#minval", "#maxval"]\n", val); \
return -EINVAL;\
}
+/**
+ * lpfc_vport_param_store: Set a vport attribute.
+ *
+ * Description:
+ * Macro that given an attr e.g. hba_queue_depth
+ * expands into a function with the name lpfc_hba_queue_depth_store
+ *
+ * lpfc_##attr##_store: converts the ASCII text number to an integer, then
+ * uses the lpfc_##attr##_set function to set the value.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: contains the attribute value in decimal.
+ * @count: not used.
+ *
+ * Returns:
+ * -EINVAL if val is invalid or lpfc_##attr##_set() fails
+ * length of buffer upon success.
+ **/
#define lpfc_vport_param_store(attr) \
static ssize_t \
lpfc_##attr##_store(struct device *dev, struct device_attribute *attr, \
@@ -941,6 +1498,7 @@ static DEVICE_ATTR(option_rom_version, S_IRUGO,
lpfc_option_rom_version_show, NULL);
static DEVICE_ATTR(num_discovered_ports, S_IRUGO,
lpfc_num_discovered_ports_show, NULL);
+static DEVICE_ATTR(menlo_mgmt_mode, S_IRUGO, lpfc_mlomgmt_show, NULL);
static DEVICE_ATTR(nport_evt_cnt, S_IRUGO, lpfc_nport_evt_cnt_show, NULL);
static DEVICE_ATTR(lpfc_drvr_version, S_IRUGO, lpfc_drvr_version_show, NULL);
static DEVICE_ATTR(board_mode, S_IRUGO | S_IWUSR,
@@ -958,6 +1516,17 @@ static DEVICE_ATTR(lpfc_temp_sensor, S_IRUGO, lpfc_temp_sensor_show, NULL);
static char *lpfc_soft_wwn_key = "C99G71SL8032A";
+/**
+ * lpfc_soft_wwn_enable_store: Allows setting of the wwn if the key is valid.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: contains the string lpfc_soft_wwn_key.
+ * @count: must be size of lpfc_soft_wwn_key.
+ *
+ * Returns:
+ * -EINVAL if the buffer does not contain lpfc_soft_wwn_key
+ * length of buf upon success
+ **/
static ssize_t
lpfc_soft_wwn_enable_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
@@ -994,6 +1563,14 @@ lpfc_soft_wwn_enable_store(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR(lpfc_soft_wwn_enable, S_IWUSR, NULL,
lpfc_soft_wwn_enable_store);
+/**
+ * lpfc_soft_wwpn_show: Return the cfg soft ww port name of the adapter.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the wwpn in hexadecimal.
+ *
+ * Returns: size of formatted string.
+ **/
static ssize_t
lpfc_soft_wwpn_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -1006,7 +1583,19 @@ lpfc_soft_wwpn_show(struct device *dev, struct device_attribute *attr,
(unsigned long long)phba->cfg_soft_wwpn);
}
-
+/**
+ * lpfc_soft_wwpn_store: Set the ww port name of the adapter.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: contains the wwpn in hexadecimal.
+ * @count: number of wwpn bytes in buf
+ *
+ * Returns:
+ * -EACCES hba reset not enabled, adapter over temp
+ * -EINVAL soft wwn not enabled, count is invalid, or a wwpn byte is invalid
+ * -EIO error taking adapter offline or online
+ * value of count on success
+ **/
static ssize_t
lpfc_soft_wwpn_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
@@ -1080,6 +1669,14 @@ lpfc_soft_wwpn_store(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR(lpfc_soft_wwpn, S_IRUGO | S_IWUSR,\
lpfc_soft_wwpn_show, lpfc_soft_wwpn_store);
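
For illustration only (not part of this patch), a minimal user-space sketch of the two-step sequence implied by the two attributes above: the key checked by lpfc_soft_wwn_enable_store() has to be written first, then the new WWPN. The host number and the example WWPN value are assumptions:

/* Sketch: set a soft WWPN through sysfs (paths and values are illustrative). */
#include <stdio.h>

static int write_attr(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fputs(val, f);
	return fclose(f);
}

int main(void)
{
	/* Step 1: unlock soft WWN support with lpfc_soft_wwn_key. */
	if (write_attr("/sys/class/scsi_host/host0/lpfc_soft_wwn_enable",
		       "C99G71SL8032A"))
		return 1;
	/* Step 2: write the new WWPN as 16 hex digits; the store routine
	 * takes the port offline and back online to apply it. */
	return write_attr("/sys/class/scsi_host/host0/lpfc_soft_wwpn",
			  "10000000c9123456") ? 1 : 0;
}
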
+/**
+ * lpfc_soft_wwnn_show: Return the cfg soft ww node name for the adapter.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the wwnn in hexadecimal.
+ *
+ * Returns: size of formatted string.
+ **/
static ssize_t
lpfc_soft_wwnn_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -1090,7 +1687,16 @@ lpfc_soft_wwnn_show(struct device *dev, struct device_attribute *attr,
(unsigned long long)phba->cfg_soft_wwnn);
}
-
+/**
+ * lpfc_soft_wwnn_store: Sets the ww node name of the adapter.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: contains the ww node name in hexadecimal.
+ * @count: number of wwnn bytes in buf.
+ *
+ * Returns:
+ * -EINVAL soft wwn not enabled, count is invalid, or a wwnn byte is invalid
+ * value of count on success
+ **/
static ssize_t
lpfc_soft_wwnn_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
@@ -1178,6 +1784,15 @@ module_param(lpfc_nodev_tmo, int, 0);
MODULE_PARM_DESC(lpfc_nodev_tmo,
"Seconds driver will hold I/O waiting "
"for a device to come back");
+
+/**
+ * lpfc_nodev_tmo_show: Return the hba dev loss timeout value.
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the dev loss timeout in decimal.
+ *
+ * Returns: size of formatted string.
+ **/
static ssize_t
lpfc_nodev_tmo_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -1189,6 +1804,21 @@ lpfc_nodev_tmo_show(struct device *dev, struct device_attribute *attr,
return snprintf(buf, PAGE_SIZE, "%d\n", vport->cfg_devloss_tmo);
}
+/**
+ * lpfc_nodev_tmo_init: Set the hba nodev timeout value.
+ * @vport: lpfc vport structure pointer.
+ * @val: contains the nodev timeout value.
+ *
+ * Description:
+ * If the devloss tmo is already set then nodev tmo is set to devloss tmo,
+ * a kernel error message is printed and zero is returned.
+ * Else if val is in range then nodev tmo and devloss tmo are set to val.
+ * Otherwise nodev tmo is set to the default value.
+ *
+ * Returns:
+ * zero if already set or if val is in range
+ * -EINVAL val out of range
+ **/
static int
lpfc_nodev_tmo_init(struct lpfc_vport *vport, int val)
{
@@ -1196,7 +1826,7 @@ lpfc_nodev_tmo_init(struct lpfc_vport *vport, int val)
vport->cfg_nodev_tmo = vport->cfg_devloss_tmo;
if (val != LPFC_DEF_DEVLOSS_TMO)
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "0402 Ignoring nodev_tmo module "
+ "0407 Ignoring nodev_tmo module "
"parameter because devloss_tmo is "
"set.\n");
return 0;
@@ -1215,6 +1845,13 @@ lpfc_nodev_tmo_init(struct lpfc_vport *vport, int val)
return -EINVAL;
}
+/**
+ * lpfc_update_rport_devloss_tmo: Update dev loss tmo value.
+ * @vport: lpfc vport structure pointer.
+ *
+ * Description:
+ * Update all the ndlp's dev loss tmo with the vport devloss tmo value.
+ **/
static void
lpfc_update_rport_devloss_tmo(struct lpfc_vport *vport)
{
@@ -1229,6 +1866,21 @@ lpfc_update_rport_devloss_tmo(struct lpfc_vport *vport)
spin_unlock_irq(shost->host_lock);
}
+/**
+ * lpfc_nodev_tmo_set: Set the vport nodev tmo and devloss tmo values.
+ * @vport: lpfc vport structure pointer.
+ * @val: contains the tmo value.
+ *
+ * Description:
+ * If the devloss tmo is already set or the vport dev loss tmo has changed
+ * then a kernel error message is printed and zero is returned.
+ * Else if val is in range then nodev tmo and devloss tmo are set to val.
+ * Otherwise nodev tmo is set to the default value.
+ *
+ * Returns:
+ * zero if already set or if val is in range
+ * -EINVAL val out of range
+ **/
static int
lpfc_nodev_tmo_set(struct lpfc_vport *vport, int val)
{
@@ -1269,6 +1921,21 @@ MODULE_PARM_DESC(lpfc_devloss_tmo,
lpfc_vport_param_init(devloss_tmo, LPFC_DEF_DEVLOSS_TMO,
LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO)
lpfc_vport_param_show(devloss_tmo)
+
+/**
+ * lpfc_devloss_tmo_set: Sets vport nodev tmo, devloss tmo values, changed bit.
+ * @vport: lpfc vport structure pointer.
+ * @val: contains the tmo value.
+ *
+ * Description:
+ * If val is in a valid range then set the vport nodev tmo,
+ * devloss tmo, also set the vport dev loss tmo changed flag.
+ * Else a kernel error message is printed.
+ *
+ * Returns:
+ * zero if val is in range
+ * -EINVAL val out of range
+ **/
static int
lpfc_devloss_tmo_set(struct lpfc_vport *vport, int val)
{
@@ -1366,12 +2033,27 @@ MODULE_PARM_DESC(lpfc_restrict_login,
"Restrict virtual ports login to remote initiators.");
lpfc_vport_param_show(restrict_login);
+/**
+ * lpfc_restrict_login_init: Set the vport restrict login flag.
+ * @vport: lpfc vport structure pointer.
+ * @val: contains the restrict login value.
+ *
+ * Description:
+ * If val is not in a valid range then log a kernel error message and set
+ * the vport restrict login to one.
+ * If the port type is physical clear the restrict login flag and return.
+ * Else set the restrict login flag to val.
+ *
+ * Returns:
+ * zero if val is in range
+ * -EINVAL val out of range
+ **/
static int
lpfc_restrict_login_init(struct lpfc_vport *vport, int val)
{
if (val < 0 || val > 1) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "0449 lpfc_restrict_login attribute cannot "
+ "0422 lpfc_restrict_login attribute cannot "
"be set to %d, allowed range is [0, 1]\n",
val);
vport->cfg_restrict_login = 1;
@@ -1385,12 +2067,28 @@ lpfc_restrict_login_init(struct lpfc_vport *vport, int val)
return 0;
}
+/**
+ * lpfc_restrict_login_set: Set the vport restrict login flag.
+ * @vport: lpfc vport structure pointer.
+ * @val: contains the restrict login value.
+ *
+ * Description:
+ * If val is not in a valid range then log a kernel error message and set
+ * the vport restrict login to one.
+ * If the port type is physical and the val is not zero log a kernel
+ * error message, clear the restrict login flag and return zero.
+ * Else set the restrict login flag to val.
+ *
+ * Returns:
+ * zero if val is in range
+ * -EINVAL val out of range
+ **/
static int
lpfc_restrict_login_set(struct lpfc_vport *vport, int val)
{
if (val < 0 || val > 1) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "0450 lpfc_restrict_login attribute cannot "
+ "0425 lpfc_restrict_login attribute cannot "
"be set to %d, allowed range is [0, 1]\n",
val);
vport->cfg_restrict_login = 1;
@@ -1441,6 +2139,23 @@ LPFC_VPORT_ATTR_R(scan_down, 1, 0, 1,
# Set loop mode if you want to run as an NL_Port. Value range is [0,0x6].
# Default value is 0.
*/
+
+/**
+ * lpfc_topology_set: Set the adapter's topology field.
+ * @phba: lpfc_hba pointer.
+ * @val: topology value.
+ *
+ * Description:
+ * If val is in a valid range then set the adapter's topology field and
+ * issue a lip; if the lip fails reset the topology to the old value.
+ *
+ * If the value is not in range log a kernel error message and return an error.
+ *
+ * Returns:
+ * zero if val is in range and lip okay
+ * non-zero return value from lpfc_issue_lip()
+ * -EINVAL val out of range
+ **/
static int
lpfc_topology_set(struct lpfc_hba *phba, int val)
{
@@ -1469,6 +2184,335 @@ lpfc_param_store(topology)
static DEVICE_ATTR(lpfc_topology, S_IRUGO | S_IWUSR,
lpfc_topology_show, lpfc_topology_store);
+
+/**
+ * lpfc_stat_data_ctrl_store: Write callback for the lpfc_stat_data_ctrl
+ * sysfs file.
+ * @dev: Pointer to class device.
+ * @attr: device attribute, not used.
+ * @buf: Data buffer.
+ * @count: Size of the data buffer.
+ *
+ * This function gets called when a user writes to the lpfc_stat_data_ctrl
+ * sysfs file. It parses the command written to the sysfs file and takes
+ * the appropriate action. These commands are used for controlling driver
+ * statistical data collection (an illustrative usage sketch follows this
+ * function).
+ * The following are the commands this function handles.
+ *
+ * setbucket <bucket_type> <base> <step>
+ * = Set the latency buckets.
+ * destroybucket = destroy all the buckets.
+ * start = start data collection
+ * stop = stop data collection
+ * reset = reset the collected data
+ **/
+static ssize_t
+lpfc_stat_data_ctrl_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+#define LPFC_MAX_DATA_CTRL_LEN 1024
+ static char bucket_data[LPFC_MAX_DATA_CTRL_LEN];
+ unsigned long i;
+ char *str_ptr, *token;
+ struct lpfc_vport **vports;
+ struct Scsi_Host *v_shost;
+ char *bucket_type_str, *base_str, *step_str;
+ unsigned long base, step, bucket_type;
+
+ if (!strncmp(buf, "setbucket", strlen("setbucket"))) {
+ if (strlen(buf) >= LPFC_MAX_DATA_CTRL_LEN)
+ return -EINVAL;
+
+ strcpy(bucket_data, buf);
+ str_ptr = &bucket_data[0];
+ /* Ignore this token - this is command token */
+ token = strsep(&str_ptr, "\t ");
+ if (!token)
+ return -EINVAL;
+
+ bucket_type_str = strsep(&str_ptr, "\t ");
+ if (!bucket_type_str)
+ return -EINVAL;
+
+ if (!strncmp(bucket_type_str, "linear", strlen("linear")))
+ bucket_type = LPFC_LINEAR_BUCKET;
+ else if (!strncmp(bucket_type_str, "power2", strlen("power2")))
+ bucket_type = LPFC_POWER2_BUCKET;
+ else
+ return -EINVAL;
+
+ base_str = strsep(&str_ptr, "\t ");
+ if (!base_str)
+ return -EINVAL;
+ base = simple_strtoul(base_str, NULL, 0);
+
+ step_str = strsep(&str_ptr, "\t ");
+ if (!step_str)
+ return -EINVAL;
+ step = simple_strtoul(step_str, NULL, 0);
+ if (!step)
+ return -EINVAL;
+
+ /* Block the data collection for every vport */
+ vports = lpfc_create_vport_work_array(phba);
+ if (vports == NULL)
+ return -ENOMEM;
+
+ for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
+ v_shost = lpfc_shost_from_vport(vports[i]);
+ spin_lock_irq(v_shost->host_lock);
+ /* Block and reset data collection */
+ vports[i]->stat_data_blocked = 1;
+ if (vports[i]->stat_data_enabled)
+ lpfc_vport_reset_stat_data(vports[i]);
+ spin_unlock_irq(v_shost->host_lock);
+ }
+
+ /* Set the bucket attributes */
+ phba->bucket_type = bucket_type;
+ phba->bucket_base = base;
+ phba->bucket_step = step;
+
+ for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
+ v_shost = lpfc_shost_from_vport(vports[i]);
+
+ /* Unblock data collection */
+ spin_lock_irq(v_shost->host_lock);
+ vports[i]->stat_data_blocked = 0;
+ spin_unlock_irq(v_shost->host_lock);
+ }
+ lpfc_destroy_vport_work_array(phba, vports);
+ return strlen(buf);
+ }
+
+ if (!strncmp(buf, "destroybucket", strlen("destroybucket"))) {
+ vports = lpfc_create_vport_work_array(phba);
+ if (vports == NULL)
+ return -ENOMEM;
+
+ for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
+ v_shost = lpfc_shost_from_vport(vports[i]);
+ spin_lock_irq(v_shost->host_lock);
+ vports[i]->stat_data_blocked = 1;
+ lpfc_free_bucket(vports[i]);
+ vports[i]->stat_data_enabled = 0;
+ vports[i]->stat_data_blocked = 0;
+ spin_unlock_irq(v_shost->host_lock);
+ }
+ lpfc_destroy_vport_work_array(phba, vports);
+ phba->bucket_type = LPFC_NO_BUCKET;
+ phba->bucket_base = 0;
+ phba->bucket_step = 0;
+ return strlen(buf);
+ }
+
+ if (!strncmp(buf, "start", strlen("start"))) {
+ /* If no buckets configured return error */
+ if (phba->bucket_type == LPFC_NO_BUCKET)
+ return -EINVAL;
+ spin_lock_irq(shost->host_lock);
+ if (vport->stat_data_enabled) {
+ spin_unlock_irq(shost->host_lock);
+ return strlen(buf);
+ }
+ lpfc_alloc_bucket(vport);
+ vport->stat_data_enabled = 1;
+ spin_unlock_irq(shost->host_lock);
+ return strlen(buf);
+ }
+
+ if (!strncmp(buf, "stop", strlen("stop"))) {
+ spin_lock_irq(shost->host_lock);
+ if (vport->stat_data_enabled == 0) {
+ spin_unlock_irq(shost->host_lock);
+ return strlen(buf);
+ }
+ lpfc_free_bucket(vport);
+ vport->stat_data_enabled = 0;
+ spin_unlock_irq(shost->host_lock);
+ return strlen(buf);
+ }
+
+ if (!strncmp(buf, "reset", strlen("reset"))) {
+ if ((phba->bucket_type == LPFC_NO_BUCKET)
+ || !vport->stat_data_enabled)
+ return strlen(buf);
+ spin_lock_irq(shost->host_lock);
+ vport->stat_data_blocked = 1;
+ lpfc_vport_reset_stat_data(vport);
+ vport->stat_data_blocked = 0;
+ spin_unlock_irq(shost->host_lock);
+ return strlen(buf);
+ }
+ return -EINVAL;
+}
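
For illustration only (not part of this patch), a user-space sketch of driving the commands documented above; the host number and the bucket base/step values are assumptions:

/* Sketch: configure latency buckets and start collection via sysfs. */
#include <stdio.h>

static int stat_ctrl(const char *cmd)
{
	FILE *f = fopen("/sys/class/scsi_host/host0/lpfc_stat_data_ctrl", "w");

	if (!f)
		return -1;
	fputs(cmd, f);
	return fclose(f);
}

int main(void)
{
	/* Linear buckets: bucket i covers latencies up to base + step * i
	 * (see lpfc_stat_data_ctrl_show below); values are illustrative. */
	if (stat_ctrl("setbucket linear 1000 1000"))
		return 1;
	return stat_ctrl("start") ? 1 : 0;
}
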
+
+
+/**
+ * lpfc_stat_data_ctrl_show: Read callback function for
+ * lpfc_stat_data_ctrl sysfs file.
+ * @dev: Pointer to class device object.
+ * @attr: device attribute, not used.
+ * @buf: Data buffer.
+ *
+ * This function is the read callback for the lpfc_stat_data_ctrl
+ * sysfs file. It reports the current statistical data collection
+ * state.
+ **/
+static ssize_t
+lpfc_stat_data_ctrl_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ int index = 0;
+ int i;
+ char *bucket_type;
+ unsigned long bucket_value;
+
+ switch (phba->bucket_type) {
+ case LPFC_LINEAR_BUCKET:
+ bucket_type = "linear";
+ break;
+ case LPFC_POWER2_BUCKET:
+ bucket_type = "power2";
+ break;
+ default:
+ bucket_type = "No Bucket";
+ break;
+ }
+
+ sprintf(&buf[index], "Statistical Data enabled :%d, "
+ "blocked :%d, Bucket type :%s, Bucket base :%d,"
+ " Bucket step :%d\nLatency Ranges :",
+ vport->stat_data_enabled, vport->stat_data_blocked,
+ bucket_type, phba->bucket_base, phba->bucket_step);
+ index = strlen(buf);
+ if (phba->bucket_type != LPFC_NO_BUCKET) {
+ for (i = 0; i < LPFC_MAX_BUCKET_COUNT; i++) {
+ if (phba->bucket_type == LPFC_LINEAR_BUCKET)
+ bucket_value = phba->bucket_base +
+ phba->bucket_step * i;
+ else
+ bucket_value = phba->bucket_base +
+ (1 << i) * phba->bucket_step;
+
+ if (index + 10 > PAGE_SIZE)
+ break;
+ sprintf(&buf[index], "%08ld ", bucket_value);
+ index = strlen(buf);
+ }
+ }
+ sprintf(&buf[index], "\n");
+ return strlen(buf);
+}
+
+/*
+ * Sysfs attribute to control the statistical data collection.
+ */
+static DEVICE_ATTR(lpfc_stat_data_ctrl, S_IRUGO | S_IWUSR,
+ lpfc_stat_data_ctrl_show, lpfc_stat_data_ctrl_store);
+
+/*
+ * lpfc_drvr_stat_data: sysfs attr to get driver statistical data.
+ */
+
+/*
+ * Each bucket takes 11 characters ("%010u,"); each target record also
+ * carries a 17-character WWN prefix (16 hex digits and a colon) plus a
+ * trailing newline, hence the +18 below.
+ */
+#define STAT_DATA_SIZE_PER_TARGET(NUM_BUCKETS) ((NUM_BUCKETS) * 11 + 18)
+#define MAX_STAT_DATA_SIZE_PER_TARGET \
+ STAT_DATA_SIZE_PER_TARGET(LPFC_MAX_BUCKET_COUNT)
+
+
+/**
+ * sysfs_drvr_stat_data_read: Read callback function for lpfc_drvr_stat_data
+ * sysfs attribute.
+ * @kobj: Pointer to the kernel object
+ * @bin_attr: Attribute object
+ * @buf: Buffer pointer
+ * @off: File offset
+ * @count: Buffer size
+ *
+ * This function is the read callback for the lpfc_drvr_stat_data
+ * sysfs file. It exports the statistical data to user
+ * applications.
+ **/
+static ssize_t
+sysfs_drvr_stat_data_read(struct kobject *kobj, struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct device *dev = container_of(kobj, struct device,
+ kobj);
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ int i = 0, index = 0;
+ unsigned long nport_index;
+ struct lpfc_nodelist *ndlp = NULL;
+ nport_index = (unsigned long)off /
+ MAX_STAT_DATA_SIZE_PER_TARGET;
+
+ if (!vport->stat_data_enabled || vport->stat_data_blocked
+ || (phba->bucket_type == LPFC_NO_BUCKET))
+ return 0;
+
+ spin_lock_irq(shost->host_lock);
+ list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
+ if (!NLP_CHK_NODE_ACT(ndlp) || !ndlp->lat_data)
+ continue;
+
+ if (nport_index > 0) {
+ nport_index--;
+ continue;
+ }
+
+ if ((index + MAX_STAT_DATA_SIZE_PER_TARGET)
+ > count)
+ break;
+
+ if (!ndlp->lat_data)
+ continue;
+
+ /* Print the WWN */
+ sprintf(&buf[index], "%02x%02x%02x%02x%02x%02x%02x%02x:",
+ ndlp->nlp_portname.u.wwn[0],
+ ndlp->nlp_portname.u.wwn[1],
+ ndlp->nlp_portname.u.wwn[2],
+ ndlp->nlp_portname.u.wwn[3],
+ ndlp->nlp_portname.u.wwn[4],
+ ndlp->nlp_portname.u.wwn[5],
+ ndlp->nlp_portname.u.wwn[6],
+ ndlp->nlp_portname.u.wwn[7]);
+
+ index = strlen(buf);
+
+ for (i = 0; i < LPFC_MAX_BUCKET_COUNT; i++) {
+ sprintf(&buf[index], "%010u,",
+ ndlp->lat_data[i].cmd_count);
+ index = strlen(buf);
+ }
+ sprintf(&buf[index], "\n");
+ index = strlen(buf);
+ }
+ spin_unlock_irq(shost->host_lock);
+ return index;
+}
+
+static struct bin_attribute sysfs_drvr_stat_data_attr = {
+ .attr = {
+ .name = "lpfc_drvr_stat_data",
+ .mode = S_IRUSR,
+ .owner = THIS_MODULE,
+ },
+ .size = LPFC_MAX_TARGET * MAX_STAT_DATA_SIZE_PER_TARGET,
+ .read = sysfs_drvr_stat_data_read,
+ .write = NULL,
+};
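
For illustration, one record of the exported data, as built by the sprintf calls in sysfs_drvr_stat_data_read() above, looks roughly like the line below: a 16-hex-digit WWPN, a colon, then one ten-digit command count per latency bucket (LPFC_MAX_BUCKET_COUNT of them) separated by commas. The WWPN and the counts shown here are made-up values:

10000000c9abcdef:0000000128,0000000037,0000000002,...,0000000000
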
+
/*
# lpfc_link_speed: Link speed selection for initializing the Fibre Channel
# connection.
@@ -1479,6 +2523,24 @@ static DEVICE_ATTR(lpfc_topology, S_IRUGO | S_IWUSR,
# 8 = 8 Gigabaud
# Value range is [0,8]. Default value is 0.
*/
+
+/**
+ * lpfc_link_speed_set: Set the adapter's link speed.
+ * @phba: lpfc_hba pointer.
+ * @val: link speed value.
+ *
+ * Description:
+ * If val is in a valid range then set the adapter's link speed field and
+ * issue a lip; if the lip fails reset the link speed to the old value.
+ *
+ * Notes:
+ * If the value is not in range log a kernel error message and return an error.
+ *
+ * Returns:
+ * zero if val is in range and lip okay.
+ * non-zero return value from lpfc_issue_lip()
+ * -EINVAL val out of range
+ **/
static int
lpfc_link_speed_set(struct lpfc_hba *phba, int val)
{
@@ -1513,6 +2575,23 @@ static int lpfc_link_speed = 0;
module_param(lpfc_link_speed, int, 0);
MODULE_PARM_DESC(lpfc_link_speed, "Select link speed");
lpfc_param_show(link_speed)
+
+/**
+ * lpfc_link_speed_init: Set the adapter's link speed.
+ * @phba: lpfc_hba pointer.
+ * @val: link speed value.
+ *
+ * Description:
+ * If val is in a valid range then set the adapter's link speed field.
+ *
+ * Notes:
+ * If the value is not in range log a kernel error message, clear the link
+ * speed and return an error.
+ *
+ * Returns:
+ * zero if val saved.
+ * -EINVAL val out of range
+ **/
static int
lpfc_link_speed_init(struct lpfc_hba *phba, int val)
{
@@ -1522,7 +2601,7 @@ lpfc_link_speed_init(struct lpfc_hba *phba, int val)
return 0;
}
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0454 lpfc_link_speed attribute cannot "
+ "0405 lpfc_link_speed attribute cannot "
"be set to %d, allowed values are "
"["LPFC_LINK_SPEED_STRING"]\n", val);
phba->cfg_link_speed = 0;
@@ -1548,6 +2627,48 @@ LPFC_VPORT_ATTR_RW(use_adisc, 0, 0, 1,
"Use ADISC on rediscovery to authenticate FCP devices");
/*
+# lpfc_max_scsicmpl_time: Use scsi command completion time to control I/O queue
+# depth. Default value is 0. When the value of this parameter is zero the
+# SCSI command completion time is not used for controlling I/O queue depth. When
+# the parameter is set to a non-zero value, the I/O queue depth is controlled
+# to limit the I/O completion time to the parameter value.
+# The value is set in milliseconds.
+*/
+static int lpfc_max_scsicmpl_time;
+module_param(lpfc_max_scsicmpl_time, int, 0);
+MODULE_PARM_DESC(lpfc_max_scsicmpl_time,
+ "Use command completion time to control queue depth");
+lpfc_vport_param_show(max_scsicmpl_time);
+lpfc_vport_param_init(max_scsicmpl_time, 0, 0, 60000);
+static int
+lpfc_max_scsicmpl_time_set(struct lpfc_vport *vport, int val)
+{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_nodelist *ndlp, *next_ndlp;
+
+ if (val == vport->cfg_max_scsicmpl_time)
+ return 0;
+ if ((val < 0) || (val > 60000))
+ return -EINVAL;
+ vport->cfg_max_scsicmpl_time = val;
+
+ spin_lock_irq(shost->host_lock);
+ list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
+ if (!NLP_CHK_NODE_ACT(ndlp))
+ continue;
+ if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
+ continue;
+ ndlp->cmd_qdepth = LPFC_MAX_TGT_QDEPTH;
+ }
+ spin_unlock_irq(shost->host_lock);
+ return 0;
+}
+lpfc_vport_param_store(max_scsicmpl_time);
+static DEVICE_ATTR(lpfc_max_scsicmpl_time, S_IRUGO | S_IWUSR,
+ lpfc_max_scsicmpl_time_show,
+ lpfc_max_scsicmpl_time_store);
+
+/*
# lpfc_ack0: Use ACK0, instead of ACK1 for class 2 acknowledgement. Value
# range is [0,1]. Default value is 0.
*/
@@ -1623,12 +2744,12 @@ LPFC_ATTR_RW(poll_tmo, 10, 1, 255,
/*
# lpfc_use_msi: Use MSI (Message Signaled Interrupts) in systems that
# support this feature
-# 0 = MSI disabled (default)
+# 0 = MSI disabled
# 1 = MSI enabled
-# 2 = MSI-X enabled
-# Value range is [0,2]. Default value is 0.
+# 2 = MSI-X enabled (default)
+# Value range is [0,2]. Default value is 2.
*/
-LPFC_ATTR_R(use_msi, 0, 0, 2, "Use Message Signaled Interrupts (1) or "
+LPFC_ATTR_R(use_msi, 2, 0, 2, "Use Message Signaled Interrupts (1) or "
"MSI-X (2), if possible");
/*
@@ -1668,6 +2789,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_option_rom_version,
&dev_attr_link_state,
&dev_attr_num_discovered_ports,
+ &dev_attr_menlo_mgmt_mode,
&dev_attr_lpfc_drvr_version,
&dev_attr_lpfc_temp_sensor,
&dev_attr_lpfc_log_verbose,
@@ -1709,6 +2831,8 @@ struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_lpfc_enable_hba_reset,
&dev_attr_lpfc_enable_hba_heartbeat,
&dev_attr_lpfc_sg_seg_cnt,
+ &dev_attr_lpfc_max_scsicmpl_time,
+ &dev_attr_lpfc_stat_data_ctrl,
NULL,
};
@@ -1731,9 +2855,29 @@ struct device_attribute *lpfc_vport_attrs[] = {
&dev_attr_nport_evt_cnt,
&dev_attr_npiv_info,
&dev_attr_lpfc_enable_da_id,
+ &dev_attr_lpfc_max_scsicmpl_time,
+ &dev_attr_lpfc_stat_data_ctrl,
NULL,
};
+/**
+ * sysfs_ctlreg_write: Write method for writing to ctlreg.
+ * @kobj: kernel kobject that contains the kernel class device.
+ * @bin_attr: kernel attributes passed to us.
+ * @buf: contains the data to be written to the adapter IOREG space.
+ * @off: offset into buffer to beginning of data.
+ * @count: bytes to transfer.
+ *
+ * Description:
+ * Accessed via /sys/class/scsi_host/hostxxx/ctlreg.
+ * Uses the adapter io control registers to send buf contents to the adapter.
+ *
+ * Returns:
+ * -ERANGE off and count combo out of range
+ * -EINVAL off, count or buff address invalid
+ * -EPERM adapter is offline
+ * value of count, buf contents written
+ **/
static ssize_t
sysfs_ctlreg_write(struct kobject *kobj, struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
@@ -1766,6 +2910,23 @@ sysfs_ctlreg_write(struct kobject *kobj, struct bin_attribute *bin_attr,
return count;
}
+/**
+ * sysfs_ctlreg_read: Read method for reading from ctlreg.
+ * @kobj: kernel kobject that contains the kernel class device.
+ * @bin_attr: kernel attributes passed to us.
+ * @buf: if successful contains the data from the adapter IOREG space.
+ * @off: offset into buffer to beginning of data.
+ * @count: bytes to transfer.
+ *
+ * Description:
+ * Accessed via /sys/class/scsi_host/hostxxx/ctlreg.
+ * Uses the adapter io control registers to read data into buf.
+ *
+ * Returns:
+ * -ERANGE off and count combo out of range
+ * -EINVAL off, count or buff address invalid
+ * value of count, buf contents read
+ **/
static ssize_t
sysfs_ctlreg_read(struct kobject *kobj, struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
@@ -1810,7 +2971,10 @@ static struct bin_attribute sysfs_ctlreg_attr = {
.write = sysfs_ctlreg_write,
};
-
+/**
+ * sysfs_mbox_idle: frees the sysfs mailbox.
+ * @phba: lpfc_hba pointer
+ **/
static void
sysfs_mbox_idle(struct lpfc_hba *phba)
{
@@ -1824,6 +2988,27 @@ sysfs_mbox_idle(struct lpfc_hba *phba)
}
}
+/**
+ * sysfs_mbox_write: Write method for writing information via mbox.
+ * @kobj: kernel kobject that contains the kernel class device.
+ * @bin_attr: kernel attributes passed to us.
+ * @buf: contains the data to be written to sysfs mbox.
+ * @off: offset into buffer to beginning of data.
+ * @count: bytes to transfer.
+ *
+ * Description:
+ * Accessed via /sys/class/scsi_host/hostxxx/mbox.
+ * Uses the sysfs mbox to send buf contents to the adapter.
+ *
+ * Returns:
+ * -ERANGE off and count combo out of range
+ * -EINVAL off, count or buff address invalid
+ * zero if count is zero
+ * -EPERM adapter is offline
+ * -ENOMEM failed to allocate memory for the mailbox
+ * -EAGAIN offset, state or mbox is NULL
+ * count number of bytes transferred
+ **/
static ssize_t
sysfs_mbox_write(struct kobject *kobj, struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
@@ -1878,6 +3063,29 @@ sysfs_mbox_write(struct kobject *kobj, struct bin_attribute *bin_attr,
return count;
}
+/**
+ * sysfs_mbox_read: Read method for reading information via mbox.
+ * @kobj: kernel kobject that contains the kernel class device.
+ * @bin_attr: kernel attributes passed to us.
+ * @buf: contains the data to be read from sysfs mbox.
+ * @off: offset into buffer to beginning of data.
+ * @count: bytes to transfer.
+ *
+ * Description:
+ * Accessed via /sys/class/scsi_host/hostxxx/mbox.
+ * Uses the sysfs mbox to receive data from the adapter.
+ *
+ * Returns:
+ * -ERANGE off greater than mailbox command size
+ * -EINVAL off, count or buff address invalid
+ * zero if off and count are zero
+ * -EACCES adapter over temp
+ * -EPERM garbage can value to catch a multitude of errors
+ * -EAGAIN management IO not permitted, state or off error
+ * -ETIME mailbox timeout
+ * -ENODEV mailbox error
+ * count number of bytes transferred
+ **/
static ssize_t
sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
@@ -1954,6 +3162,8 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
case MBX_DEL_LD_ENTRY:
case MBX_SET_VARIABLE:
case MBX_WRITE_WWN:
+ case MBX_PORT_CAPABILITIES:
+ case MBX_PORT_IOV_CONTROL:
break;
case MBX_READ_SPARM64:
case MBX_READ_LA:
@@ -1978,17 +3188,15 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
/* If HBA encountered an error attention, allow only DUMP
* or RESTART mailbox commands until the HBA is restarted.
*/
- if ((phba->pport->stopped) &&
- (phba->sysfs_mbox.mbox->mb.mbxCommand !=
- MBX_DUMP_MEMORY &&
- phba->sysfs_mbox.mbox->mb.mbxCommand !=
- MBX_RESTART &&
- phba->sysfs_mbox.mbox->mb.mbxCommand !=
- MBX_WRITE_VPARMS)) {
- sysfs_mbox_idle(phba);
- spin_unlock_irq(&phba->hbalock);
- return -EPERM;
- }
+ if (phba->pport->stopped &&
+ phba->sysfs_mbox.mbox->mb.mbxCommand != MBX_DUMP_MEMORY &&
+ phba->sysfs_mbox.mbox->mb.mbxCommand != MBX_RESTART &&
+ phba->sysfs_mbox.mbox->mb.mbxCommand != MBX_WRITE_VPARMS &&
+ phba->sysfs_mbox.mbox->mb.mbxCommand != MBX_WRITE_WWN)
+ lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
+ "1259 mbox: Issued mailbox cmd "
+ "0x%x while in stopped state.\n",
+ phba->sysfs_mbox.mbox->mb.mbxCommand);
phba->sysfs_mbox.mbox->vport = vport;
@@ -2059,6 +3267,14 @@ static struct bin_attribute sysfs_mbox_attr = {
.write = sysfs_mbox_write,
};
+/**
+ * lpfc_alloc_sysfs_attr: Creates the ctlreg and mbox entries.
+ * @vport: address of lpfc vport structure.
+ *
+ * Return codes:
+ * zero on success
+ * error return code from sysfs_create_bin_file()
+ **/
int
lpfc_alloc_sysfs_attr(struct lpfc_vport *vport)
{
@@ -2075,18 +3291,30 @@ lpfc_alloc_sysfs_attr(struct lpfc_vport *vport)
if (error)
goto out_remove_ctlreg_attr;
+ error = sysfs_create_bin_file(&shost->shost_dev.kobj,
+ &sysfs_drvr_stat_data_attr);
+ if (error)
+ goto out_remove_mbox_attr;
+
return 0;
+out_remove_mbox_attr:
+ sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_mbox_attr);
out_remove_ctlreg_attr:
sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_ctlreg_attr);
out:
return error;
}
+/**
+ * lpfc_free_sysfs_attr: Removes the ctlreg and mbox entries.
+ * @vport: address of lpfc vport structure.
+ **/
void
lpfc_free_sysfs_attr(struct lpfc_vport *vport)
{
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
-
+ sysfs_remove_bin_file(&shost->shost_dev.kobj,
+ &sysfs_drvr_stat_data_attr);
sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_mbox_attr);
sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_ctlreg_attr);
}
@@ -2096,6 +3324,10 @@ lpfc_free_sysfs_attr(struct lpfc_vport *vport)
* Dynamic FC Host Attributes Support
*/
+/**
+ * lpfc_get_host_port_id: Copy the vport DID into the scsi host port id.
+ * @shost: kernel scsi host pointer.
+ **/
static void
lpfc_get_host_port_id(struct Scsi_Host *shost)
{
@@ -2105,6 +3337,10 @@ lpfc_get_host_port_id(struct Scsi_Host *shost)
fc_host_port_id(shost) = vport->fc_myDID;
}
+/**
+ * lpfc_get_host_port_type: Set the value of the scsi host port type.
+ * @shost: kernel scsi host pointer.
+ **/
static void
lpfc_get_host_port_type(struct Scsi_Host *shost)
{
@@ -2133,6 +3369,10 @@ lpfc_get_host_port_type(struct Scsi_Host *shost)
spin_unlock_irq(shost->host_lock);
}
+/**
+ * lpfc_get_host_port_state: Set the value of the scsi host port state.
+ * @shost: kernel scsi host pointer.
+ **/
static void
lpfc_get_host_port_state(struct Scsi_Host *shost)
{
@@ -2167,6 +3407,10 @@ lpfc_get_host_port_state(struct Scsi_Host *shost)
spin_unlock_irq(shost->host_lock);
}
+/**
+ * lpfc_get_host_speed: Set the value of the scsi host speed.
+ * @shost: kernel scsi host pointer.
+ **/
static void
lpfc_get_host_speed(struct Scsi_Host *shost)
{
@@ -2199,6 +3443,10 @@ lpfc_get_host_speed(struct Scsi_Host *shost)
spin_unlock_irq(shost->host_lock);
}
+/**
+ * lpfc_get_host_fabric_name: Set the value of the scsi host fabric name.
+ * @shost: kernel scsi host pointer.
+ **/
static void
lpfc_get_host_fabric_name (struct Scsi_Host *shost)
{
@@ -2221,6 +3469,18 @@ lpfc_get_host_fabric_name (struct Scsi_Host *shost)
fc_host_fabric_name(shost) = node_name;
}
+/**
+ * lpfc_get_stats: Return statistical information about the adapter.
+ * @shost: kernel scsi host pointer.
+ *
+ * Notes:
+ * NULL on error for link down, no mbox pool, sli2 active,
+ * management not allowed, memory allocation error, or mbox error.
+ *
+ * Returns:
+ * NULL for error
+ * address of the adapter host statistics
+ **/
static struct fc_host_statistics *
lpfc_get_stats(struct Scsi_Host *shost)
{
@@ -2334,6 +3594,10 @@ lpfc_get_stats(struct Scsi_Host *shost)
return hs;
}
+/**
+ * lpfc_reset_stats: Copy the adapter link stats information.
+ * @shost: kernel scsi host pointer.
+ **/
static void
lpfc_reset_stats(struct Scsi_Host *shost)
{
@@ -2411,6 +3675,14 @@ lpfc_reset_stats(struct Scsi_Host *shost)
* are no sysfs handlers for link_down_tmo.
*/
+/**
+ * lpfc_get_node_by_target: Return the nodelist for a target.
+ * @starget: kernel scsi target pointer.
+ *
+ * Returns:
+ * address of the node list if found
+ * NULL target not found
+ **/
static struct lpfc_nodelist *
lpfc_get_node_by_target(struct scsi_target *starget)
{
@@ -2432,6 +3704,10 @@ lpfc_get_node_by_target(struct scsi_target *starget)
return NULL;
}
+/**
+ * lpfc_get_starget_port_id: Set the target port id to the ndlp DID or -1.
+ * @starget: kernel scsi target pointer.
+ **/
static void
lpfc_get_starget_port_id(struct scsi_target *starget)
{
@@ -2440,6 +3716,12 @@ lpfc_get_starget_port_id(struct scsi_target *starget)
fc_starget_port_id(starget) = ndlp ? ndlp->nlp_DID : -1;
}
+/**
+ * lpfc_get_starget_node_name: Set the target node name.
+ * @starget: kernel scsi target pointer.
+ *
+ * Description: Set the target node name to the ndlp node name wwn or zero.
+ **/
static void
lpfc_get_starget_node_name(struct scsi_target *starget)
{
@@ -2449,6 +3731,12 @@ lpfc_get_starget_node_name(struct scsi_target *starget)
ndlp ? wwn_to_u64(ndlp->nlp_nodename.u.wwn) : 0;
}
+/**
+ * lpfc_get_starget_port_name: Set the target port name.
+ * @starget: kernel scsi target pointer.
+ *
+ * Description: set the target port name to the ndlp port name wwn or zero.
+ **/
static void
lpfc_get_starget_port_name(struct scsi_target *starget)
{
@@ -2458,6 +3746,15 @@ lpfc_get_starget_port_name(struct scsi_target *starget)
ndlp ? wwn_to_u64(ndlp->nlp_portname.u.wwn) : 0;
}
+/**
+ * lpfc_set_rport_loss_tmo: Set the rport dev loss tmo.
+ * @rport: fc rport address.
+ * @timeout: new value for dev loss tmo.
+ *
+ * Description:
+ * If timeout is non-zero, set the dev_loss_tmo to timeout, else set
+ * dev_loss_tmo to one.
+ **/
static void
lpfc_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
{
@@ -2467,7 +3764,18 @@ lpfc_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
rport->dev_loss_tmo = 1;
}
-
+/**
+ * lpfc_rport_show_function: Return rport target information.
+ *
+ * Description:
+ * Macro that, given a field, generates a function with the name
+ * lpfc_show_rport_##field.
+ *
+ * lpfc_show_rport_##field: returns the bytes formatted in buf
+ * @dev: class device converted to an fc_rport.
+ * @buf: on return contains the target_field or zero.
+ *
+ * Returns: size of formatted string.
+ **/
#define lpfc_rport_show_function(field, format_string, sz, cast) \
static ssize_t \
lpfc_show_rport_##field (struct device *dev, \
@@ -2602,6 +3910,10 @@ struct fc_function_template lpfc_vport_transport_functions = {
.vport_disable = lpfc_vport_disable,
};
+/**
+ * lpfc_get_cfgparam: Used during probe_one to init the adapter structure.
+ * @phba: lpfc_hba pointer.
+ **/
void
lpfc_get_cfgparam(struct lpfc_hba *phba)
{
@@ -2637,6 +3949,10 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
return;
}
+/**
+ * lpfc_get_vport_cfgparam: Used during port create, init the vport structure.
+ * @vport: lpfc_vport pointer.
+ **/
void
lpfc_get_vport_cfgparam(struct lpfc_vport *vport)
{
@@ -2648,6 +3964,7 @@ lpfc_get_vport_cfgparam(struct lpfc_vport *vport)
lpfc_restrict_login_init(vport, lpfc_restrict_login);
lpfc_fcp_class_init(vport, lpfc_fcp_class);
lpfc_use_adisc_init(vport, lpfc_use_adisc);
+ lpfc_max_scsicmpl_time_init(vport, lpfc_max_scsicmpl_time);
lpfc_fdmi_on_init(vport, lpfc_fdmi_on);
lpfc_discovery_threads_init(vport, lpfc_discovery_threads);
lpfc_max_luns_init(vport, lpfc_max_luns);
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 1b8245213b8..044ef4057d2 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -18,7 +18,7 @@
* included with this package. *
*******************************************************************/
-typedef int (*node_filter)(struct lpfc_nodelist *ndlp, void *param);
+typedef int (*node_filter)(struct lpfc_nodelist *, void *);
struct fc_rport;
void lpfc_dump_mem(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t);
@@ -26,11 +26,11 @@ void lpfc_read_nv(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_config_async(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
void lpfc_heart_beat(struct lpfc_hba *, LPFC_MBOXQ_t *);
-int lpfc_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb,
- struct lpfc_dmabuf *mp);
+int lpfc_read_la(struct lpfc_hba *, LPFC_MBOXQ_t *, struct lpfc_dmabuf *);
void lpfc_clear_la(struct lpfc_hba *, LPFC_MBOXQ_t *);
-void lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport);
+void lpfc_issue_clear_la(struct lpfc_hba *, struct lpfc_vport *);
void lpfc_config_link(struct lpfc_hba *, LPFC_MBOXQ_t *);
+int lpfc_config_msi(struct lpfc_hba *, LPFC_MBOXQ_t *);
int lpfc_read_sparam(struct lpfc_hba *, LPFC_MBOXQ_t *, int);
void lpfc_read_config(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_read_lnk_stat(struct lpfc_hba *, LPFC_MBOXQ_t *);
@@ -43,7 +43,7 @@ void lpfc_unreg_vpi(struct lpfc_hba *, uint16_t, LPFC_MBOXQ_t *);
void lpfc_init_link(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t);
struct lpfc_vport *lpfc_find_vport_by_did(struct lpfc_hba *, uint32_t);
-void lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove);
+void lpfc_cleanup_rpis(struct lpfc_vport *, int);
int lpfc_linkdown(struct lpfc_hba *);
void lpfc_port_link_failure(struct lpfc_vport *);
void lpfc_mbx_cmpl_read_la(struct lpfc_hba *, LPFC_MBOXQ_t *);
@@ -135,7 +135,7 @@ void lpfc_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
int lpfc_ns_cmd(struct lpfc_vport *, int, uint8_t, uint32_t);
int lpfc_fdmi_cmd(struct lpfc_vport *, struct lpfc_nodelist *, int);
void lpfc_fdmi_tmo(unsigned long);
-void lpfc_fdmi_timeout_handler(struct lpfc_vport *vport);
+void lpfc_fdmi_timeout_handler(struct lpfc_vport *);
int lpfc_config_port_prep(struct lpfc_hba *);
int lpfc_config_port_post(struct lpfc_hba *);
@@ -155,6 +155,8 @@ int lpfc_sli_queue_setup(struct lpfc_hba *);
void lpfc_handle_eratt(struct lpfc_hba *);
void lpfc_handle_latt(struct lpfc_hba *);
irqreturn_t lpfc_intr_handler(int, void *);
+irqreturn_t lpfc_sp_intr_handler(int, void *);
+irqreturn_t lpfc_fp_intr_handler(int, void *);
void lpfc_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_config_ring(struct lpfc_hba *, int, LPFC_MBOXQ_t *);
@@ -175,11 +177,12 @@ void lpfc_mem_free(struct lpfc_hba *);
void lpfc_stop_vport_timers(struct lpfc_vport *);
void lpfc_poll_timeout(unsigned long ptr);
-void lpfc_poll_start_timer(struct lpfc_hba * phba);
-void lpfc_sli_poll_fcp_ring(struct lpfc_hba * hba);
+void lpfc_poll_start_timer(struct lpfc_hba *);
+void lpfc_poll_eratt(unsigned long);
+void lpfc_sli_poll_fcp_ring(struct lpfc_hba *);
struct lpfc_iocbq * lpfc_sli_get_iocbq(struct lpfc_hba *);
-void lpfc_sli_release_iocbq(struct lpfc_hba * phba, struct lpfc_iocbq * iocb);
-uint16_t lpfc_sli_next_iotag(struct lpfc_hba * phba, struct lpfc_iocbq * iocb);
+void lpfc_sli_release_iocbq(struct lpfc_hba *, struct lpfc_iocbq *);
+uint16_t lpfc_sli_next_iotag(struct lpfc_hba *, struct lpfc_iocbq *);
void lpfc_reset_barrier(struct lpfc_hba * phba);
int lpfc_sli_brdready(struct lpfc_hba *, uint32_t);
@@ -187,11 +190,13 @@ int lpfc_sli_brdkill(struct lpfc_hba *);
int lpfc_sli_brdreset(struct lpfc_hba *);
int lpfc_sli_brdrestart(struct lpfc_hba *);
int lpfc_sli_hba_setup(struct lpfc_hba *);
+int lpfc_sli_config_port(struct lpfc_hba *, int);
int lpfc_sli_host_down(struct lpfc_vport *);
int lpfc_sli_hba_down(struct lpfc_hba *);
int lpfc_sli_issue_mbox(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
int lpfc_sli_handle_mb_event(struct lpfc_hba *);
int lpfc_sli_flush_mbox_queue(struct lpfc_hba *);
+int lpfc_sli_check_eratt(struct lpfc_hba *);
int lpfc_sli_handle_slow_ring_event(struct lpfc_hba *,
struct lpfc_sli_ring *, uint32_t);
void lpfc_sli_def_mbox_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
@@ -199,6 +204,7 @@ int lpfc_sli_issue_iocb(struct lpfc_hba *, struct lpfc_sli_ring *,
struct lpfc_iocbq *, uint32_t);
void lpfc_sli_pcimem_bcopy(void *, void *, uint32_t);
void lpfc_sli_abort_iocb_ring(struct lpfc_hba *, struct lpfc_sli_ring *);
+void lpfc_sli_flush_fcp_rings(struct lpfc_hba *);
int lpfc_sli_ringpostbuf_put(struct lpfc_hba *, struct lpfc_sli_ring *,
struct lpfc_dmabuf *);
struct lpfc_dmabuf *lpfc_sli_ringpostbuf_get(struct lpfc_hba *,
@@ -226,17 +232,13 @@ struct lpfc_nodelist *lpfc_findnode_did(struct lpfc_vport *, uint32_t);
struct lpfc_nodelist *lpfc_findnode_wwpn(struct lpfc_vport *,
struct lpfc_name *);
-int lpfc_sli_issue_mbox_wait(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq,
- uint32_t timeout);
+int lpfc_sli_issue_mbox_wait(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
-int lpfc_sli_issue_iocb_wait(struct lpfc_hba * phba,
- struct lpfc_sli_ring * pring,
- struct lpfc_iocbq * piocb,
- struct lpfc_iocbq * prspiocbq,
- uint32_t timeout);
-void lpfc_sli_abort_fcp_cmpl(struct lpfc_hba * phba,
- struct lpfc_iocbq * cmdiocb,
- struct lpfc_iocbq * rspiocb);
+int lpfc_sli_issue_iocb_wait(struct lpfc_hba *, struct lpfc_sli_ring *,
+ struct lpfc_iocbq *, struct lpfc_iocbq *,
+ uint32_t);
+void lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *, struct lpfc_iocbq *,
+ struct lpfc_iocbq *);
void lpfc_sli_free_hbq(struct lpfc_hba *, struct hbq_dmabuf *);
@@ -269,7 +271,7 @@ void lpfc_dev_loss_tmo_callbk(struct fc_rport *rport);
struct lpfc_vport *lpfc_create_port(struct lpfc_hba *, int, struct device *);
int lpfc_vport_disable(struct fc_vport *fc_vport, bool disable);
-void lpfc_mbx_unreg_vpi(struct lpfc_vport *);
+int lpfc_mbx_unreg_vpi(struct lpfc_vport *);
void destroy_port(struct lpfc_vport *);
int lpfc_get_instance(void);
void lpfc_host_attrib_init(struct Scsi_Host *);
@@ -290,6 +292,13 @@ void lpfc_unblock_fabric_iocbs(struct lpfc_hba *);
void lpfc_adjust_queue_depth(struct lpfc_hba *);
void lpfc_ramp_down_queue_handler(struct lpfc_hba *);
void lpfc_ramp_up_queue_handler(struct lpfc_hba *);
+void lpfc_scsi_dev_block(struct lpfc_hba *);
+
+void
+lpfc_send_els_failure_event(struct lpfc_hba *, struct lpfc_iocbq *,
+ struct lpfc_iocbq *);
+struct lpfc_fast_path_event *lpfc_alloc_fast_evt(struct lpfc_hba *);
+void lpfc_free_fast_evt(struct lpfc_hba *, struct lpfc_fast_path_event *);
#define ScsiResult(host_code, scsi_code) (((host_code) << 16) | scsi_code)
#define HBA_EVENT_RSCN 5
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 7fc74cf5823..26dae8bae2d 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -34,6 +34,7 @@
#include "lpfc_hw.h"
#include "lpfc_sli.h"
+#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
@@ -134,25 +135,24 @@ lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
}
list_del(&head);
} else {
- struct lpfc_iocbq *next;
-
- list_for_each_entry_safe(iocbq, next, &piocbq->list, list) {
+ INIT_LIST_HEAD(&head);
+ list_add_tail(&head, &piocbq->list);
+ list_for_each_entry(iocbq, &head, list) {
icmd = &iocbq->iocb;
if (icmd->ulpBdeCount == 0)
- lpfc_ct_unsol_buffer(phba, piocbq, NULL, 0);
+ lpfc_ct_unsol_buffer(phba, iocbq, NULL, 0);
for (i = 0; i < icmd->ulpBdeCount; i++) {
paddr = getPaddr(icmd->un.cont64[i].addrHigh,
icmd->un.cont64[i].addrLow);
mp = lpfc_sli_ringpostbuf_get(phba, pring,
paddr);
size = icmd->un.cont64[i].tus.f.bdeSize;
- lpfc_ct_unsol_buffer(phba, piocbq, mp, size);
+ lpfc_ct_unsol_buffer(phba, iocbq, mp, size);
lpfc_in_buf_free(phba, mp);
}
- list_del(&iocbq->list);
- lpfc_sli_release_iocbq(phba, iocbq);
lpfc_post_buffer(phba, pring, i);
}
+ list_del(&head);
}
}
@@ -212,7 +212,7 @@ lpfc_alloc_ct_rsp(struct lpfc_hba *phba, int cmdcode, struct ulp_bde64 *bpl,
else
list_add_tail(&mp->list, &mlist->list);
- bpl->tus.f.bdeFlags = BUFF_USE_RCV;
+ bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
/* build buffer ptr list for IOCB */
bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys) );
bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys) );
@@ -283,7 +283,7 @@ lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
icmd->un.genreq64.bdl.ulpIoTag32 = 0;
icmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
icmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
- icmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BDL;
+ icmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
icmd->un.genreq64.bdl.bdeSize = (num_entry * sizeof (struct ulp_bde64));
if (usr_flg)
@@ -861,7 +861,7 @@ lpfc_cmpl_ct(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
retry++;
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
- "0216 Retrying NS cmd %x\n", cmdcode);
+ "0250 Retrying NS cmd %x\n", cmdcode);
rc = lpfc_ns_cmd(vport, cmdcode, retry, 0);
if (rc == 0)
goto out;
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 094b47e94b2..771920bdde4 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2007 Emulex. All rights reserved. *
+ * Copyright (C) 2007-2008 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@@ -35,6 +35,7 @@
#include "lpfc_hw.h"
#include "lpfc_sli.h"
+#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
@@ -46,13 +47,14 @@
#include "lpfc_debugfs.h"
#ifdef CONFIG_LPFC_DEBUG_FS
-/* debugfs interface
+/**
+ * debugfs interface
*
* To access this interface the user should:
* # mkdir /debug
* # mount -t debugfs none /debug
*
- * The lpfc debugfs directory hierachy is:
+ * The lpfc debugfs directory hierarchy is:
* lpfc/lpfcX/vportY
* where X is the lpfc hba unique_id
* where Y is the vport VPI on that hba
@@ -61,14 +63,21 @@
* discovery_trace
 * This is an ASCII readable file that contains a trace of the last
* lpfc_debugfs_max_disc_trc events that happened on a specific vport.
- * See lpfc_debugfs.h for different categories of
- * discovery events. To enable the discovery trace, the following
- * module parameters must be set:
+ * See lpfc_debugfs.h for different categories of discovery events.
+ * To enable the discovery trace, the following module parameters must be set:
* lpfc_debugfs_enable=1 Turns on lpfc debugfs filesystem support
* lpfc_debugfs_max_disc_trc=X Where X is the event trace depth for
* EACH vport. X MUST also be a power of 2.
* lpfc_debugfs_mask_disc_trc=Y Where Y is an event mask as defined in
* lpfc_debugfs.h .
+ *
+ * slow_ring_trace
+ * This is an ASCII readable file that contains a trace of the last
+ * lpfc_debugfs_max_slow_ring_trc events that happened on a specific HBA.
+ * To enable the slow ring trace, the following module parameters must be set:
+ * lpfc_debugfs_enable=1 Turns on lpfc debugfs filesystem support
+ * lpfc_debugfs_max_slow_ring_trc=X Where X is the event trace depth for
+ * the HBA. X MUST also be a power of 2.
*/
static int lpfc_debugfs_enable = 1;
module_param(lpfc_debugfs_enable, int, 0);
@@ -117,6 +126,25 @@ struct lpfc_debug {
static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
static unsigned long lpfc_debugfs_start_time = 0L;
+/**
+ * lpfc_debugfs_disc_trc_data - Dump discovery logging to a buffer.
+ * @vport: The vport to gather the log info from.
+ * @buf: The buffer to dump log into.
+ * @size: The maximum amount of data to process.
+ *
+ * Description:
+ * This routine gathers the lpfc discovery debugfs data from the @vport and
+ * dumps it to @buf up to @size number of bytes. It will start at the next entry
+ * in the log and process the log until the end of the buffer. Then it will
+ * gather from the beginning of the log and process until the current entry.
+ *
+ * Notes:
+ * Discovery logging will be disabled while this routine dumps the log.
+ *
+ * Return Value:
+ * This routine returns the amount of bytes that were dumped into @buf and will
+ * not exceed @size.
+ **/
static int
lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
{
@@ -125,7 +153,6 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
struct lpfc_debugfs_trc *dtp;
char buffer[LPFC_DEBUG_TRC_ENTRY_SIZE];
-
enable = lpfc_debugfs_enable;
lpfc_debugfs_enable = 0;
@@ -159,6 +186,25 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
return len;
}
+/**
+ * lpfc_debugfs_slow_ring_trc_data - Dump slow ring logging to a buffer.
+ * @phba: The HBA to gather the log info from.
+ * @buf: The buffer to dump log into.
+ * @size: The maximum amount of data to process.
+ *
+ * Description:
+ * This routine gathers the lpfc slow ring debugfs data from the @phba and
+ * dumps it to @buf up to @size number of bytes. It will start at the next entry
+ * in the log and process the log until the end of the buffer. Then it will
+ * gather from the beginning of the log and process until the current entry.
+ *
+ * Notes:
+ * Slow ring logging will be disabled while this routine dumps the log.
+ *
+ * Return Value:
+ * This routine returns the amount of bytes that were dumped into @buf and will
+ * not exceed @size.
+ **/
static int
lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
{
@@ -203,6 +249,25 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
static int lpfc_debugfs_last_hbq = -1;
+/**
+ * lpfc_debugfs_hbqinfo_data - Dump host buffer queue info to a buffer.
+ * @phba: The HBA to gather host buffer info from.
+ * @buf: The buffer to dump log into.
+ * @size: The maximum amount of data to process.
+ *
+ * Description:
+ * This routine dumps the host buffer queue info from the @phba to @buf up to
+ * @size number of bytes. A header that describes the current hbq state will be
+ * dumped to @buf first and then info on each hbq entry will be dumped to @buf
+ * until @size bytes have been dumped or all the hbq info has been dumped.
+ *
+ * Notes:
+ * This routine will rotate through each configured HBQ each time called.
+ *
+ * Return Value:
+ * This routine returns the amount of bytes that were dumped into @buf and will
+ * not exceed @size.
+ **/
static int
lpfc_debugfs_hbqinfo_data(struct lpfc_hba *phba, char *buf, int size)
{
@@ -303,6 +368,24 @@ skipit:
static int lpfc_debugfs_last_hba_slim_off;
+/**
+ * lpfc_debugfs_dumpHBASlim_data - Dump HBA SLIM info to a buffer.
+ * @phba: The HBA to gather SLIM info from.
+ * @buf: The buffer to dump log into.
+ * @size: The maximum amount of data to process.
+ *
+ * Description:
+ * This routine dumps the current contents of HBA SLIM for the HBA associated
+ * with @phba to @buf up to @size bytes of data. This is the raw HBA SLIM data.
+ *
+ * Notes:
+ * This routine will only dump up to 1024 bytes of data each time called and
+ * should be called multiple times to dump the entire HBA SLIM.
+ *
+ * Return Value:
+ * This routine returns the amount of bytes that were dumped into @buf and will
+ * not exceed @size.
+ **/
static int
lpfc_debugfs_dumpHBASlim_data(struct lpfc_hba *phba, char *buf, int size)
{
@@ -342,6 +425,21 @@ lpfc_debugfs_dumpHBASlim_data(struct lpfc_hba *phba, char *buf, int size)
return len;
}
+/**
+ * lpfc_debugfs_dumpHostSlim_data - Dump host SLIM info to a buffer.
+ * @phba: The HBA to gather Host SLIM info from.
+ * @buf: The buffer to dump log into.
+ * @size: The maximum amount of data to process.
+ *
+ * Description:
+ * This routine dumps the current contents of host SLIM for the host associated
+ * with @phba to @buf up to @size bytes of data. The dump will contain the
+ * Mailbox, PCB, Rings, and Registers that are located in host memory.
+ *
+ * Return Value:
+ * This routine returns the amount of bytes that were dumped into @buf and will
+ * not exceed @size.
+ **/
static int
lpfc_debugfs_dumpHostSlim_data(struct lpfc_hba *phba, char *buf, int size)
{
@@ -357,7 +455,7 @@ lpfc_debugfs_dumpHostSlim_data(struct lpfc_hba *phba, char *buf, int size)
spin_lock_irq(&phba->hbalock);
len += snprintf(buf+len, size-len, "SLIM Mailbox\n");
- ptr = (uint32_t *)phba->slim2p;
+ ptr = (uint32_t *)phba->slim2p.virt;
i = sizeof(MAILBOX_t);
while (i > 0) {
len += snprintf(buf+len, size-len,
@@ -370,7 +468,7 @@ lpfc_debugfs_dumpHostSlim_data(struct lpfc_hba *phba, char *buf, int size)
}
len += snprintf(buf+len, size-len, "SLIM PCB\n");
- ptr = (uint32_t *)&phba->slim2p->pcb;
+ ptr = (uint32_t *)phba->pcb;
i = sizeof(PCB_t);
while (i > 0) {
len += snprintf(buf+len, size-len,
@@ -382,44 +480,16 @@ lpfc_debugfs_dumpHostSlim_data(struct lpfc_hba *phba, char *buf, int size)
off += (8 * sizeof(uint32_t));
}
- pgpp = (struct lpfc_pgp *)&phba->slim2p->mbx.us.s3_pgp.port;
- pring = &psli->ring[0];
- len += snprintf(buf+len, size-len,
- "Ring 0: CMD GetInx:%d (Max:%d Next:%d Local:%d flg:x%x) "
- "RSP PutInx:%d Max:%d\n",
- pgpp->cmdGetInx, pring->numCiocb,
- pring->next_cmdidx, pring->local_getidx, pring->flag,
- pgpp->rspPutInx, pring->numRiocb);
- pgpp++;
-
- pring = &psli->ring[1];
- len += snprintf(buf+len, size-len,
- "Ring 1: CMD GetInx:%d (Max:%d Next:%d Local:%d flg:x%x) "
- "RSP PutInx:%d Max:%d\n",
- pgpp->cmdGetInx, pring->numCiocb,
- pring->next_cmdidx, pring->local_getidx, pring->flag,
- pgpp->rspPutInx, pring->numRiocb);
- pgpp++;
-
- pring = &psli->ring[2];
- len += snprintf(buf+len, size-len,
- "Ring 2: CMD GetInx:%d (Max:%d Next:%d Local:%d flg:x%x) "
- "RSP PutInx:%d Max:%d\n",
- pgpp->cmdGetInx, pring->numCiocb,
- pring->next_cmdidx, pring->local_getidx, pring->flag,
- pgpp->rspPutInx, pring->numRiocb);
- pgpp++;
-
- pring = &psli->ring[3];
- len += snprintf(buf+len, size-len,
- "Ring 3: CMD GetInx:%d (Max:%d Next:%d Local:%d flg:x%x) "
- "RSP PutInx:%d Max:%d\n",
- pgpp->cmdGetInx, pring->numCiocb,
- pring->next_cmdidx, pring->local_getidx, pring->flag,
- pgpp->rspPutInx, pring->numRiocb);
-
-
- ptr = (uint32_t *)&phba->slim2p->mbx.us.s3_pgp.hbq_get;
+ for (i = 0; i < 4; i++) {
+ pgpp = &phba->port_gp[i];
+ pring = &psli->ring[i];
+ len += snprintf(buf+len, size-len,
+ "Ring %d: CMD GetInx:%d (Max:%d Next:%d "
+ "Local:%d flg:x%x) RSP PutInx:%d Max:%d\n",
+ i, pgpp->cmdGetInx, pring->numCiocb,
+ pring->next_cmdidx, pring->local_getidx,
+ pring->flag, pgpp->rspPutInx, pring->numRiocb);
+ }
word0 = readl(phba->HAregaddr);
word1 = readl(phba->CAregaddr);
word2 = readl(phba->HSregaddr);
@@ -430,6 +500,21 @@ lpfc_debugfs_dumpHostSlim_data(struct lpfc_hba *phba, char *buf, int size)
return len;
}
+/**
+ * lpfc_debugfs_nodelist_data - Dump target node list to a buffer.
+ * @vport: The vport to gather target node info from.
+ * @buf: The buffer to dump log into.
+ * @size: The maximum amount of data to process.
+ *
+ * Description:
+ * This routine dumps the current target node list associated with @vport to
+ * @buf up to @size bytes of data. Each node entry in the dump will contain a
+ * node state, DID, WWPN, WWNN, RPI, flags, type, and other useful fields.
+ *
+ * Return Value:
+ * This routine returns the number of bytes dumped into @buf; the value will
+ * not exceed @size.
+ **/
static int
lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
{
@@ -513,7 +598,22 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
}
#endif
-
+/**
+ * lpfc_debugfs_disc_trc - Store discovery trace log.
+ * @vport: The vport to associate this trace string with for retrieval.
+ * @mask: Log entry classification.
+ * @fmt: Format string to be displayed when dumping the log.
+ * @data1: 1st data parameter to be applied to @fmt.
+ * @data2: 2nd data parameter to be applied to @fmt.
+ * @data3: 3rd data parameter to be applied to @fmt.
+ *
+ * Description:
+ * This routine is used by the driver code to add a debugfs log entry to the
+ * discovery trace buffer associated with @vport. Only entries whose @mask
+ * matches the current debugfs discovery mask will be saved. Entries that do not
+ * match will be thrown away. @fmt, @data1, @data2, and @data3 are used like
+ * printf when displaying the log.
+ **/
inline void
lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
uint32_t data1, uint32_t data2, uint32_t data3)
@@ -542,6 +642,19 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
return;
}
+/**
+ * lpfc_debugfs_slow_ring_trc - Store slow ring trace log.
+ * @phba: The phba to associate this trace string with for retrieval.
+ * @fmt: Format string to be displayed when dumping the log.
+ * @data1: 1st data parameter to be applied to @fmt.
+ * @data2: 2nd data parameter to be applied to @fmt.
+ * @data3: 3rd data parameter to be applied to @fmt.
+ *
+ * Description:
+ * This routine is used by the driver code to add a debugfs log entry to the
+ * slow ring trace buffer associated with @phba. @fmt, @data1, @data2, and
+ * @data3 are used like printf when displaying the log.
+ **/
inline void
lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
uint32_t data1, uint32_t data2, uint32_t data3)
@@ -568,6 +681,21 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
}
#ifdef CONFIG_LPFC_DEBUG_FS
+/**
+ * lpfc_debugfs_disc_trc_open - Open the discovery trace log.
+ * @inode: The inode pointer that contains a vport pointer.
+ * @file: The file pointer to attach the log output.
+ *
+ * Description:
+ * This routine is the entry point for the debugfs open file operation. It gets
+ * the vport from the i_private field in @inode, allocates the necessary buffer
+ * for the log, fills the buffer from the in-memory log for this vport, and then
+ * returns a pointer to that log in the private_data field in @file.
+ *
+ * Returns:
+ * This function returns zero if successful. On error it will return a negative
+ * error value.
+ **/
static int
lpfc_debugfs_disc_trc_open(struct inode *inode, struct file *file)
{
@@ -585,7 +713,7 @@ lpfc_debugfs_disc_trc_open(struct inode *inode, struct file *file)
if (!debug)
goto out;
- /* Round to page boundry */
+ /* Round to page boundary */
size = (lpfc_debugfs_max_disc_trc * LPFC_DEBUG_TRC_ENTRY_SIZE);
size = PAGE_ALIGN(size);
@@ -603,6 +731,21 @@ out:
return rc;
}
+/**
+ * lpfc_debugfs_slow_ring_trc_open - Open the Slow Ring trace log.
+ * @inode: The inode pointer that contains a vport pointer.
+ * @file: The file pointer to attach the log output.
+ *
+ * Description:
+ * This routine is the entry point for the debugfs open file operation. It gets
+ * the vport from the i_private field in @inode, allocates the necessary buffer
+ * for the log, fills the buffer from the in-memory log for this vport, and then
+ * returns a pointer to that log in the private_data field in @file.
+ *
+ * Returns:
+ * This function returns zero if successful. On error it will return a negative
+ * error value.
+ **/
static int
lpfc_debugfs_slow_ring_trc_open(struct inode *inode, struct file *file)
{
@@ -620,7 +763,7 @@ lpfc_debugfs_slow_ring_trc_open(struct inode *inode, struct file *file)
if (!debug)
goto out;
- /* Round to page boundry */
+ /* Round to page boundary */
size = (lpfc_debugfs_max_slow_ring_trc * LPFC_DEBUG_TRC_ENTRY_SIZE);
size = PAGE_ALIGN(size);
@@ -638,6 +781,21 @@ out:
return rc;
}
+/**
+ * lpfc_debugfs_hbqinfo_open - Open the hbqinfo debugfs buffer.
+ * @inode: The inode pointer that contains a vport pointer.
+ * @file: The file pointer to attach the log output.
+ *
+ * Description:
+ * This routine is the entry point for the debugfs open file operation. It gets
+ * the vport from the i_private field in @inode, allocates the necessary buffer
+ * for the log, fills the buffer from the in-memory log for this vport, and then
+ * returns a pointer to that log in the private_data field in @file.
+ *
+ * Returns:
+ * This function returns zero if successful. On error it will return a negative
+ * error value.
+ **/
static int
lpfc_debugfs_hbqinfo_open(struct inode *inode, struct file *file)
{
@@ -649,7 +807,7 @@ lpfc_debugfs_hbqinfo_open(struct inode *inode, struct file *file)
if (!debug)
goto out;
- /* Round to page boundry */
+ /* Round to page boundary */
debug->buffer = kmalloc(LPFC_HBQINFO_SIZE, GFP_KERNEL);
if (!debug->buffer) {
kfree(debug);
@@ -665,6 +823,21 @@ out:
return rc;
}
+/**
+ * lpfc_debugfs_dumpHBASlim_open - Open the Dump HBA SLIM debugfs buffer.
+ * @inode: The inode pointer that contains a vport pointer.
+ * @file: The file pointer to attach the log output.
+ *
+ * Description:
+ * This routine is the entry point for the debugfs open file operation. It gets
+ * the vport from the i_private field in @inode, allocates the necessary buffer
+ * for the log, fills the buffer from the in-memory log for this vport, and then
+ * returns a pointer to that log in the private_data field in @file.
+ *
+ * Returns:
+ * This function returns zero if successful. On error it will return a negative
+ * error value.
+ **/
static int
lpfc_debugfs_dumpHBASlim_open(struct inode *inode, struct file *file)
{
@@ -676,7 +849,7 @@ lpfc_debugfs_dumpHBASlim_open(struct inode *inode, struct file *file)
if (!debug)
goto out;
- /* Round to page boundry */
+ /* Round to page boundary */
debug->buffer = kmalloc(LPFC_DUMPHBASLIM_SIZE, GFP_KERNEL);
if (!debug->buffer) {
kfree(debug);
@@ -692,6 +865,21 @@ out:
return rc;
}
+/**
+ * lpfc_debugfs_dumpHostSlim_open - Open the Dump Host SLIM debugfs buffer.
+ * @inode: The inode pointer that contains a vport pointer.
+ * @file: The file pointer to attach the log output.
+ *
+ * Description:
+ * This routine is the entry point for the debugfs open file operation. It gets
+ * the vport from the i_private field in @inode, allocates the necessary buffer
+ * for the log, fills the buffer from the in-memory log for this vport, and then
+ * returns a pointer to that log in the private_data field in @file.
+ *
+ * Returns:
+ * This function returns zero if successful. On error it will return a negative
+ * error value.
+ **/
static int
lpfc_debugfs_dumpHostSlim_open(struct inode *inode, struct file *file)
{
@@ -703,7 +891,7 @@ lpfc_debugfs_dumpHostSlim_open(struct inode *inode, struct file *file)
if (!debug)
goto out;
- /* Round to page boundry */
+ /* Round to page boundary */
debug->buffer = kmalloc(LPFC_DUMPHOSTSLIM_SIZE, GFP_KERNEL);
if (!debug->buffer) {
kfree(debug);
@@ -719,6 +907,21 @@ out:
return rc;
}
+/**
+ * lpfc_debugfs_nodelist_open - Open the nodelist debugfs file.
+ * @inode: The inode pointer that contains a vport pointer.
+ * @file: The file pointer to attach the log output.
+ *
+ * Description:
+ * This routine is the entry point for the debugfs open file operation. It gets
+ * the vport from the i_private field in @inode, allocates the necessary buffer
+ * for the log, fills the buffer from the in-memory log for this vport, and then
+ * returns a pointer to that log in the private_data field in @file.
+ *
+ * Returns:
+ * This function returns zero if successful. On error it will return a negative
+ * error value.
+ **/
static int
lpfc_debugfs_nodelist_open(struct inode *inode, struct file *file)
{
@@ -730,7 +933,7 @@ lpfc_debugfs_nodelist_open(struct inode *inode, struct file *file)
if (!debug)
goto out;
- /* Round to page boundry */
+ /* Round to page boundary */
debug->buffer = kmalloc(LPFC_NODELIST_SIZE, GFP_KERNEL);
if (!debug->buffer) {
kfree(debug);
@@ -746,6 +949,23 @@ out:
return rc;
}
+/**
+ * lpfc_debugfs_lseek - Seek through a debugfs file.
+ * @file: The file pointer to seek through.
+ * @off: The offset to seek to or the amount to seek by.
+ * @whence: Indicates how to seek.
+ *
+ * Description:
+ * This routine is the entry point for the debugfs lseek file operation. The
+ * @whence parameter indicates whether @off is the offset to directly seek to,
+ * or if it is a value to seek forward or reverse by. This function figures out
+ * what the new offset of the debugfs file will be and assigns that value to the
+ * f_pos field of @file.
+ *
+ * Returns:
+ * This function returns the new offset if successful and returns a negative
+ * error if unable to process the seek.
+ **/
static loff_t
lpfc_debugfs_lseek(struct file *file, loff_t off, int whence)
{
@@ -767,6 +987,22 @@ lpfc_debugfs_lseek(struct file *file, loff_t off, int whence)
return (pos < 0 || pos > debug->len) ? -EINVAL : (file->f_pos = pos);
}
+/**
+ * lpfc_debugfs_read - Read a debugfs file.
+ * @file: The file pointer to read from.
+ * @buf: The buffer to copy the data to.
+ * @nbytes: The number of bytes to read.
+ * @ppos: The position in the file to start reading from.
+ *
+ * Description:
+ * This routine reads data from the buffer indicated in the private_data
+ * field of @file. It will start reading at @ppos and copy up to @nbytes of
+ * data to @buf.
+ *
+ * Returns:
+ * This function returns the amount of data that was read (this could be less
+ * than @nbytes if the end of the file was reached) or a negative error value.
+ **/
static ssize_t
lpfc_debugfs_read(struct file *file, char __user *buf,
size_t nbytes, loff_t *ppos)
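The read path described in the comment above is essentially a thin wrapper around the
kernel's simple_read_from_buffer() helper, copying from a preallocated per-open buffer.
A minimal sketch of such a wrapper, assuming a private structure with buffer/len fields
like the one used in the hunks above (the struct and function names here are
illustrative, not necessarily the exact lpfc definitions):

	#include <linux/fs.h>		/* simple_read_from_buffer() */

	struct lpfc_debug {		/* illustrative; mirrors the fields used above */
		char *buffer;
		int len;
	};

	static ssize_t example_debugfs_read(struct file *file, char __user *buf,
					    size_t nbytes, loff_t *ppos)
	{
		struct lpfc_debug *debug = file->private_data;

		/* Copy at most nbytes from debug->buffer, starting at *ppos. */
		return simple_read_from_buffer(buf, nbytes, ppos,
					       debug->buffer, debug->len);
	}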
@@ -776,6 +1012,18 @@ lpfc_debugfs_read(struct file *file, char __user *buf,
debug->len);
}
+/**
+ * lpfc_debugfs_release - Release the buffer used to store debugfs file data.
+ * @inode: The inode pointer that contains a vport pointer. (unused)
+ * @file: The file pointer that contains the buffer to release.
+ *
+ * Description:
+ * This routine frees the buffer that was allocated when the debugfs file was
+ * opened.
+ *
+ * Returns:
+ * This function returns zero.
+ **/
static int
lpfc_debugfs_release(struct inode *inode, struct file *file)
{
@@ -845,6 +1093,16 @@ static struct dentry *lpfc_debugfs_root = NULL;
static atomic_t lpfc_debugfs_hba_count;
#endif
+/**
+ * lpfc_debugfs_initialize - Initialize debugfs for a vport.
+ * @vport: The vport pointer to initialize.
+ *
+ * Description:
+ * When debugfs is configured, this routine sets up the lpfc debugfs file system.
+ * If not already created, this routine will create the lpfc directory, the
+ * lpfcX directory (for this HBA), and the vportX directory for this vport. It
+ * will also create each file used to access lpfc specific debugfs information.
+ **/
inline void
lpfc_debugfs_initialize(struct lpfc_vport *vport)
{
@@ -862,7 +1120,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
atomic_set(&lpfc_debugfs_hba_count, 0);
if (!lpfc_debugfs_root) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "0409 Cannot create debugfs root\n");
+ "0408 Cannot create debugfs root\n");
goto debug_failed;
}
}
@@ -876,7 +1134,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
debugfs_create_dir(name, lpfc_debugfs_root);
if (!phba->hba_debugfs_root) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "0409 Cannot create debugfs hba\n");
+ "0412 Cannot create debugfs hba\n");
goto debug_failed;
}
atomic_inc(&lpfc_debugfs_hba_count);
@@ -890,7 +1148,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
phba, &lpfc_debugfs_op_hbqinfo);
if (!phba->debug_hbqinfo) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "0409 Cannot create debugfs hbqinfo\n");
+ "0411 Cannot create debugfs hbqinfo\n");
goto debug_failed;
}
@@ -902,7 +1160,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
phba, &lpfc_debugfs_op_dumpHBASlim);
if (!phba->debug_dumpHBASlim) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "0409 Cannot create debugfs dumpHBASlim\n");
+ "0413 Cannot create debugfs dumpHBASlim\n");
goto debug_failed;
}
@@ -914,7 +1172,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
phba, &lpfc_debugfs_op_dumpHostSlim);
if (!phba->debug_dumpHostSlim) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "0409 Cannot create debugfs dumpHostSlim\n");
+ "0414 Cannot create debugfs dumpHostSlim\n");
goto debug_failed;
}
@@ -944,7 +1202,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
phba, &lpfc_debugfs_op_slow_ring_trc);
if (!phba->debug_slow_ring_trc) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "0409 Cannot create debugfs "
+ "0415 Cannot create debugfs "
"slow_ring_trace\n");
goto debug_failed;
}
@@ -955,7 +1213,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
GFP_KERNEL);
if (!phba->slow_ring_trc) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "0409 Cannot create debugfs "
+ "0416 Cannot create debugfs "
"slow_ring buffer\n");
goto debug_failed;
}
@@ -972,7 +1230,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
debugfs_create_dir(name, phba->hba_debugfs_root);
if (!vport->vport_debugfs_root) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "0409 Cant create debugfs");
+ "0417 Cant create debugfs");
goto debug_failed;
}
atomic_inc(&phba->debugfs_vport_count);
@@ -1001,7 +1259,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
if (!vport->disc_trc) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "0409 Cannot create debugfs disc trace "
+ "0418 Cannot create debugfs disc trace "
"buffer\n");
goto debug_failed;
}
@@ -1014,7 +1272,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
vport, &lpfc_debugfs_op_disc_trc);
if (!vport->debug_disc_trc) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "0409 Cannot create debugfs "
+ "0419 Cannot create debugfs "
"discovery_trace\n");
goto debug_failed;
}
@@ -1033,7 +1291,17 @@ debug_failed:
#endif
}
-
+/**
+ * lpfc_debugfs_terminate - Tear down debugfs infrastructure for this vport.
+ * @vport: The vport pointer to remove from debugfs.
+ *
+ * Description:
+ * When debugfs is configured, this routine removes debugfs file system elements
+ * that are specific to this vport. It also checks to see if there are any
+ * users left for the debugfs directories associated with the HBA and driver. If
+ * this is the last user of the HBA directory or driver directory then it will
+ * remove those from the debugfs infrastructure as well.
+ **/
inline void
lpfc_debugfs_terminate(struct lpfc_vport *vport)
{
@@ -1096,5 +1364,3 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport)
#endif
return;
}
-
-
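All of the *_data() helpers documented above share the same pattern: accumulate
formatted text into a caller-supplied buffer with snprintf(), never writing past
@size, and return the number of bytes produced. A small, self-contained sketch of
that pattern (the function and its output are illustrative only, not lpfc code):

	#include <stdio.h>

	static int example_dump_data(char *buf, int size)
	{
		int len = 0;
		int i;

		/* Header first, then one line per entry, never exceeding size. */
		len += snprintf(buf + len, size - len, "example dump\n");
		for (i = 0; i < 4 && len < size; i++)
			len += snprintf(buf + len, size - len, "entry %d\n", i);
		/* snprintf() reports the untruncated length, so clamp to size. */
		return len < size ? len : size;
	}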
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index 2db0b74b6fa..f29e548a90d 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -37,6 +37,7 @@ enum lpfc_work_type {
LPFC_EVT_KILL,
LPFC_EVT_ELS_RETRY,
LPFC_EVT_DEV_LOSS,
+ LPFC_EVT_FASTPATH_MGMT_EVT,
};
/* structure used to queue event to the discovery tasklet */
@@ -47,6 +48,24 @@ struct lpfc_work_evt {
enum lpfc_work_type evt;
};
+struct lpfc_scsi_check_condition_event;
+struct lpfc_scsi_varqueuedepth_event;
+struct lpfc_scsi_event_header;
+struct lpfc_fabric_event_header;
+struct lpfc_fcprdchkerr_event;
+
+/* structure used for sending events from fast path */
+struct lpfc_fast_path_event {
+ struct lpfc_work_evt work_evt;
+ struct lpfc_vport *vport;
+ union {
+ struct lpfc_scsi_check_condition_event check_cond_evt;
+ struct lpfc_scsi_varqueuedepth_event queue_depth_evt;
+ struct lpfc_scsi_event_header scsi_evt;
+ struct lpfc_fabric_event_header fabric_evt;
+ struct lpfc_fcprdchkerr_event read_check_error;
+ } un;
+};
struct lpfc_nodelist {
struct list_head nlp_listp;
@@ -88,6 +107,10 @@ struct lpfc_nodelist {
unsigned long last_ramp_up_time; /* jiffy of last ramp up */
unsigned long last_q_full_time; /* jiffy of last queue full */
struct kref kref;
+ atomic_t cmd_pending;
+ uint32_t cmd_qdepth;
+ unsigned long last_change_time;
+ struct lpfc_scsicmd_bkt *lat_data; /* Latency data */
};
/* Defines for nlp_flag (uint32) */
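The new struct lpfc_fast_path_event above embeds a struct lpfc_work_evt as its first
member, so whatever dequeues the generic work entry can recover the enclosing
fast-path event. A hypothetical sketch of that recovery using container_of() (the
handler name is illustrative and not part of this patch):

	#include <linux/kernel.h>	/* container_of() */

	static void example_handle_fast_path(struct lpfc_work_evt *evtp)
	{
		struct lpfc_fast_path_event *fast_evt;

		/* Recover the enclosing event from its embedded work entry. */
		fast_evt = container_of(evtp, struct lpfc_fast_path_event,
					work_evt);
		/* fast_evt->un then carries the payload for this event type. */
	}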
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index f54e0f7eaee..630bd28fb99 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -30,6 +30,7 @@
#include "lpfc_hw.h"
#include "lpfc_sli.h"
+#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
@@ -53,6 +54,28 @@ static void lpfc_register_new_vport(struct lpfc_hba *phba,
static int lpfc_max_els_tries = 3;
+/**
+ * lpfc_els_chk_latt: Check host link attention event for a vport.
+ * @vport: pointer to a host virtual N_Port data structure.
+ *
+ * This routine checks whether there is an outstanding host link
+ * attention event during the discovery process with the @vport. It is done
+ * by reading the HBA's Host Attention (HA) register. If there is any host
+ * link attention events during this @vport's discovery process, the @vport
+ * shall be marked as FC_ABORT_DISCOVERY, a host link attention clear shall
+ * be issued if the link state is not already in host link cleared state,
+ * and a return code shall indicate whether the host link attention event
+ * had happened.
+ *
+ * Note that, if either the host link is in state LPFC_LINK_DOWN or the @vport
+ * state is LPFC_VPORT_READY, the request for checking host link attention
+ * event will be ignored and a return code shall indicate no host link
+ * attention event had happened.
+ *
+ * Return codes
+ * 0 - no host link attention event happened
+ * 1 - host link attention event happened
+ **/
int
lpfc_els_chk_latt(struct lpfc_vport *vport)
{
@@ -92,6 +115,34 @@ lpfc_els_chk_latt(struct lpfc_vport *vport)
return 1;
}
+/**
+ * lpfc_prep_els_iocb: Allocate and prepare a lpfc iocb data structure.
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @expectRsp: flag indicating whether response is expected.
+ * @cmdSize: size of the ELS command.
+ * @retry: number of retries to the command IOCB when it fails.
+ * @ndlp: pointer to a node-list data structure.
+ * @did: destination identifier.
+ * @elscmd: the ELS command code.
+ *
+ * This routine allocates a lpfc-IOCB data structure from
+ * the driver lpfc-IOCB free-list and prepares the IOCB with the parameters
+ * passed into the routine so the discovery state machine can issue Extended
+ * Link Service (ELS) commands. It is a generic lpfc-IOCB allocation
+ * and preparation routine used by all the discovery state machine
+ * routines; the ELS command-specific fields will be set up later by
+ * the individual discovery state machine routines after this routine has
+ * allocated and prepared a generic IOCB data structure. It fills in the
+ * Buffer Descriptor Entries (BDEs), allocates buffers for both command
+ * payload and response payload (if expected). The reference count on the
+ * ndlp is incremented by 1 and the reference to the ndlp is put into
+ * context1 of the IOCB data structure for this IOCB to hold the ndlp
+ * reference for the command's callback function to access later.
+ *
+ * Return code
+ * Pointer to the newly allocated/prepared els iocb data structure
+ * NULL - when els iocb data structure allocation/preparation failed
+ **/
static struct lpfc_iocbq *
lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
uint16_t cmdSize, uint8_t retry,
@@ -150,7 +201,7 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
icmd->un.elsreq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys);
icmd->un.elsreq64.bdl.addrLow = putPaddrLow(pbuflist->phys);
- icmd->un.elsreq64.bdl.bdeFlags = BUFF_TYPE_BDL;
+ icmd->un.elsreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
icmd->un.elsreq64.remoteID = did; /* DID */
if (expectRsp) {
icmd->un.elsreq64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
@@ -185,7 +236,7 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
bpl->addrLow = le32_to_cpu(putPaddrLow(prsp->phys));
bpl->addrHigh = le32_to_cpu(putPaddrHigh(prsp->phys));
bpl->tus.f.bdeSize = FCELSSIZE;
- bpl->tus.f.bdeFlags = BUFF_USE_RCV;
+ bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
bpl->tus.w = le32_to_cpu(bpl->tus.w);
}
@@ -233,6 +284,22 @@ els_iocb_free_pcmb_exit:
return NULL;
}
+/**
+ * lpfc_issue_fabric_reglogin: Issue fabric registration login for a vport.
+ * @vport: pointer to a host virtual N_Port data structure.
+ *
+ * This routine issues a fabric registration login for a @vport. An
+ * active ndlp node with Fabric_DID must already exist for this @vport.
+ * The routine invokes two mailbox commands to carry out fabric registration
+ * login through the HBA firmware: the first mailbox command requests the
+ * HBA to perform link configuration for the @vport; and the second mailbox
+ * command requests the HBA to perform the actual fabric registration login
+ * with the @vport.
+ *
+ * Return code
+ * 0 - successfully issued fabric registration login for @vport
+ * -ENXIO -- failed to issue fabric registration login for @vport
+ **/
static int
lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
{
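The bdeFlags changes in the hunks above (BUFF_TYPE_BDL to BUFF_TYPE_BLP_64, and
BUFF_USE_RCV to BUFF_TYPE_BDE_64) switch the ELS buffer descriptors to the 64-bit
descriptor types. A rough sketch, mirroring the response-BDE setup shown above, of
how one such descriptor might be filled from a DMA-mapped buffer (the helper name is
illustrative; putPaddrHigh/putPaddrLow and the ulp_bde64 fields are those visible in
the diff):

	static void example_fill_rsp_bde(struct ulp_bde64 *bpl, dma_addr_t phys,
					 uint32_t size)
	{
		bpl->addrLow  = le32_to_cpu(putPaddrLow(phys));
		bpl->addrHigh = le32_to_cpu(putPaddrHigh(phys));
		bpl->tus.f.bdeSize  = size;
		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;	/* 64-bit receive BDE */
		bpl->tus.w = le32_to_cpu(bpl->tus.w);
	}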
@@ -313,6 +380,26 @@ fail:
return -ENXIO;
}
+/**
+ * lpfc_cmpl_els_flogi_fabric: Completion function for flogi to a fabric port.
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @ndlp: pointer to a node-list data structure.
+ * @sp: pointer to service parameter data structure.
+ * @irsp: pointer to the IOCB within the lpfc response IOCB.
+ *
+ * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback
+ * function to handle the completion of a Fabric Login (FLOGI) into a fabric
+ * port in a fabric topology. It properly sets up the parameters to the @ndlp
+ * from the IOCB response. It also checks the newly assigned N_Port ID to the
+ * @vport against the previously assigned N_Port ID. If it is different from
+ * the previously assigned Destination ID (DID), the lpfc_unreg_rpi() routine
+ * is invoked on all the remaining nodes with the @vport to unregister the
+ * Remote Port Indicators (RPIs). Finally, the lpfc_issue_fabric_reglogin()
+ * is invoked to register login to the fabric.
+ *
+ * Return code
+ * 0 - Success (currently, always return 0)
+ **/
static int
lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct serv_parm *sp, IOCB_t *irsp)
@@ -387,7 +474,7 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
*/
list_for_each_entry_safe(np, next_np,
&vport->fc_nodes, nlp_listp) {
- if (!NLP_CHK_NODE_ACT(ndlp))
+ if (!NLP_CHK_NODE_ACT(np))
continue;
if ((np->nlp_state != NLP_STE_NPR_NODE) ||
!(np->nlp_flag & NLP_NPR_ADISC))
@@ -416,9 +503,26 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
return 0;
}
-/*
- * We FLOGIed into an NPort, initiate pt2pt protocol
- */
+/**
+ * lpfc_cmpl_els_flogi_nport: Completion function for flogi to an N_Port.
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @ndlp: pointer to a node-list data structure.
+ * @sp: pointer to service parameter data structure.
+ *
+ * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback
+ * function to handle the completion of a Fabric Login (FLOGI) into an N_Port
+ * in a point-to-point topology. First, the @vport's N_Port Name is compared
+ * with the received N_Port Name: if the @vport's N_Port Name is greater than
+ * the received N_Port Name lexicographically, this node shall assign local
+ * N_Port ID (PT2PT_LocalID: 1) and remote N_Port ID (PT2PT_RemoteID: 2) and
+ * will send out Port Login (PLOGI) with the N_Port IDs assigned. Otherwise,
+ * this node shall just wait for the remote node to issue PLOGI and assign
+ * N_Port IDs.
+ *
+ * Return code
+ * 0 - Success
+ * -ENXIO - Fail
+ **/
static int
lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct serv_parm *sp)
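The point-to-point decision described in the comment above reduces to a byte-wise
comparison of the two 8-byte port names. A small standalone sketch of that decision
(PT2PT_LocalID and PT2PT_RemoteID are the values quoted in the comment; the function
itself is illustrative, not lpfc code):

	#include <stdint.h>
	#include <string.h>

	#define PT2PT_LocalID  1
	#define PT2PT_RemoteID 2

	/* Returns nonzero if this port should assign the IDs and send PLOGI. */
	static int example_pt2pt_initiator(const uint8_t our_wwpn[8],
					   const uint8_t their_wwpn[8])
	{
		/* The lexicographically greater port name takes the initiative. */
		return memcmp(our_wwpn, their_wwpn, 8) > 0;
	}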
@@ -516,6 +620,29 @@ fail:
return -ENXIO;
}
+/**
+ * lpfc_cmpl_els_flogi: Completion callback function for flogi.
+ * @phba: pointer to lpfc hba data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @rspiocb: pointer to lpfc response iocb data structure.
+ *
+ * This routine is the top-level completion callback function for issuing
+ * a Fabric Login (FLOGI) command. If the response IOCB reported an error,
+ * the lpfc_els_retry() routine shall be invoked to retry the FLOGI. If
+ * retry has been made (either immediately or delayed with lpfc_els_retry()
+ * returning 1), the command IOCB will be released and function returned.
+ * If the retry attempt has been given up (possibly having reached the maximum
+ * number of retries), one additional decrement of ndlp reference shall be
+ * invoked before going out after releasing the command IOCB. This will
+ * actually release the remote node (Note, lpfc_els_free_iocb() will also
+ * invoke one decrement of ndlp reference count). If no error reported in
+ * the IOCB status, the command Port ID field is used to determine whether
+ * this is a point-to-point topology or a fabric topology: if the Port ID
+ * field is assigned, it is a fabric topology; otherwise, it is a
+ * point-to-point topology. The routine lpfc_cmpl_els_flogi_fabric() or
+ * lpfc_cmpl_els_flogi_nport() shall be invoked accordingly to handle the
+ * specific topology completion conditions.
+ **/
static void
lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
@@ -618,6 +745,28 @@ out:
lpfc_els_free_iocb(phba, cmdiocb);
}
+/**
+ * lpfc_issue_els_flogi: Issue a flogi iocb command for a vport.
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @ndlp: pointer to a node-list data structure.
+ * @retry: number of retries to the command IOCB.
+ *
+ * This routine issues a Fabric Login (FLOGI) Request ELS command
+ * for a @vport. The initiator service parameters are put into the payload
+ * of the FLOGI Request IOCB and the top-level callback function pointer
+ * to lpfc_cmpl_els_flogi() routine is put to the IOCB completion callback
+ * function field. The lpfc_issue_fabric_iocb routine is invoked to send
+ * out FLOGI ELS command with one outstanding fabric IOCB at a time.
+ *
+ * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
+ * will be incremented by 1 for holding the ndlp and the reference to ndlp
+ * will be stored into the context1 field of the IOCB for the completion
+ * callback function to the FLOGI ELS command.
+ *
+ * Return code
+ * 0 - successfully issued flogi iocb for @vport
+ * 1 - failed to issue flogi iocb for @vport
+ **/
static int
lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
uint8_t retry)
@@ -694,6 +843,20 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
return 0;
}
+/**
+ * lpfc_els_abort_flogi: Abort all outstanding flogi iocbs.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine aborts all the outstanding Fabric Login (FLOGI) IOCBs
+ * with a @phba. This routine walks all the outstanding IOCBs on the txcmplq
+ * list and issues an abort IOCB command on each outstanding IOCB that
+ * contains an active Fabric_DID ndlp. Note that this function only issues
+ * the abort IOCB command on all the outstanding IOCBs; thus, when this
+ * function returns, it does not guarantee all the IOCBs are actually aborted.
+ *
+ * Return code
+ * 0 - Successfully issued abort iocb on all outstanding flogis (Always 0)
+ **/
int
lpfc_els_abort_flogi(struct lpfc_hba *phba)
{
@@ -729,6 +892,22 @@ lpfc_els_abort_flogi(struct lpfc_hba *phba)
return 0;
}
+/**
+ * lpfc_initial_flogi: Issue an initial fabric login for a vport.
+ * @vport: pointer to a host virtual N_Port data structure.
+ *
+ * This routine issues an initial Fabric Login (FLOGI) for the @vport
+ * specified. It first searches the ndlp with the Fabric_DID (0xfffffe) from
+ * the @vport's ndlp list. If no such ndlp is found, it will create an ndlp and
+ * put it into the @vport's ndlp list. If an inactive ndlp is found on the list,
+ * it will just be enabled and made active. The lpfc_issue_els_flogi() routine
+ * is then invoked with the @vport and the ndlp to perform the FLOGI for the
+ * @vport.
+ *
+ * Return code
+ * 0 - failed to issue initial flogi for @vport
+ * 1 - successfully issued initial flogi for @vport
+ **/
int
lpfc_initial_flogi(struct lpfc_vport *vport)
{
@@ -764,6 +943,22 @@ lpfc_initial_flogi(struct lpfc_vport *vport)
return 1;
}
+/**
+ * lpfc_initial_fdisc: Issue an initial fabric discovery for a vport.
+ * @vport: pointer to a host virtual N_Port data structure.
+ *
+ * This routine issues an initial Fabric Discover (FDISC) for the @vport
+ * specified. It first searches the ndlp with the Fabric_DID (0xfffffe) from
+ * the @vport's ndlp list. If no such ndlp found, it will create an ndlp and
+ * put it into the @vport's ndlp list. If an inactive ndlp found on the list,
+ * it will just be enabled and made active. The lpfc_issue_els_fdisc() routine
+ * is then invoked with the @vport and the ndlp to perform the FDISC for the
+ * @vport.
+ *
+ * Return code
+ * 0 - failed to issue initial fdisc for @vport
+ * 1 - successfully issued initial fdisc for @vport
+ **/
int
lpfc_initial_fdisc(struct lpfc_vport *vport)
{
@@ -797,6 +992,17 @@ lpfc_initial_fdisc(struct lpfc_vport *vport)
return 1;
}
+/**
+ * lpfc_more_plogi: Check and issue remaining plogis for a vport.
+ * @vport: pointer to a host virtual N_Port data structure.
+ *
+ * This routine checks whether there are more remaining Port Logins
+ * (PLOGI) to be issued for the @vport. If so, it will invoke the routine
+ * lpfc_els_disc_plogi() to go through the Node Port Recovery (NPR) nodes
+ * to issue ELS PLOGIs up to the configured discover threads with the
+ * @vport (@vport->cfg_discovery_threads). The function also decrements
+ * the @vport's num_disc_nodes by 1 if it is not already 0.
+ **/
void
lpfc_more_plogi(struct lpfc_vport *vport)
{
@@ -819,6 +1025,37 @@ lpfc_more_plogi(struct lpfc_vport *vport)
return;
}
+/**
+ * lpfc_plogi_confirm_nport: Confirm plogi wwpn matches stored ndlp.
+ * @phba: pointer to lpfc hba data structure.
+ * @prsp: pointer to response IOCB payload.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine checks and indicates whether the WWPN of an N_Port, retrieved
+ * from a PLOGI, matches the WWPN that is stored in the @ndlp for that N_Port.
+ * The following cases are considered N_Port confirmed:
+ * 1) The N_Port is a Fabric ndlp; 2) The @ndlp is on vport list and matches
+ * the WWPN of the N_Port logged into; 3) The @ndlp is not on vport list but
+ * it does not have WWPN assigned either. If the WWPN is confirmed, the
+ * pointer to the @ndlp will be returned. If the WWPN is not confirmed:
+ * 1) if there is a node on vport list other than the @ndlp with the same
+ * WWPN of the N_Port PLOGI logged into, the lpfc_unreg_rpi() will be invoked
+ * on that node to release the RPI associated with the node; 2) if there is
+ * no node found on vport list with the same WWPN of the N_Port PLOGI logged
+ * into, a new node shall be allocated (or activated). In either case, the
+ * parameters of the @ndlp shall be copied to the new_ndlp, the @ndlp shall
+ * be released and the new_ndlp shall be put on to the vport node list and
+ * its pointer returned as the confirmed node.
+ *
+ * Note that before the @ndlp is "released", the keepDID from the not-matching
+ * or inactive "new_ndlp" on the vport node list is assigned to the nlp_DID
+ * of the @ndlp. This is because releasing the @ndlp actually puts it
+ * into an inactive state on the vport node list, and the vport node list
+ * management algorithm does not allow two nodes with the same DID.
+ *
+ * Return code
+ * pointer to the PLOGI N_Port @ndlp
+ **/
static struct lpfc_nodelist *
lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
struct lpfc_nodelist *ndlp)
@@ -922,6 +1159,17 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
return new_ndlp;
}
+/**
+ * lpfc_end_rscn: Check and handle more rscn for a vport.
+ * @vport: pointer to a host virtual N_Port data structure.
+ *
+ * This routine checks whether more Registration State Change
+ * Notifications (RSCNs) came in while the discovery state machine was in
+ * the FC_RSCN_MODE. If so, the lpfc_els_handle_rscn() routine will be
+ * invoked to handle the additional RSCNs for the @vport. Otherwise, the
+ * FC_RSCN_MODE bit will be cleared with the @vport to mark as the end of
+ * handling the RSCNs.
+ **/
void
lpfc_end_rscn(struct lpfc_vport *vport)
{
@@ -943,6 +1191,26 @@ lpfc_end_rscn(struct lpfc_vport *vport)
}
}
+/**
+ * lpfc_cmpl_els_plogi: Completion callback function for plogi.
+ * @phba: pointer to lpfc hba data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @rspiocb: pointer to lpfc response iocb data structure.
+ *
+ * This routine is the completion callback function for issuing the Port
+ * Login (PLOGI) command. For PLOGI completion, there must be an active
+ * ndlp on the vport node list that matches the remote node ID from the
+ * PLOGI response IOCB. If no such ndlp exists, the PLOGI is simply
+ * ignored and the command IOCB released. The PLOGI response IOCB status is
+ * checked for error conditions. If an error status is reported, PLOGI
+ * retry shall be attempted by invoking the lpfc_els_retry() routine.
+ * Otherwise, the lpfc_plogi_confirm_nport() routine shall be invoked on
+ * the ndlp and the NLP_EVT_CMPL_PLOGI state to the Discover State Machine
+ * (DSM) is set for this PLOGI completion. Finally, it checks whether
+ * there are additional N_Port nodes with the vport that need to perform
+ * PLOGI. If so, the lpfc_more_plogi() routine is invoked to issue additional
+ * PLOGIs.
+ **/
static void
lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
@@ -1048,6 +1316,27 @@ out:
return;
}
+/**
+ * lpfc_issue_els_plogi: Issue a plogi iocb command for a vport.
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @did: destination port identifier.
+ * @retry: number of retries to the command IOCB.
+ *
+ * This routine issues a Port Login (PLOGI) command to a remote N_Port
+ * (with the @did) for a @vport. Before issuing a PLOGI to a remote N_Port,
+ * the ndlp with the remote N_Port DID must exist on the @vport's ndlp list.
+ * This routine constructs the proper fields of the PLOGI IOCB and invokes
+ * the lpfc_sli_issue_iocb() routine to send out PLOGI ELS command.
+ *
+ * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
+ * will be incremented by 1 for holding the ndlp and the reference to ndlp
+ * will be stored into the context1 field of the IOCB for the completion
+ * callback function to the PLOGI ELS command.
+ *
+ * Return code
+ * 0 - Successfully issued a plogi for @vport
+ * 1 - failed to issue a plogi for @vport
+ **/
int
lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
{
@@ -1106,6 +1395,19 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
return 0;
}
+/**
+ * lpfc_cmpl_els_prli: Completion callback function for prli.
+ * @phba: pointer to lpfc hba data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @rspiocb: pointer to lpfc response iocb data structure.
+ *
+ * This routine is the completion callback function for a Process Login
+ * (PRLI) ELS command. The PRLI response IOCB status is checked for error
+ * status. If an error status is reported, PRLI retry shall be attempted
+ * by invoking the lpfc_els_retry() routine. Otherwise, the state
+ * NLP_EVT_CMPL_PRLI is sent to the Discover State Machine (DSM) for this
+ * ndlp to mark the PRLI completion.
+ **/
static void
lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
@@ -1164,6 +1466,27 @@ out:
return;
}
+/**
+ * lpfc_issue_els_prli: Issue a prli iocb command for a vport.
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @ndlp: pointer to a node-list data structure.
+ * @retry: number of retries to the command IOCB.
+ *
+ * This routine issues a Process Login (PRLI) ELS command for the
+ * @vport. The PRLI service parameters are set up in the payload of the
+ * PRLI Request command and the pointer to lpfc_cmpl_els_prli() routine
+ * is put to the IOCB completion callback func field before invoking the
+ * routine lpfc_sli_issue_iocb() to send out PRLI command.
+ *
+ * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
+ * will be incremented by 1 for holding the ndlp and the reference to ndlp
+ * will be stored into the context1 field of the IOCB for the completion
+ * callback function to the PRLI ELS command.
+ *
+ * Return code
+ * 0 - successfully issued prli iocb command for @vport
+ * 1 - failed to issue prli iocb command for @vport
+ **/
int
lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
uint8_t retry)
@@ -1233,6 +1556,92 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
return 0;
}
+/**
+ * lpfc_rscn_disc: Perform rscn discovery for a vport.
+ * @vport: pointer to a host virtual N_Port data structure.
+ *
+ * This routine performs Registration State Change Notification (RSCN)
+ * discovery for a @vport. If the @vport's node port recovery count is not
+ * zero, it will invoke the lpfc_els_disc_plogi() to perform PLOGI for all
+ * the nodes that need recovery. If no PLOGIs were needed through
+ * the lpfc_els_disc_plogi() routine, the lpfc_end_rscn() routine shall be
+ * invoked to check for and handle any additional RSCNs that came in while
+ * the current ones were being processed.
+ **/
+static void
+lpfc_rscn_disc(struct lpfc_vport *vport)
+{
+ lpfc_can_disctmo(vport);
+
+ /* RSCN discovery */
+ /* go thru NPR nodes and issue ELS PLOGIs */
+ if (vport->fc_npr_cnt)
+ if (lpfc_els_disc_plogi(vport))
+ return;
+
+ lpfc_end_rscn(vport);
+}
+
+/**
+ * lpfc_adisc_done: Complete the adisc phase of discovery.
+ * @vport: pointer to lpfc_vport hba data structure that finished all ADISCs.
+ *
+ * This function is called when the final ADISC is completed during discovery.
+ * This function handles clearing link attention or issuing reg_vpi depending
+ * on whether npiv is enabled. This function also kicks off the PLOGI phase of
+ * discovery.
+ * This function is called with no locks held.
+ **/
+static void
+lpfc_adisc_done(struct lpfc_vport *vport)
+{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_hba *phba = vport->phba;
+
+ /*
+ * For NPIV, cmpl_reg_vpi will set port_state to READY,
+ * and continue discovery.
+ */
+ if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
+ !(vport->fc_flag & FC_RSCN_MODE)) {
+ lpfc_issue_reg_vpi(phba, vport);
+ return;
+ }
+ /*
+ * For SLI2, we need to set port_state to READY
+ * and continue discovery.
+ */
+ if (vport->port_state < LPFC_VPORT_READY) {
+ /* If we get here, there is nothing to ADISC */
+ if (vport->port_type == LPFC_PHYSICAL_PORT)
+ lpfc_issue_clear_la(phba, vport);
+ if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
+ vport->num_disc_nodes = 0;
+ /* go thru NPR list, issue ELS PLOGIs */
+ if (vport->fc_npr_cnt)
+ lpfc_els_disc_plogi(vport);
+ if (!vport->num_disc_nodes) {
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag &= ~FC_NDISC_ACTIVE;
+ spin_unlock_irq(shost->host_lock);
+ lpfc_can_disctmo(vport);
+ lpfc_end_rscn(vport);
+ }
+ }
+ vport->port_state = LPFC_VPORT_READY;
+ } else
+ lpfc_rscn_disc(vport);
+}
+
+/**
+ * lpfc_more_adisc: Issue more adisc as needed.
+ * @vport: pointer to a host virtual N_Port data structure.
+ *
+ * This routine determines whether there are more ndlps on a @vport's
+ * node list that need to have Address Discover (ADISC) issued. If so, it will
+ * invoke the lpfc_els_disc_adisc() routine to issue ADISC on the @vport's
+ * remaining nodes which need to have ADISC sent.
+ **/
void
lpfc_more_adisc(struct lpfc_vport *vport)
{
@@ -1252,23 +1661,27 @@ lpfc_more_adisc(struct lpfc_vport *vport)
/* go thru NPR nodes and issue any remaining ELS ADISCs */
sentadisc = lpfc_els_disc_adisc(vport);
}
+ if (!vport->num_disc_nodes)
+ lpfc_adisc_done(vport);
return;
}
-static void
-lpfc_rscn_disc(struct lpfc_vport *vport)
-{
- lpfc_can_disctmo(vport);
-
- /* RSCN discovery */
- /* go thru NPR nodes and issue ELS PLOGIs */
- if (vport->fc_npr_cnt)
- if (lpfc_els_disc_plogi(vport))
- return;
-
- lpfc_end_rscn(vport);
-}
-
+/**
+ * lpfc_cmpl_els_adisc: Completion callback function for adisc.
+ * @phba: pointer to lpfc hba data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @rspiocb: pointer to lpfc response iocb data structure.
+ *
+ * This routine is the completion function for issuing the Address Discover
+ * (ADISC) command. It first checks to see whether the link went down during
+ * the discovery process. If so, the node will be marked for node port
+ * recovery (the link attention handler will issue the discover IOCB) and the
+ * routine exits. Otherwise, the response status is checked. If an error was reported
+ * in the response status, the ADISC command shall be retried by invoking
+ * the lpfc_els_retry() routine. Otherwise, if no error was reported in
+ * the response status, the state machine is invoked to set transition
+ * with respect to NLP_EVT_CMPL_ADISC event.
+ **/
static void
lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
@@ -1333,57 +1746,34 @@ lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_disc_state_machine(vport, ndlp, cmdiocb,
NLP_EVT_CMPL_ADISC);
- if (disc && vport->num_disc_nodes) {
- /* Check to see if there are more ADISCs to be sent */
+ /* Check to see if there are more ADISCs to be sent */
+ if (disc && vport->num_disc_nodes)
lpfc_more_adisc(vport);
-
- /* Check to see if we are done with ADISC authentication */
- if (vport->num_disc_nodes == 0) {
- /* If we get here, there is nothing left to ADISC */
- /*
- * For NPIV, cmpl_reg_vpi will set port_state to READY,
- * and continue discovery.
- */
- if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
- !(vport->fc_flag & FC_RSCN_MODE)) {
- lpfc_issue_reg_vpi(phba, vport);
- goto out;
- }
- /*
- * For SLI2, we need to set port_state to READY
- * and continue discovery.
- */
- if (vport->port_state < LPFC_VPORT_READY) {
- /* If we get here, there is nothing to ADISC */
- if (vport->port_type == LPFC_PHYSICAL_PORT)
- lpfc_issue_clear_la(phba, vport);
-
- if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
- vport->num_disc_nodes = 0;
- /* go thru NPR list, issue ELS PLOGIs */
- if (vport->fc_npr_cnt)
- lpfc_els_disc_plogi(vport);
-
- if (!vport->num_disc_nodes) {
- spin_lock_irq(shost->host_lock);
- vport->fc_flag &=
- ~FC_NDISC_ACTIVE;
- spin_unlock_irq(
- shost->host_lock);
- lpfc_can_disctmo(vport);
- }
- }
- vport->port_state = LPFC_VPORT_READY;
- } else {
- lpfc_rscn_disc(vport);
- }
- }
- }
out:
lpfc_els_free_iocb(phba, cmdiocb);
return;
}
+/**
+ * lpfc_issue_els_adisc: Issue an address discover iocb to a node on a vport.
+ * @vport: pointer to a virtual N_Port data structure.
+ * @ndlp: pointer to a node-list data structure.
+ * @retry: number of retries to the command IOCB.
+ *
+ * This routine issues an Address Discover (ADISC) for an @ndlp on a
+ * @vport. It prepares the payload of the ADISC ELS command, updates the
+ * state of the ndlp, and invokes the lpfc_sli_issue_iocb() routine
+ * to issue the ADISC ELS command.
+ *
+ * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
+ * will be incremented by 1 for holding the ndlp and the reference to ndlp
+ * will be stored into the context1 field of the IOCB for the completion
+ * callback function to the ADISC ELS command.
+ *
+ * Return code
+ * 0 - successfully issued adisc
+ * 1 - failed to issue adisc
+ **/
int
lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
uint8_t retry)
@@ -1437,6 +1827,18 @@ lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
return 0;
}
+/**
+ * lpfc_cmpl_els_logo: Completion callback function for logo.
+ * @phba: pointer to lpfc hba data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @rspiocb: pointer to lpfc response iocb data structure.
+ *
+ * This routine is the completion function for issuing the ELS Logout (LOGO)
+ * command. If no error status was reported from the LOGO response, the
+ * state machine of the associated ndlp shall be invoked for transition with
+ * respect to NLP_EVT_CMPL_LOGO event. Otherwise, if error status was reported,
+ * the lpfc_els_retry() routine will be invoked to retry the LOGO command.
+ **/
static void
lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
@@ -1502,6 +1904,26 @@ out:
return;
}
+/**
+ * lpfc_issue_els_logo: Issue a logo to a node on a vport.
+ * @vport: pointer to a virtual N_Port data structure.
+ * @ndlp: pointer to a node-list data structure.
+ * @retry: number of retries to the command IOCB.
+ *
+ * This routine constructs and issues an ELS Logout (LOGO) iocb command
+ * to a remote node, referred by an @ndlp on a @vport. It constructs the
+ * payload of the IOCB, properly sets up the @ndlp state, and invokes the
+ * lpfc_sli_issue_iocb() routine to send out the LOGO ELS command.
+ *
+ * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
+ * will be incremented by 1 for holding the ndlp and the reference to ndlp
+ * will be stored into the context1 field of the IOCB for the completion
+ * callback function to the LOGO ELS command.
+ *
+ * Return code
+ * 0 - successfully issued logo
+ * 1 - failed to issue logo
+ **/
int
lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
uint8_t retry)
@@ -1563,6 +1985,22 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
return 0;
}
+/**
+ * lpfc_cmpl_els_cmd: Completion callback function for generic els command.
+ * @phba: pointer to lpfc hba data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @rspiocb: pointer to lpfc response iocb data structure.
+ *
+ * This routine is a generic completion callback function for ELS commands.
+ * Specifically, it is the callback function which does not need to perform
+ * any command specific operations. It is currently used by the ELS command
+ * issuing routines for the ELS State Change Request (SCR),
+ * lpfc_issue_els_scr(), and the ELS Fibre Channel Address Resolution
+ * Protocol Response (FARPR) routine, lpfc_issue_els_farpr(). Other than
+ * certain debug loggings, this callback function simply invokes the
+ * lpfc_els_chk_latt() routine to check whether link went down during the
+ * discovery process.
+ **/
static void
lpfc_cmpl_els_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
@@ -1587,6 +2025,28 @@ lpfc_cmpl_els_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
return;
}
+/**
+ * lpfc_issue_els_scr: Issue a scr to a node on a vport.
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @nportid: N_Port identifier to the remote node.
+ * @retry: number of retries to the command IOCB.
+ *
+ * This routine issues a State Change Request (SCR) to a fabric node
+ * on a @vport. The remote node @nportid is passed into the function. It
+ * first searches the @vport node list to find the matching ndlp. If no such
+ * ndlp is found, a new ndlp shall be created for this (SCR) purpose. An
+ * IOCB is allocated, payload prepared, and the lpfc_sli_issue_iocb()
+ * routine is invoked to send the SCR IOCB.
+ *
+ * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
+ * will be incremented by 1 for holding the ndlp and the reference to ndlp
+ * will be stored into the context1 field of the IOCB for the completion
+ * callback function to the SCR ELS command.
+ *
+ * Return code
+ * 0 - Successfully issued scr command
+ * 1 - Failed to issue scr command
+ **/
int
lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
{
@@ -1659,6 +2119,28 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
return 0;
}
+/**
+ * lpfc_issue_els_farpr: Issue a farp to a node on a vport.
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @nportid: N_Port identifier to the remote node.
+ * @retry: number of retries to the command IOCB.
+ *
+ * This routine issues a Fibre Channel Address Resolution Response
+ * (FARPR) to a node on a vport. The remote node N_Port identifier (@nportid)
+ * is passed into the function. It first searches the @vport node list to find
+ * the matching ndlp. If no such ndlp is found, a new ndlp shall be created
+ * for this (FARPR) purpose. An IOCB is allocated, payload prepared, and the
+ * lpfc_sli_issue_iocb() routine is invoked to send the FARPR ELS command.
+ *
+ * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
+ * will be incremented by 1 for holding the ndlp and the reference to ndlp
+ * will be stored into the context1 field of the IOCB for the completion
+ * callback function to the FARPR ELS command.
+ *
+ * Return code
+ * 0 - Successfully issued farpr command
+ * 1 - Failed to issue farpr command
+ **/
static int
lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
{
@@ -1748,6 +2230,18 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
return 0;
}
+/**
+ * lpfc_cancel_retry_delay_tmo: Cancel the timer with delayed iocb-cmd retry.
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @nlp: pointer to a node-list data structure.
+ *
+ * This routine cancels the timer with a delayed IOCB-command retry for
+ * a @vport's @ndlp. It stops the timer for the delayed function retry and
+ * removes the ELS retry event if one is present. In addition, if the
+ * NLP_NPR_2B_DISC bit is set in the @nlp's nlp_flag bitmap, ADISC IOCB
+ * commands are sent for the @vport's nodes that require issuing discovery
+ * ADISC.
+ **/
void
lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp)
{
@@ -1775,25 +2269,36 @@ lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp)
if (vport->port_state < LPFC_VPORT_READY) {
/* Check if there are more ADISCs to be sent */
lpfc_more_adisc(vport);
- if ((vport->num_disc_nodes == 0) &&
- (vport->fc_npr_cnt))
- lpfc_els_disc_plogi(vport);
} else {
/* Check if there are more PLOGIs to be sent */
lpfc_more_plogi(vport);
- }
- if (vport->num_disc_nodes == 0) {
- spin_lock_irq(shost->host_lock);
- vport->fc_flag &= ~FC_NDISC_ACTIVE;
- spin_unlock_irq(shost->host_lock);
- lpfc_can_disctmo(vport);
- lpfc_end_rscn(vport);
+ if (vport->num_disc_nodes == 0) {
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag &= ~FC_NDISC_ACTIVE;
+ spin_unlock_irq(shost->host_lock);
+ lpfc_can_disctmo(vport);
+ lpfc_end_rscn(vport);
+ }
}
}
}
return;
}
+/**
+ * lpfc_els_retry_delay: Timer function with a ndlp delayed function timer.
+ * @ptr: holder for the pointer to the timer function associated data (ndlp).
+ *
+ * This routine is invoked by the ndlp delayed-function timer to check
+ * whether there is any pending ELS retry event(s) with the node. If not, it
+ * simply returns. Otherwise, if there is at least one ELS delayed event, it
+ * adds the delayed events to the HBA work list and invokes the
+ * lpfc_worker_wake_up() routine to wake up worker thread to process the
+ * event. Note that lpfc_nlp_get() is called before posting the event to
+ * the work list to hold reference count of ndlp so that it guarantees the
+ * reference to ndlp will still be available when the worker thread gets
+ * to the event associated with the ndlp.
+ **/
void
lpfc_els_retry_delay(unsigned long ptr)
{
@@ -1822,6 +2327,15 @@ lpfc_els_retry_delay(unsigned long ptr)
return;
}
+/**
+ * lpfc_els_retry_delay_handler: Work thread handler for ndlp delayed function.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine is the worker-thread handler for processing the @ndlp delayed
+ * event(s), posted by the lpfc_els_retry_delay() routine. It simply retrieves
+ * the last ELS command from the associated ndlp and invokes the proper ELS
+ * function according to the delayed ELS command to retry the command.
+ **/
void
lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp)
{
@@ -1884,6 +2398,27 @@ lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp)
return;
}
+/**
+ * lpfc_els_retry: Make retry decision on an els command iocb.
+ * @phba: pointer to lpfc hba data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @rspiocb: pointer to lpfc response iocb data structure.
+ *
+ * This routine makes a retry decision on an ELS command IOCB, which has
+ * failed. The following ELS IOCBs use this function for retrying the command
+ * when a previously issued command responded with an error status: FLOGI, PLOGI,
+ * PRLI, ADISC, LOGO, and FDISC. Based on the ELS command type and the
+ * returned error status, it makes the decision whether a retry shall be
+ * issued for the command, and whether a retry shall be made immediately or
+ * delayed. In the former case, the corresponding ELS command issuing-function
+ * is called to retry the command. In the latter case, the ELS command shall
+ * be posted to the ndlp delayed event and the ndlp's delayed function timer
+ * set for the delayed command issuing.
+ *
+ * Return code
+ * 0 - No retry of els command is made
+ * 1 - Immediate or delayed retry of els command is made
+ **/
static int
lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
@@ -2051,7 +2586,7 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
(stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_NPORT_ID))
) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
- "0123 FDISC Failed (x%x). "
+ "0122 FDISC Failed (x%x). "
"Fabric Detected Bad WWN\n",
stat.un.lsRjtError);
lpfc_vport_set_state(vport,
@@ -2182,12 +2717,26 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
return 0;
}
+/**
+ * lpfc_els_free_data: Free lpfc dma buffer and data structure with an iocb.
+ * @phba: pointer to lpfc hba data structure.
+ * @buf_ptr1: pointer to the lpfc DMA buffer data structure.
+ *
+ * This routine releases the lpfc DMA (Direct Memory Access) buffer(s)
+ * associated with a command IOCB back to the lpfc DMA buffer pool. It first
+ * checks to see whether there is a lpfc DMA buffer associated with the
+ * response of the command IOCB. If so, it will be released before releasing
+ * the lpfc DMA buffer associated with the IOCB itself.
+ *
+ * Return code
+ * 0 - Successfully released lpfc DMA buffer (currently, always return 0)
+ **/
static int
lpfc_els_free_data(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr1)
{
struct lpfc_dmabuf *buf_ptr;
- /* Free the response before processing the command. */
+ /* Free the response before processing the command. */
if (!list_empty(&buf_ptr1->list)) {
list_remove_head(&buf_ptr1->list, buf_ptr,
struct lpfc_dmabuf,
@@ -2200,6 +2749,18 @@ lpfc_els_free_data(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr1)
return 0;
}
+/**
+ * lpfc_els_free_bpl: Free lpfc dma buffer and data structure with bpl.
+ * @phba: pointer to lpfc hba data structure.
+ * @buf_ptr: pointer to the lpfc dma buffer data structure.
+ *
+ * This routine releases the lpfc Direct Memory Access (DMA) buffer
+ * associated with a Buffer Pointer List (BPL) back to the lpfc DMA buffer
+ * pool.
+ *
+ * Return code
+ * 0 - Successfully released lpfc DMA buffer (currently, always return 0)
+ **/
static int
lpfc_els_free_bpl(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr)
{
@@ -2208,6 +2769,33 @@ lpfc_els_free_bpl(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr)
return 0;
}
+/**
+ * lpfc_els_free_iocb: Free a command iocb and its associated resources.
+ * @phba: pointer to lpfc hba data structure.
+ * @elsiocb: pointer to lpfc els command iocb data structure.
+ *
+ * This routine frees a command IOCB and its associated resources. The
+ * command IOCB data structure contains references to various associated
+ * resources; these fields must be set to NULL if the associated reference
+ * is not present:
+ * context1 - reference to ndlp
+ * context2 - reference to cmd
+ * context2->next - reference to rsp
+ * context3 - reference to bpl
+ *
+ * It first properly decrements the reference count held on ndlp for the
+ * IOCB completion callback function. If LPFC_DELAY_MEM_FREE flag is not
+ * set, it invokes the lpfc_els_free_data() routine to release the Direct
+ * Memory Access (DMA) buffers associated with the IOCB. Otherwise, it
+ * adds the DMA buffer to the @phba data structure for delayed release.
+ * If reference to the Buffer Pointer List (BPL) is present, the
+ * lpfc_els_free_bpl() routine is invoked to release the DMA memory
+ * associated with BPL. Finally, the lpfc_sli_release_iocbq() routine is
+ * invoked to release the IOCB data structure back to @phba IOCBQ list.
+ *
+ * Return code
+ * 0 - Success (currently, always return 0)
+ **/
int
lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb)
{
@@ -2274,6 +2862,23 @@ lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb)
return 0;
}
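The sketch below shows the typical shape of an ELS completion callback handing its command IOCB back through lpfc_els_free_iocb(), as the callbacks later in this patch do. It is a minimal illustration, not driver code, and the callback name is hypothetical.

static void
example_cmpl_els_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
		     struct lpfc_iocbq *rspiocb)
{
	/* ... evaluate rspiocb->iocb.ulpStatus and update discovery state ... */

	/* Release the DMA buffers, drop the ndlp reference taken in
	 * lpfc_prep_els_iocb() (unless context1 was cleared), and return
	 * the IOCB to the @phba IOCBQ list.
	 */
	lpfc_els_free_iocb(phba, cmdiocb);
}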
+/**
+ * lpfc_cmpl_els_logo_acc: Completion callback function to logo acc response.
+ * @phba: pointer to lpfc hba data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @rspiocb: pointer to lpfc response iocb data structure.
+ *
+ * This routine is the completion callback function to the Logout (LOGO)
+ * Accept (ACC) Response ELS command. This routine is invoked to indicate
+ * the completion of the LOGO process. It invokes the lpfc_nlp_not_used() to
+ * release the ndlp if it has the last reference remaining (reference count
+ * is 1). If succeeded (meaning ndlp released), it sets the IOCB context1
+ * field to NULL to inform the following lpfc_els_free_iocb() routine no
+ * ndlp reference count needs to be decremented. Otherwise, the ndlp
+ * reference use-count shall be decremented by the lpfc_els_free_iocb()
+ * routine. Finally, the lpfc_els_free_iocb() is invoked to release the
+ * IOCB data structure.
+ **/
static void
lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
@@ -2311,6 +2916,19 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
return;
}
+/**
+ * lpfc_mbx_cmpl_dflt_rpi: Completion callbk func for unreg dflt rpi mbox cmd.
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * This routine is the completion callback function for unregister default
+ * RPI (Remote Port Index) mailbox command to the @phba. It simply releases
+ * the associated lpfc Direct Memory Access (DMA) buffer back to the pool and
+ * decrements the ndlp reference count held for this completion callback
+ * function. After that, it invokes the lpfc_nlp_not_used() to check
+ * whether there is only one reference left on the ndlp. If so, it will
+ * perform one more decrement and trigger the release of the ndlp.
+ **/
void
lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
@@ -2332,6 +2950,22 @@ lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
return;
}
+/**
+ * lpfc_cmpl_els_rsp: Completion callback function for els response iocb cmd.
+ * @phba: pointer to lpfc hba data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @rspiocb: pointer to lpfc response iocb data structure.
+ *
+ * This routine is the completion callback function for ELS Response IOCB
+ * command. In the normal case, this callback function just properly sets the
+ * nlp_flag bitmap in the ndlp data structure; if the mbox command reference
+ * field in the command IOCB is not NULL, the referenced mailbox command will
+ * be sent out, and then the lpfc_els_free_iocb() routine is invoked to release
+ * the IOCB. Under error conditions, such as when a LS_RJT is returned or a
+ * link down event occurred during the discovery, the lpfc_nlp_not_used()
+ * routine shall be invoked trying to release the ndlp if no other threads
+ * are currently referring to it.
+ **/
static void
lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
@@ -2487,6 +3121,31 @@ out:
return;
}
+/**
+ * lpfc_els_rsp_acc: Prepare and issue an acc response iocb command.
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @flag: the els command code to be accepted.
+ * @oldiocb: pointer to the original lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ * @mbox: pointer to the driver internal queue element for mailbox command.
+ *
+ * This routine prepares and issues an Accept (ACC) response IOCB
+ * command. It uses the @flag to properly set up the IOCB field for the
+ * specific ACC response command to be issued and invokes the
+ * lpfc_sli_issue_iocb() routine to send out ACC response IOCB. If a
+ * @mbox pointer is passed in, it will be put into the context_un.mbox
+ * field of the IOCB for the completion callback function to issue the
+ * mailbox command to the HBA later when callback is invoked.
+ *
+ * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
+ * will be incremented by 1 for holding the ndlp and the reference to ndlp
+ * will be stored into the context1 field of the IOCB for the completion
+ * callback function to the corresponding response ELS IOCB command.
+ *
+ * Return code
+ * 0 - Successfully issued acc response
+ * 1 - Failed to issue acc response
+ **/
int
lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp,
@@ -2601,6 +3260,28 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
return 0;
}
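A minimal caller sketch for lpfc_els_rsp_acc(), in the style of the unsolicited receive handlers in this file. ELS_CMD_ACC is assumed here to be the plain-accept flag value, and passing a NULL @mbox means no mailbox command is chained to the completion.

	/* Accept the unsolicited ELS command with a plain ACC response; a
	 * non-zero return means the ACC IOCB could not be allocated or issued.
	 */
	lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);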
+/**
+ * lpfc_els_rsp_reject: Prepare and issue a rjt response iocb command.
+ * @vport: pointer to a virtual N_Port data structure.
+ * @rejectError: reject error status word carrying the LS_RJT reason code.
+ * @oldiocb: pointer to the original lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ * @mbox: pointer to the driver internal queue element for mailbox command.
+ *
+ * This routine prepares and issues a Reject (RJT) response IOCB
+ * command. If a @mbox pointer is passed in, it will be put into the
+ * context_un.mbox field of the IOCB for the completion callback function
+ * to issue to the HBA later.
+ *
+ * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
+ * will be incremented by 1 for holding the ndlp and the reference to ndlp
+ * will be stored into the context1 field of the IOCB for the completion
+ * callback function to the reject response ELS IOCB command.
+ *
+ * Return code
+ * 0 - Successfully issued reject response
+ * 1 - Failed to issue reject response
+ **/
int
lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp,
@@ -2660,6 +3341,25 @@ lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
return 0;
}
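A sketch of the usual rejection pattern built on struct ls_rjt, mirroring the RPS and RPL receive paths later in this patch (reason code "unable to perform command request", explanation "cannot give data"):

	struct ls_rjt stat;

	/* Build the LS_RJT reason/explanation and send the reject; no
	 * mailbox command is chained to the completion.
	 */
	stat.un.b.lsRjtRsvd0 = 0;
	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
	stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
	stat.un.b.vendorUnique = 0;
	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);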
+/**
+ * lpfc_els_rsp_adisc_acc: Prepare and issue acc response to adisc iocb cmd.
+ * @vport: pointer to a virtual N_Port data structure.
+ * @oldiocb: pointer to the original lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine prepares and issues an Accept (ACC) response to Address
+ * Discover (ADISC) ELS command. It simply prepares the payload of the IOCB
+ * and invokes the lpfc_sli_issue_iocb() routine to send out the command.
+ *
+ * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
+ * will be incremented by 1 for holding the ndlp and the reference to ndlp
+ * will be stored into the context1 field of the IOCB for the completion
+ * callback function to the ADISC Accept response ELS IOCB command.
+ *
+ * Return code
+ * 0 - Successfully issued acc adisc response
+ * 1 - Failed to issue adisc acc response
+ **/
int
lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
struct lpfc_nodelist *ndlp)
@@ -2716,6 +3416,25 @@ lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
return 0;
}
+/**
+ * lpfc_els_rsp_prli_acc: Prepare and issue acc response to prli iocb cmd.
+ * @vport: pointer to a virtual N_Port data structure.
+ * @oldiocb: pointer to the original lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine prepares and issues an Accept (ACC) response to Process
+ * Login (PRLI) ELS command. It simply prepares the payload of the IOCB
+ * and invokes the lpfc_sli_issue_iocb() routine to send out the command.
+ *
+ * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
+ * will be incremented by 1 for holding the ndlp and the reference to ndlp
+ * will be stored into the context1 field of the IOCB for the completion
+ * callback function to the PRLI Accept response ELS IOCB command.
+ *
+ * Return code
+ * 0 - Successfully issued acc prli response
+ * 1 - Failed to issue acc prli response
+ **/
int
lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
struct lpfc_nodelist *ndlp)
@@ -2795,6 +3514,32 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
return 0;
}
+/**
+ * lpfc_els_rsp_rnid_acc: Issue rnid acc response iocb command.
+ * @vport: pointer to a virtual N_Port data structure.
+ * @format: rnid command format.
+ * @oldiocb: pointer to the original lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine issues a Request Node Identification Data (RNID) Accept
+ * (ACC) response. It constructs the RNID ACC response command according to
+ * the proper @format and then calls the lpfc_sli_issue_iocb() routine to
+ * issue the response. Note that this command does not need to hold the ndlp
+ * reference count for the callback. So, the ndlp reference count taken by
+ * the lpfc_prep_els_iocb() routine is put back and the context1 field of
+ * IOCB is set to NULL to indicate to the lpfc_els_free_iocb() routine that
+ * there is no ndlp reference available.
+ *
+ * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
+ * will be incremented by 1 for holding the ndlp and the reference to ndlp
+ * will be stored into the context1 field of the IOCB for the completion
+ * callback function. However, for the RNID Accept Response ELS command,
+ * this is undone later by this routine after the IOCB is allocated.
+ *
+ * Return code
+ * 0 - Successfully issued acc rnid response
+ * 1 - Failed to issue acc rnid response
+ **/
static int
lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format,
struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp)
@@ -2875,6 +3620,25 @@ lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format,
return 0;
}
+/**
+ * lpfc_els_disc_adisc: Issue remaining adisc iocbs to npr nodes of a vport.
+ * @vport: pointer to a host virtual N_Port data structure.
+ *
+ * This routine issues Address Discover (ADISC) ELS commands to those
+ * N_Ports on the @vport which are in node port recovery state and for which
+ * ADISC has not yet been issued. Each time an ELS ADISC IOCB is issued by
+ * invoking the lpfc_issue_els_adisc() routine, the per-@vport discovery count
+ * (num_disc_nodes) is incremented. If num_disc_nodes reaches a pre-configured
+ * threshold (cfg_discovery_threads), the FC_NLP_MORE bit is set in the @vport
+ * fc_flag and the process of issuing the remaining ADISC IOCBs quits, to be
+ * resumed later. On the other hand, if no ADISC IOCB has been issued after
+ * walking through all the ndlps on the @vport, the FC_NLP_MORE bit is cleared
+ * from the @vport fc_flag, indicating there are no more ADISCs to be sent.
+ *
+ * Return code
+ * The number of N_Ports with adisc issued.
+ **/
int
lpfc_els_disc_adisc(struct lpfc_vport *vport)
{
@@ -2914,6 +3678,25 @@ lpfc_els_disc_adisc(struct lpfc_vport *vport)
return sentadisc;
}
+/**
+ * lpfc_els_disc_plogi: Issue plogi for all npr nodes of a vport before adisc.
+ * @vport: pointer to a host virtual N_Port data structure.
+ *
+ * This routine issues Port Login (PLOGI) ELS commands to all the N_Ports
+ * on a @vport which are in node port recovery state. Each time an ELS
+ * PLOGI IOCB is issued by invoking the lpfc_issue_els_plogi() routine,
+ * the per-@vport discovery count (num_disc_nodes) is incremented. If
+ * num_disc_nodes reaches a pre-configured threshold (cfg_discovery_threads),
+ * the FC_NLP_MORE bit is set in the @vport fc_flag and the process of issuing
+ * the remaining PLOGI IOCBs quits, to be resumed later. On the other hand, if
+ * no PLOGI IOCB has been issued after walking through all the ndlps on the
+ * @vport, the FC_NLP_MORE bit is cleared from the @vport fc_flag, indicating
+ * there are no more PLOGIs to be sent.
+ *
+ * Return code
+ * The number of N_Ports with plogi issued.
+ **/
int
lpfc_els_disc_plogi(struct lpfc_vport *vport)
{
@@ -2954,6 +3737,15 @@ lpfc_els_disc_plogi(struct lpfc_vport *vport)
return sentplogi;
}
+/**
+ * lpfc_els_flush_rscn: Clean up any rscn activities with a vport.
+ * @vport: pointer to a host virtual N_Port data structure.
+ *
+ * This routine cleans up any Registration State Change Notification
+ * (RSCN) activity with a @vport. Note that the fc_rscn_flush flag of the
+ * @vport, together with the host_lock, is used to prevent multiple threads
+ * from accessing the RSCN array on the same @vport at the same time.
+ **/
void
lpfc_els_flush_rscn(struct lpfc_vport *vport)
{
@@ -2984,6 +3776,18 @@ lpfc_els_flush_rscn(struct lpfc_vport *vport)
vport->fc_rscn_flush = 0;
}
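The guard the comment above refers to looks roughly like the sketch below. It is illustrative only, though the fc_rscn_flush field, fc_rscn_id_list, and host_lock usage are taken from this patch.

	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	/* Mark the RSCN array as being flushed so that concurrent users of
	 * fc_rscn_id_list back off; host_lock serializes the flag update.
	 */
	spin_lock_irq(shost->host_lock);
	vport->fc_rscn_flush = 1;
	spin_unlock_irq(shost->host_lock);

	/* ... walk and release the vport->fc_rscn_id_list entries ... */

	vport->fc_rscn_flush = 0;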
+/**
+ * lpfc_rscn_payload_check: Check whether there is a pending rscn to a did.
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @did: remote destination port identifier.
+ *
+ * This routine checks whether there is any pending Registration State
+ * Change Notification (RSCN) to a @did on @vport.
+ *
+ * Return code
+ * Non-zero - The @did matched with a pending rscn
+ * 0 - not able to match @did with a pending rscn
+ **/
int
lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did)
{
@@ -3053,6 +3857,17 @@ return_did_out:
return did;
}
+/**
+ * lpfc_rscn_recovery_check: Send recovery event to vport nodes matching rscn
+ * @vport: pointer to a host virtual N_Port data structure.
+ *
+ * This routine sends recovery (NLP_EVT_DEVICE_RECOVERY) event to the
+ * state machine for a @vport's nodes that are with pending RSCN (Registration
+ * State Change Notification).
+ *
+ * Return code
+ * 0 - Successful (currently always returns 0)
+ **/
static int
lpfc_rscn_recovery_check(struct lpfc_vport *vport)
{
@@ -3071,6 +3886,28 @@ lpfc_rscn_recovery_check(struct lpfc_vport *vport)
return 0;
}
+/**
+ * lpfc_els_rcv_rscn: Process an unsolicited rscn iocb.
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine processes an unsolicited RSCN (Registration State Change
+ * Notification) IOCB. First, the payload of the unsolicited RSCN is walked
+ * to invoke fc_host_post_event() routine to the FC transport layer. If the
+ * discover state machine is about to begin discovery, it just accepts the
+ * RSCN and the discovery process will satisfy the RSCN. If this RSCN only
+ * contains N_Port IDs for other vports on this HBA, it just accepts the
+ * RSCN and ignores processing it. If the state machine is in the recovery
+ * state, the fc_rscn_id_list of this @vport is walked and the
+ * lpfc_rscn_recovery_check() routine is invoked to send recovery event for
+ * all nodes that match RSCN payload. Otherwise, the lpfc_els_handle_rscn()
+ * routine is invoked to handle the RSCN event.
+ *
+ * Return code
+ * 0 - Just sent the acc response
+ * 1 - Sent the acc response and waited for name server completion
+ **/
static int
lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
struct lpfc_nodelist *ndlp)
@@ -3130,7 +3967,7 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
if (rscn_id == hba_id) {
/* ALL NPortIDs in RSCN are on HBA */
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
- "0214 Ignore RSCN "
+ "0219 Ignore RSCN "
"Data: x%x x%x x%x x%x\n",
vport->fc_flag, payload_len,
*lp, vport->fc_rscn_id_cnt);
@@ -3241,6 +4078,22 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
return lpfc_els_handle_rscn(vport);
}
+/**
+ * lpfc_els_handle_rscn: Handle rscn for a vport.
+ * @vport: pointer to a host virtual N_Port data structure.
+ *
+ * This routine handles the Registration State Change Notification
+ * (RSCN) for a @vport. If login to NameServer does not exist, a new ndlp shall
+ * be created and a Port Login (PLOGI) to the NameServer is issued. Otherwise,
+ * if the ndlp to NameServer exists, a Common Transport (CT) command to the
+ * NameServer shall be issued. If CT command to the NameServer fails to be
+ * issued, the lpfc_els_flush_rscn() routine shall be invoked to clean up any
+ * RSCN activities with the @vport.
+ *
+ * Return code
+ * 0 - Cleaned up rscn on the @vport
+ * 1 - Wait for plogi to name server before proceed
+ **/
int
lpfc_els_handle_rscn(struct lpfc_vport *vport)
{
@@ -3313,6 +4166,31 @@ lpfc_els_handle_rscn(struct lpfc_vport *vport)
return 0;
}
+/**
+ * lpfc_els_rcv_flogi: Process an unsolicited flogi iocb.
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine processes Fabric Login (FLOGI) IOCB received as an ELS
+ * unsolicited event. An unsolicited FLOGI can be received in a point-to-
+ * point topology. As an unsolicited FLOGI should not be received in a loop
+ * mode, any unsolicited FLOGI received in loop mode shall be ignored. The
+ * lpfc_check_sparm() routine is invoked to check the parameters in the
+ * unsolicited FLOGI. If the parameter validation fails, the routine
+ * lpfc_els_rsp_reject() shall be called with reject reason code set to
+ * LSEXP_SPARM_OPTIONS to reject the FLOGI. Otherwise, the Port WWN in the
+ * FLOGI shall be compared with the Port WWN of the @vport to determine which
+ * side will initiate PLOGI. The party with the higher lexicographical value
+ * has higher priority (as the winning port) and will initiate PLOGI and
+ * communicate Port_IDs (Addresses) for both nodes in PLOGI. The result
+ * of this will be marked in the @vport fc_flag field with FC_PT2PT_PLOGI
+ * and then the lpfc_els_rsp_acc() routine is invoked to accept the FLOGI.
+ *
+ * Return code
+ * 0 - Successfully processed the unsolicited flogi
+ * 1 - Failed to process the unsolicited flogi
+ **/
static int
lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
struct lpfc_nodelist *ndlp)
@@ -3402,6 +4280,22 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
return 0;
}
+/**
+ * lpfc_els_rcv_rnid: Process an unsolicited rnid iocb.
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine processes Request Node Identification Data (RNID) IOCB
+ * received as an ELS unsolicited event. Only when the RNID specifies format
+ * 0x0 or 0xDF (Topology Discovery Specific Node Identification Data) does
+ * this routine invoke the lpfc_els_rsp_rnid_acc() routine to
+ * Accept (ACC) the RNID ELS command. All the other RNID formats are
+ * rejected by invoking the lpfc_els_rsp_reject() routine.
+ *
+ * Return code
+ * 0 - Successfully processed rnid iocb (currently always return 0)
+ **/
static int
lpfc_els_rcv_rnid(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
struct lpfc_nodelist *ndlp)
@@ -3441,6 +4335,19 @@ lpfc_els_rcv_rnid(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
return 0;
}
+/**
+ * lpfc_els_rcv_lirr: Process an unsolicited lirr iocb.
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine processes a Link Incident Report Registration (LIRR) IOCB
+ * received as an ELS unsolicited event. Currently, this function just invokes
+ * the lpfc_els_rsp_reject() routine to reject the LIRR IOCB unconditionally.
+ *
+ * Return code
+ * 0 - Successfully processed lirr iocb (currently always return 0)
+ **/
static int
lpfc_els_rcv_lirr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
struct lpfc_nodelist *ndlp)
@@ -3456,6 +4363,25 @@ lpfc_els_rcv_lirr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
return 0;
}
+/**
+ * lpfc_els_rsp_rps_acc: Completion callbk func for MBX_READ_LNK_STAT mbox cmd.
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * This routine is the completion callback function for the MBX_READ_LNK_STAT
+ * mailbox command. This callback function is to actually send the Accept
+ * (ACC) response to a Read Port Status (RPS) unsolicited IOCB event. It
+ * collects the link statistics from the completion of the MBX_READ_LNK_STAT
+ * mailbox command, constructs the RPS response with the link statistics
+ * collected, and then invokes the lpfc_sli_issue_iocb() routine to send ACC
+ * response to the RPS.
+ *
+ * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
+ * will be incremented by 1 for holding the ndlp and the reference to ndlp
+ * will be stored into the context1 field of the IOCB for the completion
+ * callback function to the RPS Accept Response ELS IOCB command.
+ *
+ **/
static void
lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
@@ -3531,6 +4457,24 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
return;
}
+/**
+ * lpfc_els_rcv_rps: Process an unsolicited rps iocb.
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine processes Read Port Status (RPS) IOCB received as an
+ * ELS unsolicited event. It first checks the remote port state. If the
+ * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE
+ * state, it invokes the lpfc_els_rsp_reject() routine to send the reject
+ * response. Otherwise, it issues the MBX_READ_LNK_STAT mailbox command
+ * to read the HBA link statistics. The callback function,
+ * lpfc_els_rsp_rps_acc(), is set on the MBX_READ_LNK_STAT mailbox command
+ * to actually send out the RPS Accept (ACC) response.
+ *
+ * Return codes
+ * 0 - Successfully processed rps iocb (currently always return 0)
+ **/
static int
lpfc_els_rcv_rps(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
struct lpfc_nodelist *ndlp)
@@ -3544,14 +4488,9 @@ lpfc_els_rcv_rps(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
struct ls_rjt stat;
if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
- (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
- stat.un.b.lsRjtRsvd0 = 0;
- stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
- stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
- stat.un.b.vendorUnique = 0;
- lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
- NULL);
- }
+ (ndlp->nlp_state != NLP_STE_MAPPED_NODE))
+ /* reject the unsolicited RPS request and done with it */
+ goto reject_out;
pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
lp = (uint32_t *) pcmd->virt;
@@ -3584,6 +4523,9 @@ lpfc_els_rcv_rps(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
mempool_free(mbox, phba->mbox_mem_pool);
}
}
+
+reject_out:
+ /* issue rejection response */
stat.un.b.lsRjtRsvd0 = 0;
stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
@@ -3592,6 +4534,25 @@ lpfc_els_rcv_rps(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
return 0;
}
+/**
+ * lpfc_els_rsp_rpl_acc: Issue an accept rpl els command.
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @cmdsize: size of the ELS command.
+ * @oldiocb: pointer to the original lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine issues an Accept (ACC) Read Port List (RPL) ELS command.
+ * It is to be called by the lpfc_els_rcv_rpl() routine to accept the RPL.
+ *
+ * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
+ * will be incremented by 1 for holding the ndlp and the reference to ndlp
+ * will be stored into the context1 field of the IOCB for the completion
+ * callback function to the RPL Accept Response ELS command.
+ *
+ * Return code
+ * 0 - Successfully issued ACC RPL ELS command
+ * 1 - Failed to issue ACC RPL ELS command
+ **/
static int
lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize,
struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp)
@@ -3645,6 +4606,22 @@ lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize,
return 0;
}
+/**
+ * lpfc_els_rcv_rpl: Process an unsolicited rpl iocb.
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine processes Read Port List (RPL) IOCB received as an ELS
+ * unsolicited event. It first checks the remote port state. If the remote
+ * port is not in NLP_STE_UNMAPPED_NODE and NLP_STE_MAPPED_NODE states, it
+ * invokes the lpfc_els_rsp_reject() routine to send reject response.
+ * Otherwise, this routine then invokes the lpfc_els_rsp_rpl_acc() routine
+ * to accept the RPL.
+ *
+ * Return code
+ * 0 - Successfully processed rpl iocb (currently always return 0)
+ **/
static int
lpfc_els_rcv_rpl(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
struct lpfc_nodelist *ndlp)
@@ -3658,12 +4635,15 @@ lpfc_els_rcv_rpl(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
(ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
+ /* issue rejection response */
stat.un.b.lsRjtRsvd0 = 0;
stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
stat.un.b.vendorUnique = 0;
lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
NULL);
+ /* rejected the unsolicited RPL request and done with it */
+ return 0;
}
pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
@@ -3685,6 +4665,30 @@ lpfc_els_rcv_rpl(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
return 0;
}
+/**
+ * lpfc_els_rcv_farp: Process an unsolicited farp request els command.
+ * @vport: pointer to a virtual N_Port data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine processes Fibre Channel Address Resolution Protocol
+ * (FARP) Request IOCB received as an ELS unsolicited event. Currently,
+ * the lpfc driver only supports matching on WWPN or WWNN for FARP. As such,
+ * FARP_MATCH_PORT flag and FARP_MATCH_NODE flag are checked against the
+ * Match Flag in the FARP request IOCB: if FARP_MATCH_PORT flag is set, the
+ * remote PortName is compared against the FC PortName stored in the @vport
+ * data structure; if FARP_MATCH_NODE flag is set, the remote NodeName is
+ * compared against the FC NodeName stored in the @vport data structure.
+ * If any of these matches and the FARP_REQUEST_FARPR flag is set in the
+ * FARP request IOCB Response Flag, the lpfc_issue_els_farpr() routine is
+ * invoked to send out FARP Response to the remote node. Before sending the
+ * FARP Response, however, the FARP_REQUEST_PLOGI flag is checked in the FARP
+ * request IOCB Response Flag and, if it is set, the lpfc_issue_els_plogi()
+ * routine is invoked to log into the remote port first.
+ *
+ * Return code
+ * 0 - Either the FARP Match Mode not supported or successfully processed
+ **/
static int
lpfc_els_rcv_farp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
struct lpfc_nodelist *ndlp)
@@ -3744,6 +4748,20 @@ lpfc_els_rcv_farp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
return 0;
}
+/**
+ * lpfc_els_rcv_farpr: Process an unsolicited farp response iocb.
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine processes Fibre Channel Address Resolution Protocol
+ * Response (FARPR) IOCB received as an ELS unsolicited event. It simply
+ * invokes the lpfc_els_rsp_acc() routine to the remote node to accept
+ * the FARP response request.
+ *
+ * Return code
+ * 0 - Successfully processed FARPR IOCB (currently always return 0)
+ **/
static int
lpfc_els_rcv_farpr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
struct lpfc_nodelist *ndlp)
@@ -3768,6 +4786,25 @@ lpfc_els_rcv_farpr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
return 0;
}
+/**
+ * lpfc_els_rcv_fan: Process an unsolicited fan iocb command.
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @fan_ndlp: pointer to a node-list data structure.
+ *
+ * This routine processes a Fabric Address Notification (FAN) IOCB
+ * command received as an ELS unsolicited event. The FAN ELS command will
+ * only be processed on a physical port (i.e., the @vport represents the
+ * physical port). The fabric NodeName and PortName from the FAN IOCB are
+ * compared against those in the phba data structure. If any of those is
+ * different, the lpfc_initial_flogi() routine is invoked to initialize
+ * Fabric Login (FLOGI) to the fabric to start the discovery over. Otherwise,
+ * if both of those are identical, the lpfc_issue_fabric_reglogin() routine
+ * is invoked to register login to the fabric.
+ *
+ * Return code
+ * 0 - Successfully processed fan iocb (currently always return 0).
+ **/
static int
lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
struct lpfc_nodelist *fan_ndlp)
@@ -3797,6 +4834,16 @@ lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
return 0;
}
+/**
+ * lpfc_els_timeout: Handler function for the els timer.
+ * @ptr: holder for the timer function associated data.
+ *
+ * This routine is invoked by the ELS timer after timeout. It posts the ELS
+ * timer timeout event by setting the WORKER_ELS_TMO bit in the work port
+ * event bitmap and then invokes the lpfc_worker_wake_up() routine to wake
+ * up the worker thread. It is for the worker thread to invoke the routine
+ * lpfc_els_timeout_handler() to work on the posted event WORKER_ELS_TMO.
+ **/
void
lpfc_els_timeout(unsigned long ptr)
{
@@ -3816,6 +4863,15 @@ lpfc_els_timeout(unsigned long ptr)
return;
}
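A simplified sketch of the timer-to-worker hand-off described above (locking around the event bitmap is omitted; the work_port_events field name is an assumption, while WORKER_ELS_TMO and lpfc_worker_wake_up() are named in the comment):

	struct lpfc_vport *vport = (struct lpfc_vport *) ptr;
	struct lpfc_hba *phba = vport->phba;

	/* Post the ELS timeout event and wake the worker thread; the worker
	 * later calls lpfc_els_timeout_handler() for this vport.
	 */
	vport->work_port_events |= WORKER_ELS_TMO;	/* assumed field name */
	lpfc_worker_wake_up(phba);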
+/**
+ * lpfc_els_timeout_handler: Process an els timeout event.
+ * @vport: pointer to a virtual N_Port data structure.
+ *
+ * This routine is the actual handler function that processes an ELS timeout
+ * event. It walks the ELS ring to get and abort all the IOCBs (except the
+ * ABORT/CLOSE/FARP/FARPR/FDISC), which are associated with the @vport by
+ * invoking the lpfc_sli_issue_abort_iotag() routine.
+ **/
void
lpfc_els_timeout_handler(struct lpfc_vport *vport)
{
@@ -3886,6 +4942,26 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport)
mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout);
}
+/**
+ * lpfc_els_flush_cmd: Clean up the outstanding els commands to a vport.
+ * @vport: pointer to a host virtual N_Port data structure.
+ *
+ * This routine is used to clean up all the outstanding ELS commands on a
+ * @vport. It first aborts the @vport by invoking lpfc_fabric_abort_vport()
+ * routine. After that, it walks the ELS transmit queue to remove all the
+ * IOCBs with the @vport other than the QUE_RING and ABORT/CLOSE IOCBs. For
+ * the IOCBs with a non-NULL completion callback function, the callback
+ * function will be invoked with the status set to IOSTAT_LOCAL_REJECT and
+ * un.ulpWord[4] set to IOERR_SLI_ABORTED. For IOCBs with a NULL completion
+ * callback function, the IOCB will simply be released. Finally, it walks
+ * the ELS transmit completion queue to issue an abort IOCB to any transmit
+ * completion queue IOCB that is associated with the @vport and is not
+ * an IOCB from libdfc (i.e., the management plane IOCBs that are not
+ * part of the discovery state machine) out to HBA by invoking the
+ * lpfc_sli_issue_abort_iotag() routine. Note that this function only issues
+ * the abort IOCB to any transmit-completion-queued IOCB; it does not guarantee
+ * that the IOCBs have been aborted by the time this function returns.
+ **/
void
lpfc_els_flush_cmd(struct lpfc_vport *vport)
{
@@ -3948,6 +5024,23 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
return;
}
+/**
+ * lpfc_els_flush_all_cmd: Clean up all the outstanding els commands to a HBA.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is used to clean up all the outstanding ELS commands on a
+ * @phba. It first aborts the @phba by invoking the lpfc_fabric_abort_hba()
+ * routine. After that, it walks the ELS transmit queue to remove all the
+ * IOCBs to the @phba other than the QUE_RING and ABORT/CLOSE IOCBs. For
+ * the IOCBs with the completion callback function associated, the callback
+ * function will be invoked with the status set to IOSTAT_LOCAL_REJECT and
+ * un.ulpWord[4] set to IOERR_SLI_ABORTED. For IOCBs without the completion
+ * callback function associated, the IOCB will simply be released. Finally,
+ * it walks the ELS transmit completion queue to issue an abort IOCB to any
+ * transmit completion queue IOCB that is not an IOCB from libdfc (i.e., the
+ * management plane IOCBs that are not part of the discovery state machine)
+ * out to HBA by invoking the lpfc_sli_issue_abort_iotag() routine.
+ **/
void
lpfc_els_flush_all_cmd(struct lpfc_hba *phba)
{
@@ -3992,6 +5085,130 @@ lpfc_els_flush_all_cmd(struct lpfc_hba *phba)
return;
}
+/**
+ * lpfc_send_els_failure_event: Posts an ELS command failure event.
+ * @phba: Pointer to hba context object.
+ * @cmdiocbp: Pointer to command iocb which reported error.
+ * @rspiocbp: Pointer to response iocb which reported error.
+ *
+ * This function sends an event when there is an ELS command
+ * failure.
+ **/
+void
+lpfc_send_els_failure_event(struct lpfc_hba *phba,
+ struct lpfc_iocbq *cmdiocbp,
+ struct lpfc_iocbq *rspiocbp)
+{
+ struct lpfc_vport *vport = cmdiocbp->vport;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_lsrjt_event lsrjt_event;
+ struct lpfc_fabric_event_header fabric_event;
+ struct ls_rjt stat;
+ struct lpfc_nodelist *ndlp;
+ uint32_t *pcmd;
+
+ ndlp = cmdiocbp->context1;
+ if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
+ return;
+
+ if (rspiocbp->iocb.ulpStatus == IOSTAT_LS_RJT) {
+ lsrjt_event.header.event_type = FC_REG_ELS_EVENT;
+ lsrjt_event.header.subcategory = LPFC_EVENT_LSRJT_RCV;
+ memcpy(lsrjt_event.header.wwpn, &ndlp->nlp_portname,
+ sizeof(struct lpfc_name));
+ memcpy(lsrjt_event.header.wwnn, &ndlp->nlp_nodename,
+ sizeof(struct lpfc_name));
+ pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
+ cmdiocbp->context2)->virt);
+ lsrjt_event.command = *pcmd;
+ stat.un.lsRjtError = be32_to_cpu(rspiocbp->iocb.un.ulpWord[4]);
+ lsrjt_event.reason_code = stat.un.b.lsRjtRsnCode;
+ lsrjt_event.explanation = stat.un.b.lsRjtRsnCodeExp;
+ fc_host_post_vendor_event(shost,
+ fc_get_event_number(),
+ sizeof(lsrjt_event),
+ (char *)&lsrjt_event,
+ SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
+ return;
+ }
+ if ((rspiocbp->iocb.ulpStatus == IOSTAT_NPORT_BSY) ||
+ (rspiocbp->iocb.ulpStatus == IOSTAT_FABRIC_BSY)) {
+ fabric_event.event_type = FC_REG_FABRIC_EVENT;
+ if (rspiocbp->iocb.ulpStatus == IOSTAT_NPORT_BSY)
+ fabric_event.subcategory = LPFC_EVENT_PORT_BUSY;
+ else
+ fabric_event.subcategory = LPFC_EVENT_FABRIC_BUSY;
+ memcpy(fabric_event.wwpn, &ndlp->nlp_portname,
+ sizeof(struct lpfc_name));
+ memcpy(fabric_event.wwnn, &ndlp->nlp_nodename,
+ sizeof(struct lpfc_name));
+ fc_host_post_vendor_event(shost,
+ fc_get_event_number(),
+ sizeof(fabric_event),
+ (char *)&fabric_event,
+ SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
+ return;
+ }
+
+}
+
+/**
+ * lpfc_send_els_event: Posts unsolicited els event.
+ * @vport: Pointer to vport object.
+ * @ndlp: Pointer FC node object.
+ * @cmd: ELS command code.
+ *
+ * This function posts an event when there is an incoming
+ * unsolicited ELS command.
+ **/
+static void
+lpfc_send_els_event(struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp,
+ uint32_t cmd)
+{
+ struct lpfc_els_event_header els_data;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+ els_data.event_type = FC_REG_ELS_EVENT;
+ switch (cmd) {
+ case ELS_CMD_PLOGI:
+ els_data.subcategory = LPFC_EVENT_PLOGI_RCV;
+ break;
+ case ELS_CMD_PRLO:
+ els_data.subcategory = LPFC_EVENT_PRLO_RCV;
+ break;
+ case ELS_CMD_ADISC:
+ els_data.subcategory = LPFC_EVENT_ADISC_RCV;
+ break;
+ default:
+ return;
+ }
+ memcpy(els_data.wwpn, &ndlp->nlp_portname, sizeof(struct lpfc_name));
+ memcpy(els_data.wwnn, &ndlp->nlp_nodename, sizeof(struct lpfc_name));
+ fc_host_post_vendor_event(shost,
+ fc_get_event_number(),
+ sizeof(els_data),
+ (char *)&els_data,
+ SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
+
+ return;
+}
+
+
+/**
+ * lpfc_els_unsol_buffer: Process an unsolicited event data buffer.
+ * @phba: pointer to lpfc hba data structure.
+ * @pring: pointer to a SLI ring.
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @elsiocb: pointer to lpfc els command iocb data structure.
+ *
+ * This routine is used for processing the IOCB associated with an unsolicited
+ * event. It first determines whether there is an existing ndlp that matches
+ * the DID from the unsolicited IOCB. If not, it will create a new one with
+ * the DID from the unsolicited IOCB. The ELS command from the unsolicited
+ * IOCB is then used to invoke the proper routine and to set up proper state
+ * of the discovery state machine.
+ **/
static void
lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_vport *vport, struct lpfc_iocbq *elsiocb)
@@ -4059,8 +5276,6 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
}
phba->fc_stat.elsRcvFrame++;
- if (elsiocb->context1)
- lpfc_nlp_put(elsiocb->context1);
elsiocb->context1 = lpfc_nlp_get(ndlp);
elsiocb->vport = vport;
@@ -4081,6 +5296,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
phba->fc_stat.elsRcvPLOGI++;
ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp);
+ lpfc_send_els_event(vport, ndlp, cmd);
if (vport->port_state < LPFC_DISC_AUTH) {
if (!(phba->pport->fc_flag & FC_PT2PT) ||
(phba->pport->fc_flag & FC_PT2PT_PLOGI)) {
@@ -4130,6 +5346,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
did, vport->port_state, ndlp->nlp_flag);
phba->fc_stat.elsRcvPRLO++;
+ lpfc_send_els_event(vport, ndlp, cmd);
if (vport->port_state < LPFC_DISC_AUTH) {
rjt_err = LSRJT_UNABLE_TPC;
break;
@@ -4147,6 +5364,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
"RCV ADISC: did:x%x/ste:x%x flg:x%x",
did, vport->port_state, ndlp->nlp_flag);
+ lpfc_send_els_event(vport, ndlp, cmd);
phba->fc_stat.elsRcvADISC++;
if (vport->port_state < LPFC_DISC_AUTH) {
rjt_err = LSRJT_UNABLE_TPC;
@@ -4270,6 +5488,8 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
NULL);
}
+ lpfc_nlp_put(elsiocb->context1);
+ elsiocb->context1 = NULL;
return;
dropit:
@@ -4282,6 +5502,19 @@ dropit:
phba->fc_stat.elsRcvDrop++;
}
+/**
+ * lpfc_find_vport_by_vpid: Find a vport on a HBA through vport identifier.
+ * @phba: pointer to lpfc hba data structure.
+ * @vpi: host virtual N_Port identifier.
+ *
+ * This routine finds a vport on a HBA (referred by @phba) through a
+ * @vpi. The function walks the HBA's vport list and returns the address
+ * of the vport with the matching @vpi.
+ *
+ * Return code
+ * NULL - No vport with the matching @vpi found
+ * Otherwise - Address to the vport with the matching @vpi.
+ **/
static struct lpfc_vport *
lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi)
{
@@ -4299,6 +5532,18 @@ lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi)
return NULL;
}
+/**
+ * lpfc_els_unsol_event: Process an unsolicited event from an els sli ring.
+ * @phba: pointer to lpfc hba data structure.
+ * @pring: pointer to a SLI ring.
+ * @elsiocb: pointer to lpfc els iocb data structure.
+ *
+ * This routine is used to process an unsolicited event received from a SLI
+ * (Service Level Interface) ring. The actual processing of the data buffer
+ * associated with the unsolicited event is done by invoking the routine
+ * lpfc_els_unsol_buffer() after properly setting up the iocb buffer from the
+ * SLI ring on which the unsolicited event was received.
+ **/
void
lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_iocbq *elsiocb)
@@ -4309,6 +5554,7 @@ lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_dmabuf *bdeBuf1 = elsiocb->context2;
struct lpfc_dmabuf *bdeBuf2 = elsiocb->context3;
+ elsiocb->context1 = NULL;
elsiocb->context2 = NULL;
elsiocb->context3 = NULL;
@@ -4356,8 +5602,6 @@ lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
* The different unsolicited event handlers would tell us
* if they are done with "mp" by setting context2 to NULL.
*/
- lpfc_nlp_put(elsiocb->context1);
- elsiocb->context1 = NULL;
if (elsiocb->context2) {
lpfc_in_buf_free(phba, (struct lpfc_dmabuf *)elsiocb->context2);
elsiocb->context2 = NULL;
@@ -4376,6 +5620,19 @@ lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
}
}
+/**
+ * lpfc_do_scr_ns_plogi: Issue a plogi to the name server for scr.
+ * @phba: pointer to lpfc hba data structure.
+ * @vport: pointer to a virtual N_Port data structure.
+ *
+ * This routine issues a Port Login (PLOGI) to the Name Server with
+ * State Change Request (SCR) for a @vport. This routine will create an
+ * ndlp for the Name Server associated to the @vport if such node does
+ * not already exist. The PLOGI to Name Server is issued by invoking the
+ * lpfc_issue_els_plogi() routine. If Fabric-Device Management Interface
+ * (FDMI) is configured to the @vport, a FDMI node will be created and
+ * the PLOGI to FDMI is issued by invoking lpfc_issue_els_plogi() routine.
+ **/
void
lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport)
{
@@ -4434,6 +5691,18 @@ lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport)
return;
}
+/**
+ * lpfc_cmpl_reg_new_vport: Completion callback function to register new vport.
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * This routine is the completion callback function to register new vport
+ * mailbox command. If the new vport mailbox command completes successfully,
+ * the fabric registration login shall be performed on physical port (the
+ * new vport created is actually a physical port, with VPI 0) or the port
+ * login to Name Server for State Change Request (SCR) will be performed
+ * on virtual port (real virtual port, with VPI greater than 0).
+ **/
static void
lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
@@ -4491,6 +5760,15 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
return;
}
+/**
+ * lpfc_register_new_vport: Register a new vport with a HBA.
+ * @phba: pointer to lpfc hba data structure.
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine registers the @vport as a new virtual port with a HBA.
+ * It is done through a registering vpi mailbox command.
+ **/
static void
lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport,
struct lpfc_nodelist *ndlp)
@@ -4531,6 +5809,26 @@ mbox_err_exit:
return;
}
+/**
+ * lpfc_cmpl_els_fdisc: Completion function for fdisc iocb command.
+ * @phba: pointer to lpfc hba data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @rspiocb: pointer to lpfc response iocb data structure.
+ *
+ * This routine is the completion callback function to a Fabric Discover
+ * (FDISC) ELS command. Since all the FDISC ELS commands are issued
+ * single threaded, each FDISC completion callback function will reset
+ * the discovery timer for all vports such that the timers will not get
+ * unnecessary timeout. The function checks the FDISC IOCB status. If error
+ * detected, the vport will be set to FC_VPORT_FAILED state. Otherwise,the
+ * vport will set to FC_VPORT_ACTIVE state. It then checks whether the DID
+ * assigned to the vport has been changed with the completion of the FDISC
+ * command. If so, both RPI (Remote Port Index) and VPI (Virtual Port Index)
+ * are unregistered from the HBA, and then the lpfc_register_new_vport()
+ * routine is invoked to register new vport with the HBA. Otherwise, the
+ * lpfc_do_scr_ns_plogi() routine is invoked to issue a PLOGI to the Name
+ * Server for State Change Request (SCR).
+ **/
static void
lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
@@ -4565,58 +5863,80 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
goto out;
/* FDISC failed */
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
- "0124 FDISC failed. (%d/%d)\n",
+ "0126 FDISC failed. (%d/%d)\n",
irsp->ulpStatus, irsp->un.ulpWord[4]);
+ goto fdisc_failed;
+ }
if (vport->fc_vport->vport_state == FC_VPORT_INITIALIZING)
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
lpfc_nlp_put(ndlp);
/* giving up on FDISC. Cancel discovery timer */
lpfc_can_disctmo(vport);
- } else {
- spin_lock_irq(shost->host_lock);
- vport->fc_flag |= FC_FABRIC;
- if (vport->phba->fc_topology == TOPOLOGY_LOOP)
- vport->fc_flag |= FC_PUBLIC_LOOP;
- spin_unlock_irq(shost->host_lock);
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag |= FC_FABRIC;
+ if (vport->phba->fc_topology == TOPOLOGY_LOOP)
+ vport->fc_flag |= FC_PUBLIC_LOOP;
+ spin_unlock_irq(shost->host_lock);
- vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
- lpfc_vport_set_state(vport, FC_VPORT_ACTIVE);
- if ((vport->fc_prevDID != vport->fc_myDID) &&
- !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
- /* If our NportID changed, we need to ensure all
- * remaining NPORTs get unreg_login'ed so we can
- * issue unreg_vpi.
- */
- list_for_each_entry_safe(np, next_np,
- &vport->fc_nodes, nlp_listp) {
- if (!NLP_CHK_NODE_ACT(ndlp) ||
- (np->nlp_state != NLP_STE_NPR_NODE) ||
- !(np->nlp_flag & NLP_NPR_ADISC))
- continue;
- spin_lock_irq(shost->host_lock);
- np->nlp_flag &= ~NLP_NPR_ADISC;
- spin_unlock_irq(shost->host_lock);
- lpfc_unreg_rpi(vport, np);
- }
- lpfc_mbx_unreg_vpi(vport);
+ vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
+ lpfc_vport_set_state(vport, FC_VPORT_ACTIVE);
+ if ((vport->fc_prevDID != vport->fc_myDID) &&
+ !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
+ /* If our NportID changed, we need to ensure all
+ * remaining NPORTs get unreg_login'ed so we can
+ * issue unreg_vpi.
+ */
+ list_for_each_entry_safe(np, next_np,
+ &vport->fc_nodes, nlp_listp) {
+ if (!NLP_CHK_NODE_ACT(ndlp) ||
+ (np->nlp_state != NLP_STE_NPR_NODE) ||
+ !(np->nlp_flag & NLP_NPR_ADISC))
+ continue;
spin_lock_irq(shost->host_lock);
- vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
+ np->nlp_flag &= ~NLP_NPR_ADISC;
spin_unlock_irq(shost->host_lock);
+ lpfc_unreg_rpi(vport, np);
}
-
- if (vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
- lpfc_register_new_vport(phba, vport, ndlp);
- else
- lpfc_do_scr_ns_plogi(phba, vport);
-
- /* Unconditionaly kick off releasing fabric node for vports */
- lpfc_nlp_put(ndlp);
+ lpfc_mbx_unreg_vpi(vport);
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
+ spin_unlock_irq(shost->host_lock);
}
+ if (vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
+ lpfc_register_new_vport(phba, vport, ndlp);
+ else
+ lpfc_do_scr_ns_plogi(phba, vport);
+ goto out;
+fdisc_failed:
+ lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+ /* Cancel discovery timer */
+ lpfc_can_disctmo(vport);
+ lpfc_nlp_put(ndlp);
out:
lpfc_els_free_iocb(phba, cmdiocb);
}
+/**
+ * lpfc_issue_els_fdisc: Issue a fdisc iocb command.
+ * @vport: pointer to a virtual N_Port data structure.
+ * @ndlp: pointer to a node-list data structure.
+ * @retry: number of retries to the command IOCB.
+ *
+ * This routine prepares and issues a Fabric Discover (FDISC) IOCB to
+ * a remote node (@ndlp) off a @vport. It uses the lpfc_issue_fabric_iocb()
+ * routine to issue the IOCB, which makes sure only one outstanding fabric
+ * IOCB will be sent off HBA at any given time.
+ *
+ * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
+ * will be incremented by 1 for holding the ndlp and the reference to ndlp
+ * will be stored into the context1 field of the IOCB for the completion
+ * callback function to the FDISC ELS command.
+ *
+ * Return code
+ * 0 - Successfully issued fdisc iocb command
+ * 1 - Failed to issue fdisc iocb command
+ **/
static int
lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
uint8_t retry)
@@ -4691,6 +6011,20 @@ lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
return 0;
}
+/**
+ * lpfc_cmpl_els_npiv_logo: Completion function with vport logo.
+ * @phba: pointer to lpfc hba data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @rspiocb: pointer to lpfc response iocb data structure.
+ *
+ * This routine is the completion callback function to the issuing of a LOGO
+ * ELS command off a vport. It frees the command IOCB and then decrements the
+ * reference count held on the ndlp for this completion function, indicating
+ * that the reference to the ndlp is no longer needed. Note that the
+ * lpfc_els_free_iocb() routine decrements the ndlp reference held for this
+ * callback function and an additional explicit ndlp reference decrement
+ * will trigger the actual release of the ndlp.
+ **/
static void
lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
@@ -4712,6 +6046,22 @@ lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_nlp_put(ndlp);
}
+/**
+ * lpfc_issue_els_npiv_logo: Issue a logo off a vport.
+ * @vport: pointer to a virtual N_Port data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine issues a LOGO ELS command to an @ndlp off a @vport.
+ *
+ * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
+ * will be incremented by 1 for holding the ndlp and the reference to ndlp
+ * will be stored into the context1 field of the IOCB for the completion
+ * callback function to the LOGO ELS command.
+ *
+ * Return codes
+ * 0 - Successfully issued logo off the @vport
+ * 1 - Failed to issue logo off the @vport
+ **/
int
lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
@@ -4757,6 +6107,17 @@ lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
return 0;
}
+/**
+ * lpfc_fabric_block_timeout: Handler function to the fabric block timer.
+ * @ptr: holder for the timer function associated data.
+ *
+ * This routine is invoked by the fabric iocb block timer after
+ * timeout. It posts the fabric iocb block timeout event by setting the
+ * WORKER_FABRIC_BLOCK_TMO bit in the work port event bitmap and then invokes
+ * lpfc_worker_wake_up() routine to wake up the worker thread. It is for
+ * the worker thread to invoke the lpfc_unblock_fabric_iocbs() on the
+ * posted event WORKER_FABRIC_BLOCK_TMO.
+ **/
void
lpfc_fabric_block_timeout(unsigned long ptr)
{
@@ -4775,6 +6136,16 @@ lpfc_fabric_block_timeout(unsigned long ptr)
return;
}
+/**
+ * lpfc_resume_fabric_iocbs: Issue a fabric iocb from driver internal list.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine issues one fabric iocb from the driver internal list to
+ * the HBA. It first checks whether it's ready to issue one fabric iocb to
+ * the HBA (whether there is no outstanding fabric iocb). If so, it shall
+ * remove one pending fabric iocb from the driver internal list and invokes
+ * lpfc_sli_issue_iocb() routine to send the fabric iocb to the HBA.
+ **/
static void
lpfc_resume_fabric_iocbs(struct lpfc_hba *phba)
{
@@ -4824,6 +6195,15 @@ repeat:
return;
}
+/**
+ * lpfc_unblock_fabric_iocbs: Unblock issuing fabric iocb command.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine unblocks the issuing of fabric iocb commands. The function
+ * will clear the fabric iocb block bit and then invoke the routine
+ * lpfc_resume_fabric_iocbs() to issue one of the pending fabric iocb
+ * from the driver internal fabric iocb list.
+ **/
void
lpfc_unblock_fabric_iocbs(struct lpfc_hba *phba)
{
@@ -4833,6 +6213,15 @@ lpfc_unblock_fabric_iocbs(struct lpfc_hba *phba)
return;
}
+/**
+ * lpfc_block_fabric_iocbs: Block issuing fabric iocb command.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine blocks the issuing of fabric iocbs for a specified amount of
+ * time (currently 100 ms). This is done by setting the fabric iocb block bit
+ * and setting up a 100 ms timeout timer. When the block bit is set, no more
+ * fabric iocb will be issued out of the HBA.
+ **/
static void
lpfc_block_fabric_iocbs(struct lpfc_hba *phba)
{
@@ -4846,6 +6235,19 @@ lpfc_block_fabric_iocbs(struct lpfc_hba *phba)
return;
}
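A hypothetical sketch of the "block bit plus 100 ms timer" pattern described above; the flag word, bit, and timer below are stand-ins invented for illustration and do not correspond to the actual struct lpfc_hba members.

#include <linux/bitops.h>
#include <linux/jiffies.h>
#include <linux/timer.h>

#define EXAMPLE_FABRIC_BLOCKED_BIT	0		/* hypothetical flag bit */

static unsigned long example_fabric_flags;		/* stand-in for an hba flag word */
static struct timer_list example_fabric_block_timer;	/* stand-in for an hba timer */

static void
example_block_fabric_iocbs(void)
{
	/* Only the first caller to set the block bit arms the unblock timer;
	 * the timer handler is expected to clear the bit and resume iocbs.
	 */
	if (!test_and_set_bit(EXAMPLE_FABRIC_BLOCKED_BIT, &example_fabric_flags))
		mod_timer(&example_fabric_block_timer,
			  jiffies + msecs_to_jiffies(100));
}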
+/**
+ * lpfc_cmpl_fabric_iocb: Completion callback function for fabric iocb.
+ * @phba: pointer to lpfc hba data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @rspiocb: pointer to lpfc response iocb data structure.
+ *
+ * This routine is the callback function that is put to the fabric iocb's
+ * callback function pointer (iocb->iocb_cmpl). The original iocb's callback
+ * function pointer has been stored in iocb->fabric_iocb_cmpl. This callback
+ * function first restores and invokes the original iocb's callback function
+ * and then invokes the lpfc_resume_fabric_iocbs() routine to issue the next
+ * fabric bound iocb from the driver internal fabric iocb list onto the wire.
+ **/
static void
lpfc_cmpl_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
@@ -4892,6 +6294,30 @@ lpfc_cmpl_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
}
}
+/**
+ * lpfc_issue_fabric_iocb: Issue a fabric iocb command.
+ * @phba: pointer to lpfc hba data structure.
+ * @iocb: pointer to lpfc command iocb data structure.
+ *
+ * This routine is used as the top-level API for issuing a fabric iocb command
+ * such as FLOGI and FDISC. To accommodate certain switch fabrics, this driver
+ * function makes sure that only one fabric bound iocb will be outstanding at
+ * any given time. As such, this function will first check to see whether there
+ * is already an outstanding fabric iocb on the wire. If so, it will put the
+ * newly issued iocb onto the driver internal fabric iocb list, waiting to be
+ * issued later. Otherwise, it will issue the iocb on the wire and update the
+ * fabric iocb count to indicate that there is one fabric iocb on the wire.
+ *
+ * Note, this implementation can potentially send fabric IOCBs out of
+ * order: the construction of the "ready" boolean does not include the
+ * condition that the internal fabric IOCB list is empty. As such, it is
+ * possible for a fabric IOCB issued by this routine to jump ahead of the
+ * fabric IOCBs already on the internal list.
+ *
+ * Return code
+ * IOCB_SUCCESS - either fabric iocb put on the list or issued successfully
+ * IOCB_ERROR - failed to issue fabric iocb
+ **/
static int
lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb)
{
@@ -4937,7 +6363,17 @@ lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb)
return ret;
}
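
[Editor's note] The serialization and the out-of-order caveat documented above boil down to an atomic "one outstanding" counter plus a deferral list. Below is a hedged sketch, with hypothetical names and without the block bit or completion rewiring; it also includes the list-empty check whose absence the note complains about.

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>

struct fabric_gate {
	atomic_t outstanding;		/* 0 or 1 fabric iocb on the wire */
	struct list_head deferred;	/* fabric iocbs waiting their turn */
	spinlock_t lock;
};

/* Returns 0 if the caller may issue the iocb now, 1 if it was deferred. */
static int fabric_issue_or_defer(struct fabric_gate *gate,
				 struct list_head *iocb_entry)
{
	unsigned long flags;
	int deferred = 0;

	spin_lock_irqsave(&gate->lock, flags);
	/* Checking list_empty() here is what keeps a new iocb from
	 * jumping ahead of already-deferred ones. */
	if (atomic_read(&gate->outstanding) == 0 &&
	    list_empty(&gate->deferred)) {
		atomic_inc(&gate->outstanding);
	} else {
		list_add_tail(iocb_entry, &gate->deferred);
		deferred = 1;
	}
	spin_unlock_irqrestore(&gate->lock, flags);
	return deferred;
}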
-
+/**
+ * lpfc_fabric_abort_vport: Abort a vport's iocbs from driver fabric iocb list.
+ * @vport: pointer to a virtual N_Port data structure.
+ *
+ * This routine aborts all the IOCBs associated with a @vport from the
+ * driver internal fabric IOCB list. The list contains fabric IOCBs to be
+ * issued to the ELS IOCB ring. This abort function walks the fabric IOCB
+ * list, removes each IOCB associated with the @vport from the list, sets the
+ * status field to IOSTAT_LOCAL_REJECT, and invokes the callback function
+ * associated with the IOCB.
+ **/
static void lpfc_fabric_abort_vport(struct lpfc_vport *vport)
{
LIST_HEAD(completions);
@@ -4967,6 +6403,17 @@ static void lpfc_fabric_abort_vport(struct lpfc_vport *vport)
}
}
+/**
+ * lpfc_fabric_abort_nport: Abort a ndlp's iocbs from driver fabric iocb list.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine aborts all the IOCBs associated with an @ndlp from the
+ * driver internal fabric IOCB list. The list contains fabric IOCBs to be
+ * issued to the ELS IOCB ring. This abort function walks the fabric IOCB
+ * list, removes each IOCB associated with the @ndlp from the list, sets the
+ * status field to IOSTAT_LOCAL_REJECT, and invokes the callback function
+ * associated with the IOCB.
+ **/
void lpfc_fabric_abort_nport(struct lpfc_nodelist *ndlp)
{
LIST_HEAD(completions);
@@ -4996,6 +6443,17 @@ void lpfc_fabric_abort_nport(struct lpfc_nodelist *ndlp)
}
}
+/**
+ * lpfc_fabric_abort_hba: Abort all iocbs on driver fabric iocb list.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine aborts all the IOCBs currently on the driver internal
+ * fabric IOCB list. The list contains fabric IOCBs to be issued to the ELS
+ * IOCB ring. This function takes the entire IOCB list off the fabric IOCB
+ * list, removes the IOCBs from the list, sets the status field to
+ * IOSTAT_LOCAL_REJECT, and invokes the callback function associated with
+ * the IOCB.
+ **/
void lpfc_fabric_abort_hba(struct lpfc_hba *phba)
{
LIST_HEAD(completions);
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index a98d11bf357..a1a70d9ffc2 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -30,6 +30,7 @@
#include <scsi/scsi_transport_fc.h>
#include "lpfc_hw.h"
+#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_sli.h"
#include "lpfc_scsi.h"
@@ -88,14 +89,6 @@ lpfc_terminate_rport_io(struct fc_rport *rport)
&phba->sli.ring[phba->sli.fcp_ring],
ndlp->nlp_sid, 0, LPFC_CTX_TGT);
}
-
- /*
- * A device is normally blocked for rediscovery and unblocked when
- * devloss timeout happens. In case a vport is removed or driver
- * unloaded before devloss timeout happens, we need to unblock here.
- */
- scsi_target_unblock(&rport->dev);
- return;
}
/*
@@ -215,8 +208,16 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
return;
}
- if (ndlp->nlp_state == NLP_STE_MAPPED_NODE)
+ if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "0284 Devloss timeout Ignored on "
+ "WWPN %x:%x:%x:%x:%x:%x:%x:%x "
+ "NPort x%x\n",
+ *name, *(name+1), *(name+2), *(name+3),
+ *(name+4), *(name+5), *(name+6), *(name+7),
+ ndlp->nlp_DID);
return;
+ }
if (ndlp->nlp_type & NLP_FABRIC) {
/* We will clean up these Nodes in linkup */
@@ -237,8 +238,6 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
ndlp->nlp_sid, 0, LPFC_CTX_TGT);
}
- if (vport->load_flag & FC_UNLOADING)
- warn_on = 0;
if (warn_on) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
@@ -276,6 +275,124 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
}
+/**
+ * lpfc_alloc_fast_evt: Allocates data structure for posting event.
+ * @phba: Pointer to hba context object.
+ *
+ * This function is called from the functions which need to post
+ * events from interrupt context. This function allocates the data
+ * structure required for posting an event. It also keeps track of the
+ * number of pending events and prevents an event storm when there are
+ * too many events.
+ **/
+struct lpfc_fast_path_event *
+lpfc_alloc_fast_evt(struct lpfc_hba *phba) {
+ struct lpfc_fast_path_event *ret;
+
+	/* If there are lots of fast events, do not exhaust memory due to this */
+ if (atomic_read(&phba->fast_event_count) > LPFC_MAX_EVT_COUNT)
+ return NULL;
+
+ ret = kzalloc(sizeof(struct lpfc_fast_path_event),
+ GFP_ATOMIC);
+	if (ret) {
+		atomic_inc(&phba->fast_event_count);
+		INIT_LIST_HEAD(&ret->work_evt.evt_listp);
+		ret->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
+	}
+	return ret;
+}
+
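
[Editor's note] The allocation throttle described in the kernel-doc above is simply an atomic pending-event counter checked before a GFP_ATOMIC allocation. A hedged sketch follows; the type names and the cap are hypothetical, standing in for LPFC_MAX_EVT_COUNT and the driver's event structure.

#include <linux/slab.h>
#include <linux/list.h>
#include <linux/atomic.h>

#define MAX_PENDING_EVENTS	64	/* hypothetical cap, like LPFC_MAX_EVT_COUNT */

struct fast_evt {
	struct list_head list;
	int type;
};

static struct fast_evt *fast_evt_alloc(atomic_t *pending)
{
	struct fast_evt *evt;

	/* Refuse new events once too many are queued, so an event storm
	 * cannot exhaust atomic memory. */
	if (atomic_read(pending) > MAX_PENDING_EVENTS)
		return NULL;

	evt = kzalloc(sizeof(*evt), GFP_ATOMIC);
	if (!evt)
		return NULL;

	atomic_inc(pending);
	INIT_LIST_HEAD(&evt->list);
	return evt;
}

static void fast_evt_free(atomic_t *pending, struct fast_evt *evt)
{
	atomic_dec(pending);
	kfree(evt);
}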
+/**
+ * lpfc_free_fast_evt: Frees event data structure.
+ * @phba: Pointer to hba context object.
+ * @evt: Event object which needs to be freed.
+ *
+ * This function frees the data structure required for posting
+ * events.
+ **/
+void
+lpfc_free_fast_evt(struct lpfc_hba *phba,
+ struct lpfc_fast_path_event *evt) {
+
+ atomic_dec(&phba->fast_event_count);
+ kfree(evt);
+}
+
+/**
+ * lpfc_send_fastpath_evt: Posts events generated from fast path.
+ * @phba: Pointer to hba context object.
+ * @evtp: Event data structure.
+ *
+ * This function is called from the worker thread when the interrupt
+ * context needs to post an event. This function posts the event
+ * to the fc transport netlink interface.
+ **/
+static void
+lpfc_send_fastpath_evt(struct lpfc_hba *phba,
+ struct lpfc_work_evt *evtp)
+{
+ unsigned long evt_category, evt_sub_category;
+ struct lpfc_fast_path_event *fast_evt_data;
+ char *evt_data;
+ uint32_t evt_data_size;
+ struct Scsi_Host *shost;
+
+ fast_evt_data = container_of(evtp, struct lpfc_fast_path_event,
+ work_evt);
+
+ evt_category = (unsigned long) fast_evt_data->un.fabric_evt.event_type;
+ evt_sub_category = (unsigned long) fast_evt_data->un.
+ fabric_evt.subcategory;
+ shost = lpfc_shost_from_vport(fast_evt_data->vport);
+ if (evt_category == FC_REG_FABRIC_EVENT) {
+ if (evt_sub_category == LPFC_EVENT_FCPRDCHKERR) {
+ evt_data = (char *) &fast_evt_data->un.read_check_error;
+ evt_data_size = sizeof(fast_evt_data->un.
+ read_check_error);
+ } else if ((evt_sub_category == LPFC_EVENT_FABRIC_BUSY) ||
+ (evt_sub_category == IOSTAT_NPORT_BSY)) {
+ evt_data = (char *) &fast_evt_data->un.fabric_evt;
+ evt_data_size = sizeof(fast_evt_data->un.fabric_evt);
+ } else {
+ lpfc_free_fast_evt(phba, fast_evt_data);
+ return;
+ }
+ } else if (evt_category == FC_REG_SCSI_EVENT) {
+ switch (evt_sub_category) {
+ case LPFC_EVENT_QFULL:
+ case LPFC_EVENT_DEVBSY:
+ evt_data = (char *) &fast_evt_data->un.scsi_evt;
+ evt_data_size = sizeof(fast_evt_data->un.scsi_evt);
+ break;
+ case LPFC_EVENT_CHECK_COND:
+ evt_data = (char *) &fast_evt_data->un.check_cond_evt;
+ evt_data_size = sizeof(fast_evt_data->un.
+ check_cond_evt);
+ break;
+ case LPFC_EVENT_VARQUEDEPTH:
+ evt_data = (char *) &fast_evt_data->un.queue_depth_evt;
+ evt_data_size = sizeof(fast_evt_data->un.
+ queue_depth_evt);
+ break;
+ default:
+ lpfc_free_fast_evt(phba, fast_evt_data);
+ return;
+ }
+ } else {
+ lpfc_free_fast_evt(phba, fast_evt_data);
+ return;
+ }
+
+ fc_host_post_vendor_event(shost,
+ fc_get_event_number(),
+ evt_data_size,
+ evt_data,
+ SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
+
+ lpfc_free_fast_evt(phba, fast_evt_data);
+ return;
+}
+
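
[Editor's note] Once the worker thread has picked the payload and size for the event sub-category, delivery is a single fc_host_post_vendor_event() call on the vport's Scsi_Host. A hedged sketch of that final step is shown below; the wrapper name is hypothetical, while the vendor-id combination mirrors the one used in the hunk above.

#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/scsi_netlink.h>
#include <linux/pci_ids.h>

/* Post an already-built event payload to the FC transport netlink interface. */
static void post_vendor_event(struct Scsi_Host *shost,
			      void *payload, u32 payload_len)
{
	fc_host_post_vendor_event(shost,
				  fc_get_event_number(),	/* unique event number */
				  payload_len,
				  (char *)payload,
				  SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
}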
static void
lpfc_work_list_done(struct lpfc_hba *phba)
{
@@ -347,6 +464,10 @@ lpfc_work_list_done(struct lpfc_hba *phba)
lpfc_unblock_mgmt_io(phba);
complete((struct completion *)(evtp->evt_arg2));
break;
+ case LPFC_EVT_FASTPATH_MGMT_EVT:
+ lpfc_send_fastpath_evt(phba, evtp);
+ free_evt = 0;
+ break;
}
if (free_evt)
kfree(evtp);
@@ -371,6 +492,7 @@ lpfc_work_done(struct lpfc_hba *phba)
spin_unlock_irq(&phba->hbalock);
if (ha_copy & HA_ERATT)
+ /* Handle the error attention event */
lpfc_handle_eratt(phba);
if (ha_copy & HA_MBATT)
@@ -378,6 +500,7 @@ lpfc_work_done(struct lpfc_hba *phba)
if (ha_copy & HA_LATT)
lpfc_handle_latt(phba);
+
vports = lpfc_create_vport_work_array(phba);
if (vports != NULL)
for(i = 0; i <= phba->max_vpi; i++) {
@@ -1013,14 +1136,10 @@ out:
}
static void
-lpfc_mbx_issue_link_down(struct lpfc_hba *phba)
+lpfc_enable_la(struct lpfc_hba *phba)
{
uint32_t control;
struct lpfc_sli *psli = &phba->sli;
-
- lpfc_linkdown(phba);
-
- /* turn on Link Attention interrupts - no CLEAR_LA needed */
spin_lock_irq(&phba->hbalock);
psli->sli_flag |= LPFC_PROCESS_LA;
control = readl(phba->HCregaddr);
@@ -1030,6 +1149,15 @@ lpfc_mbx_issue_link_down(struct lpfc_hba *phba)
spin_unlock_irq(&phba->hbalock);
}
+static void
+lpfc_mbx_issue_link_down(struct lpfc_hba *phba)
+{
+ lpfc_linkdown(phba);
+ lpfc_enable_la(phba);
+ /* turn on Link Attention interrupts - no CLEAR_LA needed */
+}
+
+
/*
* This routine handles processing a READ_LA mailbox
* command upon completion. It is setup in the LPFC_MBOXQ
@@ -1077,8 +1205,12 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
}
phba->fc_eventTag = la->eventTag;
+ if (la->mm)
+ phba->sli.sli_flag |= LPFC_MENLO_MAINT;
+ else
+ phba->sli.sli_flag &= ~LPFC_MENLO_MAINT;
- if (la->attType == AT_LINK_UP) {
+ if (la->attType == AT_LINK_UP && (!la->mm)) {
phba->fc_stat.LinkUp++;
if (phba->link_flag & LS_LOOPBACK_MODE) {
lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
@@ -1090,13 +1222,15 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
} else {
lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
"1303 Link Up Event x%x received "
- "Data: x%x x%x x%x x%x\n",
+ "Data: x%x x%x x%x x%x x%x x%x %d\n",
la->eventTag, phba->fc_eventTag,
la->granted_AL_PA, la->UlnkSpeed,
- phba->alpa_map[0]);
+ phba->alpa_map[0],
+ la->mm, la->fa,
+ phba->wait_4_mlo_maint_flg);
}
lpfc_mbx_process_link_up(phba, la);
- } else {
+ } else if (la->attType == AT_LINK_DOWN) {
phba->fc_stat.LinkDown++;
if (phba->link_flag & LS_LOOPBACK_MODE) {
lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
@@ -1109,11 +1243,46 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
else {
lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
"1305 Link Down Event x%x received "
+ "Data: x%x x%x x%x x%x x%x\n",
+ la->eventTag, phba->fc_eventTag,
+ phba->pport->port_state, vport->fc_flag,
+ la->mm, la->fa);
+ }
+ lpfc_mbx_issue_link_down(phba);
+ }
+ if (la->mm && la->attType == AT_LINK_UP) {
+ if (phba->link_state != LPFC_LINK_DOWN) {
+ phba->fc_stat.LinkDown++;
+ lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
+ "1312 Link Down Event x%x received "
+ "Data: x%x x%x x%x\n",
+ la->eventTag, phba->fc_eventTag,
+ phba->pport->port_state, vport->fc_flag);
+ lpfc_mbx_issue_link_down(phba);
+ } else
+ lpfc_enable_la(phba);
+
+ lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
+ "1310 Menlo Maint Mode Link up Event x%x rcvd "
"Data: x%x x%x x%x\n",
la->eventTag, phba->fc_eventTag,
phba->pport->port_state, vport->fc_flag);
+ /*
+ * The cmnd that triggered this will be waiting for this
+ * signal.
+ */
+ /* WAKEUP for MENLO_SET_MODE or MENLO_RESET command. */
+ if (phba->wait_4_mlo_maint_flg) {
+ phba->wait_4_mlo_maint_flg = 0;
+ wake_up_interruptible(&phba->wait_4_mlo_m_q);
}
- lpfc_mbx_issue_link_down(phba);
+ }
+
+ if (la->fa) {
+ if (la->mm)
+ lpfc_issue_clear_la(phba, vport);
+ lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
+ "1311 fa %d\n", la->fa);
}
lpfc_mbx_cmpl_read_la_free_mbuf:
@@ -1177,7 +1346,7 @@ lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
scsi_host_put(shost);
}
-void
+int
lpfc_mbx_unreg_vpi(struct lpfc_vport *vport)
{
struct lpfc_hba *phba = vport->phba;
@@ -1186,7 +1355,7 @@ lpfc_mbx_unreg_vpi(struct lpfc_vport *vport)
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox)
- return;
+ return 1;
lpfc_unreg_vpi(phba, vport->vpi, mbox);
mbox->vport = vport;
@@ -1197,7 +1366,9 @@ lpfc_mbx_unreg_vpi(struct lpfc_vport *vport)
"1800 Could not issue unreg_vpi\n");
mempool_free(mbox, phba->mbox_mem_pool);
vport->unreg_vpi_cmpl = VPORT_ERROR;
+ return rc;
}
+ return 0;
}
static void
@@ -1553,6 +1724,22 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
*/
lpfc_register_remote_port(vport, ndlp);
}
+ if ((new_state == NLP_STE_MAPPED_NODE) &&
+ (vport->stat_data_enabled)) {
+ /*
+		 * A new target is discovered; if there is no buffer for
+		 * statistical data collection, allocate one.
+ */
+ ndlp->lat_data = kcalloc(LPFC_MAX_BUCKET_COUNT,
+ sizeof(struct lpfc_scsicmd_bkt),
+ GFP_KERNEL);
+
+ if (!ndlp->lat_data)
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
+ "0286 lpfc_nlp_state_cleanup failed to "
+ "allocate statistical data buffer DID "
+ "0x%x\n", ndlp->nlp_DID);
+ }
/*
* if we added to Mapped list, but the remote port
* registration failed or assigned a target id outside
@@ -2786,7 +2973,7 @@ restart_disc:
default:
lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
- "0229 Unexpected discovery timeout, "
+ "0273 Unexpected discovery timeout, "
"vport State x%x\n", vport->port_state);
break;
}
@@ -2940,6 +3127,8 @@ lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
INIT_LIST_HEAD(&ndlp->nlp_listp);
kref_init(&ndlp->kref);
NLP_INT_NODE_ACT(ndlp);
+ atomic_set(&ndlp->cmd_pending, 0);
+ ndlp->cmd_qdepth = LPFC_MAX_TGT_QDEPTH;
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
"node init: did:x%x",
@@ -2979,8 +3168,10 @@ lpfc_nlp_release(struct kref *kref)
spin_unlock_irqrestore(&phba->ndlp_lock, flags);
/* free ndlp memory for final ndlp release */
- if (NLP_CHK_FREE_REQ(ndlp))
+ if (NLP_CHK_FREE_REQ(ndlp)) {
+ kfree(ndlp->lat_data);
mempool_free(ndlp, ndlp->vport->phba->nlp_mem_pool);
+ }
}
/* This routine bumps the reference count for a ndlp structure to ensure
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 7773b949aa7..5de5dabbbee 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -1107,6 +1107,8 @@ typedef struct {
/* Start FireFly Register definitions */
#define PCI_VENDOR_ID_EMULEX 0x10df
#define PCI_DEVICE_ID_FIREFLY 0x1ae5
+#define PCI_DEVICE_ID_PROTEUS_VF 0xe100
+#define PCI_DEVICE_ID_PROTEUS_PF 0xe180
#define PCI_DEVICE_ID_SAT_SMB 0xf011
#define PCI_DEVICE_ID_SAT_MID 0xf015
#define PCI_DEVICE_ID_RFLY 0xf095
@@ -1133,10 +1135,12 @@ typedef struct {
#define PCI_DEVICE_ID_LP11000S 0xfc10
#define PCI_DEVICE_ID_LPE11000S 0xfc20
#define PCI_DEVICE_ID_SAT_S 0xfc40
+#define PCI_DEVICE_ID_PROTEUS_S 0xfc50
#define PCI_DEVICE_ID_HELIOS 0xfd00
#define PCI_DEVICE_ID_HELIOS_SCSP 0xfd11
#define PCI_DEVICE_ID_HELIOS_DCSP 0xfd12
#define PCI_DEVICE_ID_ZEPHYR 0xfe00
+#define PCI_DEVICE_ID_HORNET 0xfe05
#define PCI_DEVICE_ID_ZEPHYR_SCSP 0xfe11
#define PCI_DEVICE_ID_ZEPHYR_DCSP 0xfe12
@@ -1154,6 +1158,7 @@ typedef struct {
#define ZEPHYR_JEDEC_ID 0x0577
#define VIPER_JEDEC_ID 0x4838
#define SATURN_JEDEC_ID 0x1004
+#define HORNET_JDEC_ID 0x2057706D
#define JEDEC_ID_MASK 0x0FFFF000
#define JEDEC_ID_SHIFT 12
@@ -1198,6 +1203,18 @@ typedef struct { /* FireFly BIU registers */
#define HA_RXATT 0x00000008 /* Bit 3 */
#define HA_RXMASK 0x0000000f
+#define HA_R0_CLR_MSK (HA_R0RE_REQ | HA_R0CE_RSP | HA_R0ATT)
+#define HA_R1_CLR_MSK (HA_R1RE_REQ | HA_R1CE_RSP | HA_R1ATT)
+#define HA_R2_CLR_MSK (HA_R2RE_REQ | HA_R2CE_RSP | HA_R2ATT)
+#define HA_R3_CLR_MSK (HA_R3RE_REQ | HA_R3CE_RSP | HA_R3ATT)
+
+#define HA_R0_POS 3
+#define HA_R1_POS 7
+#define HA_R2_POS 11
+#define HA_R3_POS 15
+#define HA_LE_POS 29
+#define HA_MB_POS 30
+#define HA_ER_POS 31
/* Chip Attention Register */
#define CA_REG_OFFSET 4 /* Byte offset from register base address */
@@ -1235,7 +1252,7 @@ typedef struct { /* FireFly BIU registers */
/* Host Control Register */
-#define HC_REG_OFFSET 12 /* Word offset from register base address */
+#define HC_REG_OFFSET 12 /* Byte offset from register base address */
#define HC_MBINT_ENA 0x00000001 /* Bit 0 */
#define HC_R0INT_ENA 0x00000002 /* Bit 1 */
@@ -1248,6 +1265,19 @@ typedef struct { /* FireFly BIU registers */
#define HC_LAINT_ENA 0x20000000 /* Bit 29 */
#define HC_ERINT_ENA 0x80000000 /* Bit 31 */
+/* Message Signaled Interrupt eXtension (MSI-X) message identifiers */
+#define MSIX_DFLT_ID 0
+#define MSIX_RNG0_ID 0
+#define MSIX_RNG1_ID 1
+#define MSIX_RNG2_ID 2
+#define MSIX_RNG3_ID 3
+
+#define MSIX_LINK_ID 4
+#define MSIX_MBOX_ID 5
+
+#define MSIX_SPARE0_ID 6
+#define MSIX_SPARE1_ID 7
+
/* Mailbox Commands */
#define MBX_SHUTDOWN 0x00 /* terminate testing */
#define MBX_LOAD_SM 0x01
@@ -1285,10 +1315,14 @@ typedef struct { /* FireFly BIU registers */
#define MBX_KILL_BOARD 0x24
#define MBX_CONFIG_FARP 0x25
#define MBX_BEACON 0x2A
+#define MBX_CONFIG_MSI 0x30
#define MBX_HEARTBEAT 0x31
#define MBX_WRITE_VPARMS 0x32
#define MBX_ASYNCEVT_ENABLE 0x33
+#define MBX_PORT_CAPABILITIES 0x3B
+#define MBX_PORT_IOV_CONTROL 0x3C
+
#define MBX_CONFIG_HBQ 0x7C
#define MBX_LOAD_AREA 0x81
#define MBX_RUN_BIU_DIAG64 0x84
@@ -1474,24 +1508,18 @@ struct ulp_bde64 { /* SLI-2 */
uint32_t bdeFlags:8; /* BDE Flags 0 IS A SUPPORTED
VALUE !! */
#endif
-
-#define BUFF_USE_RSVD 0x01 /* bdeFlags */
-#define BUFF_USE_INTRPT 0x02 /* Not Implemented with LP6000 */
-#define BUFF_USE_CMND 0x04 /* Optional, 1=cmd/rsp 0=data buffer */
-#define BUFF_USE_RCV 0x08 /* "" "", 1=rcv buffer, 0=xmit
- buffer */
-#define BUFF_TYPE_32BIT 0x10 /* "" "", 1=32 bit addr 0=64 bit
- addr */
-#define BUFF_TYPE_SPECIAL 0x20 /* Not Implemented with LP6000 */
-#define BUFF_TYPE_BDL 0x40 /* Optional, may be set in BDL */
-#define BUFF_TYPE_INVALID 0x80 /* "" "" */
+#define BUFF_TYPE_BDE_64 0x00 /* BDE (Host_resident) */
+#define BUFF_TYPE_BDE_IMMED 0x01 /* Immediate Data BDE */
+#define BUFF_TYPE_BDE_64P 0x02 /* BDE (Port-resident) */
+#define BUFF_TYPE_BDE_64I 0x08 /* Input BDE (Host-resident) */
+#define BUFF_TYPE_BDE_64IP 0x0A /* Input BDE (Port-resident) */
+#define BUFF_TYPE_BLP_64 0x40 /* BLP (Host-resident) */
+#define BUFF_TYPE_BLP_64P 0x42 /* BLP (Port-resident) */
} f;
} tus;
uint32_t addrLow;
uint32_t addrHigh;
};
-#define BDE64_SIZE_WORD 0
-#define BPL64_SIZE_WORD 0x40
typedef struct ULP_BDL { /* SLI-2 */
#ifdef __BIG_ENDIAN_BITFIELD
@@ -2201,7 +2229,10 @@ typedef struct {
typedef struct {
uint32_t eventTag; /* Event tag */
#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t rsvd1:22;
+ uint32_t rsvd1:19;
+ uint32_t fa:1;
+ uint32_t mm:1; /* Menlo Maintenance mode enabled */
+ uint32_t rx:1;
uint32_t pb:1;
uint32_t il:1;
uint32_t attType:8;
@@ -2209,7 +2240,10 @@ typedef struct {
uint32_t attType:8;
uint32_t il:1;
uint32_t pb:1;
- uint32_t rsvd1:22;
+ uint32_t rx:1;
+ uint32_t mm:1;
+ uint32_t fa:1;
+ uint32_t rsvd1:19;
#endif
#define AT_RESERVED 0x00 /* Reserved - attType */
@@ -2230,6 +2264,7 @@ typedef struct {
#define TOPOLOGY_PT_PT 0x01 /* Topology is pt-pt / pt-fabric */
#define TOPOLOGY_LOOP 0x02 /* Topology is FC-AL */
+#define TOPOLOGY_LNK_MENLO_MAINTENANCE 0x05 /* maint mode zephyr to menlo */
union {
struct ulp_bde lilpBde; /* This BDE points to a 128 byte buffer
@@ -2324,6 +2359,36 @@ typedef struct {
#define DMP_RSP_OFFSET 0x14 /* word 5 contains first word of rsp */
#define DMP_RSP_SIZE 0x6C /* maximum of 27 words of rsp data */
+/* Structure for MB Command UPDATE_CFG (0x1B) */
+
+struct update_cfg_var {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t rsvd2:16;
+ uint32_t type:8;
+ uint32_t rsvd:1;
+ uint32_t ra:1;
+ uint32_t co:1;
+ uint32_t cv:1;
+ uint32_t req:4;
+ uint32_t entry_length:16;
+ uint32_t region_id:16;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint32_t req:4;
+ uint32_t cv:1;
+ uint32_t co:1;
+ uint32_t ra:1;
+ uint32_t rsvd:1;
+ uint32_t type:8;
+ uint32_t rsvd2:16;
+ uint32_t region_id:16;
+ uint32_t entry_length:16;
+#endif
+
+ uint32_t resp_info;
+ uint32_t byte_cnt;
+ uint32_t data_offset;
+};
+
struct hbq_mask {
#ifdef __BIG_ENDIAN_BITFIELD
uint8_t tmatch;
@@ -2560,6 +2625,40 @@ typedef struct {
} CONFIG_PORT_VAR;
+/* Structure for MB Command CONFIG_MSI (0x30) */
+struct config_msi_var {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t dfltMsgNum:8; /* Default message number */
+ uint32_t rsvd1:11; /* Reserved */
+ uint32_t NID:5; /* Number of secondary attention IDs */
+ uint32_t rsvd2:5; /* Reserved */
+ uint32_t dfltPresent:1; /* Default message number present */
+ uint32_t addFlag:1; /* Add association flag */
+ uint32_t reportFlag:1; /* Report association flag */
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint32_t reportFlag:1; /* Report association flag */
+ uint32_t addFlag:1; /* Add association flag */
+ uint32_t dfltPresent:1; /* Default message number present */
+ uint32_t rsvd2:5; /* Reserved */
+ uint32_t NID:5; /* Number of secondary attention IDs */
+ uint32_t rsvd1:11; /* Reserved */
+ uint32_t dfltMsgNum:8; /* Default message number */
+#endif
+ uint32_t attentionConditions[2];
+ uint8_t attentionId[16];
+ uint8_t messageNumberByHA[64];
+ uint8_t messageNumberByID[16];
+ uint32_t autoClearHA[2];
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t rsvd3:16;
+ uint32_t autoClearID:16;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint32_t autoClearID:16;
+ uint32_t rsvd3:16;
+#endif
+ uint32_t rsvd4;
+};
+
/* SLI-2 Port Control Block */
/* SLIM POINTER */
@@ -2678,10 +2777,12 @@ typedef union {
* NEW_FEATURE
*/
struct config_hbq_var varCfgHbq;/* cmd = 0x7c (CONFIG_HBQ) */
+ struct update_cfg_var varUpdateCfg; /* cmd = 0x1B (UPDATE_CFG)*/
CONFIG_PORT_VAR varCfgPort; /* cmd = 0x88 (CONFIG_PORT) */
REG_VPI_VAR varRegVpi; /* cmd = 0x96 (REG_VPI) */
UNREG_VPI_VAR varUnregVpi; /* cmd = 0x97 (UNREG_VPI) */
ASYNCEVT_ENABLE_VAR varCfgAsyncEvent; /*cmd = x33 (CONFIG_ASYNC) */
+ struct config_msi_var varCfgMSI;/* cmd = x30 (CONFIG_MSI) */
} MAILVARIANTS;
/*
@@ -2715,11 +2816,19 @@ struct sli3_pgp {
uint32_t hbq_get[16];
};
-typedef union {
- struct sli2_desc s2;
- struct sli3_desc s3;
- struct sli3_pgp s3_pgp;
-} SLI_VAR;
+struct sli3_inb_pgp {
+ uint32_t ha_copy;
+ uint32_t counter;
+ struct lpfc_pgp port[MAX_RINGS];
+ uint32_t hbq_get[16];
+};
+
+union sli_var {
+ struct sli2_desc s2;
+ struct sli3_desc s3;
+ struct sli3_pgp s3_pgp;
+ struct sli3_inb_pgp s3_inb_pgp;
+};
typedef struct {
#ifdef __BIG_ENDIAN_BITFIELD
@@ -2737,7 +2846,7 @@ typedef struct {
#endif
MAILVARIANTS un;
- SLI_VAR us;
+ union sli_var us;
} MAILBOX_t;
/*
@@ -3105,6 +3214,27 @@ struct que_xri64cx_ext_fields {
struct lpfc_hbq_entry buff[5];
};
+#define LPFC_EXT_DATA_BDE_COUNT 3
+struct fcp_irw_ext {
+ uint32_t io_tag64_low;
+ uint32_t io_tag64_high;
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint8_t reserved1;
+ uint8_t reserved2;
+ uint8_t reserved3;
+ uint8_t ebde_count;
+#else /* __LITTLE_ENDIAN */
+ uint8_t ebde_count;
+ uint8_t reserved3;
+ uint8_t reserved2;
+ uint8_t reserved1;
+#endif
+ uint32_t reserved4;
+ struct ulp_bde64 rbde; /* response bde */
+ struct ulp_bde64 dbde[LPFC_EXT_DATA_BDE_COUNT]; /* data BDE or BPL */
+ uint8_t icd[32]; /* immediate command data (32 bytes) */
+};
+
typedef struct _IOCB { /* IOCB structure */
union {
GENERIC_RSP grsp; /* Generic response */
@@ -3190,7 +3320,7 @@ typedef struct _IOCB { /* IOCB structure */
/* words 8-31 used for que_xri_cx iocb */
struct que_xri64cx_ext_fields que_xri64cx_ext_words;
-
+ struct fcp_irw_ext fcp_ext;
uint32_t sli3Words[24]; /* 96 extra bytes for SLI-3 */
} unsli3;
@@ -3292,3 +3422,10 @@ lpfc_error_lost_link(IOCB_t *iocbp)
iocbp->un.ulpWord[4] == IOERR_LINK_DOWN ||
iocbp->un.ulpWord[4] == IOERR_SLI_DOWN));
}
+
+#define MENLO_TRANSPORT_TYPE 0xfe
+#define MENLO_CONTEXT 0
+#define MENLO_PU 3
+#define MENLO_TIMEOUT 30
+#define SETVAR_MLOMNT 0x103107
+#define SETVAR_MLORST 0x103007
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index d51a2a4b43e..909be3301bb 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -36,6 +36,7 @@
#include "lpfc_hw.h"
#include "lpfc_sli.h"
+#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
@@ -52,17 +53,20 @@ static struct scsi_transport_template *lpfc_transport_template = NULL;
static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
static DEFINE_IDR(lpfc_hba_index);
-/************************************************************************/
-/* */
-/* lpfc_config_port_prep */
-/* This routine will do LPFC initialization prior to the */
-/* CONFIG_PORT mailbox command. This will be initialized */
-/* as a SLI layer callback routine. */
-/* This routine returns 0 on success or -ERESTART if it wants */
-/* the SLI layer to reset the HBA and try again. Any */
-/* other return value indicates an error. */
-/* */
-/************************************************************************/
+/**
+ * lpfc_config_port_prep: Perform lpfc initialization prior to config port.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine will do LPFC initialization prior to issuing the CONFIG_PORT
+ * mailbox command. It retrieves the revision information from the HBA and
+ * collects the Vital Product Data (VPD) about the HBA for preparing the
+ * configuration of the HBA.
+ *
+ * Return codes:
+ * 0 - success.
+ * -ERESTART - requests the SLI layer to reset the HBA and try again.
+ * Any other value - indicates an error.
+ **/
int
lpfc_config_port_prep(struct lpfc_hba *phba)
{
@@ -180,12 +184,9 @@ lpfc_config_port_prep(struct lpfc_hba *phba)
sizeof (phba->RandomData));
/* Get adapter VPD information */
- pmb->context2 = kmalloc(DMP_RSP_SIZE, GFP_KERNEL);
- if (!pmb->context2)
- goto out_free_mbox;
lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
if (!lpfc_vpd_data)
- goto out_free_context2;
+ goto out_free_mbox;
do {
lpfc_dump_mem(phba, pmb, offset);
@@ -200,21 +201,29 @@ lpfc_config_port_prep(struct lpfc_hba *phba)
}
if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
- lpfc_sli_pcimem_bcopy(pmb->context2, lpfc_vpd_data + offset,
+ lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
+ lpfc_vpd_data + offset,
mb->un.varDmp.word_cnt);
offset += mb->un.varDmp.word_cnt;
} while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
lpfc_parse_vpd(phba, lpfc_vpd_data, offset);
kfree(lpfc_vpd_data);
-out_free_context2:
- kfree(pmb->context2);
out_free_mbox:
mempool_free(pmb, phba->mbox_mem_pool);
return 0;
}
-/* Completion handler for config async event mailbox command. */
+/**
+ * lpfc_config_async_cmpl: Completion handler for config async event mbox cmd.
+ * @phba: pointer to lpfc hba data structure.
+ * @pmboxq: pointer to the driver internal queue element for mailbox command.
+ *
+ * This is the completion handler for the driver's configure asynchronous event
+ * mailbox command to the device. If the mailbox command returns successfully,
+ * it will set the internal async event support flag to 1; otherwise, it will
+ * set the internal async event support flag to 0.
+ **/
static void
lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
{
@@ -226,16 +235,19 @@ lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
return;
}
-/************************************************************************/
-/* */
-/* lpfc_config_port_post */
-/* This routine will do LPFC initialization after the */
-/* CONFIG_PORT mailbox command. This will be initialized */
-/* as a SLI layer callback routine. */
-/* This routine returns 0 on success. Any other return value */
-/* indicates an error. */
-/* */
-/************************************************************************/
+/**
+ * lpfc_config_port_post: Perform lpfc initialization after config port.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine will do LPFC initialization after the CONFIG_PORT mailbox
+ * command call. It performs all internal resource and state setups on the
+ * port: post IOCB buffers, enable appropriate host interrupt attentions,
+ * ELS ring timers, etc.
+ *
+ * Return codes
+ * 0 - success.
+ * Any other value - error.
+ **/
int
lpfc_config_port_post(struct lpfc_hba *phba)
{
@@ -378,6 +390,29 @@ lpfc_config_port_post(struct lpfc_hba *phba)
if (phba->sli_rev != 3)
lpfc_post_rcv_buf(phba);
+ /*
+ * Configure HBA MSI-X attention conditions to messages if MSI-X mode
+ */
+ if (phba->intr_type == MSIX) {
+ rc = lpfc_config_msi(phba, pmb);
+ if (rc) {
+ mempool_free(pmb, phba->mbox_mem_pool);
+ return -EIO;
+ }
+ rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
+ if (rc != MBX_SUCCESS) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+ "0352 Config MSI mailbox command "
+ "failed, mbxCmd x%x, mbxStatus x%x\n",
+ pmb->mb.mbxCommand, pmb->mb.mbxStatus);
+ mempool_free(pmb, phba->mbox_mem_pool);
+ return -EIO;
+ }
+ }
+
+ /* Initialize ERATT handling flag */
+ phba->hba_flag &= ~HBA_ERATT_HANDLED;
+
/* Enable appropriate host interrupts */
spin_lock_irq(&phba->hbalock);
status = readl(phba->HCregaddr);
@@ -393,26 +428,26 @@ lpfc_config_port_post(struct lpfc_hba *phba)
if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
(phba->cfg_poll & DISABLE_FCP_RING_INT))
- status &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
+ status &= ~(HC_R0INT_ENA);
writel(status, phba->HCregaddr);
readl(phba->HCregaddr); /* flush */
spin_unlock_irq(&phba->hbalock);
- /*
- * Setup the ring 0 (els) timeout handler
- */
- timeout = phba->fc_ratov << 1;
+ /* Set up ring-0 (ELS) timer */
+ timeout = phba->fc_ratov * 2;
mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout);
+ /* Set up heart beat (HB) timer */
mod_timer(&phba->hb_tmofunc, jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
phba->hb_outstanding = 0;
phba->last_completion_time = jiffies;
+ /* Set up error attention (ERATT) polling timer */
+ mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);
lpfc_init_link(phba, pmb, phba->cfg_topology, phba->cfg_link_speed);
pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
- pmb->vport = vport;
- rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
lpfc_set_loopback_flag(phba);
+ rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
if (rc != MBX_SUCCESS) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0454 Adapter failed to init, mbxCmd x%x "
@@ -447,19 +482,20 @@ lpfc_config_port_post(struct lpfc_hba *phba)
rc);
mempool_free(pmb, phba->mbox_mem_pool);
}
- return (0);
+ return 0;
}
-/************************************************************************/
-/* */
-/* lpfc_hba_down_prep */
-/* This routine will do LPFC uninitialization before the */
-/* HBA is reset when bringing down the SLI Layer. This will be */
-/* initialized as a SLI layer callback routine. */
-/* This routine returns 0 on success. Any other return value */
-/* indicates an error. */
-/* */
-/************************************************************************/
+/**
+ * lpfc_hba_down_prep: Perform lpfc uninitialization prior to HBA reset.
+ * @phba: pointer to lpfc HBA data structure.
+ *
+ * This routine will do LPFC uninitialization before the HBA is reset when
+ * bringing down the SLI Layer.
+ *
+ * Return codes
+ * 0 - success.
+ * Any other value - error.
+ **/
int
lpfc_hba_down_prep(struct lpfc_hba *phba)
{
@@ -481,15 +517,17 @@ lpfc_hba_down_prep(struct lpfc_hba *phba)
return 0;
}
-/************************************************************************/
-/* */
-/* lpfc_hba_down_post */
-/* This routine will do uninitialization after the HBA is reset */
-/* when bringing down the SLI Layer. */
-/* This routine returns 0 on success. Any other return value */
-/* indicates an error. */
-/* */
-/************************************************************************/
+/**
+ * lpfc_hba_down_post: Perform lpfc uninitialization after HBA reset.
+ * @phba: pointer to lpfc HBA data structure.
+ *
+ * This routine will do uninitialization after the HBA is reset when bringing
+ * down the SLI Layer.
+ *
+ * Return codes
+ * 0 - success.
+ * Any other value - error.
+ **/
int
lpfc_hba_down_post(struct lpfc_hba *phba)
{
@@ -548,7 +586,18 @@ lpfc_hba_down_post(struct lpfc_hba *phba)
return 0;
}
-/* HBA heart beat timeout handler */
+/**
+ * lpfc_hb_timeout: The HBA-timer timeout handler.
+ * @ptr: unsigned long holds the pointer to lpfc hba data structure.
+ *
+ * This is the HBA-timer timeout handler registered to the lpfc driver. When
+ * this timer fires, a HBA timeout event shall be posted to the lpfc driver
+ * work-port-events bitmap and the worker thread is notified. This timeout
+ * event will be used by the worker thread to invoke the actual timeout
+ * handler routine, lpfc_hb_timeout_handler. Any periodic operations will
+ * be performed in the timeout handler and the HBA timeout event bit shall
+ * be cleared by the worker thread after it has taken the event bitmap out.
+ **/
static void
lpfc_hb_timeout(unsigned long ptr)
{
@@ -557,17 +606,36 @@ lpfc_hb_timeout(unsigned long ptr)
unsigned long iflag;
phba = (struct lpfc_hba *)ptr;
+
+ /* Check for heart beat timeout conditions */
spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
if (!tmo_posted)
phba->pport->work_port_events |= WORKER_HB_TMO;
spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
+ /* Tell the worker thread there is work to do */
if (!tmo_posted)
lpfc_worker_wake_up(phba);
return;
}
+/**
+ * lpfc_hb_mbox_cmpl: The lpfc heart-beat mailbox command callback function.
+ * @phba: pointer to lpfc hba data structure.
+ * @pmboxq: pointer to the driver internal queue element for mailbox command.
+ *
+ * This is the callback function to the lpfc heart-beat mailbox command.
+ * If configured, the lpfc driver issues the heart-beat mailbox command to
+ * the HBA every LPFC_HB_MBOX_INTERVAL (currently 5) seconds. At the time the
+ * heart-beat mailbox command is issued, the driver shall set up the heart-beat
+ * timeout timer to LPFC_HB_MBOX_TIMEOUT (currently 30) seconds and mark the
+ * heart-beat outstanding state. Once the mailbox command comes back and
+ * no error conditions are detected, the heart-beat mailbox command timer is
+ * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding
+ * state is cleared for the next heart-beat. If the timer expired with the
+ * heart-beat outstanding state set, the driver will put the HBA offline.
+ **/
static void
lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
{
@@ -577,6 +645,7 @@ lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
phba->hb_outstanding = 0;
spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
+	/* Check and reset heart-beat timer if necessary */
mempool_free(pmboxq, phba->mbox_mem_pool);
if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
!(phba->link_state == LPFC_HBA_ERROR) &&
@@ -586,6 +655,22 @@ lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
return;
}
+/**
+ * lpfc_hb_timeout_handler: The HBA-timer timeout handler.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This is the actual HBA-timer timeout handler to be invoked by the worker
+ * thread whenever the HBA timer fired and HBA-timeout event posted. This
+ * handler performs any periodic operations needed for the device. If such a
+ * periodic event has already been attended to either in the interrupt handler
+ * or by processing slow-ring or fast-ring events within the HBA-timer
+ * timeout window (LPFC_HB_MBOX_INTERVAL), this handler simply resets
+ * the timer for the next timeout period. If the lpfc heart-beat mailbox
+ * command is configured and there is no heart-beat mailbox command
+ * outstanding, a heart-beat mailbox is issued and the timer is set properly.
+ * Otherwise, if there has been a heart-beat mailbox command outstanding, the
+ * HBA shall be put offline.
+ **/
void
lpfc_hb_timeout_handler(struct lpfc_hba *phba)
{
@@ -684,6 +769,13 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
}
}
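
[Editor's note] The heart-beat scheme described in the two kernel-doc blocks above is a timer that posts a work bit from interrupt context and a worker that either issues the mailbox or escalates. Below is a compact, hedged sketch of that control flow; every name is hypothetical, locking is omitted, and issue_hb_mbox()/take_hba_offline() are placeholders rather than real driver routines.

#include <linux/timer.h>
#include <linux/wait.h>
#include <linux/bitops.h>
#include <linux/jiffies.h>

#define WORK_HB_TMO	0	/* hypothetical work-event bit */
#define HB_INTERVAL	5	/* seconds, like LPFC_HB_MBOX_INTERVAL */

struct hba_ctx {
	unsigned long work_events;
	wait_queue_head_t worker_wq;
	struct timer_list hb_timer;
	int hb_outstanding;	/* heart-beat mailbox still on the wire? */
};

static void take_hba_offline(struct hba_ctx *hba);	/* placeholder */
static int issue_hb_mbox(struct hba_ctx *hba);		/* placeholder */

/* Timer context: only post the event and wake the worker thread. */
static void hb_timeout(unsigned long ptr)
{
	struct hba_ctx *hba = (struct hba_ctx *)ptr;

	if (!test_and_set_bit(WORK_HB_TMO, &hba->work_events))
		wake_up_interruptible(&hba->worker_wq);
}

/* Worker context: escalate if the previous heart-beat never completed,
 * otherwise issue a new one and re-arm the timer. */
static void hb_timeout_handler(struct hba_ctx *hba)
{
	if (hba->hb_outstanding) {
		take_hba_offline(hba);
		return;
	}
	if (issue_hb_mbox(hba) == 0)
		hba->hb_outstanding = 1;
	mod_timer(&hba->hb_timer, jiffies + HB_INTERVAL * HZ);
}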
+/**
+ * lpfc_offline_eratt: Bring lpfc offline on hardware error attention.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is called to bring the HBA offline when HBA hardware error
+ * other than Port Error 6 has been detected.
+ **/
static void
lpfc_offline_eratt(struct lpfc_hba *phba)
{
@@ -704,14 +796,16 @@ lpfc_offline_eratt(struct lpfc_hba *phba)
return;
}
-/************************************************************************/
-/* */
-/* lpfc_handle_eratt */
-/* This routine will handle processing a Host Attention */
-/* Error Status event. This will be initialized */
-/* as a SLI layer callback routine. */
-/* */
-/************************************************************************/
+/**
+ * lpfc_handle_eratt: The HBA hardware error handler.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to handle the following HBA hardware error
+ * conditions:
+ * 1 - HBA error attention interrupt
+ * 2 - DMA ring index out of range
+ * 3 - Mailbox command came back as unknown
+ **/
void
lpfc_handle_eratt(struct lpfc_hba *phba)
{
@@ -722,6 +816,7 @@ lpfc_handle_eratt(struct lpfc_hba *phba)
unsigned long temperature;
struct temp_event temp_event_data;
struct Scsi_Host *shost;
+ struct lpfc_board_event_header board_event;
/* If the pci channel is offline, ignore possible errors,
* since we cannot communicate with the pci card anyway. */
@@ -731,6 +826,16 @@ lpfc_handle_eratt(struct lpfc_hba *phba)
if (!phba->cfg_enable_hba_reset)
return;
+ /* Send an internal error event to mgmt application */
+ board_event.event_type = FC_REG_BOARD_EVENT;
+ board_event.subcategory = LPFC_EVENT_PORTINTERR;
+ shost = lpfc_shost_from_vport(phba->pport);
+ fc_host_post_vendor_event(shost, fc_get_event_number(),
+ sizeof(board_event),
+ (char *) &board_event,
+ SCSI_NL_VID_TYPE_PCI
+ | PCI_VENDOR_ID_EMULEX);
+
if (phba->work_hs & HS_FFER6) {
/* Re-establishing Link */
lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
@@ -771,7 +876,7 @@ lpfc_handle_eratt(struct lpfc_hba *phba)
temp_event_data.data = (uint32_t)temperature;
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0459 Adapter maximum temperature exceeded "
+ "0406 Adapter maximum temperature exceeded "
"(%ld), taking this port offline "
"Data: x%x x%x x%x\n",
temperature, phba->work_hs,
@@ -791,8 +896,8 @@ lpfc_handle_eratt(struct lpfc_hba *phba)
} else {
/* The if clause above forces this code path when the status
- * failure is a value other than FFER6. Do not call the offline
- * twice. This is the adapter hardware error path.
+ * failure is a value other than FFER6. Do not call the offline
+ * twice. This is the adapter hardware error path.
*/
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0457 Adapter Hardware Error "
@@ -808,16 +913,16 @@ lpfc_handle_eratt(struct lpfc_hba *phba)
lpfc_offline_eratt(phba);
}
+ return;
}
-/************************************************************************/
-/* */
-/* lpfc_handle_latt */
-/* This routine will handle processing a Host Attention */
-/* Link Status event. This will be initialized */
-/* as a SLI layer callback routine. */
-/* */
-/************************************************************************/
+/**
+ * lpfc_handle_latt: The HBA link event handler.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked from the worker thread to handle a HBA host
+ * attention link event.
+ **/
void
lpfc_handle_latt(struct lpfc_hba *phba)
{
@@ -898,12 +1003,20 @@ lpfc_handle_latt_err_exit:
return;
}
-/************************************************************************/
-/* */
-/* lpfc_parse_vpd */
-/* This routine will parse the VPD data */
-/* */
-/************************************************************************/
+/**
+ * lpfc_parse_vpd: Parse VPD (Vital Product Data).
+ * @phba: pointer to lpfc hba data structure.
+ * @vpd: pointer to the vital product data.
+ * @len: length of the vital product data in bytes.
+ *
+ * This routine parses the Vital Product Data (VPD). The VPD is treated as
+ * an array of characters. In this routine, the ModelName, ProgramType, and
+ * ModelDesc, etc. fields of the phba data structure will be populated.
+ *
+ * Return codes
+ * 0 - pointer to the VPD passed in is NULL
+ * 1 - success
+ **/
static int
lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
{
@@ -1040,12 +1153,25 @@ lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
return(1);
}
+/**
+ * lpfc_get_hba_model_desc: Retrieve HBA device model name and description.
+ * @phba: pointer to lpfc hba data structure.
+ * @mdp: pointer to the data structure to hold the derived model name.
+ * @descp: pointer to the data structure to hold the derived description.
+ *
+ * This routine retrieves HBA's description based on its registered PCI device
+ * ID. The @descp passed into this function points to an array of 256 chars. It
+ * shall be returned with the model name, maximum speed, and the host bus type.
+ * The @mdp passed into this function points to an array of 80 chars. When the
+ * function returns, the @mdp will be filled with the model name.
+ **/
static void
lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
{
lpfc_vpd_t *vp;
uint16_t dev_id = phba->pcidev->device;
int max_speed;
+ int GE = 0;
struct {
char * name;
int max_speed;
@@ -1177,6 +1303,19 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
case PCI_DEVICE_ID_SAT_S:
m = (typeof(m)){"LPe12000-S", max_speed, "PCIe"};
break;
+ case PCI_DEVICE_ID_HORNET:
+ m = (typeof(m)){"LP21000", max_speed, "PCIe"};
+ GE = 1;
+ break;
+ case PCI_DEVICE_ID_PROTEUS_VF:
+ m = (typeof(m)) {"LPev12000", max_speed, "PCIe IOV"};
+ break;
+ case PCI_DEVICE_ID_PROTEUS_PF:
+ m = (typeof(m)) {"LPev12000", max_speed, "PCIe IOV"};
+ break;
+ case PCI_DEVICE_ID_PROTEUS_S:
+ m = (typeof(m)) {"LPemv12002-S", max_speed, "PCIe IOV"};
+ break;
default:
m = (typeof(m)){ NULL };
break;
@@ -1186,18 +1325,25 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
snprintf(mdp, 79,"%s", m.name);
if (descp && descp[0] == '\0')
snprintf(descp, 255,
- "Emulex %s %dGb %s Fibre Channel Adapter",
- m.name, m.max_speed, m.bus);
+ "Emulex %s %d%s %s %s",
+ m.name, m.max_speed,
+ (GE) ? "GE" : "Gb",
+ m.bus,
+ (GE) ? "FCoE Adapter" : "Fibre Channel Adapter");
}
-/**************************************************/
-/* lpfc_post_buffer */
-/* */
-/* This routine will post count buffers to the */
-/* ring with the QUE_RING_BUF_CN command. This */
-/* allows 3 buffers / command to be posted. */
-/* Returns the number of buffers NOT posted. */
-/**************************************************/
+/**
+ * lpfc_post_buffer: Post IOCB(s) with DMA buffer descriptor(s) to an IOCB ring.
+ * @phba: pointer to lpfc hba data structure.
+ * @pring: pointer to an IOCB ring.
+ * @cnt: the number of IOCBs to be posted to the IOCB ring.
+ *
+ * This routine posts a given number of IOCBs with the associated DMA buffer
+ * descriptors specified by the cnt argument to the given IOCB ring.
+ *
+ * Return codes
+ * The number of IOCBs NOT able to be posted to the IOCB ring.
+ **/
int
lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
{
@@ -1287,12 +1433,17 @@ lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
return 0;
}
-/************************************************************************/
-/* */
-/* lpfc_post_rcv_buf */
-/* This routine post initial rcv buffers to the configured rings */
-/* */
-/************************************************************************/
+/**
+ * lpfc_post_rcv_buf: Post the initial receive IOCB buffers to ELS ring.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine posts initial receive IOCB buffers to the ELS ring. The
+ * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is
+ * set to 64 IOCBs.
+ *
+ * Return codes
+ * 0 - success (currently always success)
+ **/
static int
lpfc_post_rcv_buf(struct lpfc_hba *phba)
{
@@ -1307,11 +1458,13 @@ lpfc_post_rcv_buf(struct lpfc_hba *phba)
#define S(N,V) (((V)<<(N))|((V)>>(32-(N))))
-/************************************************************************/
-/* */
-/* lpfc_sha_init */
-/* */
-/************************************************************************/
+/**
+ * lpfc_sha_init: Set up initial array of hash table entries.
+ * @HashResultPointer: pointer to an array as hash table.
+ *
+ * This routine sets up the initial values to the array of hash table entries
+ * for the LC HBAs.
+ **/
static void
lpfc_sha_init(uint32_t * HashResultPointer)
{
@@ -1322,11 +1475,16 @@ lpfc_sha_init(uint32_t * HashResultPointer)
HashResultPointer[4] = 0xC3D2E1F0;
}
-/************************************************************************/
-/* */
-/* lpfc_sha_iterate */
-/* */
-/************************************************************************/
+/**
+ * lpfc_sha_iterate: Iterate initial hash table with the working hash table.
+ * @HashResultPointer: pointer to an initial/result hash table.
+ * @HashWorkingPointer: pointer to a working hash table.
+ *
+ * This routine iterates an initial hash table pointed by @HashResultPointer
+ * with the values from the working hash table pointed to by
+ * @HashWorkingPointer. The results are put back into the initial hash table,
+ * returned through @HashResultPointer as the result hash table.
+ **/
static void
lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
{
@@ -1374,22 +1532,29 @@ lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
}
-/************************************************************************/
-/* */
-/* lpfc_challenge_key */
-/* */
-/************************************************************************/
+/**
+ * lpfc_challenge_key: Create challenge key based on WWPN of the HBA.
+ * @RandomChallenge: pointer to the entry of host challenge random number array.
+ * @HashWorking: pointer to the entry of the working hash array.
+ *
+ * This routine calculates the working hash array referred by @HashWorking
+ * from the challenge random numbers associated with the host, referred by
+ * @RandomChallenge. The result is put into the entry of the working hash
+ * array and returned by reference through @HashWorking.
+ **/
static void
lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
{
*HashWorking = (*RandomChallenge ^ *HashWorking);
}
-/************************************************************************/
-/* */
-/* lpfc_hba_init */
-/* */
-/************************************************************************/
+/**
+ * lpfc_hba_init: Perform special handling for LC HBA initialization.
+ * @phba: pointer to lpfc hba data structure.
+ * @hbainit: pointer to an array of unsigned 32-bit integers.
+ *
+ * This routine performs the special handling for LC HBA initialization.
+ **/
void
lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
{
@@ -1412,6 +1577,15 @@ lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
kfree(HashWorking);
}
+/**
+ * lpfc_cleanup: Performs vport cleanups before deleting a vport.
+ * @vport: pointer to a virtual N_Port data structure.
+ *
+ * This routine performs the necessary cleanups before deleting the @vport.
+ * It invokes the discovery state machine to perform necessary state
+ * transitions and to release the ndlps associated with the @vport. Note,
+ * the physical port is treated as @vport 0.
+ **/
void
lpfc_cleanup(struct lpfc_vport *vport)
{
@@ -1459,14 +1633,6 @@ lpfc_cleanup(struct lpfc_vport *vport)
lpfc_disc_state_machine(vport, ndlp, NULL,
NLP_EVT_DEVICE_RM);
- /* nlp_type zero is not defined, nlp_flag zero also not defined,
- * nlp_state is unused, this happens when
- * an initiator has logged
- * into us so cleanup this ndlp.
- */
- if ((ndlp->nlp_type == 0) && (ndlp->nlp_flag == 0) &&
- (ndlp->nlp_state == 0))
- lpfc_nlp_put(ndlp);
}
/* At this point, ALL ndlp's should be gone
@@ -1482,7 +1648,7 @@ lpfc_cleanup(struct lpfc_vport *vport)
&vport->fc_nodes, nlp_listp) {
lpfc_printf_vlog(ndlp->vport, KERN_ERR,
LOG_NODE,
- "0282: did:x%x ndlp:x%p "
+ "0282 did:x%x ndlp:x%p "
"usgmap:x%x refcnt:%d\n",
ndlp->nlp_DID, (void *)ndlp,
ndlp->nlp_usg_map,
@@ -1498,6 +1664,14 @@ lpfc_cleanup(struct lpfc_vport *vport)
return;
}
+/**
+ * lpfc_stop_vport_timers: Stop all the timers associated with a vport.
+ * @vport: pointer to a virtual N_Port data structure.
+ *
+ * This routine stops all the timers associated with a @vport. This function
+ * is invoked before disabling or deleting a @vport. Note that the physical
+ * port is treated as @vport 0.
+ **/
void
lpfc_stop_vport_timers(struct lpfc_vport *vport)
{
@@ -1507,6 +1681,13 @@ lpfc_stop_vport_timers(struct lpfc_vport *vport)
return;
}
+/**
+ * lpfc_stop_phba_timers: Stop all the timers associated with an HBA.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine stops all the timers associated with a HBA. This function is
+ * invoked before either putting a HBA offline or unloading the driver.
+ **/
static void
lpfc_stop_phba_timers(struct lpfc_hba *phba)
{
@@ -1516,9 +1697,20 @@ lpfc_stop_phba_timers(struct lpfc_hba *phba)
del_timer_sync(&phba->fabric_block_timer);
phba->hb_outstanding = 0;
del_timer_sync(&phba->hb_tmofunc);
+ del_timer_sync(&phba->eratt_poll);
return;
}
+/**
+ * lpfc_block_mgmt_io: Mark a HBA's management interface as blocked.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine marks a HBA's management interface as blocked. Once the HBA's
+ * management interface is marked as blocked, all user space access to
+ * the HBA, whether from the sysfs interface or the libdfc interface, will
+ * be blocked. The HBA is set to block the management interface when the
+ * driver prepares the HBA interface for online or offline.
+ **/
static void
lpfc_block_mgmt_io(struct lpfc_hba * phba)
{
@@ -1529,6 +1721,18 @@ lpfc_block_mgmt_io(struct lpfc_hba * phba)
spin_unlock_irqrestore(&phba->hbalock, iflag);
}
+/**
+ * lpfc_online: Initialize and bring a HBA online.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine initializes the HBA and brings a HBA online. During this
+ * process, the management interface is blocked to prevent user space access
+ * to the HBA interfering with the driver initialization.
+ *
+ * Return codes
+ * 0 - successful
+ * 1 - failed
+ **/
int
lpfc_online(struct lpfc_hba *phba)
{
@@ -1574,6 +1778,17 @@ lpfc_online(struct lpfc_hba *phba)
return 0;
}
+/**
+ * lpfc_unblock_mgmt_io: Mark a HBA's management interface to be not blocked.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine marks a HBA's management interface as not blocked. Once the
+ * HBA's management interface is marked as not blocked, all user space
+ * access to the HBA, whether from the sysfs interface or the libdfc
+ * interface, will be allowed. The HBA is set to block the management interface
+ * when the driver prepares the HBA interface for online or offline and then
+ * set to unblock the management interface afterwards.
+ **/
void
lpfc_unblock_mgmt_io(struct lpfc_hba * phba)
{
@@ -1584,6 +1799,14 @@ lpfc_unblock_mgmt_io(struct lpfc_hba * phba)
spin_unlock_irqrestore(&phba->hbalock, iflag);
}
+/**
+ * lpfc_offline_prep: Prepare a HBA to be brought offline.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to prepare a HBA to be brought offline. It performs
+ * unregistration of the login for all the nodes on all vports and flushes the
+ * mailbox queue to make the HBA ready to be brought offline.
+ **/
void
lpfc_offline_prep(struct lpfc_hba * phba)
{
@@ -1633,6 +1856,14 @@ lpfc_offline_prep(struct lpfc_hba * phba)
lpfc_sli_flush_mbox_queue(phba);
}
+/**
+ * lpfc_offline: Bring a HBA offline.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine actually brings a HBA offline. It stops all the timers
+ * associated with the HBA, brings down the SLI layer, and eventually
+ * marks the HBA as in offline state for the upper layer protocol.
+ **/
void
lpfc_offline(struct lpfc_hba *phba)
{
@@ -1670,12 +1901,17 @@ lpfc_offline(struct lpfc_hba *phba)
lpfc_destroy_vport_work_array(phba, vports);
}
-/******************************************************************************
-* Function name: lpfc_scsi_free
-*
-* Description: Called from lpfc_pci_remove_one free internal driver resources
-*
-******************************************************************************/
+/**
+ * lpfc_scsi_free: Free all the SCSI buffers and IOCBs from driver lists.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is to free all the SCSI buffers and IOCBs from the driver
+ * list back to the kernel. It is called from lpfc_pci_remove_one to free
+ * the internal resources before the device is removed from the system.
+ *
+ * Return codes
+ * 0 - successful (for now, it always returns 0)
+ **/
static int
lpfc_scsi_free(struct lpfc_hba *phba)
{
@@ -1704,6 +1940,22 @@ lpfc_scsi_free(struct lpfc_hba *phba)
return 0;
}
+/**
+ * lpfc_create_port: Create an FC port.
+ * @phba: pointer to lpfc hba data structure.
+ * @instance: a unique integer ID to this FC port.
+ * @dev: pointer to the device data structure.
+ *
+ * This routine creates a FC port for the upper layer protocol. The FC port
+ * can be created on top of either a physical port or a virtual port provided
+ * by the HBA. This routine also allocates a SCSI host data structure (shost)
+ * and associates it with the FC port created before adding the shost into
+ * the SCSI layer.
+ *
+ * Return codes
+ * @vport - pointer to the virtual N_Port data structure.
+ * NULL - port create failed.
+ **/
struct lpfc_vport *
lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
{
@@ -1777,6 +2029,13 @@ out:
return NULL;
}
+/**
+ * destroy_port: Destroy an FC port.
+ * @vport: pointer to an lpfc virtual N_Port data structure.
+ *
+ * This routine destroys a FC port from the upper layer protocol. All the
+ * resources associated with the port are released.
+ **/
void
destroy_port(struct lpfc_vport *vport)
{
@@ -1797,6 +2056,16 @@ destroy_port(struct lpfc_vport *vport)
return;
}
+/**
+ * lpfc_get_instance: Get a unique integer ID.
+ *
+ * This routine allocates a unique integer ID from lpfc_hba_index pool. It
+ * uses the kernel idr facility to perform the task.
+ *
+ * Return codes:
+ * instance - a unique integer ID allocated as the new instance.
+ * -1 - lpfc get instance failed.
+ **/
int
lpfc_get_instance(void)
{
@@ -1810,11 +2079,21 @@ lpfc_get_instance(void)
return instance;
}
-/*
- * Note: there is no scan_start function as adapter initialization
- * will have asynchronously kicked off the link initialization.
- */
-
+/**
+ * lpfc_scan_finished: method for SCSI layer to detect whether scan is done.
+ * @shost: pointer to SCSI host data structure.
+ * @time: elapsed time of the scan in jiffies.
+ *
+ * This routine is called by the SCSI layer with a SCSI host to determine
+ * whether the host scan is finished.
+ *
+ * Note: there is no scan_start function as adapter initialization will have
+ * asynchronously kicked off the link initialization.
+ *
+ * Return codes
+ * 0 - SCSI host scan is not over yet.
+ * 1 - SCSI host scan is over.
+ **/
int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
@@ -1858,6 +2137,13 @@ finished:
return stat;
}
+/**
+ * lpfc_host_attrib_init: Initialize SCSI host attributes on a FC port.
+ * @shost: pointer to SCSI host data structure.
+ *
+ * This routine initializes a given SCSI host's attributes on a FC port. The
+ * SCSI host can be either on top of a physical port or a virtual port.
+ **/
void lpfc_host_attrib_init(struct Scsi_Host *shost)
{
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
@@ -1906,42 +2192,157 @@ void lpfc_host_attrib_init(struct Scsi_Host *shost)
spin_unlock_irq(shost->host_lock);
}
+/**
+ * lpfc_enable_msix: Enable MSI-X interrupt mode.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to enable the MSI-X interrupt vectors. The kernel
+ * function pci_enable_msix() is called to enable the MSI-X vectors. Note that
+ * pci_enable_msix(), once invoked, enables either all or nothing, depending
+ * on the current availability of PCI vector resources. The device driver is
+ * responsible for calling the individual request_irq() to register each MSI-X
+ * vector with an interrupt handler, which is done in this function. Note that
+ * later when device is unloading, the driver should always call free_irq()
+ * on all MSI-X vectors it has done request_irq() on before calling
+ * pci_disable_msix(). Failure to do so results in a BUG_ON() and the device
+ * will be left with MSI-X enabled and leak its vectors.
+ *
+ * Return codes
+ * 0 - successful
+ * other values - error
+ **/
static int
lpfc_enable_msix(struct lpfc_hba *phba)
{
- int error;
+ int rc, i;
+ LPFC_MBOXQ_t *pmb;
- phba->msix_entries[0].entry = 0;
- phba->msix_entries[0].vector = 0;
+ /* Set up MSI-X multi-message vectors */
+ for (i = 0; i < LPFC_MSIX_VECTORS; i++)
+ phba->msix_entries[i].entry = i;
- error = pci_enable_msix(phba->pcidev, phba->msix_entries,
+ /* Configure MSI-X capability structure */
+ rc = pci_enable_msix(phba->pcidev, phba->msix_entries,
ARRAY_SIZE(phba->msix_entries));
- if (error) {
+ if (rc) {
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"0420 Enable MSI-X failed (%d), continuing "
- "with MSI\n", error);
- pci_disable_msix(phba->pcidev);
- return error;
+ "with MSI\n", rc);
+ goto msi_fail_out;
+ } else
+ for (i = 0; i < LPFC_MSIX_VECTORS; i++)
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "0477 MSI-X entry[%d]: vector=x%x "
+ "message=%d\n", i,
+ phba->msix_entries[i].vector,
+ phba->msix_entries[i].entry);
+ /*
+ * Assign MSI-X vectors to interrupt handlers
+ */
+
+ /* vector-0 is associated to slow-path handler */
+ rc = request_irq(phba->msix_entries[0].vector, &lpfc_sp_intr_handler,
+ IRQF_SHARED, LPFC_SP_DRIVER_HANDLER_NAME, phba);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0421 MSI-X slow-path request_irq failed "
+ "(%d), continuing with MSI\n", rc);
+ goto msi_fail_out;
}
- error = request_irq(phba->msix_entries[0].vector, lpfc_intr_handler, 0,
- LPFC_DRIVER_NAME, phba);
- if (error) {
+ /* vector-1 is associated to fast-path handler */
+ rc = request_irq(phba->msix_entries[1].vector, &lpfc_fp_intr_handler,
+ IRQF_SHARED, LPFC_FP_DRIVER_HANDLER_NAME, phba);
+
+ if (rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0421 MSI-X request_irq failed (%d), "
- "continuing with MSI\n", error);
- pci_disable_msix(phba->pcidev);
+ "0429 MSI-X fast-path request_irq failed "
+ "(%d), continuing with MSI\n", rc);
+ goto irq_fail_out;
}
- return error;
+
+ /*
+ * Configure HBA MSI-X attention conditions to messages
+ */
+ pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+
+ if (!pmb) {
+ rc = -ENOMEM;
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0474 Unable to allocate memory for issuing "
+ "MBOX_CONFIG_MSI command\n");
+ goto mem_fail_out;
+ }
+ rc = lpfc_config_msi(phba, pmb);
+ if (rc)
+ goto mbx_fail_out;
+ rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
+ if (rc != MBX_SUCCESS) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+ "0351 Config MSI mailbox command failed, "
+ "mbxCmd x%x, mbxStatus x%x\n",
+ pmb->mb.mbxCommand, pmb->mb.mbxStatus);
+ goto mbx_fail_out;
+ }
+
+ /* Free memory allocated for mailbox command */
+ mempool_free(pmb, phba->mbox_mem_pool);
+ return rc;
+
+mbx_fail_out:
+ /* Free memory allocated for mailbox command */
+ mempool_free(pmb, phba->mbox_mem_pool);
+
+mem_fail_out:
+ /* free the irq already requested */
+ free_irq(phba->msix_entries[1].vector, phba);
+
+irq_fail_out:
+ /* free the irq already requested */
+ free_irq(phba->msix_entries[0].vector, phba);
+
+msi_fail_out:
+ /* Unconfigure MSI-X capability structure */
+ pci_disable_msix(phba->pcidev);
+ return rc;
}
+/**
+ * lpfc_disable_msix: Disable MSI-X interrupt mode.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to release the MSI-X vectors and then disable the
+ * MSI-X interrupt mode.
+ **/
static void
lpfc_disable_msix(struct lpfc_hba *phba)
{
- free_irq(phba->msix_entries[0].vector, phba);
+ int i;
+
+ /* Free up MSI-X multi-message vectors */
+ for (i = 0; i < LPFC_MSIX_VECTORS; i++)
+ free_irq(phba->msix_entries[i].vector, phba);
+ /* Disable MSI-X */
pci_disable_msix(phba->pcidev);
}
+/**
+ * lpfc_pci_probe_one: lpfc PCI probe func to register device to PCI subsystem.
+ * @pdev: pointer to PCI device
+ * @pid: pointer to PCI device identifier
+ *
+ * This routine is to be registered to the kernel's PCI subsystem. When an
+ * Emulex HBA is presented on the PCI bus, the kernel PCI subsystem looks at
+ * the PCI device-specific information of the device and checks whether the
+ * driver states that it can support this kind of device. If the match is
+ * successful, the driver core invokes this routine. If this routine
+ * determines it can claim the HBA, it does all the initialization that it
+ * needs to do to handle the HBA properly.
+ *
+ * Return code
+ * 0 - driver can claim the device
+ * negative value - driver can not claim the device
+ **/
static int __devinit
lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
{
@@ -1956,6 +2357,7 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
int i, hbq_count;
uint16_t iotag;
int bars = pci_select_bars(pdev, IORESOURCE_MEM);
+ struct lpfc_adapter_event_header adapter_event;
if (pci_enable_device_mem(pdev))
goto out;
@@ -1966,6 +2368,7 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
if (!phba)
goto out_release_regions;
+ atomic_set(&phba->fast_event_count, 0);
spin_lock_init(&phba->hbalock);
/* Initialize ndlp management spinlock */
@@ -1978,6 +2381,7 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
goto out_free_phba;
INIT_LIST_HEAD(&phba->port_list);
+ init_waitqueue_head(&phba->wait_4_mlo_m_q);
/*
* Get all the module params for configuring this host and then
* establish the host.
@@ -2000,6 +2404,9 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
init_timer(&phba->fabric_block_timer);
phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
phba->fabric_block_timer.data = (unsigned long) phba;
+ init_timer(&phba->eratt_poll);
+ phba->eratt_poll.function = lpfc_poll_eratt;
+ phba->eratt_poll.data = (unsigned long) phba;
pci_set_master(pdev);
pci_try_set_mwi(pdev);
@@ -2019,7 +2426,7 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
bar2map_len = pci_resource_len(phba->pcidev, 2);
/* Map HBA SLIM to a kernel virtual address. */
- phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
+ phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
if (!phba->slim_memmap_p) {
error = -ENODEV;
dev_printk(KERN_ERR, &pdev->dev,
@@ -2037,12 +2444,18 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
}
/* Allocate memory for SLI-2 structures */
- phba->slim2p = dma_alloc_coherent(&phba->pcidev->dev, SLI2_SLIM_SIZE,
- &phba->slim2p_mapping, GFP_KERNEL);
- if (!phba->slim2p)
+ phba->slim2p.virt = dma_alloc_coherent(&phba->pcidev->dev,
+ SLI2_SLIM_SIZE,
+ &phba->slim2p.phys,
+ GFP_KERNEL);
+ if (!phba->slim2p.virt)
goto out_iounmap;
- memset(phba->slim2p, 0, SLI2_SLIM_SIZE);
+ memset(phba->slim2p.virt, 0, SLI2_SLIM_SIZE);
+ phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
+ phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
+ phba->IOCBs = (phba->slim2p.virt +
+ offsetof(struct lpfc_sli2_slim, IOCBs));
phba->hbqslimp.virt = dma_alloc_coherent(&phba->pcidev->dev,
lpfc_sli_hbq_size(),
@@ -2111,7 +2524,7 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
phba->fc_arbtov = FF_DEF_ARBTOV;
INIT_LIST_HEAD(&phba->work_list);
- phba->work_ha_mask = (HA_ERATT|HA_MBATT|HA_LATT);
+ phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));
/* Initialize the wait queue head for the kernel thread */
@@ -2146,21 +2559,42 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
pci_set_drvdata(pdev, shost);
phba->intr_type = NONE;
+ phba->MBslimaddr = phba->slim_memmap_p;
+ phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
+ phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
+ phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
+ phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
+
+ /* Configure and enable interrupt */
if (phba->cfg_use_msi == 2) {
- error = lpfc_enable_msix(phba);
- if (!error)
- phba->intr_type = MSIX;
+ /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
+ error = lpfc_sli_config_port(phba, 3);
+ if (error)
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "0427 Firmware not capable of SLI 3 mode.\n");
+ else {
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "0426 Firmware capable of SLI 3 mode.\n");
+ /* Now, try to enable MSI-X interrupt mode */
+ error = lpfc_enable_msix(phba);
+ if (!error) {
+ phba->intr_type = MSIX;
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "0430 enable MSI-X mode.\n");
+ }
+ }
}
/* Fallback to MSI if MSI-X initialization failed */
if (phba->cfg_use_msi >= 1 && phba->intr_type == NONE) {
retval = pci_enable_msi(phba->pcidev);
- if (!retval)
+ if (!retval) {
phba->intr_type = MSI;
- else
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "0452 Enable MSI failed, continuing "
- "with IRQ\n");
+ "0473 enable MSI mode.\n");
+ } else
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "0452 enable IRQ mode.\n");
}
/* MSI-X is the only case the doesn't need to call request_irq */
@@ -2176,18 +2610,16 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
phba->intr_type = INTx;
}
- phba->MBslimaddr = phba->slim_memmap_p;
- phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
- phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
- phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
- phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
-
if (lpfc_alloc_sysfs_attr(vport)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "1476 Failed to allocate sysfs attr\n");
error = -ENOMEM;
goto out_free_irq;
}
if (lpfc_sli_hba_setup(phba)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "1477 Failed to set up hba\n");
error = -ENODEV;
goto out_remove_device;
}
@@ -2206,6 +2638,16 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
spin_unlock_irq(shost->host_lock);
}
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "0428 Perform SCSI scan\n");
+ /* Send board arrival event to upper layer */
+ adapter_event.event_type = FC_REG_ADAPTER_EVENT;
+ adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
+ fc_host_post_vendor_event(shost, fc_get_event_number(),
+ sizeof(adapter_event),
+ (char *) &adapter_event,
+ SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
+
scsi_scan_host(shost);
return 0;
@@ -2238,11 +2680,11 @@ out_free_iocbq:
}
lpfc_mem_free(phba);
out_free_hbqslimp:
- dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), phba->hbqslimp.virt,
- phba->hbqslimp.phys);
+ dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
+ phba->hbqslimp.virt, phba->hbqslimp.phys);
out_free_slim:
- dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, phba->slim2p,
- phba->slim2p_mapping);
+ dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
+ phba->slim2p.virt, phba->slim2p.phys);
out_iounmap:
iounmap(phba->ctrl_regs_memmap_p);
out_iounmap_slim:
@@ -2262,6 +2704,14 @@ out:
return error;
}
+/**
+ * lpfc_pci_remove_one: lpfc PCI func to unregister device from PCI subsystem.
+ * @pdev: pointer to PCI device
+ *
+ * This routine is to be registered to the kernel's PCI subsystem. When an
+ * Emulex HBA is removed from the PCI bus, it performs all the necessary
+ * cleanup for the HBA device to be removed from the PCI subsystem properly.
+ **/
static void __devexit
lpfc_pci_remove_one(struct pci_dev *pdev)
{
@@ -2316,12 +2766,12 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
lpfc_scsi_free(phba);
lpfc_mem_free(phba);
- dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), phba->hbqslimp.virt,
- phba->hbqslimp.phys);
+ dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
+ phba->hbqslimp.virt, phba->hbqslimp.phys);
/* Free resources associated with SLI2 interface */
dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
- phba->slim2p, phba->slim2p_mapping);
+ phba->slim2p.virt, phba->slim2p.phys);
/* unmap adapter SLIM and Control Registers */
iounmap(phba->ctrl_regs_memmap_p);
@@ -2336,13 +2786,21 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
}
/**
- * lpfc_io_error_detected - called when PCI error is detected
- * @pdev: Pointer to PCI device
- * @state: The current pci conneection state
+ * lpfc_io_error_detected: Driver method for handling PCI I/O error detected.
+ * @pdev: pointer to PCI device.
+ * @state: the current PCI connection state.
*
- * This function is called after a PCI bus error affecting
- * this device has been detected.
- */
+ * This routine is registered to the PCI subsystem for error handling. This
+ * function is called by the PCI subsystem after a PCI bus error affecting
+ * this device has been detected. When this function is invoked, it will
+ * need to stop all the I/Os and interrupt(s) to the device. Once that is
+ * done, it will return PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to
+ * perform proper recovery as desired.
+ *
+ * Return codes
+ * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
+ * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
+ **/
static pci_ers_result_t lpfc_io_error_detected(struct pci_dev *pdev,
pci_channel_state_t state)
{
@@ -2351,8 +2809,15 @@ static pci_ers_result_t lpfc_io_error_detected(struct pci_dev *pdev,
struct lpfc_sli *psli = &phba->sli;
struct lpfc_sli_ring *pring;
- if (state == pci_channel_io_perm_failure)
+ if (state == pci_channel_io_perm_failure) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0472 PCI channel I/O permanent failure\n");
+ /* Block all SCSI devices' I/Os on the host */
+ lpfc_scsi_dev_block(phba);
+ /* Clean up all driver's outstanding SCSI I/Os */
+ lpfc_sli_flush_fcp_rings(phba);
return PCI_ERS_RESULT_DISCONNECT;
+ }
pci_disable_device(pdev);
/*
@@ -2376,10 +2841,21 @@ static pci_ers_result_t lpfc_io_error_detected(struct pci_dev *pdev,
}
/**
- * lpfc_io_slot_reset - called after the pci bus has been reset.
- * @pdev: Pointer to PCI device
+ * lpfc_io_slot_reset: Restart a PCI device from scratch.
+ * @pdev: pointer to PCI device.
+ *
+ * This routine is registered to the PCI subsystem for error handling. This is
+ * called after PCI bus has been reset to restart the PCI card from scratch,
+ * as if from a cold-boot. During the PCI subsystem error recovery, after the
+ * driver returns PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform
+ * proper error recovery and then call this routine before calling the .resume
+ * method to recover the device. This function will initialize the HBA device,
+ * enable the interrupt, but it will just put the HBA to offline state without
+ * passing any I/O traffic.
*
- * Restart the card from scratch, as if from a cold-boot.
+ * Return codes
+ * PCI_ERS_RESULT_RECOVERED - the device has been recovered
+ * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
*/
static pci_ers_result_t lpfc_io_slot_reset(struct pci_dev *pdev)
{
@@ -2404,20 +2880,34 @@ static pci_ers_result_t lpfc_io_slot_reset(struct pci_dev *pdev)
/* Enable configured interrupt method */
phba->intr_type = NONE;
if (phba->cfg_use_msi == 2) {
- error = lpfc_enable_msix(phba);
- if (!error)
- phba->intr_type = MSIX;
+ /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
+ error = lpfc_sli_config_port(phba, 3);
+ if (error)
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "0478 Firmware not capable of SLI 3 mode.\n");
+ else {
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "0479 Firmware capable of SLI 3 mode.\n");
+ /* Now, try to enable MSI-X interrupt mode */
+ error = lpfc_enable_msix(phba);
+ if (!error) {
+ phba->intr_type = MSIX;
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "0480 enable MSI-X mode.\n");
+ }
+ }
}
/* Fallback to MSI if MSI-X initialization failed */
if (phba->cfg_use_msi >= 1 && phba->intr_type == NONE) {
retval = pci_enable_msi(phba->pcidev);
- if (!retval)
+ if (!retval) {
phba->intr_type = MSI;
- else
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "0470 Enable MSI failed, continuing "
- "with IRQ\n");
+ "0481 enable MSI mode.\n");
+ } else
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "0470 enable IRQ mode.\n");
}
/* MSI-X is the only case the doesn't need to call request_irq */
@@ -2440,11 +2930,13 @@ static pci_ers_result_t lpfc_io_slot_reset(struct pci_dev *pdev)
}
/**
- * lpfc_io_resume - called when traffic can start flowing again.
- * @pdev: Pointer to PCI device
+ * lpfc_io_resume: Resume PCI I/O operation.
+ * @pdev: pointer to PCI device
*
- * This callback is called when the error recovery driver tells us that
- * its OK to resume normal operation.
+ * This routine is registered to the PCI subsystem for error handling. It is
+ * called when kernel error recovery tells the lpfc driver that it is ok to
+ * resume normal PCI operation after PCI bus error recovery. After this call,
+ * traffic can start to flow from this device again.
*/
static void lpfc_io_resume(struct pci_dev *pdev)
{
@@ -2491,6 +2983,8 @@ static struct pci_device_id lpfc_id_table[] = {
PCI_ANY_ID, PCI_ANY_ID, },
{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR,
PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HORNET,
+ PCI_ANY_ID, PCI_ANY_ID, },
{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_SCSP,
PCI_ANY_ID, PCI_ANY_ID, },
{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_DCSP,
@@ -2521,6 +3015,12 @@ static struct pci_device_id lpfc_id_table[] = {
PCI_ANY_ID, PCI_ANY_ID, },
{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_S,
PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_VF,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_PF,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_S,
+ PCI_ANY_ID, PCI_ANY_ID, },
{ 0 }
};
@@ -2540,6 +3040,18 @@ static struct pci_driver lpfc_driver = {
.err_handler = &lpfc_err_handler,
};
+/**
+ * lpfc_init: lpfc module initialization routine.
+ *
+ * This routine is to be invoked when the lpfc module is loaded into the
+ * kernel. The special kernel macro module_init() is used to indicate the
+ * role of this routine to the kernel as lpfc module entry point.
+ *
+ * Return codes
+ * 0 - successful
+ * -ENOMEM - FC attach transport failed
+ * all others - failed
+ */
static int __init
lpfc_init(void)
{
@@ -2567,12 +3079,20 @@ lpfc_init(void)
error = pci_register_driver(&lpfc_driver);
if (error) {
fc_release_transport(lpfc_transport_template);
- fc_release_transport(lpfc_vport_transport_template);
+ if (lpfc_enable_npiv)
+ fc_release_transport(lpfc_vport_transport_template);
}
return error;
}
+/**
+ * lpfc_exit: lpfc module removal routine.
+ *
+ * This routine is invoked when the lpfc module is removed from the kernel.
+ * The special kernel macro module_exit() is used to indicate the role of
+ * this routine to the kernel as lpfc module exit point.
+ */
static void __exit
lpfc_exit(void)
{
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index 7a9be4c5b7c..7465fe746fe 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2007 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2008 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -30,6 +30,7 @@
#include "lpfc_hw.h"
#include "lpfc_sli.h"
+#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
@@ -37,10 +38,20 @@
#include "lpfc_crtn.h"
#include "lpfc_compat.h"
-/**********************************************/
-
-/* mailbox command */
-/**********************************************/
+/**
+ * lpfc_dump_mem: Prepare a mailbox command for retrieving HBA's VPD memory.
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ * @offset: offset for dumping VPD memory mailbox command.
+ *
+ * The dump mailbox command provides a method for the device driver to obtain
+ * various types of information from the HBA device.
+ *
+ * This routine prepares the mailbox command for dumping HBA Vital Product
+ * Data (VPD) memory. This mailbox command is to be used for retrieving a
+ * portion (DMP_RSP_SIZE bytes) of a HBA's VPD from the HBA at an address
+ * offset specified by the offset parameter.
+ **/
void
lpfc_dump_mem(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, uint16_t offset)
{
@@ -65,10 +76,17 @@ lpfc_dump_mem(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, uint16_t offset)
return;
}
-/**********************************************/
-/* lpfc_read_nv Issue a READ NVPARAM */
-/* mailbox command */
-/**********************************************/
+/**
+ * lpfc_read_nv: Prepare a mailbox command for reading HBA's NVRAM param.
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * The read NVRAM mailbox command returns the HBA's non-volatile parameters
+ * that are used as defaults when the Fibre Channel link is brought on-line.
+ *
+ * This routine prepares the mailbox command for reading information stored
+ * in the HBA's NVRAM. Specifically, the HBA's WWNN and WWPN.
+ **/
void
lpfc_read_nv(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
@@ -81,10 +99,19 @@ lpfc_read_nv(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
return;
}
-/**********************************************/
-/* lpfc_config_async Issue a */
-/* MBX_ASYNC_EVT_ENABLE mailbox command */
-/**********************************************/
+/**
+ * lpfc_config_async: Prepare a mailbox command for enabling HBA async event.
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ * @ring: ring number for the asynchronous event to be configured.
+ *
+ * The asynchronous event enable mailbox command is used to enable the
+ * asynchronous event posting via the ASYNC_STATUS_CN IOCB response and
+ * specifies the default ring to which events are posted.
+ *
+ * This routine prepares the mailbox command for enabling HBA asynchronous
+ * event support on an IOCB ring.
+ **/
void
lpfc_config_async(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb,
uint32_t ring)
@@ -99,10 +126,19 @@ lpfc_config_async(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb,
return;
}
-/**********************************************/
-/* lpfc_heart_beat Issue a HEART_BEAT */
-/* mailbox command */
-/**********************************************/
+/**
+ * lpfc_heart_beat: Prepare a mailbox command for heart beat.
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * The heart beat mailbox command is used to detect an unresponsive HBA, which
+ * is defined as any device where no error attention is sent and both mailbox
+ * and rings are not processed.
+ *
+ * This routine prepares the mailbox command for issuing a heart beat in the
+ * form of mailbox command to the HBA. The timely completion of the heart
+ * beat mailbox command indicates the health of the HBA.
+ **/
void
lpfc_heart_beat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
@@ -115,10 +151,26 @@ lpfc_heart_beat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
return;
}
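/*
 * Illustrative sketch, not part of this commit: a mailbox command prepared by
 * a routine such as lpfc_heart_beat() is typically allocated from
 * phba->mbox_mem_pool and handed to lpfc_sli_issue_mbox(), much as
 * lpfc_enable_msix() in lpfc_init.c above does for MBOX_CONFIG_MSI. Polled
 * issue mode is used here only for brevity; the driver normally issues the
 * heart beat asynchronously with a completion handler.
 */
static int lpfc_example_issue_heart_beat(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmb;
	int rc;

	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb)
		return -ENOMEM;

	lpfc_heart_beat(phba, pmb);	/* fill in the HEART_BEAT mailbox */
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	mempool_free(pmb, phba->mbox_mem_pool);
	return (rc == MBX_SUCCESS) ? 0 : -EIO;
}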
-/**********************************************/
-/* lpfc_read_la Issue a READ LA */
-/* mailbox command */
-/**********************************************/
+/**
+ * lpfc_read_la: Prepare a mailbox command for reading HBA link attention.
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ * @mp: DMA buffer memory for reading the link attention information into.
+ *
+ * The read link attention mailbox command is issued to read the Link Event
+ * Attention information indicated by the HBA port when the Link Event bit
+ * of the Host Attention (HSTATT) register is set to 1. A Link Event
+ * Attention occurs based on an exception detected at the Fibre Channel link
+ * interface.
+ *
+ * This routine prepares the mailbox command for reading HBA link attention
+ * information. A DMA memory has been set aside and address passed to the
+ * HBA through @mp for the HBA to DMA link attention information into the
+ * memory as part of the execution of the mailbox command.
+ *
+ * Return codes
+ * 0 - Success (currently always returns 0)
+ **/
int
lpfc_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, struct lpfc_dmabuf *mp)
{
@@ -143,10 +195,21 @@ lpfc_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, struct lpfc_dmabuf *mp)
return (0);
}
-/**********************************************/
-/* lpfc_clear_la Issue a CLEAR LA */
-/* mailbox command */
-/**********************************************/
+/**
+ * lpfc_clear_la: Prepare a mailbox command for clearing HBA link attention.
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * The clear link attention mailbox command is issued to clear the link event
+ * attention condition indicated by the Link Event bit of the Host Attention
+ * (HSTATT) register. The link event attention condition is cleared only if
+ * the event tag specified matches that of the current link event counter.
+ * The current event tag is read using the read link attention event mailbox
+ * command.
+ *
+ * This routine prepares the mailbox command for clearing HBA link attention
+ * information.
+ **/
void
lpfc_clear_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
@@ -161,10 +224,20 @@ lpfc_clear_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
return;
}
-/**************************************************/
-/* lpfc_config_link Issue a CONFIG LINK */
-/* mailbox command */
-/**************************************************/
+/**
+ * lpfc_config_link: Prepare a mailbox command for configuring link on a HBA.
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * The configure link mailbox command is used before the initialize link
+ * mailbox command to override default value and to configure link-oriented
+ * parameters such as DID address and various timers. Typically, this
+ * command would be used after an F_Port login to set the returned DID address
+ * and the fabric timeout values. This command is not valid before a configure
+ * port command has configured the HBA port.
+ *
+ * This routine prepares the mailbox command for configuring link on a HBA.
+ **/
void
lpfc_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
@@ -199,10 +272,98 @@ lpfc_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
return;
}
-/**********************************************/
-/* lpfc_init_link Issue an INIT LINK */
-/* mailbox command */
-/**********************************************/
+/**
+ * lpfc_config_msi: Prepare a mailbox command for configuring msi-x.
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * The configure MSI-X mailbox command is used to configure the HBA's SLI-3
+ * MSI-X multi-message interrupt vector association to interrupt attention
+ * conditions.
+ *
+ * Return codes
+ * 0 - Success
+ * -EINVAL - Failure
+ **/
+int
+lpfc_config_msi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+ MAILBOX_t *mb = &pmb->mb;
+ uint32_t attentionConditions[2];
+
+ /* Sanity check */
+ if (phba->cfg_use_msi != 2) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0475 Not configured for supporting MSI-X "
+ "cfg_use_msi: 0x%x\n", phba->cfg_use_msi);
+ return -EINVAL;
+ }
+
+ if (phba->sli_rev < 3) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0476 HBA not supporting SLI-3 or later "
+ "SLI Revision: 0x%x\n", phba->sli_rev);
+ return -EINVAL;
+ }
+
+ /* Clear mailbox command fields */
+ memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
+
+ /*
+ * SLI-3, Message Signaled Interrupt Feature.
+ */
+
+ /* Multi-message attention configuration */
+ attentionConditions[0] = (HA_R0ATT | HA_R1ATT | HA_R2ATT | HA_ERATT |
+ HA_LATT | HA_MBATT);
+ attentionConditions[1] = 0;
+
+ mb->un.varCfgMSI.attentionConditions[0] = attentionConditions[0];
+ mb->un.varCfgMSI.attentionConditions[1] = attentionConditions[1];
+
+ /*
+ * Set up message number to HA bit association
+ */
+#ifdef __BIG_ENDIAN_BITFIELD
+ /* RA0 (FCP Ring) */
+ mb->un.varCfgMSI.messageNumberByHA[HA_R0_POS] = 1;
+ /* RA1 (Other Protocol Extra Ring) */
+ mb->un.varCfgMSI.messageNumberByHA[HA_R1_POS] = 1;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ /* RA0 (FCP Ring) */
+ mb->un.varCfgMSI.messageNumberByHA[HA_R0_POS^3] = 1;
+ /* RA1 (Other Protocol Extra Ring) */
+ mb->un.varCfgMSI.messageNumberByHA[HA_R1_POS^3] = 1;
+#endif
+ /* Multi-message interrupt autoclear configuration*/
+ mb->un.varCfgMSI.autoClearHA[0] = attentionConditions[0];
+ mb->un.varCfgMSI.autoClearHA[1] = attentionConditions[1];
+
+ /* For now, HBA autoclear does not work reliably, disable it */
+ mb->un.varCfgMSI.autoClearHA[0] = 0;
+ mb->un.varCfgMSI.autoClearHA[1] = 0;
+
+ /* Set command and owner bit */
+ mb->mbxCommand = MBX_CONFIG_MSI;
+ mb->mbxOwner = OWN_HOST;
+
+ return 0;
+}
+
+/**
+ * lpfc_init_link: Prepare a mailbox command for initialize link on a HBA.
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ * @topology: the link topology for the link to be initialized to.
+ * @linkspeed: the link speed for the link to be initialized to.
+ *
+ * The initialize link mailbox command is used to initialize the Fibre
+ * Channel link. This command must follow a configure port command that
+ * establishes the mode of operation.
+ *
+ * This routine prepares the mailbox command for initializing link on a HBA
+ * with the specified link topology and speed.
+ **/
void
lpfc_init_link(struct lpfc_hba * phba,
LPFC_MBOXQ_t * pmb, uint32_t topology, uint32_t linkspeed)
@@ -269,10 +430,27 @@ lpfc_init_link(struct lpfc_hba * phba,
return;
}
-/**********************************************/
-/* lpfc_read_sparam Issue a READ SPARAM */
-/* mailbox command */
-/**********************************************/
+/**
+ * lpfc_read_sparam: Prepare a mailbox command for reading HBA parameters.
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ * @vpi: virtual N_Port identifier.
+ *
+ * The read service parameter mailbox command is used to read the HBA port
+ * service parameters. The service parameters are read into the buffer
+ * specified directly by a BDE in the mailbox command. These service
+ * parameters may then be used to build the payload of an N_Port/F_Port
+ * login request and reply (LOGI/ACC).
+ *
+ * This routine prepares the mailbox command for reading HBA port service
+ * parameters. The DMA memory is allocated in this function and the addresses
+ * are populated into the mailbox command for the HBA to DMA the service
+ * parameters into.
+ *
+ * Return codes
+ * 0 - Success
+ * 1 - DMA memory allocation failed
+ **/
int
lpfc_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, int vpi)
{
@@ -312,10 +490,21 @@ lpfc_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, int vpi)
return (0);
}
-/********************************************/
-/* lpfc_unreg_did Issue a UNREG_DID */
-/* mailbox command */
-/********************************************/
+/**
+ * lpfc_unreg_did: Prepare a mailbox command for unregistering DID.
+ * @phba: pointer to lpfc hba data structure.
+ * @vpi: virtual N_Port identifier.
+ * @did: remote port identifier.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * The unregister DID mailbox command is used to unregister an N_Port/F_Port
+ * login for an unknown RPI by specifying the DID of a remote port. This
+ * command frees an RPI context in the HBA port. This has the effect of
+ * performing an implicit N_Port/F_Port logout.
+ *
+ * This routine prepares the mailbox command for unregistering a remote
+ * N_Port/F_Port (DID) login.
+ **/
void
lpfc_unreg_did(struct lpfc_hba * phba, uint16_t vpi, uint32_t did,
LPFC_MBOXQ_t * pmb)
@@ -333,10 +522,19 @@ lpfc_unreg_did(struct lpfc_hba * phba, uint16_t vpi, uint32_t did,
return;
}
-/**********************************************/
-/* lpfc_read_nv Issue a READ CONFIG */
-/* mailbox command */
-/**********************************************/
+/**
+ * lpfc_read_config: Prepare a mailbox command for reading HBA configuration.
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * The read configuration mailbox command is used to read the HBA port
+ * configuration parameters. This mailbox command provides a method for
+ * seeing any parameters that may have changed via various configuration
+ * mailbox commands.
+ *
+ * This routine prepares the mailbox command for reading out HBA configuration
+ * parameters.
+ **/
void
lpfc_read_config(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
@@ -350,10 +548,18 @@ lpfc_read_config(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
return;
}
-/*************************************************/
-/* lpfc_read_lnk_stat Issue a READ LINK STATUS */
-/* mailbox command */
-/*************************************************/
+/**
+ * lpfc_read_lnk_stat: Prepare a mailbox command for reading HBA link stats.
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * The read link status mailbox command is used to read the link status from
+ * the HBA. Link status includes all link-related error counters. These
+ * counters are maintained by the HBA and originated in the link hardware
+ * unit. Note that all of these counters wrap.
+ *
+ * This routine prepares the mailbox command for reading out HBA link status.
+ **/
void
lpfc_read_lnk_stat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
@@ -367,10 +573,30 @@ lpfc_read_lnk_stat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
return;
}
-/********************************************/
-/* lpfc_reg_login Issue a REG_LOGIN */
-/* mailbox command */
-/********************************************/
+/**
+ * lpfc_reg_login: Prepare a mailbox command for registering remote login.
+ * @phba: pointer to lpfc hba data structure.
+ * @vpi: virtual N_Port identifier.
+ * @did: remote port identifier.
+ * @param: pointer to memory holding the service parameters.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ * @flag: action flag to be passed back for the complete function.
+ *
+ * The registration login mailbox command is used to register an N_Port or
+ * F_Port login. This registration allows the HBA to cache the remote N_Port
+ * service parameters internally and thereby make the appropriate FC-2
+ * decisions. The remote port service parameters are handed off by the driver
+ * to the HBA using a descriptor entry that directly identifies a buffer in
+ * host memory. In exchange, the HBA returns an RPI identifier.
+ *
+ * This routine prepares the mailbox command for registering remote port login.
+ * The function allocates DMA buffer for passing the service parameters to the
+ * HBA with the mailbox command.
+ *
+ * Return codes
+ * 0 - Success
+ * 1 - DMA memory allocation failed
+ **/
int
lpfc_reg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
uint8_t *param, LPFC_MBOXQ_t *pmb, uint32_t flag)
@@ -418,10 +644,20 @@ lpfc_reg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
return (0);
}
-/**********************************************/
-/* lpfc_unreg_login Issue a UNREG_LOGIN */
-/* mailbox command */
-/**********************************************/
+/**
+ * lpfc_unreg_login: Prepare a mailbox command for unregistering remote login.
+ * @phba: pointer to lpfc hba data structure.
+ * @vpi: virtual N_Port identifier.
+ * @rpi: remote port identifier
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * The unregistration login mailbox command is used to unregister an N_Port
+ * or F_Port login. This command frees an RPI context in the HBA. It has the
+ * effect of performing an implicit N_Port/F_Port logout.
+ *
+ * This routine prepares the mailbox command for unregistering remote port
+ * login.
+ **/
void
lpfc_unreg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t rpi,
LPFC_MBOXQ_t * pmb)
@@ -440,10 +676,21 @@ lpfc_unreg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t rpi,
return;
}
-/**************************************************/
-/* lpfc_reg_vpi Issue a REG_VPI */
-/* mailbox command */
-/**************************************************/
+/**
+ * lpfc_reg_vpi: Prepare a mailbox command for registering vport identifier.
+ * @phba: pointer to lpfc hba data structure.
+ * @vpi: virtual N_Port identifier.
+ * @sid: Fibre Channel S_ID (N_Port_ID assigned to a virtual N_Port).
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * The registration vport identifier mailbox command is used to activate a
+ * virtual N_Port after it has acquired an N_Port_ID. The HBA validates the
+ * N_Port_ID against the information in the selected virtual N_Port context
+ * block and marks it active to allow normal processing of IOCB commands and
+ * received unsolicited exchanges.
+ *
+ * This routine prepares the mailbox command for registering a virtual N_Port.
+ **/
void
lpfc_reg_vpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t sid,
LPFC_MBOXQ_t *pmb)
@@ -461,10 +708,22 @@ lpfc_reg_vpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t sid,
}
-/**************************************************/
-/* lpfc_unreg_vpi Issue a UNREG_VNPI */
-/* mailbox command */
-/**************************************************/
+/**
+ * lpfc_unreg_vpi: Prepare a mailbox command for unregistering vport id.
+ * @phba: pointer to lpfc hba data structure.
+ * @vpi: virtual N_Port identifier.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * The unregistration vport identifier mailbox command is used to inactivate
+ * a virtual N_Port. The driver must have logged out and unregistered all
+ * remote N_Ports to abort any activity on the virtual N_Port. The HBA will
+ * unregisters any default RPIs associated with the specified vpi, aborting
+ * any active exchanges. The HBA will post the mailbox response after making
+ * the virtual N_Port inactive.
+ *
+ * This routine prepares the mailbox command for unregistering a virtual
+ * N_Port.
+ **/
void
lpfc_unreg_vpi(struct lpfc_hba *phba, uint16_t vpi, LPFC_MBOXQ_t *pmb)
{
@@ -479,12 +738,19 @@ lpfc_unreg_vpi(struct lpfc_hba *phba, uint16_t vpi, LPFC_MBOXQ_t *pmb)
}
+/**
+ * lpfc_config_pcb_setup: Set up IOCB rings in the Port Control Block (PCB)
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine sets up and initializes the IOCB rings in the Port Control
+ * Block (PCB).
+ **/
static void
lpfc_config_pcb_setup(struct lpfc_hba * phba)
{
struct lpfc_sli *psli = &phba->sli;
struct lpfc_sli_ring *pring;
- PCB_t *pcbp = &phba->slim2p->pcb;
+ PCB_t *pcbp = phba->pcb;
dma_addr_t pdma_addr;
uint32_t offset;
uint32_t iocbCnt = 0;
@@ -513,29 +779,43 @@ lpfc_config_pcb_setup(struct lpfc_hba * phba)
continue;
}
/* Command ring setup for ring */
- pring->cmdringaddr = (void *) &phba->slim2p->IOCBs[iocbCnt];
+ pring->cmdringaddr = (void *)&phba->IOCBs[iocbCnt];
pcbp->rdsc[i].cmdEntries = pring->numCiocb;
- offset = (uint8_t *) &phba->slim2p->IOCBs[iocbCnt] -
- (uint8_t *) phba->slim2p;
- pdma_addr = phba->slim2p_mapping + offset;
+ offset = (uint8_t *) &phba->IOCBs[iocbCnt] -
+ (uint8_t *) phba->slim2p.virt;
+ pdma_addr = phba->slim2p.phys + offset;
pcbp->rdsc[i].cmdAddrHigh = putPaddrHigh(pdma_addr);
pcbp->rdsc[i].cmdAddrLow = putPaddrLow(pdma_addr);
iocbCnt += pring->numCiocb;
/* Response ring setup for ring */
- pring->rspringaddr = (void *) &phba->slim2p->IOCBs[iocbCnt];
+ pring->rspringaddr = (void *) &phba->IOCBs[iocbCnt];
pcbp->rdsc[i].rspEntries = pring->numRiocb;
- offset = (uint8_t *)&phba->slim2p->IOCBs[iocbCnt] -
- (uint8_t *)phba->slim2p;
- pdma_addr = phba->slim2p_mapping + offset;
+ offset = (uint8_t *)&phba->IOCBs[iocbCnt] -
+ (uint8_t *)phba->slim2p.virt;
+ pdma_addr = phba->slim2p.phys + offset;
pcbp->rdsc[i].rspAddrHigh = putPaddrHigh(pdma_addr);
pcbp->rdsc[i].rspAddrLow = putPaddrLow(pdma_addr);
iocbCnt += pring->numRiocb;
}
}
+/**
+ * lpfc_read_rev: Prepare a mailbox command for reading HBA revision.
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * The read revision mailbox command is used to read the revision levels of
+ * the HBA components. These components include hardware units, resident
+ * firmware, and available firmware. HBAs that support SLI-3 mode of
+ * operation provide different response information depending on the version
+ * requested by the driver.
+ *
+ * This routine prepares the mailbox command for reading HBA revision
+ * information.
+ **/
void
lpfc_read_rev(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
@@ -548,6 +828,16 @@ lpfc_read_rev(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
return;
}
+/**
+ * lpfc_build_hbq_profile2: Set up the HBQ Selection Profile 2.
+ * @hbqmb: pointer to the HBQ configuration data structure in mailbox command.
+ * @hbq_desc: pointer to the HBQ selection profile descriptor.
+ *
+ * The Host Buffer Queue (HBQ) Selection Profile 2 specifies that the HBA
+ * tests the incoming frames' R_CTL/TYPE fields with words 10:15 and performs
+ * the Sequence Length Test using the fields in the Selection Profile 2
+ * extension in words 20:31.
+ **/
static void
lpfc_build_hbq_profile2(struct config_hbq_var *hbqmb,
struct lpfc_hbq_init *hbq_desc)
@@ -557,6 +847,16 @@ lpfc_build_hbq_profile2(struct config_hbq_var *hbqmb,
hbqmb->profiles.profile2.seqlenoff = hbq_desc->seqlenoff;
}
+/**
+ * lpfc_build_hbq_profile3: Set up the HBQ Selection Profile 3.
+ * @hbqmb: pointer to the HBQ configuration data structure in mailbox command.
+ * @hbq_desc: pointer to the HBQ selection profile descriptor.
+ *
+ * The Host Buffer Queue (HBQ) Selection Profile 3 specifies that the HBA
+ * tests the incoming frame's R_CTL/TYPE fields with words 10:15 and performs
+ * the Sequence Length Test and Byte Field Test using the fields in the
+ * Selection Profile 3 extension in words 20:31.
+ **/
static void
lpfc_build_hbq_profile3(struct config_hbq_var *hbqmb,
struct lpfc_hbq_init *hbq_desc)
@@ -569,6 +869,17 @@ lpfc_build_hbq_profile3(struct config_hbq_var *hbqmb,
sizeof(hbqmb->profiles.profile3.cmdmatch));
}
+/**
+ * lpfc_build_hbq_profile5: Set up the HBQ Selection Profile 5.
+ * @hbqmb: pointer to the HBQ configuration data structure in mailbox command.
+ * @hbq_desc: pointer to the HBQ selection profile descriptor.
+ *
+ * The Host Buffer Queue (HBQ) Selection Profile 5 specifies a header HBQ. The
+ * HBA tests the initial frame of an incoming sequence using the frame's
+ * R_CTL/TYPE fields with words 10:15 and performs the Sequence Length Test
+ * and Byte Field Test using the fields in the Selection Profile 5 extension
+ * words 20:31.
+ **/
static void
lpfc_build_hbq_profile5(struct config_hbq_var *hbqmb,
struct lpfc_hbq_init *hbq_desc)
@@ -581,6 +892,20 @@ lpfc_build_hbq_profile5(struct config_hbq_var *hbqmb,
sizeof(hbqmb->profiles.profile5.cmdmatch));
}
+/**
+ * lpfc_config_hbq: Prepare a mailbox command for configuring an HBQ.
+ * @phba: pointer to lpfc hba data structure.
+ * @id: HBQ identifier.
+ * @hbq_desc: pointer to the HBA descriptor data structure.
+ * @hbq_entry_index: index of the HBQ entry data structures.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * The configure HBQ (Host Buffer Queue) mailbox command is used to configure
+ * an HBQ. The configuration binds events that require buffers to a particular
+ * ring and HBQ based on a selection profile.
+ *
+ * This routine prepares the mailbox command for configuring an HBQ.
+ **/
void
lpfc_config_hbq(struct lpfc_hba *phba, uint32_t id,
struct lpfc_hbq_init *hbq_desc,
@@ -641,8 +966,23 @@ lpfc_config_hbq(struct lpfc_hba *phba, uint32_t id,
return;
}
-
-
+/**
+ * lpfc_config_ring: Prepare a mailbox command for configuring an IOCB ring.
+ * @phba: pointer to lpfc hba data structure.
+ * @ring: ring number for the IOCB ring to be configured.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * The configure ring mailbox command is used to configure an IOCB ring. This
+ * configuration binds from one to six HBA R_CTL/TYPE mask entries to the
+ * ring. This is used to map incoming sequences to a particular ring whose
+ * R_CTL/TYPE mask entry matches that of the sequence. The driver should not
+ * attempt to configure a ring whose number is greater than the number
+ * specified in the Port Control Block (PCB). It is an error to issue the
+ * configure ring command more than once with the same ring number. The HBA
+ * returns an error if the driver attempts this.
+ *
+ * This routine prepares the mailbox command for configuring IOCB ring.
+ **/
void
lpfc_config_ring(struct lpfc_hba * phba, int ring, LPFC_MBOXQ_t * pmb)
{
@@ -684,6 +1024,20 @@ lpfc_config_ring(struct lpfc_hba * phba, int ring, LPFC_MBOXQ_t * pmb)
return;
}
+/**
+ * lpfc_config_port: Prepare a mailbox command for configuring port.
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * The configure port mailbox command is used to identify the Port Control
+ * Block (PCB) in the driver memory. After this command is issued, the
+ * driver must not access the mailbox in the HBA without first resetting
+ * the HBA. The HBA may copy the PCB information to internal storage for
+ * subsequent use; the driver can not change the PCB information unless it
+ * resets the HBA.
+ *
+ * This routine prepares the mailbox command for configuring port.
+ **/
void
lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
@@ -702,8 +1056,8 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
mb->un.varCfgPort.pcbLen = sizeof(PCB_t);
- offset = (uint8_t *)&phba->slim2p->pcb - (uint8_t *)phba->slim2p;
- pdma_addr = phba->slim2p_mapping + offset;
+ offset = (uint8_t *)phba->pcb - (uint8_t *)phba->slim2p.virt;
+ pdma_addr = phba->slim2p.phys + offset;
mb->un.varCfgPort.pcbLow = putPaddrLow(pdma_addr);
mb->un.varCfgPort.pcbHigh = putPaddrHigh(pdma_addr);
@@ -711,12 +1065,13 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
if (phba->sli_rev == 3 && phba->vpd.sli3Feat.cerbm) {
mb->un.varCfgPort.cerbm = 1; /* Request HBQs */
+ mb->un.varCfgPort.ccrp = 1; /* Command Ring Polling */
+ mb->un.varCfgPort.cinb = 1; /* Interrupt Notification Block */
mb->un.varCfgPort.max_hbq = lpfc_sli_hbq_count();
if (phba->max_vpi && phba->cfg_enable_npiv &&
phba->vpd.sli3Feat.cmv) {
mb->un.varCfgPort.max_vpi = phba->max_vpi;
mb->un.varCfgPort.cmv = 1;
- phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
} else
mb->un.varCfgPort.max_vpi = phba->max_vpi = 0;
} else
@@ -724,16 +1079,15 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
mb->un.varCfgPort.sli_mode = phba->sli_rev;
/* Now setup pcb */
- phba->slim2p->pcb.type = TYPE_NATIVE_SLI2;
- phba->slim2p->pcb.feature = FEATURE_INITIAL_SLI2;
+ phba->pcb->type = TYPE_NATIVE_SLI2;
+ phba->pcb->feature = FEATURE_INITIAL_SLI2;
/* Setup Mailbox pointers */
- phba->slim2p->pcb.mailBoxSize = offsetof(MAILBOX_t, us) +
- sizeof(struct sli2_desc);
- offset = (uint8_t *)&phba->slim2p->mbx - (uint8_t *)phba->slim2p;
- pdma_addr = phba->slim2p_mapping + offset;
- phba->slim2p->pcb.mbAddrHigh = putPaddrHigh(pdma_addr);
- phba->slim2p->pcb.mbAddrLow = putPaddrLow(pdma_addr);
+ phba->pcb->mailBoxSize = sizeof(MAILBOX_t);
+ offset = (uint8_t *)phba->mbox - (uint8_t *)phba->slim2p.virt;
+ pdma_addr = phba->slim2p.phys + offset;
+ phba->pcb->mbAddrHigh = putPaddrHigh(pdma_addr);
+ phba->pcb->mbAddrLow = putPaddrLow(pdma_addr);
/*
* Setup Host Group ring pointer.
@@ -794,13 +1148,13 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
}
/* mask off BAR0's flag bits 0 - 3 */
- phba->slim2p->pcb.hgpAddrLow = (bar_low & PCI_BASE_ADDRESS_MEM_MASK) +
- (void __iomem *) phba->host_gp -
+ phba->pcb->hgpAddrLow = (bar_low & PCI_BASE_ADDRESS_MEM_MASK) +
+ (void __iomem *)phba->host_gp -
(void __iomem *)phba->MBslimaddr;
if (bar_low & PCI_BASE_ADDRESS_MEM_TYPE_64)
- phba->slim2p->pcb.hgpAddrHigh = bar_high;
+ phba->pcb->hgpAddrHigh = bar_high;
else
- phba->slim2p->pcb.hgpAddrHigh = 0;
+ phba->pcb->hgpAddrHigh = 0;
/* write HGP data to SLIM at the required longword offset */
memset(&hgp, 0, sizeof(struct lpfc_hgp));
@@ -810,17 +1164,19 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
}
/* Setup Port Group ring pointer */
- if (phba->sli_rev == 3)
- pgp_offset = (uint8_t *)&phba->slim2p->mbx.us.s3_pgp.port -
- (uint8_t *)phba->slim2p;
- else
- pgp_offset = (uint8_t *)&phba->slim2p->mbx.us.s2.port -
- (uint8_t *)phba->slim2p;
-
- pdma_addr = phba->slim2p_mapping + pgp_offset;
- phba->slim2p->pcb.pgpAddrHigh = putPaddrHigh(pdma_addr);
- phba->slim2p->pcb.pgpAddrLow = putPaddrLow(pdma_addr);
- phba->hbq_get = &phba->slim2p->mbx.us.s3_pgp.hbq_get[0];
+ if (phba->sli3_options & LPFC_SLI3_INB_ENABLED) {
+ pgp_offset = offsetof(struct lpfc_sli2_slim,
+ mbx.us.s3_inb_pgp.port);
+ phba->hbq_get = phba->mbox->us.s3_inb_pgp.hbq_get;
+ } else if (phba->sli_rev == 3) {
+ pgp_offset = offsetof(struct lpfc_sli2_slim,
+ mbx.us.s3_pgp.port);
+ phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get;
+ } else
+ pgp_offset = offsetof(struct lpfc_sli2_slim, mbx.us.s2.port);
+ pdma_addr = phba->slim2p.phys + pgp_offset;
+ phba->pcb->pgpAddrHigh = putPaddrHigh(pdma_addr);
+ phba->pcb->pgpAddrLow = putPaddrLow(pdma_addr);
/* Use callback routine to setp rings in the pcb */
lpfc_config_pcb_setup(phba);
@@ -835,10 +1191,24 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
}
/* Swap PCB if needed */
- lpfc_sli_pcimem_bcopy(&phba->slim2p->pcb, &phba->slim2p->pcb,
- sizeof(PCB_t));
+ lpfc_sli_pcimem_bcopy(phba->pcb, phba->pcb, sizeof(PCB_t));
}
+/**
+ * lpfc_kill_board: Prepare a mailbox command for killing board.
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * The kill board mailbox command is used to tell firmware to perform a
+ * graceful shutdown of a channel on a specified board to prepare for reset.
+ * When the kill board mailbox command is received, the ER3 bit is set to 1
+ * in the Host Status register and the ER Attention bit is set to 1 in the
+ * Host Attention register of the HBA function that received the kill board
+ * command.
+ *
+ * This routine prepares the mailbox command for killing the board in
+ * preparation for a graceful shutdown.
+ **/
void
lpfc_kill_board(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
@@ -850,6 +1220,16 @@ lpfc_kill_board(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
return;
}
+/**
+ * lpfc_mbox_put: Put a mailbox cmd into the tail of driver's mailbox queue.
+ * @phba: pointer to lpfc hba data structure.
+ * @mbq: pointer to the driver internal queue element for mailbox command.
+ *
+ * The driver maintains an internal mailbox command queue implemented as a
+ * linked list. When a mailbox command is issued, it is put into the mailbox
+ * command queue so that commands are processed in order, as the HBA can
+ * process only one mailbox command at a time.
+ **/
void
lpfc_mbox_put(struct lpfc_hba * phba, LPFC_MBOXQ_t * mbq)
{
@@ -864,6 +1244,20 @@ lpfc_mbox_put(struct lpfc_hba * phba, LPFC_MBOXQ_t * mbq)
return;
}
+/**
+ * lpfc_mbox_get: Remove a mailbox cmd from the head of driver's mailbox queue.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * The driver maintains an internal mailbox command queue implemented as a
+ * linked list. When a mailbox command is issued, it is put into the mailbox
+ * command queue so that commands are processed in order, as the HBA can
+ * process only one mailbox command at a time. After the HBA finishes
+ * processing a mailbox command, the driver removes the next pending mailbox
+ * command from the head of the queue and sends it to the HBA for processing.
+ *
+ * Return codes
+ * pointer to the driver internal queue element for mailbox command.
+ **/
LPFC_MBOXQ_t *
lpfc_mbox_get(struct lpfc_hba * phba)
{
@@ -877,6 +1271,17 @@ lpfc_mbox_get(struct lpfc_hba * phba)
return mbq;
}
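/*
 * Illustrative sketch, not part of this commit: the single-outstanding
 * mailbox discipline described above amounts to queueing commands with
 * lpfc_mbox_put() and pulling the next one with lpfc_mbox_get() once the
 * previous command completes. The hbalock usage shown here is an assumption
 * for illustration only.
 */
static void lpfc_example_kick_next_mbox(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *next;

	spin_lock_irq(&phba->hbalock);
	next = lpfc_mbox_get(phba);	/* NULL when the queue is empty */
	spin_unlock_irq(&phba->hbalock);

	if (next)
		lpfc_sli_issue_mbox(phba, next, MBX_NOWAIT);
}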
+/**
+ * lpfc_mbox_cmpl_put: Put mailbox command into mailbox command complete list.
+ * @phba: pointer to lpfc hba data structure.
+ * @mbq: pointer to the driver internal queue element for mailbox command.
+ *
+ * This routine puts the completed mailbox command into the mailbox command
+ * complete list. This routine is called from driver interrupt handler
+ * context. The mailbox complete list is used by the driver worker thread
+ * to process mailbox complete callback functions outside the driver interrupt
+ * handler.
+ **/
void
lpfc_mbox_cmpl_put(struct lpfc_hba * phba, LPFC_MBOXQ_t * mbq)
{
@@ -887,6 +1292,17 @@ lpfc_mbox_cmpl_put(struct lpfc_hba * phba, LPFC_MBOXQ_t * mbq)
return;
}
+/**
+ * lpfc_mbox_tmo_val: Retrieve mailbox command timeout value.
+ * @phba: pointer to lpfc hba data structure.
+ * @cmd: mailbox command code.
+ *
+ * This routine retrieves the proper timeout value according to the mailbox
+ * command code.
+ *
+ * Return codes
+ * Timeout value to be used for the given mailbox command
+ **/
int
lpfc_mbox_tmo_val(struct lpfc_hba *phba, int cmd)
{
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index 3c0cebc7180..a4bba206924 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2006 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2008 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -30,6 +30,7 @@
#include "lpfc_hw.h"
#include "lpfc_sli.h"
+#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
@@ -39,7 +40,21 @@
#define LPFC_MEM_POOL_SIZE 64 /* max elem in non-DMA safety pool */
-
+/**
+ * lpfc_mem_alloc: create and allocate all PCI and memory pools
+ * @phba: HBA to allocate pools for
+ *
+ * Description: Creates and allocates PCI pools lpfc_scsi_dma_buf_pool,
+ * lpfc_mbuf_pool, lpfc_hbq_pool. Creates and allocates kmalloc-backed mempools
+ * for LPFC_MBOXQ_t and lpfc_nodelist. Also allocates the VPI bitmask.
+ *
+ * Notes: Not interrupt-safe. Must be called with no locks held. If any
+ * allocation fails, frees all successfully allocated memory before returning.
+ *
+ * Returns:
+ * 0 on success
+ * -ENOMEM on failure (if any memory allocations fail)
+ **/
int
lpfc_mem_alloc(struct lpfc_hba * phba)
{
@@ -120,6 +135,16 @@ lpfc_mem_alloc(struct lpfc_hba * phba)
return -ENOMEM;
}
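/*
 * Illustrative sketch, not part of this commit: the pools listed above are
 * built from the generic pci_pool/mempool facilities roughly as below. The
 * pool name, size and alignment values here are placeholders, not the
 * driver's actual parameters.
 */
static int lpfc_example_create_pools(struct lpfc_hba *phba)
{
	phba->lpfc_mbuf_pool = pci_pool_create("lpfc_example_pool",
					       phba->pcidev, 1024, 8, 0);
	if (!phba->lpfc_mbuf_pool)
		return -ENOMEM;

	phba->mbox_mem_pool = mempool_create_kmalloc_pool(LPFC_MEM_POOL_SIZE,
							  sizeof(LPFC_MBOXQ_t));
	if (!phba->mbox_mem_pool) {
		pci_pool_destroy(phba->lpfc_mbuf_pool);
		phba->lpfc_mbuf_pool = NULL;
		return -ENOMEM;
	}
	return 0;
}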
+/**
+ * lpfc_mem_free: Frees all PCI and memory allocated by lpfc_mem_alloc
+ * @phba: HBA to free memory for
+ *
+ * Description: Frees PCI pools lpfc_scsi_dma_buf_pool, lpfc_mbuf_pool,
+ * lpfc_hbq_pool. Frees kmalloc-backed mempools for LPFC_MBOXQ_t and
+ * lpfc_nodelist. Also frees the VPI bitmask.
+ *
+ * Returns: None
+ **/
void
lpfc_mem_free(struct lpfc_hba * phba)
{
@@ -181,12 +206,29 @@ lpfc_mem_free(struct lpfc_hba * phba)
phba->lpfc_scsi_dma_buf_pool = NULL;
phba->lpfc_mbuf_pool = NULL;
- /* Free the iocb lookup array */
+ /* Free the iocb lookup array */
kfree(psli->iocbq_lookup);
psli->iocbq_lookup = NULL;
-
}
+/**
+ * lpfc_mbuf_alloc: Allocate an mbuf from the lpfc_mbuf_pool PCI pool
+ * @phba: HBA which owns the pool to allocate from
+ * @mem_flags: indicates if this is a priority (MEM_PRI) allocation
+ * @handle: used to return the DMA-mapped address of the mbuf
+ *
+ * Description: Allocates a DMA-mapped buffer from the lpfc_mbuf_pool PCI pool.
+ * Allocates from generic pci_pool_alloc function first and if that fails and
+ * mem_flags has MEM_PRI set (the only defined flag), returns an mbuf from the
+ * HBA's pool.
+ *
+ * Notes: Not interrupt-safe. Must be called with no locks held. Takes
+ * phba->hbalock.
+ *
+ * Returns:
+ * pointer to the allocated mbuf on success
+ * NULL on failure
+ **/
void *
lpfc_mbuf_alloc(struct lpfc_hba *phba, int mem_flags, dma_addr_t *handle)
{
@@ -206,6 +248,20 @@ lpfc_mbuf_alloc(struct lpfc_hba *phba, int mem_flags, dma_addr_t *handle)
return ret;
}
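/*
 * Illustrative sketch, not part of this commit: callers pair
 * lpfc_mbuf_alloc() with lpfc_mbuf_free(), keeping the DMA address returned
 * through @handle so the buffer can be handed back to the pool later. MEM_PRI
 * permits the fallback to the safety pool described above and is passed here
 * only as an example.
 */
static void lpfc_example_mbuf_round_trip(struct lpfc_hba *phba)
{
	dma_addr_t phys;
	void *virt;

	virt = lpfc_mbuf_alloc(phba, MEM_PRI, &phys);
	if (!virt)
		return;
	/* ... use the DMA-mapped buffer (e.g. as a mailbox payload) ... */
	lpfc_mbuf_free(phba, virt, phys);
}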
+/**
+ * __lpfc_mbuf_free: Free an mbuf from the lpfc_mbuf_pool PCI pool (locked)
+ * @phba: HBA which owns the pool to return to
+ * @virt: mbuf to free
+ * @dma: the DMA-mapped address of the mbuf to be freed
+ *
+ * Description: Returns an lpfc_mbuf_pool mbuf to the lpfc_mbuf_safety_pool if
+ * it is below its max_count, frees the mbuf otherwise.
+ *
+ * Notes: Must be called with phba->hbalock held to synchronize access to
+ * lpfc_mbuf_safety_pool.
+ *
+ * Returns: None
+ **/
void
__lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma)
{
@@ -221,7 +277,21 @@ __lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma)
return;
}
+/**
+ * lpfc_mbuf_free: Free an mbuf from the lpfc_mbuf_pool PCI pool (unlocked)
+ * @phba: HBA which owns the pool to return to
+ * @virt: mbuf to free
+ * @dma: the DMA-mapped address of the lpfc_mbuf_pool to be freed
+ *
+ * Description: Returns the mbuf to the lpfc_mbuf_safety_pool if the safety pool
+ * is below its max_count; otherwise the mbuf is freed back to the lpfc_mbuf_pool.
+ *
+ * Notes: Takes phba->hbalock. Can be called with or without other locks held.
+ *
+ * Returns: None
+ **/
void
+
lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma)
{
unsigned long iflags;
@@ -232,6 +302,19 @@ lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma)
return;
}
+/**
+ * lpfc_els_hbq_alloc: Allocate an HBQ buffer
+ * @phba: HBA to allocate HBQ buffer for
+ *
+ * Description: Allocates a DMA-mapped HBQ buffer from the lpfc_hbq_pool PCI
+ * pool along with a non-DMA-mapped container for it.
+ *
+ * Notes: Not interrupt-safe. Must be called with no locks held.
+ *
+ * Returns:
+ * pointer to HBQ on success
+ * NULL on failure
+ **/
struct hbq_dmabuf *
lpfc_els_hbq_alloc(struct lpfc_hba *phba)
{
@@ -251,6 +334,18 @@ lpfc_els_hbq_alloc(struct lpfc_hba *phba)
return hbqbp;
}
+/**
+ * lpfc_els_hbq_free: Frees an HBQ buffer allocated with lpfc_els_hbq_alloc
+ * @phba: HBA buffer was allocated for
+ * @hbqbp: HBQ container returned by lpfc_els_hbq_alloc
+ *
+ * Description: Frees both the container and the DMA-mapped buffer returned by
+ * lpfc_els_hbq_alloc.
+ *
+ * Notes: Can be called with or without locks held.
+ *
+ * Returns: None
+ **/
void
lpfc_els_hbq_free(struct lpfc_hba *phba, struct hbq_dmabuf *hbqbp)
{
@@ -259,7 +354,18 @@ lpfc_els_hbq_free(struct lpfc_hba *phba, struct hbq_dmabuf *hbqbp)
return;
}
-/* This is ONLY called for the LPFC_ELS_HBQ */
+/**
+ * lpfc_in_buf_free: Free a DMA buffer
+ * @phba: HBA buffer is associated with
+ * @mp: Buffer to free
+ *
+ * Description: Frees the given DMA buffer in the appropriate way, depending on
+ * whether the HBA is running in SLI3 mode with HBQs enabled.
+ *
+ * Notes: Takes phba->hbalock. Can be called with or without other locks held.
+ *
+ * Returns: None
+ **/
void
lpfc_in_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp)
{
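For context on the two mbuf routines documented above: lpfc_mbuf_alloc() hands back both the kernel virtual address and the DMA handle of the buffer, and lpfc_mbuf_free() takes the same pair back. A minimal usage sketch, assuming process context with no locks held (the helper name and the payload step are illustrative, not part of the driver):

/* Illustrative sketch of the mbuf alloc/free pairing. */
static int example_use_mbuf(struct lpfc_hba *phba)
{
	dma_addr_t phys;
	void *virt;

	/* MEM_PRI permits a fallback to the mbuf safety pool if the PCI pool is empty */
	virt = lpfc_mbuf_alloc(phba, MEM_PRI, &phys);
	if (!virt)
		return -ENOMEM;

	/* ... build a mailbox or ELS payload at virt, hand phys to the HBA ... */

	lpfc_mbuf_free(phba, virt, phys);	/* takes phba->hbalock internally */
	return 0;
}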
diff --git a/drivers/scsi/lpfc/lpfc_nl.h b/drivers/scsi/lpfc/lpfc_nl.h
new file mode 100644
index 00000000000..1accb5a9f4e
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_nl.h
@@ -0,0 +1,163 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for *
+ * Fibre Channel Host Bus Adapters. *
+ * Copyright (C) 2008 Emulex. All rights reserved. *
+ * EMULEX and SLI are trademarks of Emulex. *
+ * www.emulex.com *
+ * *
+ * This program is free software; you can redistribute it and/or *
+ * modify it under the terms of version 2 of the GNU General *
+ * Public License as published by the Free Software Foundation. *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID. See the GNU General Public License for *
+ * more details, a copy of which can be found in the file COPYING *
+ * included with this package. *
+ *******************************************************************/
+
+/* Event definitions for RegisterForEvent */
+#define FC_REG_LINK_EVENT 0x0001 /* link up / down events */
+#define FC_REG_RSCN_EVENT 0x0002 /* RSCN events */
+#define FC_REG_CT_EVENT 0x0004 /* CT request events */
+#define FC_REG_DUMP_EVENT 0x0008 /* Dump events */
+#define FC_REG_TEMPERATURE_EVENT 0x0010 /* temperature events */
+#define FC_REG_ELS_EVENT 0x0020 /* lpfc els events */
+#define FC_REG_FABRIC_EVENT 0x0040 /* lpfc fabric events */
+#define FC_REG_SCSI_EVENT 0x0080 /* lpfc scsi events */
+#define FC_REG_BOARD_EVENT 0x0100 /* lpfc board events */
+#define FC_REG_ADAPTER_EVENT 0x0200 /* lpfc adapter events */
+#define FC_REG_EVENT_MASK (FC_REG_LINK_EVENT | \
+ FC_REG_RSCN_EVENT | \
+ FC_REG_CT_EVENT | \
+ FC_REG_DUMP_EVENT | \
+ FC_REG_TEMPERATURE_EVENT | \
+ FC_REG_ELS_EVENT | \
+ FC_REG_FABRIC_EVENT | \
+ FC_REG_SCSI_EVENT | \
+ FC_REG_BOARD_EVENT | \
+ FC_REG_ADAPTER_EVENT)
+/* Temperature events */
+#define LPFC_CRIT_TEMP 0x1
+#define LPFC_THRESHOLD_TEMP 0x2
+#define LPFC_NORMAL_TEMP 0x3
+/*
+ * All net link event payloads will begin with an event type
+ * and subcategory. The event type must come first.
+ * The subcategory further defines the data that follows in the rest
+ * of the payload. Each category will have its own unique header plus
+ * any additional data unique to the subcategory.
+ * The payload sent via the fc transport is one-way driver->application.
+ */
+
+/* els event header */
+struct lpfc_els_event_header {
+ uint32_t event_type;
+ uint32_t subcategory;
+ uint8_t wwpn[8];
+ uint8_t wwnn[8];
+};
+
+/* subcategory codes for FC_REG_ELS_EVENT */
+#define LPFC_EVENT_PLOGI_RCV 0x01
+#define LPFC_EVENT_PRLO_RCV 0x02
+#define LPFC_EVENT_ADISC_RCV 0x04
+#define LPFC_EVENT_LSRJT_RCV 0x08
+
+/* special els lsrjt event */
+struct lpfc_lsrjt_event {
+ struct lpfc_els_event_header header;
+ uint32_t command;
+ uint32_t reason_code;
+ uint32_t explanation;
+};
+
+
+/* fabric event header */
+struct lpfc_fabric_event_header {
+ uint32_t event_type;
+ uint32_t subcategory;
+ uint8_t wwpn[8];
+ uint8_t wwnn[8];
+};
+
+/* subcategory codes for FC_REG_FABRIC_EVENT */
+#define LPFC_EVENT_FABRIC_BUSY 0x01
+#define LPFC_EVENT_PORT_BUSY 0x02
+#define LPFC_EVENT_FCPRDCHKERR 0x04
+
+/* special case fabric fcprdchkerr event */
+struct lpfc_fcprdchkerr_event {
+ struct lpfc_fabric_event_header header;
+ uint32_t lun;
+ uint32_t opcode;
+ uint32_t fcpiparam;
+};
+
+
+/* scsi event header */
+struct lpfc_scsi_event_header {
+ uint32_t event_type;
+ uint32_t subcategory;
+ uint32_t lun;
+ uint8_t wwpn[8];
+ uint8_t wwnn[8];
+};
+
+/* subcategory codes for FC_REG_SCSI_EVENT */
+#define LPFC_EVENT_QFULL 0x0001
+#define LPFC_EVENT_DEVBSY 0x0002
+#define LPFC_EVENT_CHECK_COND 0x0004
+#define LPFC_EVENT_LUNRESET 0x0008
+#define LPFC_EVENT_TGTRESET 0x0010
+#define LPFC_EVENT_BUSRESET 0x0020
+#define LPFC_EVENT_VARQUEDEPTH 0x0040
+
+/* special case scsi varqueuedepth event */
+struct lpfc_scsi_varqueuedepth_event {
+ struct lpfc_scsi_event_header scsi_event;
+ uint32_t oldval;
+ uint32_t newval;
+};
+
+/* special case scsi check condition event */
+struct lpfc_scsi_check_condition_event {
+ struct lpfc_scsi_event_header scsi_event;
+ uint8_t sense_key;
+ uint8_t asc;
+ uint8_t ascq;
+};
+
+/* event codes for FC_REG_BOARD_EVENT */
+#define LPFC_EVENT_PORTINTERR 0x01
+
+/* board event header */
+struct lpfc_board_event_header {
+ uint32_t event_type;
+ uint32_t subcategory;
+};
+
+
+/* event codes for FC_REG_ADAPTER_EVENT */
+#define LPFC_EVENT_ARRIVAL 0x01
+
+/* adapter event header */
+struct lpfc_adapter_event_header {
+ uint32_t event_type;
+ uint32_t subcategory;
+};
+
+
+/* event codes for temp_event */
+#define LPFC_CRIT_TEMP 0x1
+#define LPFC_THRESHOLD_TEMP 0x2
+#define LPFC_NORMAL_TEMP 0x3
+
+struct temp_event {
+ uint32_t event_type;
+ uint32_t event_code;
+ uint32_t data;
+};
+
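These payloads travel one way, driver to application, over the fc transport's vendor event channel, so a consumer only needs the structures above to decode them. A minimal userspace sketch, assuming the raw event bytes have already been received; the function name and the printing are illustrative only:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>
#include "lpfc_nl.h"

/* Illustrative sketch: decode one lpfc SCSI vendor event payload. */
static void example_decode_event(const void *buf, size_t len)
{
	const struct lpfc_scsi_event_header *hdr = buf;

	if (len < sizeof(*hdr) || hdr->event_type != FC_REG_SCSI_EVENT)
		return;

	if (hdr->subcategory == LPFC_EVENT_CHECK_COND &&
	    len >= sizeof(struct lpfc_scsi_check_condition_event)) {
		const struct lpfc_scsi_check_condition_event *cc = buf;

		printf("check condition on lun %u: key %#x asc %#x ascq %#x\n",
		       cc->scsi_event.lun, cc->sense_key, cc->asc, cc->ascq);
	}
}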
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 6688a8689b5..0c25d97acb4 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -30,6 +30,7 @@
#include "lpfc_hw.h"
#include "lpfc_sli.h"
+#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
@@ -1003,20 +1004,8 @@ lpfc_rcv_plogi_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
spin_unlock_irq(shost->host_lock);
-
- if (vport->num_disc_nodes) {
+ if (vport->num_disc_nodes)
lpfc_more_adisc(vport);
- if ((vport->num_disc_nodes == 0) &&
- (vport->fc_npr_cnt))
- lpfc_els_disc_plogi(vport);
- if (vport->num_disc_nodes == 0) {
- spin_lock_irq(shost->host_lock);
- vport->fc_flag &= ~FC_NDISC_ACTIVE;
- spin_unlock_irq(shost->host_lock);
- lpfc_can_disctmo(vport);
- lpfc_end_rscn(vport);
- }
- }
}
return ndlp->nlp_state;
}
@@ -1865,8 +1854,13 @@ static uint32_t
lpfc_cmpl_logo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
void *arg, uint32_t evt)
{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ if (ndlp->nlp_DID == Fabric_DID) {
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
+ spin_unlock_irq(shost->host_lock);
+ }
lpfc_unreg_rpi(vport, ndlp);
- /* This routine does nothing, just return the current state */
return ndlp->nlp_state;
}
@@ -2155,7 +2149,7 @@ lpfc_disc_state_machine(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
lpfc_nlp_put(ndlp);
} else {
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
- "0212 DSM out state %d on NPort free\n", rc);
+ "0213 DSM out state %d on NPort free\n", rc);
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM,
"DSM out: ste:%d did:x%x flg:x%x",
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 1bcebbd3dfa..bd186741182 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -32,6 +32,7 @@
#include "lpfc_version.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
+#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
@@ -42,6 +43,111 @@
#define LPFC_RESET_WAIT 2
#define LPFC_ABORT_WAIT 2
+/**
+ * lpfc_update_stats: Update statistical data for the command completion.
+ * @phba: Pointer to HBA object.
+ * @lpfc_cmd: lpfc scsi command object pointer.
+ *
+ * This function is called when a command completes and updates the
+ * statistical data for that command completion.
+ **/
+static void
+lpfc_update_stats(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
+{
+ struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
+ struct lpfc_nodelist *pnode = rdata->pnode;
+ struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
+ unsigned long flags;
+ struct Scsi_Host *shost = cmd->device->host;
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ unsigned long latency;
+ int i;
+
+ if (cmd->result)
+ return;
+
+ spin_lock_irqsave(shost->host_lock, flags);
+ if (!vport->stat_data_enabled ||
+ vport->stat_data_blocked ||
+ !pnode->lat_data ||
+ (phba->bucket_type == LPFC_NO_BUCKET)) {
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ return;
+ }
+ latency = jiffies_to_msecs(jiffies - lpfc_cmd->start_time);
+
+ if (phba->bucket_type == LPFC_LINEAR_BUCKET) {
+ i = (latency + phba->bucket_step - 1 - phba->bucket_base)/
+ phba->bucket_step;
+ if (i >= LPFC_MAX_BUCKET_COUNT)
+ i = LPFC_MAX_BUCKET_COUNT - 1;
+ } else {
+ for (i = 0; i < LPFC_MAX_BUCKET_COUNT-1; i++)
+ if (latency <= (phba->bucket_base +
+ ((1<<i)*phba->bucket_step)))
+ break;
+ }
+
+ pnode->lat_data[i].cmd_count++;
+ spin_unlock_irqrestore(shost->host_lock, flags);
+}
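To make the bucket selection above concrete: with the linear bucket type, a bucket_base of 0 ms and a bucket_step of 50 ms (values chosen only for illustration), a command that completed in 120 ms lands in bucket (120 + 50 - 1 - 0) / 50 = 3, and anything past the last bucket is clamped to it; with the power-of-two type, the loop stops at the first i for which latency <= bucket_base + (1 << i) * bucket_step. A stand-alone sketch of the same index math (the function name is illustrative; LPFC_LINEAR_BUCKET and LPFC_MAX_BUCKET_COUNT are the driver's own definitions):

/* Illustrative sketch of the latency-to-bucket mapping used above. */
static int example_bucket_index(uint32_t bucket_type, unsigned long base,
				unsigned long step, unsigned long latency)
{
	int i;

	if (bucket_type == LPFC_LINEAR_BUCKET) {
		i = (latency + step - 1 - base) / step;
		if (i >= LPFC_MAX_BUCKET_COUNT)
			i = LPFC_MAX_BUCKET_COUNT - 1;	/* clamp to the last slot */
	} else {
		for (i = 0; i < LPFC_MAX_BUCKET_COUNT - 1; i++)
			if (latency <= base + ((1UL << i) * step))
				break;
	}
	return i;
}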
+
+
+/**
+ * lpfc_send_sdev_queuedepth_change_event: Posts a queuedepth change
+ * event.
+ * @phba: Pointer to HBA context object.
+ * @vport: Pointer to vport object.
+ * @ndlp: Pointer to FC node associated with the target.
+ * @lun: Lun number of the scsi device.
+ * @old_val: Old value of the queue depth.
+ * @new_val: New value of the queue depth.
+ *
+ * This function sends an event to the mgmt application indicating
+ * there is a change in the scsi device queue depth.
+ **/
+static void
+lpfc_send_sdev_queuedepth_change_event(struct lpfc_hba *phba,
+ struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp,
+ uint32_t lun,
+ uint32_t old_val,
+ uint32_t new_val)
+{
+ struct lpfc_fast_path_event *fast_path_evt;
+ unsigned long flags;
+
+ fast_path_evt = lpfc_alloc_fast_evt(phba);
+ if (!fast_path_evt)
+ return;
+
+ fast_path_evt->un.queue_depth_evt.scsi_event.event_type =
+ FC_REG_SCSI_EVENT;
+ fast_path_evt->un.queue_depth_evt.scsi_event.subcategory =
+ LPFC_EVENT_VARQUEDEPTH;
+
+ /* Report all luns with change in queue depth */
+ fast_path_evt->un.queue_depth_evt.scsi_event.lun = lun;
+ if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
+ memcpy(&fast_path_evt->un.queue_depth_evt.scsi_event.wwpn,
+ &ndlp->nlp_portname, sizeof(struct lpfc_name));
+ memcpy(&fast_path_evt->un.queue_depth_evt.scsi_event.wwnn,
+ &ndlp->nlp_nodename, sizeof(struct lpfc_name));
+ }
+
+ fast_path_evt->un.queue_depth_evt.oldval = old_val;
+ fast_path_evt->un.queue_depth_evt.newval = new_val;
+ fast_path_evt->vport = vport;
+
+ fast_path_evt->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
+ spin_lock_irqsave(&phba->hbalock, flags);
+ list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+ lpfc_worker_wake_up(phba);
+
+ return;
+}
+
/*
* This function is called with no lock held when there is a resource
* error in driver or in firmware.
@@ -117,9 +223,10 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
struct lpfc_vport **vports;
struct Scsi_Host *shost;
struct scsi_device *sdev;
- unsigned long new_queue_depth;
+ unsigned long new_queue_depth, old_queue_depth;
unsigned long num_rsrc_err, num_cmd_success;
int i;
+ struct lpfc_rport_data *rdata;
num_rsrc_err = atomic_read(&phba->num_rsrc_err);
num_cmd_success = atomic_read(&phba->num_cmd_success);
@@ -137,6 +244,7 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
else
new_queue_depth = sdev->queue_depth -
new_queue_depth;
+ old_queue_depth = sdev->queue_depth;
if (sdev->ordered_tags)
scsi_adjust_queue_depth(sdev,
MSG_ORDERED_TAG,
@@ -145,6 +253,13 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
scsi_adjust_queue_depth(sdev,
MSG_SIMPLE_TAG,
new_queue_depth);
+ rdata = sdev->hostdata;
+ if (rdata)
+ lpfc_send_sdev_queuedepth_change_event(
+ phba, vports[i],
+ rdata->pnode,
+ sdev->lun, old_queue_depth,
+ new_queue_depth);
}
}
lpfc_destroy_vport_work_array(phba, vports);
@@ -159,6 +274,7 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
struct Scsi_Host *shost;
struct scsi_device *sdev;
int i;
+ struct lpfc_rport_data *rdata;
vports = lpfc_create_vport_work_array(phba);
if (vports != NULL)
@@ -176,6 +292,14 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
scsi_adjust_queue_depth(sdev,
MSG_SIMPLE_TAG,
sdev->queue_depth+1);
+ rdata = sdev->hostdata;
+ if (rdata)
+ lpfc_send_sdev_queuedepth_change_event(
+ phba, vports[i],
+ rdata->pnode,
+ sdev->lun,
+ sdev->queue_depth - 1,
+ sdev->queue_depth);
}
}
lpfc_destroy_vport_work_array(phba, vports);
@@ -183,6 +307,35 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
atomic_set(&phba->num_cmd_success, 0);
}
+/**
+ * lpfc_scsi_dev_block: set all scsi hosts to block state.
+ * @phba: Pointer to HBA context object.
+ *
+ * This function walks the vport list and sets each SCSI host to the block state
+ * by invoking the fc_remote_port_delete() routine. This function is invoked
+ * by the EEH code when the device's PCI slot has been permanently disabled.
+ **/
+void
+lpfc_scsi_dev_block(struct lpfc_hba *phba)
+{
+ struct lpfc_vport **vports;
+ struct Scsi_Host *shost;
+ struct scsi_device *sdev;
+ struct fc_rport *rport;
+ int i;
+
+ vports = lpfc_create_vport_work_array(phba);
+ if (vports != NULL)
+ for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
+ shost = lpfc_shost_from_vport(vports[i]);
+ shost_for_each_device(sdev, shost) {
+ rport = starget_to_rport(scsi_target(sdev));
+ fc_remote_port_delete(rport);
+ }
+ }
+ lpfc_destroy_vport_work_array(phba, vports);
+}
+
/*
* This routine allocates a scsi buffer, which contains all the necessary
* information needed to initiate a SCSI I/O. The non-DMAable buffer region
@@ -198,7 +351,9 @@ lpfc_new_scsi_buf(struct lpfc_vport *vport)
struct lpfc_scsi_buf *psb;
struct ulp_bde64 *bpl;
IOCB_t *iocb;
- dma_addr_t pdma_phys;
+ dma_addr_t pdma_phys_fcp_cmd;
+ dma_addr_t pdma_phys_fcp_rsp;
+ dma_addr_t pdma_phys_bpl;
uint16_t iotag;
psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
@@ -238,40 +393,60 @@ lpfc_new_scsi_buf(struct lpfc_vport *vport)
/* Initialize local short-hand pointers. */
bpl = psb->fcp_bpl;
- pdma_phys = psb->dma_handle;
+ pdma_phys_fcp_cmd = psb->dma_handle;
+ pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd);
+ pdma_phys_bpl = psb->dma_handle + sizeof(struct fcp_cmnd) +
+ sizeof(struct fcp_rsp);
/*
* The first two bdes are the FCP_CMD and FCP_RSP. The balance are sg
* list bdes. Initialize the first two and leave the rest for
* queuecommand.
*/
- bpl->addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys));
- bpl->addrLow = le32_to_cpu(putPaddrLow(pdma_phys));
- bpl->tus.f.bdeSize = sizeof (struct fcp_cmnd);
- bpl->tus.f.bdeFlags = BUFF_USE_CMND;
- bpl->tus.w = le32_to_cpu(bpl->tus.w);
- bpl++;
+ bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd));
+ bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd));
+ bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd);
+ bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+ bpl[0].tus.w = le32_to_cpu(bpl->tus.w);
/* Setup the physical region for the FCP RSP */
- pdma_phys += sizeof (struct fcp_cmnd);
- bpl->addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys));
- bpl->addrLow = le32_to_cpu(putPaddrLow(pdma_phys));
- bpl->tus.f.bdeSize = sizeof (struct fcp_rsp);
- bpl->tus.f.bdeFlags = (BUFF_USE_CMND | BUFF_USE_RCV);
- bpl->tus.w = le32_to_cpu(bpl->tus.w);
+ bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp));
+ bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp));
+ bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp);
+ bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+ bpl[1].tus.w = le32_to_cpu(bpl->tus.w);
/*
* Since the IOCB for the FCP I/O is built into this lpfc_scsi_buf,
* initialize it with all known data now.
*/
- pdma_phys += (sizeof (struct fcp_rsp));
iocb = &psb->cur_iocbq.iocb;
iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
- iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys);
- iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys);
- iocb->un.fcpi64.bdl.bdeSize = (2 * sizeof (struct ulp_bde64));
- iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDL;
- iocb->ulpBdeCount = 1;
+ if (phba->sli_rev == 3) {
+ /* fill in immediate fcp command BDE */
+ iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED;
+ iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
+ iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t,
+ unsli3.fcp_ext.icd);
+ iocb->un.fcpi64.bdl.addrHigh = 0;
+ iocb->ulpBdeCount = 0;
+ iocb->ulpLe = 0;
+ /* fill in response BDE */
+ iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+ iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize =
+ sizeof(struct fcp_rsp);
+ iocb->unsli3.fcp_ext.rbde.addrLow =
+ putPaddrLow(pdma_phys_fcp_rsp);
+ iocb->unsli3.fcp_ext.rbde.addrHigh =
+ putPaddrHigh(pdma_phys_fcp_rsp);
+ } else {
+ iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
+ iocb->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
+ iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_bpl);
+ iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_bpl);
+ iocb->ulpBdeCount = 1;
+ iocb->ulpLe = 1;
+ }
iocb->ulpClass = CLASS3;
return psb;
@@ -313,8 +488,9 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
+ struct ulp_bde64 *data_bde = iocb_cmd->unsli3.fcp_ext.dbde;
dma_addr_t physaddr;
- uint32_t i, num_bde = 0;
+ uint32_t num_bde = 0;
int nseg, datadir = scsi_cmnd->sc_data_direction;
/*
@@ -352,37 +528,159 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
* during probe that limits the number of sg elements in any
* single scsi command. Just run through the seg_cnt and format
* the bde's.
+ * When using SLI-3 the driver will try to fit all the BDEs into
+ * the IOCB. If it can't then the BDEs get added to a BPL as it
+ * does for SLI-2 mode.
*/
- scsi_for_each_sg(scsi_cmnd, sgel, nseg, i) {
+ scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
physaddr = sg_dma_address(sgel);
- bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
- bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
- bpl->tus.f.bdeSize = sg_dma_len(sgel);
- if (datadir == DMA_TO_DEVICE)
- bpl->tus.f.bdeFlags = 0;
- else
- bpl->tus.f.bdeFlags = BUFF_USE_RCV;
- bpl->tus.w = le32_to_cpu(bpl->tus.w);
- bpl++;
- num_bde++;
+ if (phba->sli_rev == 3 &&
+ nseg <= LPFC_EXT_DATA_BDE_COUNT) {
+ data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+ data_bde->tus.f.bdeSize = sg_dma_len(sgel);
+ data_bde->addrLow = putPaddrLow(physaddr);
+ data_bde->addrHigh = putPaddrHigh(physaddr);
+ data_bde++;
+ } else {
+ bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+ bpl->tus.f.bdeSize = sg_dma_len(sgel);
+ bpl->tus.w = le32_to_cpu(bpl->tus.w);
+ bpl->addrLow =
+ le32_to_cpu(putPaddrLow(physaddr));
+ bpl->addrHigh =
+ le32_to_cpu(putPaddrHigh(physaddr));
+ bpl++;
+ }
}
}
/*
* Finish initializing those IOCB fields that are dependent on the
- * scsi_cmnd request_buffer. Note that the bdeSize is explicitly
- * reinitialized since all iocb memory resources are used many times
- * for transmit, receive, and continuation bpl's.
+ * scsi_cmnd request_buffer. Note that for SLI-2 the bdeSize is
+ * explicitly reinitialized and for SLI-3 the extended bde count is
+ * explicitly reinitialized since all iocb memory resources are reused.
*/
- iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof (struct ulp_bde64));
- iocb_cmd->un.fcpi64.bdl.bdeSize +=
- (num_bde * sizeof (struct ulp_bde64));
- iocb_cmd->ulpBdeCount = 1;
- iocb_cmd->ulpLe = 1;
+ if (phba->sli_rev == 3) {
+ if (num_bde > LPFC_EXT_DATA_BDE_COUNT) {
+ /*
+ * The extended IOCB format can only fit 3 BDE or a BPL.
+ * This I/O has more than 3 BDE so the 1st data bde will
+ * be a BPL that is filled in here.
+ */
+ physaddr = lpfc_cmd->dma_handle;
+ data_bde->tus.f.bdeFlags = BUFF_TYPE_BLP_64;
+ data_bde->tus.f.bdeSize = (num_bde *
+ sizeof(struct ulp_bde64));
+ physaddr += (sizeof(struct fcp_cmnd) +
+ sizeof(struct fcp_rsp) +
+ (2 * sizeof(struct ulp_bde64)));
+ data_bde->addrHigh = putPaddrHigh(physaddr);
+ data_bde->addrLow = putPaddrLow(physaddr);
+ /* ebde count includes the response bde and data bpl */
+ iocb_cmd->unsli3.fcp_ext.ebde_count = 2;
+ } else {
+ /* ebde count includes the response bde and data bdes */
+ iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
+ }
+ } else {
+ iocb_cmd->un.fcpi64.bdl.bdeSize =
+ ((num_bde + 2) * sizeof(struct ulp_bde64));
+ }
fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
return 0;
}
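A worked example of the ebde accounting just above: on SLI-3, a two-segment I/O fits in the extended IOCB directly, so ebde_count becomes 3 (the response BDE plus two data BDEs), while a five-segment I/O exceeds the three data BDEs the extended IOCB can hold, so the data BDEs are described by a single BPL and ebde_count becomes 2 (the response BDE plus the BPL).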
+/**
+ * lpfc_send_scsi_error_event: Posts an event when there is a SCSI error.
+ * @phba: Pointer to hba context object.
+ * @vport: Pointer to vport object.
+ * @lpfc_cmd: Pointer to lpfc scsi command which reported the error.
+ * @rsp_iocb: Pointer to response iocb object which reported error.
+ *
+ * This function posts an event when there is a SCSI command reporting
+ * an error from the scsi device.
+ **/
+static void
+lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
+ struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb) {
+ struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
+ struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
+ uint32_t resp_info = fcprsp->rspStatus2;
+ uint32_t scsi_status = fcprsp->rspStatus3;
+ uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
+ struct lpfc_fast_path_event *fast_path_evt = NULL;
+ struct lpfc_nodelist *pnode = lpfc_cmd->rdata->pnode;
+ unsigned long flags;
+
+ /* If there is queuefull or busy condition send a scsi event */
+ if ((cmnd->result == SAM_STAT_TASK_SET_FULL) ||
+ (cmnd->result == SAM_STAT_BUSY)) {
+ fast_path_evt = lpfc_alloc_fast_evt(phba);
+ if (!fast_path_evt)
+ return;
+ fast_path_evt->un.scsi_evt.event_type =
+ FC_REG_SCSI_EVENT;
+ fast_path_evt->un.scsi_evt.subcategory =
+ (cmnd->result == SAM_STAT_TASK_SET_FULL) ?
+ LPFC_EVENT_QFULL : LPFC_EVENT_DEVBSY;
+ fast_path_evt->un.scsi_evt.lun = cmnd->device->lun;
+ memcpy(&fast_path_evt->un.scsi_evt.wwpn,
+ &pnode->nlp_portname, sizeof(struct lpfc_name));
+ memcpy(&fast_path_evt->un.scsi_evt.wwnn,
+ &pnode->nlp_nodename, sizeof(struct lpfc_name));
+ } else if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen &&
+ ((cmnd->cmnd[0] == READ_10) || (cmnd->cmnd[0] == WRITE_10))) {
+ fast_path_evt = lpfc_alloc_fast_evt(phba);
+ if (!fast_path_evt)
+ return;
+ fast_path_evt->un.check_cond_evt.scsi_event.event_type =
+ FC_REG_SCSI_EVENT;
+ fast_path_evt->un.check_cond_evt.scsi_event.subcategory =
+ LPFC_EVENT_CHECK_COND;
+ fast_path_evt->un.check_cond_evt.scsi_event.lun =
+ cmnd->device->lun;
+ memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwpn,
+ &pnode->nlp_portname, sizeof(struct lpfc_name));
+ memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwnn,
+ &pnode->nlp_nodename, sizeof(struct lpfc_name));
+ fast_path_evt->un.check_cond_evt.sense_key =
+ cmnd->sense_buffer[2] & 0xf;
+ fast_path_evt->un.check_cond_evt.asc = cmnd->sense_buffer[12];
+ fast_path_evt->un.check_cond_evt.ascq = cmnd->sense_buffer[13];
+ } else if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
+ fcpi_parm &&
+ ((be32_to_cpu(fcprsp->rspResId) != fcpi_parm) ||
+ ((scsi_status == SAM_STAT_GOOD) &&
+ !(resp_info & (RESID_UNDER | RESID_OVER))))) {
+ /*
+ * If status is good or resid does not match with fcpi_parm and
+ * there is a valid fcpi_parm, then there is a read_check error
+ */
+ fast_path_evt = lpfc_alloc_fast_evt(phba);
+ if (!fast_path_evt)
+ return;
+ fast_path_evt->un.read_check_error.header.event_type =
+ FC_REG_FABRIC_EVENT;
+ fast_path_evt->un.read_check_error.header.subcategory =
+ LPFC_EVENT_FCPRDCHKERR;
+ memcpy(&fast_path_evt->un.read_check_error.header.wwpn,
+ &pnode->nlp_portname, sizeof(struct lpfc_name));
+ memcpy(&fast_path_evt->un.read_check_error.header.wwnn,
+ &pnode->nlp_nodename, sizeof(struct lpfc_name));
+ fast_path_evt->un.read_check_error.lun = cmnd->device->lun;
+ fast_path_evt->un.read_check_error.opcode = cmnd->cmnd[0];
+ fast_path_evt->un.read_check_error.fcpiparam =
+ fcpi_parm;
+ } else
+ return;
+
+ fast_path_evt->vport = vport;
+ spin_lock_irqsave(&phba->hbalock, flags);
+ list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+ lpfc_worker_wake_up(phba);
+ return;
+}
static void
lpfc_scsi_unprep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
{
@@ -411,6 +709,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
uint32_t rsplen = 0;
uint32_t logit = LOG_FCP | LOG_FCP_ERROR;
+
/*
* If this is a task management command, there is no
* scsi packet associated with this lpfc_cmd. The driver
@@ -526,6 +825,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
out:
cmnd->result = ScsiResult(host_status, scsi_status);
+ lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, rsp_iocb);
}
static void
@@ -542,9 +842,11 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
struct scsi_device *sdev, *tmp_sdev;
int depth = 0;
unsigned long flags;
+ struct lpfc_fast_path_event *fast_path_evt;
lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4];
lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
+ atomic_dec(&pnode->cmd_pending);
if (lpfc_cmd->status) {
if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
@@ -570,12 +872,36 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
break;
case IOSTAT_NPORT_BSY:
case IOSTAT_FABRIC_BSY:
- cmd->result = ScsiResult(DID_BUS_BUSY, 0);
+ cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0);
+ fast_path_evt = lpfc_alloc_fast_evt(phba);
+ if (!fast_path_evt)
+ break;
+ fast_path_evt->un.fabric_evt.event_type =
+ FC_REG_FABRIC_EVENT;
+ fast_path_evt->un.fabric_evt.subcategory =
+ (lpfc_cmd->status == IOSTAT_NPORT_BSY) ?
+ LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY;
+ if (pnode && NLP_CHK_NODE_ACT(pnode)) {
+ memcpy(&fast_path_evt->un.fabric_evt.wwpn,
+ &pnode->nlp_portname,
+ sizeof(struct lpfc_name));
+ memcpy(&fast_path_evt->un.fabric_evt.wwnn,
+ &pnode->nlp_nodename,
+ sizeof(struct lpfc_name));
+ }
+ fast_path_evt->vport = vport;
+ fast_path_evt->work_evt.evt =
+ LPFC_EVT_FASTPATH_MGMT_EVT;
+ spin_lock_irqsave(&phba->hbalock, flags);
+ list_add_tail(&fast_path_evt->work_evt.evt_listp,
+ &phba->work_list);
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+ lpfc_worker_wake_up(phba);
break;
case IOSTAT_LOCAL_REJECT:
- if (lpfc_cmd->result == RJT_UNAVAIL_PERM ||
+ if (lpfc_cmd->result == IOERR_INVALID_RPI ||
lpfc_cmd->result == IOERR_NO_RESOURCES ||
- lpfc_cmd->result == RJT_LOGIN_REQUIRED) {
+ lpfc_cmd->result == IOERR_ABORT_REQUESTED) {
cmd->result = ScsiResult(DID_REQUEUE, 0);
break;
} /* else: fall through */
@@ -586,7 +912,8 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
if (!pnode || !NLP_CHK_NODE_ACT(pnode)
|| (pnode->nlp_state != NLP_STE_MAPPED_NODE))
- cmd->result = ScsiResult(DID_BUS_BUSY, SAM_STAT_BUSY);
+ cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED,
+ SAM_STAT_BUSY);
} else {
cmd->result = ScsiResult(DID_OK, 0);
}
@@ -602,8 +929,32 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
scsi_get_resid(cmd));
}
+ lpfc_update_stats(phba, lpfc_cmd);
result = cmd->result;
sdev = cmd->device;
+ if (vport->cfg_max_scsicmpl_time &&
+ time_after(jiffies, lpfc_cmd->start_time +
+ msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
+ spin_lock_irqsave(sdev->host->host_lock, flags);
+ if ((pnode->cmd_qdepth > atomic_read(&pnode->cmd_pending) &&
+ (atomic_read(&pnode->cmd_pending) > LPFC_MIN_TGT_QDEPTH) &&
+ ((cmd->cmnd[0] == READ_10) || (cmd->cmnd[0] == WRITE_10))))
+ pnode->cmd_qdepth = atomic_read(&pnode->cmd_pending);
+
+ pnode->last_change_time = jiffies;
+ spin_unlock_irqrestore(sdev->host->host_lock, flags);
+ } else if ((pnode->cmd_qdepth < LPFC_MAX_TGT_QDEPTH) &&
+ time_after(jiffies, pnode->last_change_time +
+ msecs_to_jiffies(LPFC_TGTQ_INTERVAL))) {
+ spin_lock_irqsave(sdev->host->host_lock, flags);
+ pnode->cmd_qdepth += pnode->cmd_qdepth *
+ LPFC_TGTQ_RAMPUP_PCENT / 100;
+ if (pnode->cmd_qdepth > LPFC_MAX_TGT_QDEPTH)
+ pnode->cmd_qdepth = LPFC_MAX_TGT_QDEPTH;
+ pnode->last_change_time = jiffies;
+ spin_unlock_irqrestore(sdev->host->host_lock, flags);
+ }
+
lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
cmd->scsi_done(cmd);
@@ -647,6 +998,9 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
pnode->last_ramp_up_time = jiffies;
}
}
+ lpfc_send_sdev_queuedepth_change_event(phba, vport, pnode,
+ 0xFFFFFFFF,
+ sdev->queue_depth - 1, sdev->queue_depth);
}
/*
@@ -676,6 +1030,9 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
"0711 detected queue full - lun queue "
"depth adjusted to %d.\n", depth);
+ lpfc_send_sdev_queuedepth_change_event(phba, vport,
+ pnode, 0xFFFFFFFF,
+ depth+1, depth);
}
}
@@ -692,6 +1049,24 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
lpfc_release_scsi_buf(phba, lpfc_cmd);
}
+/**
+ * lpfc_fcpcmd_to_iocb: Copy the fcp_cmnd data into the IOCB.
+ * @data: A pointer to the immediate command data portion of the IOCB.
+ * @fcp_cmnd: The FCP Command that is provided by the SCSI layer.
+ *
+ * The routine copies the entire FCP command from @fcp_cmnd to @data while
+ * byte swapping the data to big endian format for transmission on the wire.
+ **/
+static void
+lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd)
+{
+ int i, j;
+ for (i = 0, j = 0; i < sizeof(struct fcp_cmnd);
+ i += sizeof(uint32_t), j++) {
+ ((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]);
+ }
+}
+
static void
lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
struct lpfc_nodelist *pnode)
@@ -758,7 +1133,8 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
fcp_cmnd->fcpCntl3 = 0;
phba->fc4ControlRequests++;
}
-
+ if (phba->sli_rev == 3)
+ lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd);
/*
* Finish initializing those IOCB fields that are independent
* of the scsi_cmnd request_buffer
@@ -798,11 +1174,13 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
piocb = &piocbq->iocb;
fcp_cmnd = lpfc_cmd->fcp_cmnd;
- int_to_scsilun(lun, &lpfc_cmd->fcp_cmnd->fcp_lun);
+ /* Clear out any old data in the FCP command area */
+ memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));
+ int_to_scsilun(lun, &fcp_cmnd->fcp_lun);
fcp_cmnd->fcpCntl2 = task_mgmt_cmd;
-
+ if (vport->phba->sli_rev == 3)
+ lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd);
piocb->ulpCommand = CMD_FCP_ICMND64_CR;
-
piocb->ulpContext = ndlp->nlp_rpi;
if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
piocb->ulpFCP2Rcvy = 1;
@@ -967,9 +1345,12 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
* transport is still transitioning.
*/
if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
- cmnd->result = ScsiResult(DID_BUS_BUSY, 0);
+ cmnd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0);
goto out_fail_command;
}
+ if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth)
+ goto out_host_busy;
+
lpfc_cmd = lpfc_get_scsi_buf(phba);
if (lpfc_cmd == NULL) {
lpfc_adjust_queue_depth(phba);
@@ -980,6 +1361,7 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
goto out_host_busy;
}
+ lpfc_cmd->start_time = jiffies;
/*
* Store the midlayer's command structure for the completion phase
* and complete the command initialization.
@@ -987,6 +1369,7 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
lpfc_cmd->pCmd = cmnd;
lpfc_cmd->rdata = rdata;
lpfc_cmd->timeout = 0;
+ lpfc_cmd->start_time = jiffies;
cmnd->host_scribble = (unsigned char *)lpfc_cmd;
cmnd->scsi_done = done;
@@ -996,6 +1379,7 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);
+ atomic_inc(&ndlp->cmd_pending);
err = lpfc_sli_issue_iocb(phba, &phba->sli.ring[psli->fcp_ring],
&lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
if (err)
@@ -1010,6 +1394,7 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
return 0;
out_host_busy_free_buf:
+ atomic_dec(&ndlp->cmd_pending);
lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
lpfc_release_scsi_buf(phba, lpfc_cmd);
out_host_busy:
@@ -1145,6 +1530,7 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
int ret = SUCCESS;
int status;
int cnt;
+ struct lpfc_scsi_event_header scsi_event;
lpfc_block_error_handler(cmnd);
/*
@@ -1163,6 +1549,19 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
break;
pnode = rdata->pnode;
}
+
+ scsi_event.event_type = FC_REG_SCSI_EVENT;
+ scsi_event.subcategory = LPFC_EVENT_TGTRESET;
+ scsi_event.lun = 0;
+ memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
+ memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));
+
+ fc_host_post_vendor_event(shost,
+ fc_get_event_number(),
+ sizeof(scsi_event),
+ (char *)&scsi_event,
+ SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
+
if (!rdata || pnode->nlp_state != NLP_STE_MAPPED_NODE) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
"0721 LUN Reset rport "
@@ -1242,10 +1641,23 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
struct lpfc_hba *phba = vport->phba;
struct lpfc_nodelist *ndlp = NULL;
int match;
- int ret = SUCCESS, status, i;
+ int ret = SUCCESS, status = SUCCESS, i;
int cnt;
struct lpfc_scsi_buf * lpfc_cmd;
unsigned long later;
+ struct lpfc_scsi_event_header scsi_event;
+
+ scsi_event.event_type = FC_REG_SCSI_EVENT;
+ scsi_event.subcategory = LPFC_EVENT_BUSRESET;
+ scsi_event.lun = 0;
+ memcpy(scsi_event.wwpn, &vport->fc_portname, sizeof(struct lpfc_name));
+ memcpy(scsi_event.wwnn, &vport->fc_nodename, sizeof(struct lpfc_name));
+
+ fc_host_post_vendor_event(shost,
+ fc_get_event_number(),
+ sizeof(scsi_event),
+ (char *)&scsi_event,
+ SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
lpfc_block_error_handler(cmnd);
/*
diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h
index daba9237498..437f182e232 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.h
+++ b/drivers/scsi/lpfc/lpfc_scsi.h
@@ -107,6 +107,10 @@ struct fcp_cmnd {
};
+struct lpfc_scsicmd_bkt {
+ uint32_t cmd_count;
+};
+
struct lpfc_scsi_buf {
struct list_head list;
struct scsi_cmnd *pCmd;
@@ -139,6 +143,7 @@ struct lpfc_scsi_buf {
*/
struct lpfc_iocbq cur_iocbq;
wait_queue_head_t *waitq;
+ unsigned long start_time;
};
#define LPFC_SCSI_DMA_EXT_SIZE 264
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 50fe0764673..8ab5babdeeb 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -32,6 +32,7 @@
#include "lpfc_hw.h"
#include "lpfc_sli.h"
+#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
@@ -66,10 +67,16 @@ typedef enum _lpfc_iocb_type {
LPFC_ABORT_IOCB
} lpfc_iocb_type;
- /* SLI-2/SLI-3 provide different sized iocbs. Given a pointer
- * to the start of the ring, and the slot number of the
- * desired iocb entry, calc a pointer to that entry.
- */
+/**
+ * lpfc_cmd_iocb: Get next command iocb entry in the ring.
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ *
+ * This function returns a pointer to the next command iocb entry
+ * in the command ring. The caller must hold hbalock to prevent
+ * other threads from consuming the next command iocb.
+ * SLI-2/SLI-3 provide different sized iocbs.
+ **/
static inline IOCB_t *
lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
@@ -77,6 +84,16 @@ lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
pring->cmdidx * phba->iocb_cmd_size);
}
+/**
+ * lpfc_resp_iocb: Get next response iocb entry in the ring.
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ *
+ * This function returns a pointer to the next response iocb entry
+ * in the response ring. The caller must hold hbalock to make sure
+ * that no other thread consumes the next response iocb.
+ * SLI-2/SLI-3 provide different sized iocbs.
+ **/
static inline IOCB_t *
lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
@@ -84,6 +101,15 @@ lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
pring->rspidx * phba->iocb_rsp_size);
}
+/**
+ * __lpfc_sli_get_iocbq: Allocates an iocb object from iocb pool.
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called with hbalock held. This function
+ * allocates a new driver iocb object from the iocb pool. If the
+ * allocation is successful, it returns a pointer to the newly
+ * allocated iocb object; otherwise it returns NULL.
+ **/
static struct lpfc_iocbq *
__lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
@@ -94,6 +120,15 @@ __lpfc_sli_get_iocbq(struct lpfc_hba *phba)
return iocbq;
}
+/**
+ * lpfc_sli_get_iocbq: Allocates an iocb object from iocb pool.
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called with no lock held. This function
+ * allocates a new driver iocb object from the iocb pool. If the
+ * allocation is successful, it returns a pointer to the newly
+ * allocated iocb object; otherwise it returns NULL.
+ **/
struct lpfc_iocbq *
lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
@@ -106,6 +141,16 @@ lpfc_sli_get_iocbq(struct lpfc_hba *phba)
return iocbq;
}
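The get/release helpers above and below follow the driver's usual locking convention: the double-underscore variant assumes phba->hbalock is already held, and the public wrapper simply brackets it with the lock. A compressed sketch of that wrapper shape (illustrative only; the wrapper name is made up):

/* Illustrative sketch of the lock-wrapper pattern used by these helpers. */
static struct lpfc_iocbq *example_get_iocbq(struct lpfc_hba *phba)
{
	struct lpfc_iocbq *iocbq;
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	iocbq = __lpfc_sli_get_iocbq(phba);	/* hbalock must be held here */
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return iocbq;
}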
+/**
+ * __lpfc_sli_release_iocbq: Release iocb to the iocb pool.
+ * @phba: Pointer to HBA context object.
+ * @iocbq: Pointer to driver iocb object.
+ *
+ * This function is called with hbalock held to release driver
+ * iocb object to the iocb pool. The iotag in the iocb object
+ * does not change for each use of the iocb object. This function
+ * clears all other fields of the iocb object when it is freed.
+ **/
static void
__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
@@ -118,6 +163,14 @@ __lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}
+/**
+ * lpfc_sli_release_iocbq: Release iocb to the iocb pool.
+ * @phba: Pointer to HBA context object.
+ * @iocbq: Pointer to driver iocb object.
+ *
+ * This function is called with no lock held to release the iocb to
+ * iocb pool.
+ **/
void
lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
@@ -131,10 +184,21 @@ lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
spin_unlock_irqrestore(&phba->hbalock, iflags);
}
-/*
- * Translate the iocb command to an iocb command type used to decide the final
- * disposition of each completed IOCB.
- */
+/**
+ * lpfc_sli_iocb_cmd_type: Get the iocb type.
+ * @iocb_cmnd : iocb command code.
+ *
+ * This function is called by ring event handler function to get the iocb type.
+ * This function translates the iocb command to an iocb command type used to
+ * decide the final disposition of each completed IOCB.
+ * The function returns
+ * LPFC_UNKNOWN_IOCB if it is an unsupported iocb
+ * LPFC_SOL_IOCB if it is a solicited iocb completion
+ * LPFC_ABORT_IOCB if it is an abort iocb
+ * LPFC_UNSOL_IOCB if it is an unsolicited iocb
+ *
+ * The caller is not required to hold any lock.
+ **/
static lpfc_iocb_type
lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
{
@@ -230,6 +294,17 @@ lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
return type;
}
+/**
+ * lpfc_sli_ring_map: Issue config_ring mbox for all rings.
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called from SLI initialization code
+ * to configure every ring of the HBA's SLI interface. The
+ * caller is not required to hold any lock. This function issues
+ * a config_ring mailbox command for each ring.
+ * This function returns zero if successful else returns a negative
+ * error code.
+ **/
static int
lpfc_sli_ring_map(struct lpfc_hba *phba)
{
@@ -262,6 +337,18 @@ lpfc_sli_ring_map(struct lpfc_hba *phba)
return ret;
}
+/**
+ * lpfc_sli_ringtxcmpl_put: Adds new iocb to the txcmplq.
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @piocb: Pointer to the driver iocb object.
+ *
+ * This function is called with hbalock held. The function adds the
+ * new iocb to txcmplq of the given ring. This function always returns
+ * 0. If this function is called for the ELS ring, it checks if
+ * there is a vport associated with the ELS command. This function also
+ * starts the els_tmofunc timer if this is an ELS command.
+ **/
static int
lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_iocbq *piocb)
@@ -282,6 +369,16 @@ lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
return 0;
}
+/**
+ * lpfc_sli_ringtx_get: Get first element of the txq.
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ *
+ * This function is called with hbalock held to get the next
+ * iocb in the txq of the given ring. If there is any iocb in
+ * the txq, the function returns the first iocb in the list after
+ * removing it from the list; otherwise it returns NULL.
+ **/
static struct lpfc_iocbq *
lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
@@ -293,14 +390,25 @@ lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
return cmd_iocb;
}
+/**
+ * lpfc_sli_next_iocb_slot: Get next iocb slot in the ring.
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ *
+ * This function is called with hbalock held and the caller must post the
+ * iocb without releasing the lock. If the caller releases the lock,
+ * the iocb slot returned by the function is not guaranteed to be available.
+ * The function returns a pointer to the next available iocb slot if there
+ * is an available slot in the ring; otherwise it returns NULL.
+ * If the get index of the ring is ahead of the put index, the function
+ * will post an error attention event to the worker thread to take the
+ * HBA to offline state.
+ **/
static IOCB_t *
lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
- struct lpfc_pgp *pgp = (phba->sli_rev == 3) ?
- &phba->slim2p->mbx.us.s3_pgp.port[pring->ringno] :
- &phba->slim2p->mbx.us.s2.port[pring->ringno];
+ struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
uint32_t max_cmd_idx = pring->numCiocb;
-
if ((pring->next_cmdidx == pring->cmdidx) &&
(++pring->next_cmdidx >= max_cmd_idx))
pring->next_cmdidx = 0;
@@ -336,6 +444,18 @@ lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
return lpfc_cmd_iocb(phba, pring);
}
+/**
+ * lpfc_sli_next_iotag: Get an iotag for the iocb.
+ * @phba: Pointer to HBA context object.
+ * @iocbq: Pointer to driver iocb object.
+ *
+ * This function gets an iotag for the iocb. If there is no unused iotag and
+ * the iocbq_lookup_len < 0xffff, this function allocates a bigger iotag_lookup
+ * array and assigns a new iotag.
+ * The function returns the allocated iotag if successful, else returns zero.
+ * Zero is not a valid iotag.
+ * The caller is not required to hold any lock.
+ **/
uint16_t
lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
@@ -399,6 +519,20 @@ lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
return 0;
}
+/**
+ * lpfc_sli_submit_iocb: Submit an iocb to the firmware.
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @iocb: Pointer to iocb slot in the ring.
+ * @nextiocb: Pointer to driver iocb object which needs to be
+ * posted to firmware.
+ *
+ * This function is called with hbalock held to post a new iocb to
+ * the firmware. This function copies the new iocb to the ring iocb slot and
+ * updates the ring pointers. It adds the new iocb to txcmplq if there is
+ * a completion callback for this iocb; otherwise the function will free the
+ * iocb object.
+ **/
static void
lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
@@ -441,6 +575,18 @@ lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
writel(pring->cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
}
+/**
+ * lpfc_sli_update_full_ring: Update the chip attention register.
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ *
+ * The caller is not required to hold any lock for calling this function.
+ * This function updates the chip attention bits for the ring to inform firmware
+ * that there is pending work to be done for this ring and requests an
+ * interrupt when there is space available in the ring. This function is
+ * called when the driver is unable to post more iocbs to the ring due
+ * to unavailability of space in the ring.
+ **/
static void
lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
@@ -460,6 +606,15 @@ lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
pring->stats.iocb_cmd_full++;
}
+/**
+ * lpfc_sli_update_ring: Update chip attention register.
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ *
+ * This function updates the chip attention register bit for the
+ * given ring to inform the HBA that there is more work to be done
+ * in this ring. The caller is not required to hold any lock.
+ **/
static void
lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
@@ -468,11 +623,22 @@ lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
/*
* Tell the HBA that there is work to do in this ring.
*/
- wmb();
- writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
- readl(phba->CAregaddr); /* flush */
+ if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) {
+ wmb();
+ writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
+ readl(phba->CAregaddr); /* flush */
+ }
}
+/**
+ * lpfc_sli_resume_iocb: Process iocbs in the txq.
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ *
+ * This function is called with hbalock held to post pending iocbs
+ * in the txq to the firmware. This function is called when the driver
+ * detects space available in the ring.
+ **/
static void
lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
@@ -504,6 +670,16 @@ lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
return;
}
+/**
+ * lpfc_sli_next_hbq_slot: Get next hbq entry for the HBQ.
+ * @phba: Pointer to HBA context object.
+ * @hbqno: HBQ number.
+ *
+ * This function is called with hbalock held to get the next
+ * available slot for the given HBQ. If there is a free slot
+ * available for the HBQ, it will return a pointer to the next available
+ * HBQ entry; otherwise it will return NULL.
+ **/
static struct lpfc_hbq_entry *
lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
{
@@ -539,6 +715,15 @@ lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
hbqp->hbqPutIdx;
}
+/**
+ * lpfc_sli_hbqbuf_free_all: Free all the hbq buffers.
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called with no lock held to free all the
+ * hbq buffers while uninitializing the SLI interface. It also
+ * frees the HBQ buffers returned by the firmware but not yet
+ * processed by the upper layers.
+ **/
void
lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
{
@@ -584,6 +769,18 @@ lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
spin_unlock_irqrestore(&phba->hbalock, flags);
}
+/**
+ * lpfc_sli_hbq_to_firmware: Post the hbq buffer to firmware.
+ * @phba: Pointer to HBA context object.
+ * @hbqno: HBQ number.
+ * @hbq_buf: Pointer to HBQ buffer.
+ *
+ * This function is called with the hbalock held to post an
+ * hbq buffer to the firmware. If the function finds an empty
+ * slot in the HBQ, it will post the buffer. The function will return a
+ * pointer to the hbq entry if it successfully posts the buffer;
+ * otherwise it will return NULL.
+ **/
static struct lpfc_hbq_entry *
lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
struct hbq_dmabuf *hbq_buf)
@@ -612,6 +809,7 @@ lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
return hbqe;
}
+/* HBQ for ELS and CT traffic. */
static struct lpfc_hbq_init lpfc_els_hbq = {
.rn = 1,
.entry_count = 200,
@@ -623,6 +821,7 @@ static struct lpfc_hbq_init lpfc_els_hbq = {
.add_count = 5,
};
+/* HBQ for the extra ring if needed */
static struct lpfc_hbq_init lpfc_extra_hbq = {
.rn = 1,
.entry_count = 200,
@@ -634,51 +833,81 @@ static struct lpfc_hbq_init lpfc_extra_hbq = {
.add_count = 5,
};
+/* Array of HBQs */
struct lpfc_hbq_init *lpfc_hbq_defs[] = {
&lpfc_els_hbq,
&lpfc_extra_hbq,
};
+/**
+ * lpfc_sli_hbqbuf_fill_hbqs: Post more hbq buffers to HBQ.
+ * @phba: Pointer to HBA context object.
+ * @hbqno: HBQ number.
+ * @count: Number of HBQ buffers to be posted.
+ *
+ * This function is called with no lock held to post more hbq buffers to the
+ * given HBQ. The function returns the number of HBQ buffers successfully
+ * posted.
+ **/
static int
lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
{
- uint32_t i, start, end;
+ uint32_t i, posted = 0;
unsigned long flags;
struct hbq_dmabuf *hbq_buffer;
-
+ LIST_HEAD(hbq_buf_list);
if (!phba->hbqs[hbqno].hbq_alloc_buffer)
return 0;
- start = phba->hbqs[hbqno].buffer_count;
- end = count + start;
- if (end > lpfc_hbq_defs[hbqno]->entry_count)
- end = lpfc_hbq_defs[hbqno]->entry_count;
-
+ if ((phba->hbqs[hbqno].buffer_count + count) >
+ lpfc_hbq_defs[hbqno]->entry_count)
+ count = lpfc_hbq_defs[hbqno]->entry_count -
+ phba->hbqs[hbqno].buffer_count;
+ if (!count)
+ return 0;
+ /* Allocate HBQ entries */
+ for (i = 0; i < count; i++) {
+ hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
+ if (!hbq_buffer)
+ break;
+ list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list);
+ }
/* Check whether HBQ is still in use */
spin_lock_irqsave(&phba->hbalock, flags);
if (!phba->hbq_in_use)
- goto out;
-
- /* Populate HBQ entries */
- for (i = start; i < end; i++) {
- hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
- if (!hbq_buffer)
- goto err;
- hbq_buffer->tag = (i | (hbqno << 16));
- if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
+ goto err;
+ while (!list_empty(&hbq_buf_list)) {
+ list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
+ dbuf.list);
+ hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count |
+ (hbqno << 16));
+ if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
phba->hbqs[hbqno].buffer_count++;
- else
+ posted++;
+ } else
(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
}
-
- out:
spin_unlock_irqrestore(&phba->hbalock, flags);
- return 0;
- err:
+ return posted;
+err:
spin_unlock_irqrestore(&phba->hbalock, flags);
- return 1;
+ while (!list_empty(&hbq_buf_list)) {
+ list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
+ dbuf.list);
+ (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
+ }
+ return 0;
}
+/**
+ * lpfc_sli_hbqbuf_add_hbqs: Post more HBQ buffers to firmware.
+ * @phba: Pointer to HBA context object.
+ * @qno: HBQ number.
+ *
+ * This function posts more buffers to the HBQ. This function
+ * is called with no lock held. The function returns the number of HBQ entries
+ * successfully posted.
+ **/
int
lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
{
@@ -686,6 +915,15 @@ lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
lpfc_hbq_defs[qno]->add_count));
}
+/**
+ * lpfc_sli_hbqbuf_init_hbqs: Post initial buffers to the HBQ.
+ * @phba: Pointer to HBA context object.
+ * @qno: HBQ queue number.
+ *
+ * This function is called from SLI initialization code path with
+ * no lock held to post initial HBQ buffers to firmware. The
+ * function returns the number of HBQ entries successfully posted.
+ **/
static int
lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
{
@@ -693,6 +931,16 @@ lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
lpfc_hbq_defs[qno]->init_count));
}
+/**
+ * lpfc_sli_hbqbuf_find: Find the hbq buffer associated with a tag.
+ * @phba: Pointer to HBA context object.
+ * @tag: Tag of the hbq buffer.
+ *
+ * This function is called with hbalock held. This function searches
+ * for the hbq buffer associated with the given tag in the hbq buffer
+ * list. If it finds the hbq buffer, it returns the hbq_buffer; otherwise
+ * it returns NULL.
+ **/
static struct hbq_dmabuf *
lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
{
@@ -716,6 +964,15 @@ lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
return NULL;
}
+/**
+ * lpfc_sli_free_hbq: Give back the hbq buffer to firmware.
+ * @phba: Pointer to HBA context object.
+ * @hbq_buffer: Pointer to HBQ buffer.
+ *
+ * This function is called with hbalock held. It gives back
+ * the hbq buffer to firmware. If the HBQ does not have space to
+ * post the buffer, it will free the buffer.
+ **/
void
lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
{
@@ -729,6 +986,15 @@ lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
}
}
+/**
+ * lpfc_sli_chk_mbx_command: Check if the mailbox is a legitimate mailbox.
+ * @mbxCommand: mailbox command code.
+ *
+ * This function is called by the mailbox event handler function to verify
+ * that the completed mailbox command is a legitimate mailbox command. If the
+ * completed mailbox is not known to the function, it will return MBX_SHUTDOWN
+ * and the mailbox event handler will take the HBA offline.
+ **/
static int
lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
{
@@ -785,6 +1051,8 @@ lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
case MBX_REG_VPI:
case MBX_UNREG_VPI:
case MBX_HEARTBEAT:
+ case MBX_PORT_CAPABILITIES:
+ case MBX_PORT_IOV_CONTROL:
ret = mbxCommand;
break;
default:
@@ -793,6 +1061,19 @@ lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
}
return ret;
}
+
+/**
+ * lpfc_sli_wake_mbox_wait: Completion handler for mbox issued from
+ * lpfc_sli_issue_mbox_wait.
+ * @phba: Pointer to HBA context object.
+ * @pmboxq: Pointer to mailbox command.
+ *
+ * This is completion handler function for mailbox commands issued from
+ * lpfc_sli_issue_mbox_wait function. This function is called by the
+ * mailbox event handler function with no lock held. This function
+ * will wake up the thread waiting on the wait queue pointed to by context1
+ * of the mailbox.
+ **/
static void
lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
@@ -812,6 +1093,17 @@ lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
return;
}
+
+/**
+ * lpfc_sli_def_mbox_cmpl: Default mailbox completion handler.
+ * @phba: Pointer to HBA context object.
+ * @pmb: Pointer to mailbox object.
+ *
+ * This function is the default mailbox completion handler. It
+ * frees the memory resources associated with the completed mailbox
+ * command. If the completed command is a REG_LOGIN mailbox command,
+ * this function will issue a UREG_LOGIN to re-claim the RPI.
+ **/
void
lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
@@ -846,6 +1138,19 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
return;
}
+/**
+ * lpfc_sli_handle_mb_event: Handle mailbox completions from firmware.
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called with no lock held. This function processes all
+ * the completed mailbox commands and gives them to the upper layers. The
+ * interrupt service routine processes the mailbox completion interrupt and
+ * adds completed mailbox commands to the mboxq_cmpl queue and signals the
+ * worker thread. The worker thread calls lpfc_sli_handle_mb_event, which
+ * returns the completed mailbox commands in the mboxq_cmpl queue to the
+ * upper layers. This function returns the mailbox commands to the upper
+ * layers by calling the completion handler function of each mailbox.
+ **/
int
lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
{
@@ -953,6 +1258,18 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
return 0;
}
+/**
+ * lpfc_sli_replace_hbqbuff: Replace the HBQ buffer with a new buffer.
+ * @phba: Pointer to HBA context object.
+ * @tag: Tag for the HBQ buffer.
+ *
+ * This function is called from unsolicited event handler code path to get the
+ * HBQ buffer associated with an unsolicited iocb. This function is called with
+ * no lock held. It returns the buffer associated with the given tag and posts
+ * another buffer to the firmware. Note that the new buffer must be allocated
+ * before taking the hbalock and that the hba lock must be held until it is
+ * finished with the hbq entry swap.
+ **/
static struct lpfc_dmabuf *
lpfc_sli_replace_hbqbuff(struct lpfc_hba *phba, uint32_t tag)
{
@@ -962,22 +1279,28 @@ lpfc_sli_replace_hbqbuff(struct lpfc_hba *phba, uint32_t tag)
dma_addr_t phys; /* mapped address */
unsigned long flags;
+ hbqno = tag >> 16;
+ new_hbq_entry = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
/* Check whether HBQ is still in use */
spin_lock_irqsave(&phba->hbalock, flags);
if (!phba->hbq_in_use) {
+ if (new_hbq_entry)
+ (phba->hbqs[hbqno].hbq_free_buffer)(phba,
+ new_hbq_entry);
spin_unlock_irqrestore(&phba->hbalock, flags);
return NULL;
}
hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
if (hbq_entry == NULL) {
+ if (new_hbq_entry)
+ (phba->hbqs[hbqno].hbq_free_buffer)(phba,
+ new_hbq_entry);
spin_unlock_irqrestore(&phba->hbalock, flags);
return NULL;
}
list_del(&hbq_entry->dbuf.list);
- hbqno = tag >> 16;
- new_hbq_entry = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
if (new_hbq_entry == NULL) {
list_add_tail(&hbq_entry->dbuf.list, &phba->hbqbuf_in_list);
spin_unlock_irqrestore(&phba->hbalock, flags);
@@ -997,6 +1320,18 @@ lpfc_sli_replace_hbqbuff(struct lpfc_hba *phba, uint32_t tag)
return &new_hbq_entry->dbuf;
}
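A minimal pthread sketch of the ordering the comment above insists on: allocate the replacement buffer before taking the lock, free it on the error paths, and only swap pointers while the lock is held. The names (replace_buffer, hbq_in_use) are illustrative userspace stand-ins, not the driver's code.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static void *current_buf;      /* stands in for the posted HBQ entry */
static int hbq_in_use = 1;

/* Allocate the replacement before taking the lock, then swap or bail out
 * while the lock is held.  A pattern sketch, not driver code. */
static void *replace_buffer(void)
{
        void *newb = malloc(64);           /* allocation happens unlocked */
        void *oldb;

        pthread_mutex_lock(&lock);
        if (!hbq_in_use || !current_buf || !newb) {
                pthread_mutex_unlock(&lock);
                free(newb);                /* error path: drop the new buffer */
                return NULL;
        }
        oldb = current_buf;                /* take the posted buffer ... */
        current_buf = newb;                /* ... and post the replacement */
        pthread_mutex_unlock(&lock);
        return oldb;
}

int main(void)
{
        current_buf = malloc(64);
        void *b = replace_buffer();
        printf("got %p, reposted %p\n", b, current_buf);
        free(b);
        free(current_buf);
        return 0;
}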
+/**
+ * lpfc_sli_get_buff: Get the buffer associated with the buffer tag.
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @tag: buffer tag.
+ *
+ * This function is called with no lock held. When the QUE_BUFTAG_BIT bit
+ * is set in the tag, the buffer was posted for a particular exchange and
+ * the function will return the buffer without replacing it.
+ * If the buffer is for unsolicited ELS or CT traffic, this function
+ * returns the buffer and also posts another buffer to the firmware.
+ **/
static struct lpfc_dmabuf *
lpfc_sli_get_buff(struct lpfc_hba *phba,
struct lpfc_sli_ring *pring,
@@ -1008,6 +1343,21 @@ lpfc_sli_get_buff(struct lpfc_hba *phba,
return lpfc_sli_replace_hbqbuff(phba, tag);
}
+
+/**
+ * lpfc_sli_process_unsol_iocb: Unsolicited iocb handler.
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @saveq: Pointer to the unsolicited iocb.
+ *
+ * This function is called with no lock held by the ring event handler
+ * when there is an unsolicited iocb posted to the response ring by the
+ * firmware. This function gets the buffer associated with the iocbs
+ * and calls the event handler for the ring. This function handles both
+ * qring buffers and hbq buffers.
+ * When the function returns 1, the caller can free the iocb object; otherwise
+ * upper layer functions will free the iocb objects.
+ **/
static int
lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_iocbq *saveq)
@@ -1192,6 +1542,18 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
return 1;
}
+/**
+ * lpfc_sli_iocbq_lookup: Find command iocb for the given response iocb.
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @prspiocb: Pointer to response iocb object.
+ *
+ * This function looks up the iocb_lookup table to get the command iocb
+ * corresponding to the given response iocb using the iotag of the
+ * response iocb. This function is called with the hbalock held.
+ * This function returns the command iocb object if it finds the command
+ * iocb, else it returns NULL.
+ **/
static struct lpfc_iocbq *
lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
struct lpfc_sli_ring *pring,
@@ -1217,6 +1579,23 @@ lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
return NULL;
}
+/**
+ * lpfc_sli_process_sol_iocb: process solicited iocb completion.
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @saveq: Pointer to the response iocb to be processed.
+ *
+ * This function is called by the ring event handler for non-fcp
+ * rings when there is a new response iocb in the response ring.
+ * The caller is not required to hold any locks. This function
+ * gets the command iocb associated with the response iocb and
+ * calls the completion handler for the command iocb. If there
+ * is no completion handler, the function will free the resources
+ * associated with command iocb. If the response iocb is for
+ * an already aborted command iocb, the status of the completion
+ * is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED.
+ * This function always returns 1.
+ **/
static int
lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_iocbq *saveq)
@@ -1233,6 +1612,17 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
if (cmdiocbp) {
if (cmdiocbp->iocb_cmpl) {
/*
+ * If an ELS command failed send an event to mgmt
+ * application.
+ */
+ if (saveq->iocb.ulpStatus &&
+ (pring->ringno == LPFC_ELS_RING) &&
+ (cmdiocbp->iocb.ulpCommand ==
+ CMD_ELS_REQUEST64_CR))
+ lpfc_send_els_failure_event(phba,
+ cmdiocbp, saveq);
+
+ /*
* Post all ELS completions to the worker thread.
* All other are passed to the completion callback.
*/
@@ -1282,12 +1672,20 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
return rc;
}
+/**
+ * lpfc_sli_rsp_pointers_error: Response ring pointer error handler.
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ *
+ * This function is called from the iocb ring event handlers when
+ * the put pointer is ahead of the get pointer for a ring. This function
+ * signals an error attention condition to the worker thread, and the
+ * worker thread will transition the HBA to offline state.
+ **/
static void
lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
- struct lpfc_pgp *pgp = (phba->sli_rev == 3) ?
- &phba->slim2p->mbx.us.s3_pgp.port[pring->ringno] :
- &phba->slim2p->mbx.us.s2.port[pring->ringno];
+ struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
/*
* Ring <ringno> handler: portRspPut <portRspPut> is bigger then
* rsp ring <portRspMax>
@@ -1312,6 +1710,51 @@ lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
return;
}
+/**
+ * lpfc_poll_eratt: Error attention polling timer timeout handler.
+ * @ptr: Pointer to address of HBA context object.
+ *
+ * This function is invoked by the Error Attention polling timer when the
+ * timer times out. It will check the SLI Error Attention register for
+ * possible attention events. If so, it will post an Error Attention event
+ * and wake up worker thread to process it. Otherwise, it will set up the
+ * Error Attention polling timer for the next poll.
+ **/
+void lpfc_poll_eratt(unsigned long ptr)
+{
+ struct lpfc_hba *phba;
+ uint32_t eratt = 0;
+
+ phba = (struct lpfc_hba *)ptr;
+
+ /* Check chip HA register for error event */
+ eratt = lpfc_sli_check_eratt(phba);
+
+ if (eratt)
+ /* Tell the worker thread there is work to do */
+ lpfc_worker_wake_up(phba);
+ else
+ /* Restart the timer for next eratt poll */
+ mod_timer(&phba->eratt_poll, jiffies +
+ HZ * LPFC_ERATT_POLL_INTERVAL);
+ return;
+}
+
+/**
+ * lpfc_sli_poll_fcp_ring: Handle FCP ring completion in polling mode.
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called from lpfc_queuecommand, lpfc_poll_timeout,
+ * lpfc_abort_handler and lpfc_slave_configure when FCP_RING_POLLING
+ * is enabled.
+ *
+ * The caller does not hold any lock.
+ * The function processes each response iocb in the response ring until it
+ * finds an iocb with the LE bit set and chains all the iocbs up to the iocb
+ * with the LE bit set. The function will call the completion handler of the
+ * command iocb
+ * if the response iocb indicates a completion for a command iocb or it is
+ * an abort completion.
+ **/
void lpfc_sli_poll_fcp_ring(struct lpfc_hba *phba)
{
struct lpfc_sli *psli = &phba->sli;
@@ -1320,7 +1763,7 @@ void lpfc_sli_poll_fcp_ring(struct lpfc_hba *phba)
IOCB_t *entry = NULL;
struct lpfc_iocbq *cmdiocbq = NULL;
struct lpfc_iocbq rspiocbq;
- struct lpfc_pgp *pgp;
+ struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
uint32_t status;
uint32_t portRspPut, portRspMax;
int type;
@@ -1330,11 +1773,6 @@ void lpfc_sli_poll_fcp_ring(struct lpfc_hba *phba)
pring->stats.iocb_event++;
- pgp = (phba->sli_rev == 3) ?
- &phba->slim2p->mbx.us.s3_pgp.port[pring->ringno] :
- &phba->slim2p->mbx.us.s2.port[pring->ringno];
-
-
/*
* The next available response entry should never exceed the maximum
* entries. If it does, treat it as an adapter hardware error.
@@ -1372,8 +1810,8 @@ void lpfc_sli_poll_fcp_ring(struct lpfc_hba *phba)
irsp->un.ulpWord[3],
irsp->un.ulpWord[4],
irsp->un.ulpWord[5],
- *(((uint32_t *) irsp) + 6),
- *(((uint32_t *) irsp) + 7));
+ *(uint32_t *)&irsp->un1,
+ *((uint32_t *)&irsp->un1 + 1));
}
switch (type) {
@@ -1465,17 +1903,28 @@ void lpfc_sli_poll_fcp_ring(struct lpfc_hba *phba)
return;
}
-/*
+/**
+ * lpfc_sli_handle_fast_ring_event: Handle ring events on FCP ring.
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @mask: Host attention register mask for this ring.
+ *
+ * This function is called from the interrupt context when there is a ring
+ * event for the fcp ring. The caller does not hold any lock.
+ * The function processes each response iocb in the response ring until it
+ * finds an iocb with the LE bit set and chains all the iocbs up to the iocb
+ * with the LE bit set. The function will call the completion handler of the
+ * command iocb
+ * if the response iocb indicates a completion for a command iocb or it is
+ * an abort completion. The function will call lpfc_sli_process_unsol_iocb
+ * function if this is an unsolicited iocb.
* This routine presumes LPFC_FCP_RING handling and doesn't bother
- * to check it explicitly.
- */
+ * to check it explicitly. This function always returns 1.
+ **/
static int
lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
struct lpfc_sli_ring *pring, uint32_t mask)
{
- struct lpfc_pgp *pgp = (phba->sli_rev == 3) ?
- &phba->slim2p->mbx.us.s3_pgp.port[pring->ringno] :
- &phba->slim2p->mbx.us.s2.port[pring->ringno];
+ struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
IOCB_t *irsp = NULL;
IOCB_t *entry = NULL;
struct lpfc_iocbq *cmdiocbq = NULL;
@@ -1548,8 +1997,8 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
irsp->un.ulpWord[3],
irsp->un.ulpWord[4],
irsp->un.ulpWord[5],
- *(((uint32_t *) irsp) + 6),
- *(((uint32_t *) irsp) + 7));
+ *(uint32_t *)&irsp->un1,
+ *((uint32_t *)&irsp->un1 + 1));
}
switch (type) {
@@ -1646,13 +2095,28 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
return rc;
}
+/**
+ * lpfc_sli_handle_slow_ring_event: Handle ring events for non-FCP rings.
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @mask: Host attention register mask for this ring.
+ *
+ * This function is called from the worker thread when there is a ring
+ * event for non-fcp rings. The caller does not hold any lock.
+ * The function processes each response iocb in the response ring until it
+ * finds an iocb with the LE bit set and chains all the iocbs up to the iocb
+ * with the LE bit set. The function will call lpfc_sli_process_sol_iocb if the
+ * response iocb indicates a completion of a command iocb. The function
+ * will call lpfc_sli_process_unsol_iocb function if this is an unsolicited
+ * iocb. The function frees the resources or calls the completion handler if
+ * this iocb is an abort completion. The function returns 0 when the allocated
+ * iocbs are not freed, otherwise returns 1.
+ **/
int
lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
struct lpfc_sli_ring *pring, uint32_t mask)
{
- struct lpfc_pgp *pgp = (phba->sli_rev == 3) ?
- &phba->slim2p->mbx.us.s3_pgp.port[pring->ringno] :
- &phba->slim2p->mbx.us.s2.port[pring->ringno];
+ struct lpfc_pgp *pgp;
IOCB_t *entry;
IOCB_t *irsp = NULL;
struct lpfc_iocbq *rspiocbp = NULL;
@@ -1666,6 +2130,7 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
int rc = 1;
unsigned long iflag;
+ pgp = &phba->port_gp[pring->ringno];
spin_lock_irqsave(&phba->hbalock, iflag);
pring->stats.iocb_event++;
@@ -1904,6 +2369,16 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
return rc;
}
+/**
+ * lpfc_sli_abort_iocb_ring: Abort all iocbs in the ring.
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ *
+ * This function aborts all iocbs in the given ring and frees all the iocb
+ * objects in txq. This function issues an abort iocb for all the iocb commands
+ * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
+ * the return of this function. The caller is not required to hold any locks.
+ **/
void
lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
@@ -1943,6 +2418,83 @@ lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
}
}
+/**
+ * lpfc_sli_flush_fcp_rings: flush all iocbs in the fcp ring.
+ * @phba: Pointer to HBA context object.
+ *
+ * This function flushes all iocbs in the fcp ring and frees all the iocb
+ * objects in txq and txcmplq. This function will not issue abort iocbs
+ * for all the iocb commands in txcmplq, they will just be returned with
+ * IOERR_SLI_DOWN. This function is invoked with EEH when device's PCI
+ * slot has been permanently disabled.
+ **/
+void
+lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
+{
+ LIST_HEAD(txq);
+ LIST_HEAD(txcmplq);
+ struct lpfc_iocbq *iocb;
+ IOCB_t *cmd = NULL;
+ struct lpfc_sli *psli = &phba->sli;
+ struct lpfc_sli_ring *pring;
+
+ /* Currently, only one fcp ring */
+ pring = &psli->ring[psli->fcp_ring];
+
+ spin_lock_irq(&phba->hbalock);
+ /* Retrieve everything on txq */
+ list_splice_init(&pring->txq, &txq);
+ pring->txq_cnt = 0;
+
+ /* Retrieve everything on the txcmplq */
+ list_splice_init(&pring->txcmplq, &txcmplq);
+ pring->txcmplq_cnt = 0;
+ spin_unlock_irq(&phba->hbalock);
+
+ /* Flush the txq */
+ while (!list_empty(&txq)) {
+ iocb = list_get_first(&txq, struct lpfc_iocbq, list);
+ cmd = &iocb->iocb;
+ list_del_init(&iocb->list);
+
+ if (!iocb->iocb_cmpl)
+ lpfc_sli_release_iocbq(phba, iocb);
+ else {
+ cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
+ cmd->un.ulpWord[4] = IOERR_SLI_DOWN;
+ (iocb->iocb_cmpl) (phba, iocb, iocb);
+ }
+ }
+
+ /* Flush the txcmpq */
+ while (!list_empty(&txcmplq)) {
+ iocb = list_get_first(&txcmplq, struct lpfc_iocbq, list);
+ cmd = &iocb->iocb;
+ list_del_init(&iocb->list);
+
+ if (!iocb->iocb_cmpl)
+ lpfc_sli_release_iocbq(phba, iocb);
+ else {
+ cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
+ cmd->un.ulpWord[4] = IOERR_SLI_DOWN;
+ (iocb->iocb_cmpl) (phba, iocb, iocb);
+ }
+ }
+}
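The flush routine above detaches both queues while holding the lock and then completes each entry outside the lock. A small userspace sketch of that splice-then-drain shape follows; struct io, flush_all and the single list are illustrative, not driver structures.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct io {
        struct io *next;
        int tag;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct io *txq;          /* stand-in for pring->txq */

/* Detach the whole queue under the lock, then fail each entry unlocked. */
static void flush_all(void)
{
        struct io *list, *io;

        pthread_mutex_lock(&lock);
        list = txq;
        txq = NULL;
        pthread_mutex_unlock(&lock);

        while (list) {
                io = list;
                list = io->next;
                printf("failing iocb %d with a local-reject status\n", io->tag);
                free(io);
        }
}

int main(void)
{
        for (int i = 0; i < 3; i++) {
                struct io *io = malloc(sizeof(*io));
                io->tag = i;
                io->next = txq;
                txq = io;
        }
        flush_all();
        return 0;
}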
+
+/**
+ * lpfc_sli_brdready: Check for host status bits.
+ * @phba: Pointer to HBA context object.
+ * @mask: Bit mask to be checked.
+ *
+ * This function reads the host status register and compares it
+ * with the provided bit mask to check if the HBA completed
+ * the restart. This function will wait in a loop for the
+ * HBA to complete the restart. If the HBA does not restart within
+ * 15 iterations, the function will reset the HBA again. The
+ * function returns 1 when the HBA fails to restart, otherwise it
+ * returns zero.
+ **/
int
lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
{
@@ -1990,6 +2542,13 @@ lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
#define BARRIER_TEST_PATTERN (0xdeadbeef)
+/**
+ * lpfc_reset_barrier: Make HBA ready for HBA reset.
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called before resetting an HBA. This
+ * function requests HBA to quiesce DMAs before a reset.
+ **/
void lpfc_reset_barrier(struct lpfc_hba *phba)
{
uint32_t __iomem *resp_buf;
@@ -2063,6 +2622,17 @@ restore_hc:
readl(phba->HCregaddr); /* flush */
}
+/**
+ * lpfc_sli_brdkill: Issue a kill_board mailbox command.
+ * @phba: Pointer to HBA context object.
+ *
+ * This function issues a kill_board mailbox command and waits for
+ * the error attention interrupt. This function is called for stopping
+ * the firmware processing. The caller is not required to hold any
+ * locks. This function calls lpfc_hba_down_post function to free
+ * any pending commands after the kill. The function will return 1 when it
+ * fails to kill the board, otherwise it will return 0.
+ **/
int
lpfc_sli_brdkill(struct lpfc_hba *phba)
{
@@ -2139,6 +2709,17 @@ lpfc_sli_brdkill(struct lpfc_hba *phba)
return ha_copy & HA_ERATT ? 0 : 1;
}
+/**
+ * lpfc_sli_brdreset: Reset the HBA.
+ * @phba: Pointer to HBA context object.
+ *
+ * This function resets the HBA by writing HC_INITFF to the control
+ * register. After the HBA resets, this function resets all the iocb ring
+ * indices. This function disables PCI layer parity checking during
+ * the reset.
+ * This function returns 0 always.
+ * The caller is not required to hold any locks.
+ **/
int
lpfc_sli_brdreset(struct lpfc_hba *phba)
{
@@ -2191,6 +2772,19 @@ lpfc_sli_brdreset(struct lpfc_hba *phba)
return 0;
}
+/**
+ * lpfc_sli_brdrestart: Restart the HBA.
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called in the SLI initialization code path to
+ * restart the HBA. The caller is not required to hold any lock.
+ * This function writes MBX_RESTART mailbox command to the SLIM and
+ * resets the HBA. At the end of the function, it calls lpfc_hba_down_post
+ * function to free any pending commands. The function enables
+ * POST only during the first initialization. The function returns zero.
+ * The function does not guarantee completion of the MBX_RESTART mailbox
+ * command before it returns.
+ **/
int
lpfc_sli_brdrestart(struct lpfc_hba *phba)
{
@@ -2251,6 +2845,16 @@ lpfc_sli_brdrestart(struct lpfc_hba *phba)
return 0;
}
+/**
+ * lpfc_sli_chipset_init: Wait for the restart of the HBA after a restart.
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called after an HBA restart to wait for successful
+ * restart of the HBA. Successful restart of the HBA is indicated by
+ * the HS_FFRDY and HS_MBRDY bits. If the HBA fails to restart even after 15
+ * iterations, the function will restart the HBA again. The function returns
+ * zero if the HBA successfully restarted, else it returns a negative
+ * error code.
+ **/
static int
lpfc_sli_chipset_init(struct lpfc_hba *phba)
{
@@ -2336,12 +2940,25 @@ lpfc_sli_chipset_init(struct lpfc_hba *phba)
return 0;
}
+/**
+ * lpfc_sli_hbq_count: Get the number of HBQs to be configured.
+ *
+ * This function calculates and returns the number of HBQs required to be
+ * configured.
+ **/
int
lpfc_sli_hbq_count(void)
{
return ARRAY_SIZE(lpfc_hbq_defs);
}
+/**
+ * lpfc_sli_hbq_entry_count: Calculate total number of hbq entries.
+ *
+ * This function adds the number of hbq entries in every HBQ to get
+ * the total number of hbq entries required for the HBA and returns
+ * the total count.
+ **/
static int
lpfc_sli_hbq_entry_count(void)
{
@@ -2354,12 +2971,27 @@ lpfc_sli_hbq_entry_count(void)
return count;
}
+/**
+ * lpfc_sli_hbq_size: Calculate memory required for all hbq entries.
+ *
+ * This function calculates amount of memory required for all hbq entries
+ * to be configured and returns the total memory required.
+ **/
int
lpfc_sli_hbq_size(void)
{
return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry);
}
+/**
+ * lpfc_sli_hbq_setup: configure and initialize HBQs.
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called during the SLI initialization to configure
+ * all the HBQs and post buffers to the HBQ. The caller is not
+ * required to hold any locks. This function will return zero if successful,
+ * else it will return a negative error code.
+ **/
static int
lpfc_sli_hbq_setup(struct lpfc_hba *phba)
{
@@ -2415,15 +3047,26 @@ lpfc_sli_hbq_setup(struct lpfc_hba *phba)
mempool_free(pmb, phba->mbox_mem_pool);
/* Initially populate or replenish the HBQs */
- for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
- if (lpfc_sli_hbqbuf_init_hbqs(phba, hbqno))
- return -ENOMEM;
- }
+ for (hbqno = 0; hbqno < hbq_count; ++hbqno)
+ lpfc_sli_hbqbuf_init_hbqs(phba, hbqno);
return 0;
}
-static int
-lpfc_do_config_port(struct lpfc_hba *phba, int sli_mode)
+/**
+ * lpfc_sli_config_port: Issue config port mailbox command.
+ * @phba: Pointer to HBA context object.
+ * @sli_mode: sli mode - 2/3
+ *
+ * This function is called by the SLI initialization code path
+ * to issue the config_port mailbox command. This function restarts the
+ * HBA firmware and issues a config_port mailbox command to configure
+ * the SLI interface in the SLI mode specified by the sli_mode
+ * parameter. The caller is not required to hold any locks.
+ * The function returns 0 if successful, else it returns a negative error
+ * code.
+ **/
+int
+lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
{
LPFC_MBOXQ_t *pmb;
uint32_t resetcount = 0, rc = 0, done = 0;
@@ -2460,13 +3103,15 @@ lpfc_do_config_port(struct lpfc_hba *phba, int sli_mode)
if (rc == -ERESTART) {
phba->link_state = LPFC_LINK_UNKNOWN;
continue;
- } else if (rc) {
+ } else if (rc)
break;
- }
-
phba->link_state = LPFC_INIT_MBX_CMDS;
lpfc_config_port(phba, pmb);
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
+ phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED |
+ LPFC_SLI3_HBQ_ENABLED |
+ LPFC_SLI3_CRP_ENABLED |
+ LPFC_SLI3_INB_ENABLED);
if (rc != MBX_SUCCESS) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0442 Adapter failed to init, mbxCmd x%x "
@@ -2476,30 +3121,64 @@ lpfc_do_config_port(struct lpfc_hba *phba, int sli_mode)
phba->sli.sli_flag &= ~LPFC_SLI2_ACTIVE;
spin_unlock_irq(&phba->hbalock);
rc = -ENXIO;
- } else {
+ } else
done = 1;
- phba->max_vpi = (phba->max_vpi &&
- pmb->mb.un.varCfgPort.gmv) != 0
- ? pmb->mb.un.varCfgPort.max_vpi
- : 0;
- }
}
-
if (!done) {
rc = -EINVAL;
goto do_prep_failed;
}
-
- if ((pmb->mb.un.varCfgPort.sli_mode == 3) &&
- (!pmb->mb.un.varCfgPort.cMA)) {
- rc = -ENXIO;
+ if (pmb->mb.un.varCfgPort.sli_mode == 3) {
+ if (!pmb->mb.un.varCfgPort.cMA) {
+ rc = -ENXIO;
+ goto do_prep_failed;
+ }
+ if (phba->max_vpi && pmb->mb.un.varCfgPort.gmv) {
+ phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
+ phba->max_vpi = pmb->mb.un.varCfgPort.max_vpi;
+ } else
+ phba->max_vpi = 0;
+ if (pmb->mb.un.varCfgPort.gerbm)
+ phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
+ if (pmb->mb.un.varCfgPort.gcrp)
+ phba->sli3_options |= LPFC_SLI3_CRP_ENABLED;
+ if (pmb->mb.un.varCfgPort.ginb) {
+ phba->sli3_options |= LPFC_SLI3_INB_ENABLED;
+ phba->port_gp = phba->mbox->us.s3_inb_pgp.port;
+ phba->inb_ha_copy = &phba->mbox->us.s3_inb_pgp.ha_copy;
+ phba->inb_counter = &phba->mbox->us.s3_inb_pgp.counter;
+ phba->inb_last_counter =
+ phba->mbox->us.s3_inb_pgp.counter;
+ } else {
+ phba->port_gp = phba->mbox->us.s3_pgp.port;
+ phba->inb_ha_copy = NULL;
+ phba->inb_counter = NULL;
+ }
+ } else {
+ phba->port_gp = phba->mbox->us.s2.port;
+ phba->inb_ha_copy = NULL;
+ phba->inb_counter = NULL;
+ phba->max_vpi = 0;
}
-
do_prep_failed:
mempool_free(pmb, phba->mbox_mem_pool);
return rc;
}
+
+/**
+ * lpfc_sli_hba_setup: SLI initialization function.
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is the main SLI initialization function. This function
+ * is called by the HBA initialization code, the HBA reset code and the HBA
+ * error attention handler code. The caller is not required to hold any
+ * locks. This function issues the config_port mailbox command to configure
+ * the SLI, sets up the iocb rings and the HBQ rings. In the end the function
+ * calls the config_port_post function to issue the init_link mailbox
+ * command and to start the discovery. The function will return zero
+ * if successful, else it will return a negative error code.
+ **/
int
lpfc_sli_hba_setup(struct lpfc_hba *phba)
{
@@ -2528,22 +3207,20 @@ lpfc_sli_hba_setup(struct lpfc_hba *phba)
break;
}
- rc = lpfc_do_config_port(phba, mode);
+ rc = lpfc_sli_config_port(phba, mode);
+
if (rc && lpfc_sli_mode == 3)
lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
"1820 Unable to select SLI-3. "
"Not supported by adapter.\n");
if (rc && mode != 2)
- rc = lpfc_do_config_port(phba, 2);
+ rc = lpfc_sli_config_port(phba, 2);
if (rc)
goto lpfc_sli_hba_setup_error;
if (phba->sli_rev == 3) {
phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
- phba->sli3_options |= LPFC_SLI3_ENABLED;
- phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
-
} else {
phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
@@ -2558,8 +3235,7 @@ lpfc_sli_hba_setup(struct lpfc_hba *phba)
if (rc)
goto lpfc_sli_hba_setup_error;
- /* Init HBQs */
-
+ /* Init HBQs */
if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
rc = lpfc_sli_hbq_setup(phba);
if (rc)
@@ -2581,19 +3257,19 @@ lpfc_sli_hba_setup_error:
return rc;
}
-/*! lpfc_mbox_timeout
- *
- * \pre
- * \post
- * \param hba Pointer to per struct lpfc_hba structure
- * \param l1 Pointer to the driver's mailbox queue.
- * \return
- * void
- *
- * \b Description:
+
+/**
+ * lpfc_mbox_timeout: Timeout call back function for mbox timer.
+ * @ptr: context object - pointer to hba structure.
*
- * This routine handles mailbox timeout events at timer interrupt context.
- */
+ * This is the callback function for the mailbox timer. The mailbox
+ * timer is armed when a new mailbox command is issued and the timer
+ * is deleted when the mailbox completes. The function is called by
+ * the kernel timer code when a mailbox does not complete within the
+ * expected time. This function wakes up the worker thread to
+ * process the mailbox timeout and returns. All the processing is
+ * done by the worker thread function lpfc_mbox_timeout_handler.
+ **/
void
lpfc_mbox_timeout(unsigned long ptr)
{
@@ -2612,6 +3288,15 @@ lpfc_mbox_timeout(unsigned long ptr)
return;
}
+
+/**
+ * lpfc_mbox_timeout_handler: Worker thread function to handle mailbox timeout.
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called from worker thread when a mailbox command times out.
+ * The caller is not required to hold any locks. This function will reset the
+ * HBA and recover all the pending commands.
+ **/
void
lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
{
@@ -2666,6 +3351,32 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
return;
}
+/**
+ * lpfc_sli_issue_mbox: Issue a mailbox command to firmware.
+ * @phba: Pointer to HBA context object.
+ * @pmbox: Pointer to mailbox object.
+ * @flag: Flag indicating how the mailbox needs to be processed.
+ *
+ * This function is called by discovery code and HBA management code
+ * to submit a mailbox command to firmware. This function gets the
+ * hbalock to protect the data structures.
+ * The mailbox command can be submitted in polling mode, in which case
+ * this function will wait in a polling loop for the completion of the
+ * mailbox.
+ * If the mailbox is submitted in no_wait mode (not polling) the
+ * function will submit the command and return immediately without waiting
+ * for the mailbox completion. The no_wait mode is supported only when the
+ * HBA is in SLI2/SLI3 mode with interrupts enabled.
+ * The SLI interface allows only one mailbox pending at a time. If the
+ * mailbox is issued in polling mode and there is already a mailbox
+ * pending, then the function will return an error. If the mailbox is issued
+ * in NO_WAIT mode and there is a mailbox pending already, the function
+ * will return MBX_BUSY after queuing the mailbox into mailbox queue.
+ * The SLI layer owns the mailbox object until the completion of the mailbox
+ * command if this function returns MBX_BUSY or MBX_SUCCESS. For all other
+ * return codes the caller owns the mailbox command after the return of
+ * the function.
+ **/
int
lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
{
@@ -2676,7 +3387,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
int i;
unsigned long timeout;
unsigned long drvr_flag = 0;
- volatile uint32_t word0, ldata;
+ uint32_t word0, ldata;
void __iomem *to_slim;
int processing_queue = 0;
@@ -2836,12 +3547,11 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
/* First copy command data to host SLIM area */
- lpfc_sli_pcimem_bcopy(mb, &phba->slim2p->mbx, MAILBOX_CMD_SIZE);
+ lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE);
} else {
if (mb->mbxCommand == MBX_CONFIG_PORT) {
/* copy command data into host mbox for cmpl */
- lpfc_sli_pcimem_bcopy(mb, &phba->slim2p->mbx,
- MAILBOX_CMD_SIZE);
+ lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE);
}
/* First copy mbox command data to HBA SLIM, skip past first
@@ -2851,7 +3561,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
MAILBOX_CMD_SIZE - sizeof (uint32_t));
/* Next copy over first word, with mbxOwner set */
- ldata = *((volatile uint32_t *)mb);
+ ldata = *((uint32_t *)mb);
to_slim = phba->MBslimaddr;
writel(ldata, to_slim);
readl(to_slim); /* flush */
@@ -2883,7 +3593,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
/* First read mbox status word */
- word0 = *((volatile uint32_t *)&phba->slim2p->mbx);
+ word0 = *((uint32_t *)phba->mbox);
word0 = le32_to_cpu(word0);
} else {
/* First read mbox status word */
@@ -2922,12 +3632,11 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
/* First copy command data */
- word0 = *((volatile uint32_t *)
- &phba->slim2p->mbx);
+ word0 = *((uint32_t *)phba->mbox);
word0 = le32_to_cpu(word0);
if (mb->mbxCommand == MBX_CONFIG_PORT) {
MAILBOX_t *slimmb;
- volatile uint32_t slimword0;
+ uint32_t slimword0;
/* Check real SLIM for any errors */
slimword0 = readl(phba->MBslimaddr);
slimmb = (MAILBOX_t *) & slimword0;
@@ -2948,8 +3657,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
/* copy results back to user */
- lpfc_sli_pcimem_bcopy(&phba->slim2p->mbx, mb,
- MAILBOX_CMD_SIZE);
+ lpfc_sli_pcimem_bcopy(phba->mbox, mb, MAILBOX_CMD_SIZE);
} else {
/* First copy command data */
lpfc_memcpy_from_slim(mb, phba->MBslimaddr,
@@ -2980,9 +3688,16 @@ out_not_finished:
return MBX_NOT_FINISHED;
}
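The submission policy the comment above spells out, one mailbox outstanding at a time with different behaviour for polled and NO_WAIT callers, can be sketched in a few lines of standalone C. The names issue_mbox, mbox_active and queued are illustrative only; the real routine also copies the command into SLIM and handles the polling loop.

#include <stdio.h>

enum { MBX_POLL, MBX_NOWAIT };
enum { MBX_SUCCESS, MBX_BUSY, MBX_NOT_FINISHED };

static int mbox_active;         /* only one mailbox may be outstanding */
static int queued;

/* A polled mailbox fails if one is already active; a NO_WAIT mailbox is
 * queued and the caller gets MBX_BUSY. */
static int issue_mbox(int flag)
{
        if (mbox_active) {
                if (flag == MBX_POLL)
                        return MBX_NOT_FINISHED;
                queued++;
                return MBX_BUSY;
        }
        mbox_active = 1;        /* owned by the SLI layer until completion */
        return MBX_SUCCESS;
}

int main(void)
{
        printf("first:  %d\n", issue_mbox(MBX_POLL));    /* MBX_SUCCESS */
        printf("second: %d\n", issue_mbox(MBX_NOWAIT));  /* MBX_BUSY */
        printf("third:  %d\n", issue_mbox(MBX_POLL));    /* MBX_NOT_FINISHED */
        return 0;
}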
-/*
- * Caller needs to hold lock.
- */
+/**
+ * __lpfc_sli_ringtx_put: Add an iocb to the txq.
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @piocb: Pointer to address of newly added command iocb.
+ *
+ * This function is called with hbalock held to add a command
+ * iocb to the txq when the SLI layer cannot submit the command iocb
+ * to the ring.
+ **/
static void
__lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_iocbq *piocb)
@@ -2992,6 +3707,23 @@ __lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
pring->txq_cnt++;
}
+/**
+ * lpfc_sli_next_iocb: Get the next iocb in the txq.
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @piocb: Pointer to address of newly added command iocb.
+ *
+ * This function is called with hbalock held before a new
+ * iocb is submitted to the firmware. This function checks
+ * the txq so that the iocbs already queued there are flushed to the
+ * firmware before any new iocbs are submitted.
+ * If there are iocbs in the txq which need to be submitted
+ * to firmware, lpfc_sli_next_iocb returns the first element
+ * of the txq after dequeuing it from txq.
+ * If there is no iocb in the txq then the function will return
+ * *piocb and *piocb is set to NULL. Caller needs to check
+ * *piocb to find if there are more commands in the txq.
+ **/
static struct lpfc_iocbq *
lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_iocbq **piocb)
@@ -3007,9 +3739,30 @@ lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
return nextiocb;
}
-/*
- * Lockless version of lpfc_sli_issue_iocb.
- */
+/**
+ * __lpfc_sli_issue_iocb: Lockless version of lpfc_sli_issue_iocb.
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @piocb: Pointer to command iocb.
+ * @flag: Flag indicating if this command can be put into txq.
+ *
+ * __lpfc_sli_issue_iocb is used by other functions in the driver
+ * to issue an iocb command to the HBA. If the PCI slot is recovering
+ * from error state or if HBA is resetting or if LPFC_STOP_IOCB_EVENT
+ * flag is turned on, the function returns IOCB_ERROR.
+ * When the link is down, this function allows only iocbs for
+ * posting buffers.
+ * This function finds next available slot in the command ring and
+ * posts the command to the available slot and writes the port
+ * attention register to request HBA start processing new iocb.
+ * If there is no slot available in the ring and
+ * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the
+ * txq, otherwise the function returns IOCB_BUSY.
+ *
+ * This function is called with hbalock held.
+ * The function will return success after it successfully submits the
+ * iocb to the firmware or after adding it to the txq.
+ **/
static int
__lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_iocbq *piocb, uint32_t flag)
@@ -3052,6 +3805,16 @@ __lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
* can be issued if the link is not up.
*/
switch (piocb->iocb.ulpCommand) {
+ case CMD_GEN_REQUEST64_CR:
+ case CMD_GEN_REQUEST64_CX:
+ if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) ||
+ (piocb->iocb.un.genreq64.w5.hcsw.Rctl !=
+ FC_FCP_CMND) ||
+ (piocb->iocb.un.genreq64.w5.hcsw.Type !=
+ MENLO_TRANSPORT_TYPE))
+
+ goto iocb_busy;
+ break;
case CMD_QUE_RING_BUF_CN:
case CMD_QUE_RING_BUF64_CN:
/*
@@ -3106,6 +3869,19 @@ __lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
}
+/**
+ * lpfc_sli_issue_iocb: Wrapper function for __lpfc_sli_issue_iocb.
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @piocb: Pointer to command iocb.
+ * @flag: Flag indicating if this command can be put into txq.
+ *
+ * lpfc_sli_issue_iocb is a wrapper around __lpfc_sli_issue_iocb
+ * function. This function gets the hbalock and calls
+ * __lpfc_sli_issue_iocb function and will return the error returned
+ * by __lpfc_sli_issue_iocb function. This wrapper is used by
+ * functions which do not hold hbalock.
+ **/
int
lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_iocbq *piocb, uint32_t flag)
@@ -3120,6 +3896,17 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
return rc;
}
+/**
+ * lpfc_extra_ring_setup: Extra ring setup function.
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called while the driver attaches to the
+ * HBA to set up the extra ring. The extra ring is used
+ * only when the driver needs to support target mode functionality
+ * or IP over FC functionality.
+ *
+ * This function is called with no lock held.
+ **/
static int
lpfc_extra_ring_setup( struct lpfc_hba *phba)
{
@@ -3155,6 +3942,19 @@ lpfc_extra_ring_setup( struct lpfc_hba *phba)
return 0;
}
+/**
+ * lpfc_sli_async_event_handler: ASYNC iocb handler function.
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @iocbq: Pointer to iocb object.
+ *
+ * This function is called by the slow ring event handler
+ * function when there is an ASYNC event iocb in the ring.
+ * This function is called with no lock held.
+ * Currently this function handles only temperature related
+ * ASYNC events. The function decodes the temperature sensor
+ * event message and posts events for the management applications.
+ **/
static void
lpfc_sli_async_event_handler(struct lpfc_hba * phba,
struct lpfc_sli_ring * pring, struct lpfc_iocbq * iocbq)
@@ -3210,6 +4010,17 @@ lpfc_sli_async_event_handler(struct lpfc_hba * phba,
}
+/**
+ * lpfc_sli_setup: SLI ring setup function.
+ * @phba: Pointer to HBA context object.
+ *
+ * lpfc_sli_setup sets up the rings of the SLI interface with the
+ * number of iocbs per ring and iotags. This function is
+ * called while the driver attaches to the HBA and before the
+ * interrupts are enabled, so there is no need for locking.
+ *
+ * This function always returns 0.
+ **/
int
lpfc_sli_setup(struct lpfc_hba *phba)
{
@@ -3321,6 +4132,17 @@ lpfc_sli_setup(struct lpfc_hba *phba)
return 0;
}
+/**
+ * lpfc_sli_queue_setup: Queue initialization function.
+ * @phba: Pointer to HBA context object.
+ *
+ * lpfc_sli_queue_setup sets up mailbox queues and iocb queues for each
+ * ring. This function also initializes ring indices of each ring.
+ * This function is called during the initialization of the SLI
+ * interface of an HBA.
+ * This function is called with no lock held and always returns
+ * 1.
+ **/
int
lpfc_sli_queue_setup(struct lpfc_hba *phba)
{
@@ -3349,6 +4171,23 @@ lpfc_sli_queue_setup(struct lpfc_hba *phba)
return 1;
}
+/**
+ * lpfc_sli_host_down: Vport cleanup function.
+ * @vport: Pointer to virtual port object.
+ *
+ * lpfc_sli_host_down is called to clean up the resources
+ * associated with a vport before destroying virtual
+ * port data structures.
+ * This function does the following operations:
+ * - Free discovery resources associated with this virtual
+ * port.
+ * - Free iocbs associated with this virtual port in
+ * the txq.
+ * - Send abort for all iocb commands associated with this
+ * vport in txcmplq.
+ *
+ * This function is called with no lock held and always returns 1.
+ **/
int
lpfc_sli_host_down(struct lpfc_vport *vport)
{
@@ -3411,6 +4250,21 @@ lpfc_sli_host_down(struct lpfc_vport *vport)
return 1;
}
+/**
+ * lpfc_sli_hba_down: Resource cleanup function for the HBA.
+ * @phba: Pointer to HBA context object.
+ *
+ * This function cleans up all iocb, buffers, mailbox commands
+ * while shutting down the HBA. This function is called with no
+ * lock held and always returns 1.
+ * This function does the following to cleanup driver resources:
+ * - Free discovery resources for each virtual port
+ * - Cleanup any pending fabric iocbs
+ * - Iterate through the iocb txq and free each entry
+ * in the list.
+ * - Free up any buffer posted to the HBA
+ * - Free mailbox commands in the mailbox queue.
+ **/
int
lpfc_sli_hba_down(struct lpfc_hba *phba)
{
@@ -3501,6 +4355,18 @@ lpfc_sli_hba_down(struct lpfc_hba *phba)
return 1;
}
+/**
+ * lpfc_sli_pcimem_bcopy: SLI memory copy function.
+ * @srcp: Source memory pointer.
+ * @destp: Destination memory pointer.
+ * @cnt: Number of words required to be copied.
+ *
+ * This function is used for copying data between driver memory
+ * and the SLI memory. This function also changes the endianness
+ * of each word if the native endianness is different from the SLI
+ * endianness. This function can be called with or without
+ * a lock held.
+ **/
void
lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
{
@@ -3518,6 +4384,17 @@ lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
}
}
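As a rough userspace sketch of the word-by-word, endianness-converting copy described above: each 32-bit word is swapped while copying when the host byte order differs from the SLI byte order. pcimem_bcopy, swap32 and the swap flag are illustrative stand-ins, not the driver's helpers.

#include <stdint.h>
#include <stdio.h>

/* Byte-swap one 32-bit word. */
static uint32_t swap32(uint32_t v)
{
        return (v >> 24) | ((v >> 8) & 0xff00) |
               ((v << 8) & 0xff0000) | (v << 24);
}

/* Copy cnt bytes a word at a time, converting each word when the host
 * endianness differs from the SLI endianness (signalled here by 'swap'). */
static void pcimem_bcopy(const void *srcp, void *destp, uint32_t cnt, int swap)
{
        const uint32_t *src = srcp;
        uint32_t *dest = destp;

        for (uint32_t i = 0; i < cnt / sizeof(uint32_t); i++)
                dest[i] = swap ? swap32(src[i]) : src[i];
}

int main(void)
{
        uint32_t in[2] = { 0x11223344, 0xaabbccdd }, out[2];

        pcimem_bcopy(in, out, sizeof(in), 1);
        printf("%08x %08x\n", out[0], out[1]);
        return 0;
}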
+
+/**
+ * lpfc_sli_ringpostbuf_put: Function to add a buffer to postbufq.
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @mp: Pointer to driver buffer object.
+ *
+ * This function is called with no lock held.
+ * It always returns zero after adding the buffer to the postbufq
+ * buffer list.
+ **/
int
lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_dmabuf *mp)
@@ -3531,6 +4408,18 @@ lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
return 0;
}
+/**
+ * lpfc_sli_get_buffer_tag: Tag allocation function for a buffer posted
+ * using CMD_QUE_XRI64_CX iocb.
+ * @phba: Pointer to HBA context object.
+ *
+ * When HBQ is enabled, buffers are searched based on tags. This function
+ * allocates a tag for a buffer posted using the CMD_QUE_XRI64_CX iocb. The
+ * tag is bitwise OR-ed with QUE_BUFTAG_BIT to make sure that the tag
+ * does not conflict with tags of buffers posted for unsolicited events.
+ * The function returns the allocated tag. The function is called with
+ * no locks held.
+ **/
uint32_t
lpfc_sli_get_buffer_tag(struct lpfc_hba *phba)
{
@@ -3545,6 +4434,22 @@ lpfc_sli_get_buffer_tag(struct lpfc_hba *phba)
return phba->buffer_tag_count;
}
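The marker-bit scheme the comment above describes can be shown with a couple of lines of standalone C: every exchange-buffer tag carries a high bit so it can never collide with the hbqno-in-upper-16-bits tags used for unsolicited buffers. The value of QUE_BUFTAG_BIT below is assumed to be the top bit for illustration; get_buffer_tag is a stand-in, not the driver routine.

#include <stdint.h>
#include <stdio.h>

#define QUE_BUFTAG_BIT  (1U << 31)      /* assumed marker bit, illustration only */

static uint32_t buffer_tag_count;

/* Allocate the next tag and mark it so it cannot be mistaken for an
 * unsolicited-event (HBQ) tag. */
static uint32_t get_buffer_tag(void)
{
        return ++buffer_tag_count | QUE_BUFTAG_BIT;
}

int main(void)
{
        uint32_t tag = get_buffer_tag();

        printf("tag %#x tagged=%d\n", tag, !!(tag & QUE_BUFTAG_BIT));
        return 0;
}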
+/**
+ * lpfc_sli_ring_taggedbuf_get: Find the buffer associated with a tag
+ * posted using the CMD_QUE_XRI64_CX iocb.
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @tag: Buffer tag.
+ *
+ * Buffers posted using CMD_QUE_XRI64_CX iocb are in pring->postbufq
+ * list. After HBA DMA data to these buffers, CMD_IOCB_RET_XRI64_CX
+ * iocb is posted to the response ring with the tag of the buffer.
+ * This function searches the pring->postbufq list using the tag
+ * to find the buffer associated with the CMD_IOCB_RET_XRI64_CX
+ * iocb. If the buffer is found, the lpfc_dmabuf object of the
+ * buffer is returned to the caller, otherwise NULL is returned.
+ * This function is called with no lock held.
+ **/
struct lpfc_dmabuf *
lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
uint32_t tag)
@@ -3565,7 +4470,7 @@ lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
spin_unlock_irq(&phba->hbalock);
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0410 Cannot find virtual addr for buffer tag on "
+ "0402 Cannot find virtual addr for buffer tag on "
"ring %d Data x%lx x%p x%p x%x\n",
pring->ringno, (unsigned long) tag,
slp->next, slp->prev, pring->postbufq_cnt);
@@ -3573,6 +4478,23 @@ lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
return NULL;
}
+/**
+ * lpfc_sli_ringpostbuf_get: SLI2 buffer search function for
+ * unsolicited ct and els events.
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @phys: DMA address of the buffer.
+ *
+ * This function searches the buffer list using the dma_address
+ * of an unsolicited event to find the driver's lpfc_dmabuf object
+ * corresponding to the dma_address. The function returns the
+ * lpfc_dmabuf object if a buffer is found, else it returns NULL.
+ * This function is called by the ct and els unsolicited event
+ * handlers to get the buffer associated with the unsolicited
+ * event.
+ *
+ * This function is called with no lock held.
+ **/
struct lpfc_dmabuf *
lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
dma_addr_t phys)
@@ -3600,6 +4522,17 @@ lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
return NULL;
}
+/**
+ * lpfc_sli_abort_els_cmpl: Completion handler for the els abort iocbs.
+ * @phba: Pointer to HBA context object.
+ * @cmdiocb: Pointer to driver command iocb object.
+ * @rspiocb: Pointer to driver response iocb object.
+ *
+ * This function is the completion handler for the abort iocbs for
+ * ELS commands. This function is called from the ELS ring event
+ * handler with no lock held. This function frees memory resources
+ * associated with the abort iocb.
+ **/
static void
lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
@@ -3665,6 +4598,17 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
return;
}
+/**
+ * lpfc_ignore_els_cmpl: Completion handler for aborted ELS command.
+ * @phba: Pointer to HBA context object.
+ * @cmdiocb: Pointer to driver command iocb object.
+ * @rspiocb: Pointer to driver response iocb object.
+ *
+ * The function is called from SLI ring event handler with no
+ * lock held. This function is the completion handler for ELS commands
+ * which are aborted. The function frees memory resources used for
+ * the aborted ELS commands.
+ **/
static void
lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
@@ -3673,7 +4617,7 @@ lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* ELS cmd tag <ulpIoTag> completes */
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
- "0133 Ignoring ELS cmd tag x%x completion Data: "
+ "0139 Ignoring ELS cmd tag x%x completion Data: "
"x%x x%x x%x\n",
irsp->ulpIoTag, irsp->ulpStatus,
irsp->un.ulpWord[4], irsp->ulpTimeout);
@@ -3684,6 +4628,17 @@ lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
return;
}
+/**
+ * lpfc_sli_issue_abort_iotag: Abort function for a command iocb.
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @cmdiocb: Pointer to driver command iocb object.
+ *
+ * This function issues an abort iocb for the provided command
+ * iocb. This function is called with hbalock held.
+ * The function returns 0 when it fails due to memory allocation
+ * failure or when the command iocb is an abort request.
+ **/
int
lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_iocbq *cmdiocb)
@@ -3748,6 +4703,8 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
iabt->un.acxri.abortIoTag, abtsiocbp->iotag);
retval = __lpfc_sli_issue_iocb(phba, pring, abtsiocbp, 0);
+ if (retval)
+ __lpfc_sli_release_iocbq(phba, abtsiocbp);
abort_iotag_exit:
/*
* Caller to this routine should check for IOCB_ERROR
@@ -3757,6 +4714,29 @@ abort_iotag_exit:
return retval;
}
+/**
+ * lpfc_sli_validate_fcp_iocb: Filtering function, used to find commands
+ * associated with a vport/SCSI target/lun.
+ * @iocbq: Pointer to driver iocb object.
+ * @vport: Pointer to driver virtual port object.
+ * @tgt_id: SCSI ID of the target.
+ * @lun_id: LUN ID of the scsi device.
+ * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST
+ *
+ * This function acts as an iocb filter for functions which abort or count
+ * all FCP iocbs pending on a lun/SCSI target/SCSI host. It will return
+ * 0 if the filtering criteria are met for the given iocb and will return
+ * 1 if the filtering criteria are not met.
+ * If ctx_cmd == LPFC_CTX_LUN, the function returns 0 only if the
+ * given iocb is for the SCSI device specified by vport, tgt_id and
+ * lun_id parameter.
+ * If ctx_cmd == LPFC_CTX_TGT, the function returns 0 only if the
+ * given iocb is for the SCSI target specified by vport and tgt_id
+ * parameters.
+ * If ctx_cmd == LPFC_CTX_HOST, the function returns 0 only if the
+ * given iocb is for the SCSI host associated with the given vport.
+ * This function is called with no locks held.
+ **/
static int
lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
uint16_t tgt_id, uint64_t lun_id,
@@ -3800,6 +4780,25 @@ lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
return rc;
}
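A small standalone sketch of the filter contract described above, and of the counting loop that lpfc_sli_sum_iocb builds on it: return 0 when the iocb falls inside the requested lun/target/host scope, 1 otherwise. struct fake_iocb and validate_fcp_iocb are illustrative userspace stand-ins, not the driver's structures.

#include <stdint.h>
#include <stdio.h>

typedef enum { LPFC_CTX_LUN, LPFC_CTX_TGT, LPFC_CTX_HOST } lpfc_ctx_cmd;

struct fake_iocb {              /* illustrative stand-in for an FCP iocb */
        uint16_t tgt_id;
        uint64_t lun_id;
};

/* Return 0 when the iocb matches the requested scope, 1 otherwise. */
static int validate_fcp_iocb(const struct fake_iocb *io, uint16_t tgt_id,
                             uint64_t lun_id, lpfc_ctx_cmd ctx)
{
        switch (ctx) {
        case LPFC_CTX_LUN:
                return !(io->tgt_id == tgt_id && io->lun_id == lun_id);
        case LPFC_CTX_TGT:
                return !(io->tgt_id == tgt_id);
        case LPFC_CTX_HOST:
                return 0;       /* every iocb on this host matches */
        }
        return 1;
}

int main(void)
{
        struct fake_iocb ios[] = { { 1, 0 }, { 1, 3 }, { 2, 0 } };
        int sum = 0;

        /* Counting loop in the style of lpfc_sli_sum_iocb. */
        for (unsigned i = 0; i < sizeof(ios) / sizeof(ios[0]); i++)
                if (validate_fcp_iocb(&ios[i], 1, 0, LPFC_CTX_TGT) == 0)
                        sum++;
        printf("pending on target 1: %d\n", sum);
        return 0;
}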
+/**
+ * lpfc_sli_sum_iocb: Function to count the number of FCP iocbs pending.
+ * @vport: Pointer to virtual port.
+ * @tgt_id: SCSI ID of the target.
+ * @lun_id: LUN ID of the scsi device.
+ * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
+ *
+ * This function returns the number of FCP commands pending for the vport.
+ * When ctx_cmd == LPFC_CTX_LUN, the function returns the number of FCP
+ * commands pending on the vport associated with the SCSI device specified
+ * by the tgt_id and lun_id parameters.
+ * When ctx_cmd == LPFC_CTX_TGT, the function returns the number of FCP
+ * commands pending on the vport associated with the SCSI target specified
+ * by the tgt_id parameter.
+ * When ctx_cmd == LPFC_CTX_HOST, the function returns the number of FCP
+ * commands pending on the vport.
+ * This function returns the number of iocbs which satisfy the filter.
+ * This function is called without any lock held.
+ **/
int
lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id,
lpfc_ctx_cmd ctx_cmd)
@@ -3819,6 +4818,17 @@ lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id,
return sum;
}
+/**
+ * lpfc_sli_abort_fcp_cmpl: Completion handler function for an aborted
+ * FCP iocb.
+ * @phba: Pointer to HBA context object
+ * @cmdiocb: Pointer to command iocb object.
+ * @rspiocb: Pointer to response iocb object.
+ *
+ * This function is called when an aborted FCP iocb completes. This
+ * function is called by the ring event handler with no lock held.
+ * This function frees the iocb.
+ **/
void
lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
@@ -3827,6 +4837,28 @@ lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
return;
}
+/**
+ * lpfc_sli_abort_iocb: This function issues an abort for all SCSI commands
+ * pending on a SCSI host(vport)/target/lun.
+ * @vport: Pointer to virtual port.
+ * @pring: Pointer to driver SLI ring object.
+ * @tgt_id: SCSI ID of the target.
+ * @lun_id: LUN ID of the scsi device.
+ * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
+ *
+ * This function sends an abort command for every SCSI command
+ * associated with the given virtual port pending on the ring
+ * filtered by lpfc_sli_validate_fcp_iocb function.
+ * When abort_cmd == LPFC_CTX_LUN, the function sends abort only to the
+ * FCP iocbs associated with the lun specified by the tgt_id and lun_id
+ * parameters.
+ * When abort_cmd == LPFC_CTX_TGT, the function sends abort only to the
+ * FCP iocbs associated with SCSI target specified by tgt_id parameter.
+ * When abort_cmd == LPFC_CTX_HOST, the function sends abort to all
+ * FCP iocbs associated with virtual port.
+ * This function returns number of iocbs it failed to abort.
+ * This function is called with no locks held.
+ **/
int
lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd abort_cmd)
@@ -3878,6 +4910,24 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
return errcnt;
}
+/**
+ * lpfc_sli_wake_iocb_wait: iocb completion handler for iocb issued using
+ * lpfc_sli_issue_iocb_wait.
+ * @phba: Pointer to HBA context object.
+ * @cmdiocbq: Pointer to command iocb.
+ * @rspiocbq: Pointer to response iocb.
+ *
+ * This function is the completion handler for iocbs issued using
+ * lpfc_sli_issue_iocb_wait function. This function is called by the
+ * ring event handler function without any lock held. This function
+ * can be called from both worker thread context and interrupt
+ * context. This function can also be called from another thread which
+ * cleans up the SLI layer objects.
+ * This function copies the contents of the response iocb to the
+ * response iocb memory object provided by the caller of
+ * lpfc_sli_issue_iocb_wait and then wakes up the thread which
+ * sleeps waiting for the iocb completion.
+ **/
static void
lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
struct lpfc_iocbq *cmdiocbq,
@@ -3899,13 +4949,36 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
return;
}
-/*
- * Issue the caller's iocb and wait for its completion, but no longer than the
- * caller's timeout. Note that iocb_flags is cleared before the
- * lpfc_sli_issue_call since the wake routine sets a unique value and by
- * definition this is a wait function.
- */
-
+/**
+ * lpfc_sli_issue_iocb_wait: Synchronous function to issue iocb commands.
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to sli ring.
+ * @piocb: Pointer to command iocb.
+ * @prspiocbq: Pointer to response iocb.
+ * @timeout: Timeout in number of seconds.
+ *
+ * This function issues the iocb to firmware and waits for the
+ * iocb to complete. If the iocb command is not
+ * completed within timeout seconds, it returns IOCB_TIMEDOUT.
+ * Caller should not free the iocb resources if this function
+ * returns IOCB_TIMEDOUT.
+ * The function waits for the iocb completion using a
+ * non-interruptible wait.
+ * This function will sleep while waiting for the iocb completion.
+ * So, this function should not be called from any context which
+ * does not allow sleeping. For the same reason, this function
+ * cannot be called with interrupts disabled.
+ * This function assumes that the iocb completion occurs while
+ * this function sleeps. So, this function cannot be called from
+ * the thread which processes iocb completions for this ring.
+ * This function clears the iocb_flag of the iocb object before
+ * issuing the iocb and the iocb completion handler sets this
+ * flag and wakes this thread when the iocb completes.
+ * The contents of the response iocb will be copied to prspiocbq
+ * by the completion handler when the command completes.
+ * This function returns IOCB_SUCCESS on success.
+ * This function is called with no lock held.
+ **/
int
lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
struct lpfc_sli_ring *pring,
@@ -3963,7 +5036,7 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
}
} else {
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
- ":0332 IOCB wait issue failed, Data x%x\n",
+ "0332 IOCB wait issue failed, Data x%x\n",
retval);
retval = IOCB_ERROR;
}
@@ -3983,6 +5056,32 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
return retval;
}
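The issue-and-wait shape described above, where the submitter sleeps and the completion handler wakes it after copying the response, can be sketched with a pthread condition variable. The names, the 1:1 thread model and the absence of a timeout are all simplifications for illustration; the driver uses a wait queue and a timeout.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cv = PTHREAD_COND_INITIALIZER;
static int done;

/* Stand-in for the completion handler: set the flag, wake the waiter. */
static void *completion_handler(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&lock);
        done = 1;                       /* response copied, wake the waiter */
        pthread_cond_signal(&cv);
        pthread_mutex_unlock(&lock);
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, completion_handler, NULL);

        pthread_mutex_lock(&lock);
        while (!done)                   /* non-interruptible wait for completion */
                pthread_cond_wait(&cv, &lock);
        pthread_mutex_unlock(&lock);

        pthread_join(t, NULL);
        printf("iocb completed\n");
        return 0;
}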
+/**
+ * lpfc_sli_issue_mbox_wait: Synchronous function to issue mailbox.
+ * @phba: Pointer to HBA context object.
+ * @pmboxq: Pointer to driver mailbox object.
+ * @timeout: Timeout in number of seconds.
+ *
+ * This function issues the mailbox to firmware and waits for the
+ * mailbox command to complete. If the mailbox command is not
+ * completed within timeout seconds, it returns MBX_TIMEOUT.
+ * The function waits for the mailbox completion using an
+ * interruptible wait. If the thread is woken up due to a
+ * signal, MBX_TIMEOUT error is returned to the caller. Caller
+ * should not free the mailbox resources, if this function returns
+ * MBX_TIMEOUT.
+ * This function will sleep while waiting for mailbox completion.
+ * So, this function should not be called from any context which
+ * does not allow sleeping. For the same reason, this function
+ * cannot be called with interrupts disabled.
+ * This function assumes that the mailbox completion occurs while
+ * this function sleeps. So, this function cannot be called from
+ * the worker thread which processes mailbox completions.
+ * This function is called in the context of HBA management
+ * applications.
+ * This function returns MBX_SUCCESS when successful.
+ * This function is called with no lock held.
+ **/
int
lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
uint32_t timeout)
@@ -4027,6 +5126,18 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
return retval;
}
+/**
+ * lpfc_sli_flush_mbox_queue: mailbox queue cleanup function.
+ * @phba: Pointer to HBA context.
+ *
+ * This function is called to cleanup any pending mailbox
+ * objects in the driver queue before bringing the HBA offline.
+ * This function is called while resetting the HBA.
+ * The function is called without any lock held. The function
+ * takes the hbalock to update the SLI data structures.
+ * This function returns 1 when there is an active mailbox
+ * command pending, else it returns 0.
+ **/
int
lpfc_sli_flush_mbox_queue(struct lpfc_hba * phba)
{
@@ -4058,8 +5169,74 @@ lpfc_sli_flush_mbox_queue(struct lpfc_hba * phba)
return (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) ? 1 : 0;
}
+/**
+ * lpfc_sli_check_eratt: check error attention events
+ * @phba: Pointer to HBA context.
+ *
+ * This function is called from timer soft interrupt context to check the HBA's
+ * error attention register bit for error attention events.
+ *
+ * This function returns 1 when there is an Error Attention in the Host Attention
+ * Register and returns 0 otherwise.
+ **/
+int
+lpfc_sli_check_eratt(struct lpfc_hba *phba)
+{
+ uint32_t ha_copy;
+
+ /* If somebody is waiting to handle an eratt, don't process it
+ * here. The brdkill function will do this.
+ */
+ if (phba->link_flag & LS_IGNORE_ERATT)
+ return 0;
+
+ /* Check if interrupt handler handles this ERATT */
+ spin_lock_irq(&phba->hbalock);
+ if (phba->hba_flag & HBA_ERATT_HANDLED) {
+ /* Interrupt handler has handled ERATT */
+ spin_unlock_irq(&phba->hbalock);
+ return 0;
+ }
+
+ /* Read chip Host Attention (HA) register */
+ ha_copy = readl(phba->HAregaddr);
+ if (ha_copy & HA_ERATT) {
+ /* Read host status register to retrieve error event */
+ lpfc_sli_read_hs(phba);
+ /* Set the driver HA work bitmap */
+ phba->work_ha |= HA_ERATT;
+ /* Indicate polling handles this ERATT */
+ phba->hba_flag |= HBA_ERATT_HANDLED;
+ spin_unlock_irq(&phba->hbalock);
+ return 1;
+ }
+ spin_unlock_irq(&phba->hbalock);
+ return 0;
+}
+
+/**
+ * lpfc_sp_intr_handler: The slow-path interrupt handler of lpfc driver.
+ * @irq: Interrupt number.
+ * @dev_id: The device context pointer.
+ *
+ * This function is directly called from the PCI layer as an interrupt
+ * service routine when the device is enabled with MSI-X multi-message
+ * interrupt mode and there are slow-path events in the HBA. However,
+ * when the device is enabled with either MSI or Pin-IRQ interrupt mode,
+ * this function is called as part of the device-level interrupt handler.
+ * When the PCI slot is in error recovery or the HBA is undergoing
+ * initialization, the interrupt handler will not process the interrupt.
+ * The link attention and ELS ring attention events are handled by the
+ * worker thread. The interrupt handler signals the worker thread
+ * and returns for these events. This function is called without any
+ * lock held. It gets the hbalock to access and update SLI data
+ * structures.
+ *
+ * This function returns IRQ_HANDLED when the interrupt is handled, else it
+ * returns IRQ_NONE.
+ **/
irqreturn_t
-lpfc_intr_handler(int irq, void *dev_id)
+lpfc_sp_intr_handler(int irq, void *dev_id)
{
struct lpfc_hba *phba;
uint32_t ha_copy;
@@ -4078,48 +5255,52 @@ lpfc_intr_handler(int irq, void *dev_id)
* Get the driver's phba structure from the dev_id and
* assume the HBA is not interrupting.
*/
- phba = (struct lpfc_hba *) dev_id;
+ phba = (struct lpfc_hba *)dev_id;
if (unlikely(!phba))
return IRQ_NONE;
- /* If the pci channel is offline, ignore all the interrupts. */
- if (unlikely(pci_channel_offline(phba->pcidev)))
- return IRQ_NONE;
-
- phba->sli.slistat.sli_intr++;
-
/*
- * Call the HBA to see if it is interrupting. If not, don't claim
- * the interrupt
- */
-
- /* Ignore all interrupts during initialization. */
- if (unlikely(phba->link_state < LPFC_LINK_DOWN))
- return IRQ_NONE;
-
- /*
- * Read host attention register to determine interrupt source
- * Clear Attention Sources, except Error Attention (to
- * preserve status) and Link Attention
- */
- spin_lock(&phba->hbalock);
- ha_copy = readl(phba->HAregaddr);
- /* If somebody is waiting to handle an eratt don't process it
- * here. The brdkill function will do this.
+	 * Stuff needs to be attended to when this function is invoked as an
+ * individual interrupt handler in MSI-X multi-message interrupt mode
*/
- if (phba->link_flag & LS_IGNORE_ERATT)
- ha_copy &= ~HA_ERATT;
- writel((ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
- readl(phba->HAregaddr); /* flush */
- spin_unlock(&phba->hbalock);
-
- if (unlikely(!ha_copy))
- return IRQ_NONE;
+ if (phba->intr_type == MSIX) {
+ /* If the pci channel is offline, ignore all the interrupts */
+ if (unlikely(pci_channel_offline(phba->pcidev)))
+ return IRQ_NONE;
+ /* Update device-level interrupt statistics */
+ phba->sli.slistat.sli_intr++;
+ /* Ignore all interrupts during initialization. */
+ if (unlikely(phba->link_state < LPFC_LINK_DOWN))
+ return IRQ_NONE;
+ /* Need to read HA REG for slow-path events */
+ spin_lock(&phba->hbalock);
+ ha_copy = readl(phba->HAregaddr);
+ /* If somebody is waiting to handle an eratt don't process it
+ * here. The brdkill function will do this.
+ */
+ if (phba->link_flag & LS_IGNORE_ERATT)
+ ha_copy &= ~HA_ERATT;
+ /* Check the need for handling ERATT in interrupt handler */
+ if (ha_copy & HA_ERATT) {
+ if (phba->hba_flag & HBA_ERATT_HANDLED)
+ /* ERATT polling has handled ERATT */
+ ha_copy &= ~HA_ERATT;
+ else
+ /* Indicate interrupt handler handles ERATT */
+ phba->hba_flag |= HBA_ERATT_HANDLED;
+ }
+ /* Clear up only attention source related to slow-path */
+ writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)),
+ phba->HAregaddr);
+ readl(phba->HAregaddr); /* flush */
+ spin_unlock(&phba->hbalock);
+ } else
+ ha_copy = phba->ha_copy;
work_ha_copy = ha_copy & phba->work_ha_mask;
- if (unlikely(work_ha_copy)) {
+ if (work_ha_copy) {
if (work_ha_copy & HA_LATT) {
if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
/*
@@ -4138,7 +5319,7 @@ lpfc_intr_handler(int irq, void *dev_id)
work_ha_copy &= ~HA_LATT;
}
- if (work_ha_copy & ~(HA_ERATT|HA_MBATT|HA_LATT)) {
+ if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) {
/*
* Turn off Slow Rings interrupts, LPFC_ELS_RING is
* the only slow ring.
@@ -4179,31 +5360,13 @@ lpfc_intr_handler(int irq, void *dev_id)
spin_unlock(&phba->hbalock);
}
}
-
- if (work_ha_copy & HA_ERATT) {
- /*
- * There was a link/board error. Read the
- * status register to retrieve the error event
- * and process it.
- */
- phba->sli.slistat.err_attn_event++;
- /* Save status info */
- phba->work_hs = readl(phba->HSregaddr);
- phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
- phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
-
- /* Clear Chip error bit */
- writel(HA_ERATT, phba->HAregaddr);
- readl(phba->HAregaddr); /* flush */
- phba->pport->stopped = 1;
- }
-
spin_lock(&phba->hbalock);
- if ((work_ha_copy & HA_MBATT) &&
- (phba->sli.mbox_active)) {
+ if (work_ha_copy & HA_ERATT)
+ lpfc_sli_read_hs(phba);
+ if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) {
pmb = phba->sli.mbox_active;
pmbox = &pmb->mb;
- mbox = &phba->slim2p->mbx;
+ mbox = phba->mbox;
vport = pmb->vport;
/* First check out the status word */
@@ -4270,7 +5433,7 @@ lpfc_intr_handler(int irq, void *dev_id)
lpfc_printf_log(phba,
KERN_ERR,
LOG_MBOX | LOG_SLI,
- "0306 rc should have"
+ "0350 rc should have"
"been MBX_BUSY");
goto send_current_mbox;
}
@@ -4283,6 +5446,7 @@ lpfc_intr_handler(int irq, void *dev_id)
}
} else
spin_unlock(&phba->hbalock);
+
if ((work_ha_copy & HA_MBATT) &&
(phba->sli.mbox_active == NULL)) {
send_current_mbox:
@@ -4302,15 +5466,74 @@ send_current_mbox:
spin_unlock(&phba->hbalock);
lpfc_worker_wake_up(phba);
}
+ return IRQ_HANDLED;
- ha_copy &= ~(phba->work_ha_mask);
+} /* lpfc_sp_intr_handler */
+
+/**
+ * lpfc_fp_intr_handler: The fast-path interrupt handler of lpfc driver.
+ * @irq: Interrupt number.
+ * @dev_id: The device context pointer.
+ *
+ * This function is directly called from the PCI layer as an interrupt
+ * service routine when the device is enabled with MSI-X multi-message
+ * interrupt mode and there is a fast-path FCP IOCB ring event in the
+ * HBA. However, when the device is enabled with either MSI or Pin-IRQ
+ * interrupt mode, this function is called as part of the device-level
+ * interrupt handler. When the PCI slot is in error recovery or the HBA
+ * is undergoing initialization, the interrupt handler will not process
+ * the interrupt. The SCSI FCP fast-path ring events are handled in the
+ * interrupt context. This function is called without any lock held. It
+ * gets the hbalock to access and update SLI data structures.
+ *
+ * This function returns IRQ_HANDLED when the interrupt is handled, else it
+ * returns IRQ_NONE.
+ **/
+irqreturn_t
+lpfc_fp_intr_handler(int irq, void *dev_id)
+{
+ struct lpfc_hba *phba;
+ uint32_t ha_copy;
+ unsigned long status;
+
+ /* Get the driver's phba structure from the dev_id and
+ * assume the HBA is not interrupting.
+ */
+ phba = (struct lpfc_hba *) dev_id;
+
+ if (unlikely(!phba))
+ return IRQ_NONE;
+
+ /*
+	 * Stuff needs to be attended to when this function is invoked as an
+ * individual interrupt handler in MSI-X multi-message interrupt mode
+ */
+ if (phba->intr_type == MSIX) {
+ /* If pci channel is offline, ignore all the interrupts */
+ if (unlikely(pci_channel_offline(phba->pcidev)))
+ return IRQ_NONE;
+ /* Update device-level interrupt statistics */
+ phba->sli.slistat.sli_intr++;
+ /* Ignore all interrupts during initialization. */
+ if (unlikely(phba->link_state < LPFC_LINK_DOWN))
+ return IRQ_NONE;
+ /* Need to read HA REG for FCP ring and other ring events */
+ ha_copy = readl(phba->HAregaddr);
+ /* Clear up only attention source related to fast-path */
+ spin_lock(&phba->hbalock);
+ writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)),
+ phba->HAregaddr);
+ readl(phba->HAregaddr); /* flush */
+ spin_unlock(&phba->hbalock);
+ } else
+ ha_copy = phba->ha_copy;
/*
- * Process all events on FCP ring. Take the optimized path for
- * FCP IO. Any other IO is slow path and is handled by
- * the worker thread.
+ * Process all events on FCP ring. Take the optimized path for FCP IO.
*/
- status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
+ ha_copy &= ~(phba->work_ha_mask);
+
+ status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
status >>= (4*LPFC_FCP_RING);
if (status & HA_RXMASK)
lpfc_sli_handle_fast_ring_event(phba,
@@ -4319,11 +5542,10 @@ send_current_mbox:
if (phba->cfg_multi_ring_support == 2) {
/*
- * Process all events on extra ring. Take the optimized path
- * for extra ring IO. Any other IO is slow path and is handled
- * by the worker thread.
+ * Process all events on extra ring. Take the optimized path
+ * for extra ring IO.
*/
- status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
+ status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
status >>= (4*LPFC_EXTRA_RING);
if (status & HA_RXMASK) {
lpfc_sli_handle_fast_ring_event(phba,
@@ -4332,5 +5554,106 @@ send_current_mbox:
}
}
return IRQ_HANDLED;
+} /* lpfc_fp_intr_handler */
+
+/**
+ * lpfc_intr_handler: The device-level interrupt handler of lpfc driver.
+ * @irq: Interrupt number.
+ * @dev_id: The device context pointer.
+ *
+ * This function is the device-level interrupt handler called from the PCI
+ * layer when either MSI or Pin-IRQ interrupt mode is enabled and there is
+ * an event in the HBA which requires driver attention. This function
+ * invokes the slow-path interrupt attention handling function and fast-path
+ * interrupt attention handling function in turn to process the relevant
+ * HBA attention events. This function is called without any lock held. It
+ * gets the hbalock to access and update SLI data structures.
+ *
+ * This function returns IRQ_HANDLED when interrupt is handled, else it
+ * returns IRQ_NONE.
+ **/
+irqreturn_t
+lpfc_intr_handler(int irq, void *dev_id)
+{
+ struct lpfc_hba *phba;
+ irqreturn_t sp_irq_rc, fp_irq_rc;
+ unsigned long status1, status2;
+
+ /*
+ * Get the driver's phba structure from the dev_id and
+ * assume the HBA is not interrupting.
+ */
+ phba = (struct lpfc_hba *) dev_id;
+
+ if (unlikely(!phba))
+ return IRQ_NONE;
+
+ /* If the pci channel is offline, ignore all the interrupts. */
+ if (unlikely(pci_channel_offline(phba->pcidev)))
+ return IRQ_NONE;
+
+ /* Update device level interrupt statistics */
+ phba->sli.slistat.sli_intr++;
+
+ /* Ignore all interrupts during initialization. */
+ if (unlikely(phba->link_state < LPFC_LINK_DOWN))
+ return IRQ_NONE;
+
+ spin_lock(&phba->hbalock);
+ phba->ha_copy = readl(phba->HAregaddr);
+ if (unlikely(!phba->ha_copy)) {
+ spin_unlock(&phba->hbalock);
+ return IRQ_NONE;
+ } else if (phba->ha_copy & HA_ERATT) {
+ if (phba->hba_flag & HBA_ERATT_HANDLED)
+ /* ERATT polling has handled ERATT */
+ phba->ha_copy &= ~HA_ERATT;
+ else
+ /* Indicate interrupt handler handles ERATT */
+ phba->hba_flag |= HBA_ERATT_HANDLED;
+ }
+
+ /* Clear attention sources except link and error attentions */
+ writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
+ readl(phba->HAregaddr); /* flush */
+ spin_unlock(&phba->hbalock);
+
+ /*
+ * Invokes slow-path host attention interrupt handling as appropriate.
+ */
+
+ /* status of events with mailbox and link attention */
+ status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT);
+
+ /* status of events with ELS ring */
+ status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
+ status2 >>= (4*LPFC_ELS_RING);
+
+ if (status1 || (status2 & HA_RXMASK))
+ sp_irq_rc = lpfc_sp_intr_handler(irq, dev_id);
+ else
+ sp_irq_rc = IRQ_NONE;
+
+ /*
+ * Invoke fast-path host attention interrupt handling as appropriate.
+ */
+
+ /* status of events with FCP ring */
+ status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
+ status1 >>= (4*LPFC_FCP_RING);
+
+ /* status of events with extra ring */
+ if (phba->cfg_multi_ring_support == 2) {
+ status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
+ status2 >>= (4*LPFC_EXTRA_RING);
+ } else
+ status2 = 0;
+
+ if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK))
+ fp_irq_rc = lpfc_fp_intr_handler(irq, dev_id);
+ else
+ fp_irq_rc = IRQ_NONE;
-} /* lpfc_intr_handler */
+ /* Return device-level interrupt handling status */
+ return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc;
+} /* lpfc_intr_handler */
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index 7249fd252cb..883938652a6 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -233,6 +233,7 @@ struct lpfc_sli {
#define LPFC_SLI2_ACTIVE 0x200 /* SLI2 overlay in firmware is active */
#define LPFC_PROCESS_LA 0x400 /* Able to process link attention */
#define LPFC_BLOCK_MGMT_IO 0x800 /* Don't allow mgmt mbx or iocb cmds */
+#define LPFC_MENLO_MAINT 0x1000 /* needed for Menlo fw download */
struct lpfc_sli_ring ring[LPFC_MAX_RING];
int fcp_ring; /* ring used for FCP initiator commands */
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index ad24cacfbe1..cc43e9de22c 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,9 +18,11 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "8.2.7"
+#define LPFC_DRIVER_VERSION "8.2.8"
-#define LPFC_DRIVER_NAME "lpfc"
+#define LPFC_DRIVER_NAME "lpfc"
+#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp"
+#define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp"
#define LPFC_MODULE_DESC "Emulex LightPulse Fibre Channel SCSI driver " \
LPFC_DRIVER_VERSION
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index 109f89d9883..a7de1cc02b4 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -34,6 +34,7 @@
#include <scsi/scsi_transport_fc.h>
#include "lpfc_hw.h"
#include "lpfc_sli.h"
+#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
@@ -204,6 +205,77 @@ lpfc_unique_wwpn(struct lpfc_hba *phba, struct lpfc_vport *new_vport)
return 1;
}
+/**
+ * lpfc_discovery_wait: Wait for driver discovery to quiesce.
+ * @vport: The virtual port for which this call is being executed.
+ *
+ * The driver calls this routine from lpfc_vport_delete to enforce
+ * synchronous execution of the vport delete relative to discovery
+ * activities. The lpfc_vport_delete routine should not return until
+ * it can reasonably guarantee that discovery has quiesced.
+ * After the FDISC LOGO, the driver must wait until its SAN teardown
+ * is complete and all resources are recovered before allowing
+ * cleanup.
+ *
+ * This routine does not require any locks held.
+ **/
+static void lpfc_discovery_wait(struct lpfc_vport *vport)
+{
+ struct lpfc_hba *phba = vport->phba;
+ uint32_t wait_flags = 0;
+ unsigned long wait_time_max;
+ unsigned long start_time;
+
+ wait_flags = FC_RSCN_MODE | FC_RSCN_DISCOVERY | FC_NLP_MORE |
+ FC_RSCN_DEFERRED | FC_NDISC_ACTIVE | FC_DISC_TMO;
+
+ /*
+ * The time constraint on this loop is a balance between the
+ * fabric RA_TOV value and dev_loss tmo. The driver's
+ * devloss_tmo is 10 giving this loop a 3x multiplier minimally.
+ */
+ wait_time_max = msecs_to_jiffies(((phba->fc_ratov * 3) + 3) * 1000);
+ wait_time_max += jiffies;
+ start_time = jiffies;
+ while (time_before(jiffies, wait_time_max)) {
+ if ((vport->num_disc_nodes > 0) ||
+ (vport->fc_flag & wait_flags) ||
+ ((vport->port_state > LPFC_VPORT_FAILED) &&
+ (vport->port_state < LPFC_VPORT_READY))) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_VPORT,
+ "1833 Vport discovery quiesce Wait:"
+ " vpi x%x state x%x fc_flags x%x"
+ " num_nodes x%x, waiting 1000 msecs"
+ " total wait msecs x%x\n",
+ vport->vpi, vport->port_state,
+ vport->fc_flag, vport->num_disc_nodes,
+ jiffies_to_msecs(jiffies - start_time));
+ msleep(1000);
+ } else {
+ /* Base case. Wait variants satisfied. Break out */
+ lpfc_printf_log(phba, KERN_INFO, LOG_VPORT,
+ "1834 Vport discovery quiesced:"
+ " vpi x%x state x%x fc_flags x%x"
+ " wait msecs x%x\n",
+ vport->vpi, vport->port_state,
+ vport->fc_flag,
+ jiffies_to_msecs(jiffies
+ - start_time));
+ break;
+ }
+ }
+
+ if (time_after(jiffies, wait_time_max))
+ lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
+ "1835 Vport discovery quiesce failed:"
+ " vpi x%x state x%x fc_flags x%x"
+ " wait msecs x%x\n",
+ vport->vpi, vport->port_state,
+ vport->fc_flag,
+ jiffies_to_msecs(jiffies - start_time));
+}
+
int
lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
{
@@ -506,8 +578,12 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
* initiated after we've disposed of all other resources associated
* with the port.
*/
- if (!scsi_host_get(shost) || !scsi_host_get(shost))
+ if (!scsi_host_get(shost))
+ return VPORT_INVAL;
+ if (!scsi_host_get(shost)) {
+ scsi_host_put(shost);
return VPORT_INVAL;
+ }
spin_lock_irq(&phba->hbalock);
vport->load_flag |= FC_UNLOADING;
spin_unlock_irq(&phba->hbalock);
@@ -597,11 +673,16 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
}
vport->unreg_vpi_cmpl = VPORT_INVAL;
timeout = msecs_to_jiffies(phba->fc_ratov * 2000);
+ if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
+ goto skip_logo;
if (!lpfc_issue_els_npiv_logo(vport, ndlp))
while (vport->unreg_vpi_cmpl == VPORT_INVAL && timeout)
timeout = schedule_timeout(timeout);
}
+ if (!(phba->pport->load_flag & FC_UNLOADING))
+ lpfc_discovery_wait(vport);
+
skip_logo:
lpfc_cleanup(vport);
lpfc_sli_host_down(vport);
@@ -615,8 +696,10 @@ skip_logo:
* Completion of unreg_vpi (lpfc_mbx_cmpl_unreg_vpi)
* does the scsi_host_put() to release the vport.
*/
- lpfc_mbx_unreg_vpi(vport);
- }
+ if (lpfc_mbx_unreg_vpi(vport))
+ scsi_host_put(shost);
+ } else
+ scsi_host_put(shost);
lpfc_free_vpi(phba, vport->vpi);
vport->work_port_events = 0;
@@ -663,3 +746,82 @@ lpfc_destroy_vport_work_array(struct lpfc_hba *phba, struct lpfc_vport **vports)
scsi_host_put(lpfc_shost_from_vport(vports[i]));
kfree(vports);
}
+
+
+/**
+ * lpfc_vport_reset_stat_data: Reset the statistical data for the vport.
+ * @vport: Pointer to vport object.
+ *
+ * This function resets the statistical data for the vport. This function
+ * is called with the host_lock held.
+ **/
+void
+lpfc_vport_reset_stat_data(struct lpfc_vport *vport)
+{
+ struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL;
+
+ list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
+ if (!NLP_CHK_NODE_ACT(ndlp))
+ continue;
+ if (ndlp->lat_data)
+ memset(ndlp->lat_data, 0, LPFC_MAX_BUCKET_COUNT *
+ sizeof(struct lpfc_scsicmd_bkt));
+ }
+}
+
+
+/**
+ * lpfc_alloc_bucket: Allocate data buffer required for collecting
+ * statistical data.
+ * @vport: Pointer to vport object.
+ *
+ * This function allocates the data buffers required for all the FC
+ * nodes of the vport to collect statistical data.
+ **/
+void
+lpfc_alloc_bucket(struct lpfc_vport *vport)
+{
+ struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL;
+
+ list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
+ if (!NLP_CHK_NODE_ACT(ndlp))
+ continue;
+
+ kfree(ndlp->lat_data);
+ ndlp->lat_data = NULL;
+
+ if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
+ ndlp->lat_data = kcalloc(LPFC_MAX_BUCKET_COUNT,
+ sizeof(struct lpfc_scsicmd_bkt),
+ GFP_ATOMIC);
+
+ if (!ndlp->lat_data)
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
+ "0287 lpfc_alloc_bucket failed to "
+ "allocate statistical data buffer DID "
+ "0x%x\n", ndlp->nlp_DID);
+ }
+ }
+}
+
+/**
+ * lpfc_free_bucket: Free data buffer required for collecting
+ * statistical data.
+ * @vport: Pointer to vport object.
+ *
+ * This function frees the statistical data buffers of all the FC
+ * nodes of the vport.
+ **/
+void
+lpfc_free_bucket(struct lpfc_vport *vport)
+{
+ struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL;
+
+ list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
+ if (!NLP_CHK_NODE_ACT(ndlp))
+ continue;
+
+ kfree(ndlp->lat_data);
+ ndlp->lat_data = NULL;
+ }
+}
diff --git a/drivers/scsi/lpfc/lpfc_vport.h b/drivers/scsi/lpfc/lpfc_vport.h
index 96c445333b6..90828340ace 100644
--- a/drivers/scsi/lpfc/lpfc_vport.h
+++ b/drivers/scsi/lpfc/lpfc_vport.h
@@ -112,4 +112,8 @@ struct vport_cmd_tag {
void lpfc_vport_set_state(struct lpfc_vport *vport,
enum fc_vport_state new_state);
+void lpfc_vport_reset_stat_data(struct lpfc_vport *);
+void lpfc_alloc_bucket(struct lpfc_vport *);
+void lpfc_free_bucket(struct lpfc_vport *);
+
#endif /* H_LPFC_VPORT */
diff --git a/drivers/scsi/osst.c b/drivers/scsi/osst.c
index 1c79f9794f4..0ea78d9a37d 100644
--- a/drivers/scsi/osst.c
+++ b/drivers/scsi/osst.c
@@ -5708,7 +5708,8 @@ static int osst_sysfs_add(dev_t dev, struct device *device, struct osst_tape * S
struct device *osst_member;
int err;
- osst_member = device_create_drvdata(osst_sysfs_class, device, dev, STp, "%s", name);
+ osst_member = device_create(osst_sysfs_class, device, dev, STp,
+ "%s", name);
if (IS_ERR(osst_member)) {
printk(KERN_WARNING "osst :W: Unable to add sysfs class member %s\n", name);
return PTR_ERR(osst_member);
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 0ddfe7106b3..ed731968f15 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -1006,7 +1006,6 @@ qla2x00_terminate_rport_io(struct fc_rport *rport)
}
qla2x00_abort_fcport_cmds(fcport);
- scsi_target_unblock(&rport->dev);
}
static int
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index fc4bfa7f839..a76efd99d00 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -1187,7 +1187,12 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
cp->serial_number, comp_status,
atomic_read(&fcport->state)));
- cp->result = DID_BUS_BUSY << 16;
+ /*
+ * We are going to have the fc class block the rport
+ * while we try to recover so instruct the mid layer
+ * to requeue until the class decides how to handle this.
+ */
+ cp->result = DID_TRANSPORT_DISRUPTED << 16;
if (atomic_read(&fcport->state) == FCS_ONLINE)
qla2x00_mark_device_lost(fcport->ha, fcport, 1, 1);
break;
@@ -1214,7 +1219,12 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
break;
case CS_TIMEOUT:
- cp->result = DID_BUS_BUSY << 16;
+ /*
+ * We are going to have the fc class block the rport
+ * while we try to recover so instruct the mid layer
+ * to requeue until the class decides how to handle this.
+ */
+ cp->result = DID_TRANSPORT_DISRUPTED << 16;
if (IS_FWI2_CAPABLE(ha)) {
DEBUG2(printk(KERN_INFO
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 3433441b956..2aed4721c0d 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -394,10 +394,8 @@ qla2x00_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
}
/* Close window on fcport/rport state-transitioning. */
- if (fcport->drport) {
- cmd->result = DID_IMM_RETRY << 16;
- goto qc_fail_command;
- }
+ if (fcport->drport)
+ goto qc_target_busy;
if (atomic_read(&fcport->state) != FCS_ONLINE) {
if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
@@ -405,7 +403,7 @@ qla2x00_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
cmd->result = DID_NO_CONNECT << 16;
goto qc_fail_command;
}
- goto qc_host_busy;
+ goto qc_target_busy;
}
spin_unlock_irq(ha->host->host_lock);
@@ -428,10 +426,11 @@ qc_host_busy_free_sp:
qc_host_busy_lock:
spin_lock_irq(ha->host->host_lock);
-
-qc_host_busy:
return SCSI_MLQUEUE_HOST_BUSY;
+qc_target_busy:
+ return SCSI_MLQUEUE_TARGET_BUSY;
+
qc_fail_command:
done(cmd);
@@ -461,10 +460,8 @@ qla24xx_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
}
/* Close window on fcport/rport state-transitioning. */
- if (fcport->drport) {
- cmd->result = DID_IMM_RETRY << 16;
- goto qc24_fail_command;
- }
+ if (fcport->drport)
+ goto qc24_target_busy;
if (atomic_read(&fcport->state) != FCS_ONLINE) {
if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
@@ -472,7 +469,7 @@ qla24xx_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
cmd->result = DID_NO_CONNECT << 16;
goto qc24_fail_command;
}
- goto qc24_host_busy;
+ goto qc24_target_busy;
}
spin_unlock_irq(ha->host->host_lock);
@@ -495,10 +492,11 @@ qc24_host_busy_free_sp:
qc24_host_busy_lock:
spin_lock_irq(ha->host->host_lock);
-
-qc24_host_busy:
return SCSI_MLQUEUE_HOST_BUSY;
+qc24_target_busy:
+ return SCSI_MLQUEUE_TARGET_BUSY;
+
qc24_fail_command:
done(cmd);
diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c
index a91a57c57bf..799120fcb9b 100644
--- a/drivers/scsi/qla4xxx/ql4_isr.c
+++ b/drivers/scsi/qla4xxx/ql4_isr.c
@@ -139,7 +139,7 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha,
ha->host_no, cmd->device->channel,
cmd->device->id, cmd->device->lun));
- cmd->result = DID_BUS_BUSY << 16;
+ cmd->result = DID_TRANSPORT_DISRUPTED << 16;
/*
* Mark device missing so that we won't continue to send
@@ -243,7 +243,7 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha,
if (atomic_read(&ddb_entry->state) == DDB_STATE_ONLINE)
qla4xxx_mark_device_missing(ha, ddb_entry);
- cmd->result = DID_BUS_BUSY << 16;
+ cmd->result = DID_TRANSPORT_DISRUPTED << 16;
break;
case SCS_QUEUE_FULL:
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index de8279ad7d8..db7ea3bb4e8 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -353,7 +353,7 @@ void qla4xxx_mark_device_missing(struct scsi_qla_host *ha,
ha->host_no, ddb_entry->bus, ddb_entry->target,
ddb_entry->fw_ddb_index));
iscsi_block_session(ddb_entry->sess);
- iscsi_conn_error(ddb_entry->conn, ISCSI_ERR_CONN_FAILED);
+ iscsi_conn_error_event(ddb_entry->conn, ISCSI_ERR_CONN_FAILED);
}
static struct srb* qla4xxx_get_new_srb(struct scsi_qla_host *ha,
@@ -439,7 +439,7 @@ static int qla4xxx_queuecommand(struct scsi_cmnd *cmd,
cmd->result = DID_NO_CONNECT << 16;
goto qc_fail_command;
}
- goto qc_host_busy;
+ return SCSI_MLQUEUE_TARGET_BUSY;
}
if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags))
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 2ac3cb2b908..f8b79d401d5 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -754,8 +754,12 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
}
spin_unlock_irqrestore(host->host_lock, flags);
if (rtn) {
- scsi_queue_insert(cmd, (rtn == SCSI_MLQUEUE_DEVICE_BUSY) ?
- rtn : SCSI_MLQUEUE_HOST_BUSY);
+ if (rtn != SCSI_MLQUEUE_DEVICE_BUSY &&
+ rtn != SCSI_MLQUEUE_TARGET_BUSY)
+ rtn = SCSI_MLQUEUE_HOST_BUSY;
+
+ scsi_queue_insert(cmd, rtn);
+
SCSI_LOG_MLQUEUE(3,
printk("queuecommand : request rejected\n"));
}
@@ -800,6 +804,7 @@ static struct scsi_driver *scsi_cmd_to_driver(struct scsi_cmnd *cmd)
void scsi_finish_command(struct scsi_cmnd *cmd)
{
struct scsi_device *sdev = cmd->device;
+ struct scsi_target *starget = scsi_target(sdev);
struct Scsi_Host *shost = sdev->host;
struct scsi_driver *drv;
unsigned int good_bytes;
@@ -815,6 +820,7 @@ void scsi_finish_command(struct scsi_cmnd *cmd)
* XXX(hch): What about locking?
*/
shost->host_blocked = 0;
+ starget->target_blocked = 0;
sdev->device_blocked = 0;
/*
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index fecefa05cb6..94ed262bdf0 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -1065,10 +1065,10 @@ static int scsi_eh_target_reset(struct Scsi_Host *shost,
struct list_head *done_q)
{
struct scsi_cmnd *scmd, *tgtr_scmd, *next;
- unsigned int id;
+ unsigned int id = 0;
int rtn;
- for (id = 0; id <= shost->max_id; id++) {
+ do {
tgtr_scmd = NULL;
list_for_each_entry(scmd, work_q, eh_entry) {
if (id == scmd_id(scmd)) {
@@ -1076,8 +1076,18 @@ static int scsi_eh_target_reset(struct Scsi_Host *shost,
break;
}
}
+ if (!tgtr_scmd) {
+ /* not one exactly equal; find the next highest */
+ list_for_each_entry(scmd, work_q, eh_entry) {
+ if (scmd_id(scmd) > id &&
+ (!tgtr_scmd ||
+ scmd_id(tgtr_scmd) > scmd_id(scmd)))
+ tgtr_scmd = scmd;
+ }
+ }
if (!tgtr_scmd)
- continue;
+ /* no more commands, that's it */
+ break;
SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Sending target reset "
"to target %d\n",
@@ -1096,7 +1106,8 @@ static int scsi_eh_target_reset(struct Scsi_Host *shost,
" failed target: "
"%d\n",
current->comm, id));
- }
+ id++;
+ } while(id != 0);
return list_empty(work_q);
}
@@ -1219,6 +1230,40 @@ static void scsi_eh_offline_sdevs(struct list_head *work_q,
}
/**
+ * scsi_noretry_cmd - determine if command should be failed fast
+ * @scmd: SCSI cmd to examine.
+ */
+int scsi_noretry_cmd(struct scsi_cmnd *scmd)
+{
+ switch (host_byte(scmd->result)) {
+ case DID_OK:
+ break;
+ case DID_BUS_BUSY:
+ return blk_failfast_transport(scmd->request);
+ case DID_PARITY:
+ return blk_failfast_dev(scmd->request);
+ case DID_ERROR:
+ if (msg_byte(scmd->result) == COMMAND_COMPLETE &&
+ status_byte(scmd->result) == RESERVATION_CONFLICT)
+ return 0;
+ /* fall through */
+ case DID_SOFT_ERROR:
+ return blk_failfast_driver(scmd->request);
+ }
+
+ switch (status_byte(scmd->result)) {
+ case CHECK_CONDITION:
+ /*
+		 * assume the caller has checked the sense and determined
+ * the check condition was retryable.
+ */
+ return blk_failfast_dev(scmd->request);
+ }
+
+ return 0;
+}
+
+/**
* scsi_decide_disposition - Disposition a cmd on return from LLD.
* @scmd: SCSI cmd to examine.
*
@@ -1290,7 +1335,20 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd)
case DID_REQUEUE:
return ADD_TO_MLQUEUE;
-
+ case DID_TRANSPORT_DISRUPTED:
+ /*
+ * LLD/transport was disrupted during processing of the IO.
+ * The transport class is now blocked/blocking,
+ * and the transport will decide what to do with the IO
+		 * based on its timers and recovery capabilities.
+ */
+ return ADD_TO_MLQUEUE;
+ case DID_TRANSPORT_FAILFAST:
+ /*
+ * The transport decided to failfast the IO (most likely
+ * the fast io fail tmo fired), so send IO directly upwards.
+ */
+ return SUCCESS;
case DID_ERROR:
if (msg_byte(scmd->result) == COMMAND_COMPLETE &&
status_byte(scmd->result) == RESERVATION_CONFLICT)
@@ -1383,7 +1441,7 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd)
* even if the request is marked fast fail, we still requeue
* for queue congestion conditions (QUEUE_FULL or BUSY) */
if ((++scmd->retries) <= scmd->allowed
- && !blk_noretry_request(scmd->request)) {
+ && !scsi_noretry_cmd(scmd)) {
return NEEDS_RETRY;
} else {
/*
@@ -1508,7 +1566,7 @@ void scsi_eh_flush_done_q(struct list_head *done_q)
list_for_each_entry_safe(scmd, next, done_q, eh_entry) {
list_del_init(&scmd->eh_entry);
if (scsi_device_online(scmd->device) &&
- !blk_noretry_request(scmd->request) &&
+ !scsi_noretry_cmd(scmd) &&
(++scmd->retries <= scmd->allowed)) {
SCSI_LOG_ERROR_RECOVERY(3, printk("%s: flush"
" retry cmd: %p\n",
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 98ee55ced59..e5a9526d203 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -114,6 +114,7 @@ int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
{
struct Scsi_Host *host = cmd->device->host;
struct scsi_device *device = cmd->device;
+ struct scsi_target *starget = scsi_target(device);
struct request_queue *q = device->request_queue;
unsigned long flags;
@@ -133,10 +134,17 @@ int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
* if a command is requeued with no other commands outstanding
* either for the device or for the host.
*/
- if (reason == SCSI_MLQUEUE_HOST_BUSY)
+ switch (reason) {
+ case SCSI_MLQUEUE_HOST_BUSY:
host->host_blocked = host->max_host_blocked;
- else if (reason == SCSI_MLQUEUE_DEVICE_BUSY)
+ break;
+ case SCSI_MLQUEUE_DEVICE_BUSY:
device->device_blocked = device->max_device_blocked;
+ break;
+ case SCSI_MLQUEUE_TARGET_BUSY:
+ starget->target_blocked = starget->max_target_blocked;
+ break;
+ }
/*
* Decrement the counters, since these commands are no longer
@@ -460,10 +468,12 @@ static void scsi_init_cmd_errh(struct scsi_cmnd *cmd)
void scsi_device_unbusy(struct scsi_device *sdev)
{
struct Scsi_Host *shost = sdev->host;
+ struct scsi_target *starget = scsi_target(sdev);
unsigned long flags;
spin_lock_irqsave(shost->host_lock, flags);
shost->host_busy--;
+ starget->target_busy--;
if (unlikely(scsi_host_in_recovery(shost) &&
(shost->host_failed || shost->host_eh_scheduled)))
scsi_eh_wakeup(shost);
@@ -519,6 +529,13 @@ static void scsi_single_lun_run(struct scsi_device *current_sdev)
spin_unlock_irqrestore(shost->host_lock, flags);
}
+static inline int scsi_target_is_busy(struct scsi_target *starget)
+{
+ return ((starget->can_queue > 0 &&
+ starget->target_busy >= starget->can_queue) ||
+ starget->target_blocked);
+}
+
/*
* Function: scsi_run_queue()
*
@@ -533,7 +550,7 @@ static void scsi_single_lun_run(struct scsi_device *current_sdev)
*/
static void scsi_run_queue(struct request_queue *q)
{
- struct scsi_device *sdev = q->queuedata;
+ struct scsi_device *starved_head = NULL, *sdev = q->queuedata;
struct Scsi_Host *shost = sdev->host;
unsigned long flags;
@@ -560,6 +577,21 @@ static void scsi_run_queue(struct request_queue *q)
*/
sdev = list_entry(shost->starved_list.next,
struct scsi_device, starved_entry);
+ /*
+ * The *queue_ready functions can add a device back onto the
+	 * starved list's tail, so we must check for an infinite loop.
+ */
+ if (sdev == starved_head)
+ break;
+ if (!starved_head)
+ starved_head = sdev;
+
+ if (scsi_target_is_busy(scsi_target(sdev))) {
+ list_move_tail(&sdev->starved_entry,
+ &shost->starved_list);
+ continue;
+ }
+
list_del_init(&sdev->starved_entry);
spin_unlock(shost->host_lock);
@@ -575,13 +607,6 @@ static void scsi_run_queue(struct request_queue *q)
spin_unlock(sdev->request_queue->queue_lock);
spin_lock(shost->host_lock);
- if (unlikely(!list_empty(&sdev->starved_entry)))
- /*
- * sdev lost a race, and was put back on the
- * starved list. This is unlikely but without this
- * in theory we could loop forever.
- */
- break;
}
spin_unlock_irqrestore(shost->host_lock, flags);
@@ -681,7 +706,7 @@ static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int error,
leftover = req->data_len;
/* kill remainder if no retrys */
- if (error && blk_noretry_request(req))
+ if (error && scsi_noretry_cmd(cmd))
blk_end_request(req, error, leftover);
else {
if (requeue) {
@@ -1344,6 +1369,52 @@ static inline int scsi_dev_queue_ready(struct request_queue *q,
return 1;
}
+
+/*
+ * scsi_target_queue_ready: checks if we can send commands to the target
+ * @sdev: scsi device on starget to check.
+ *
+ * Called with the host lock held.
+ */
+static inline int scsi_target_queue_ready(struct Scsi_Host *shost,
+ struct scsi_device *sdev)
+{
+ struct scsi_target *starget = scsi_target(sdev);
+
+ if (starget->single_lun) {
+ if (starget->starget_sdev_user &&
+ starget->starget_sdev_user != sdev)
+ return 0;
+ starget->starget_sdev_user = sdev;
+ }
+
+ if (starget->target_busy == 0 && starget->target_blocked) {
+ /*
+ * unblock after target_blocked iterates to zero
+ */
+ if (--starget->target_blocked == 0) {
+ SCSI_LOG_MLQUEUE(3, starget_printk(KERN_INFO, starget,
+ "unblocking target at zero depth\n"));
+ } else {
+ blk_plug_device(sdev->request_queue);
+ return 0;
+ }
+ }
+
+ if (scsi_target_is_busy(starget)) {
+ if (list_empty(&sdev->starved_entry)) {
+ list_add_tail(&sdev->starved_entry,
+ &shost->starved_list);
+ return 0;
+ }
+ }
+
+ /* We're OK to process the command, so we can't be starved */
+ if (!list_empty(&sdev->starved_entry))
+ list_del_init(&sdev->starved_entry);
+ return 1;
+}
+
/*
* scsi_host_queue_ready: if we can send requests to shost, return 1 else
* return 0. We must end up running the queue again whenever 0 is
@@ -1390,6 +1461,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
{
struct scsi_cmnd *cmd = req->special;
struct scsi_device *sdev = cmd->device;
+ struct scsi_target *starget = scsi_target(sdev);
struct Scsi_Host *shost = sdev->host;
blkdev_dequeue_request(req);
@@ -1413,6 +1485,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
spin_unlock(sdev->request_queue->queue_lock);
spin_lock(shost->host_lock);
shost->host_busy++;
+ starget->target_busy++;
spin_unlock(shost->host_lock);
spin_lock(sdev->request_queue->queue_lock);
@@ -1550,14 +1623,13 @@ static void scsi_request_fn(struct request_queue *q)
goto not_ready;
}
+ if (!scsi_target_queue_ready(shost, sdev))
+ goto not_ready;
+
if (!scsi_host_queue_ready(q, shost, sdev))
goto not_ready;
- if (scsi_target(sdev)->single_lun) {
- if (scsi_target(sdev)->starget_sdev_user &&
- scsi_target(sdev)->starget_sdev_user != sdev)
- goto not_ready;
- scsi_target(sdev)->starget_sdev_user = sdev;
- }
+
+ scsi_target(sdev)->target_busy++;
shost->host_busy++;
/*
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index 6cddd5dd323..e1850904ff7 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -59,6 +59,7 @@ void scsi_eh_ready_devs(struct Scsi_Host *shost,
struct list_head *done_q);
int scsi_eh_get_sense(struct list_head *work_q,
struct list_head *done_q);
+int scsi_noretry_cmd(struct scsi_cmnd *scmd);
/* scsi_lib.c */
extern int scsi_maybe_unblock_host(struct scsi_device *sdev);
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 334862e26a1..b14dc02c3de 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -419,6 +419,7 @@ static struct scsi_target *scsi_alloc_target(struct device *parent,
dev->type = &scsi_target_type;
starget->id = id;
starget->channel = channel;
+ starget->can_queue = 0;
INIT_LIST_HEAD(&starget->siblings);
INIT_LIST_HEAD(&starget->devices);
starget->state = STARGET_CREATED;
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index d5f7653bb94..1e71abf0607 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -2133,8 +2133,7 @@ fc_attach_transport(struct fc_function_template *ft)
SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(roles);
SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(port_state);
SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(scsi_target_id);
- if (ft->terminate_rport_io)
- SETUP_PRIVATE_RPORT_ATTRIBUTE_RW(fast_io_fail_tmo);
+ SETUP_PRIVATE_RPORT_ATTRIBUTE_RW(fast_io_fail_tmo);
BUG_ON(count > FC_RPORT_NUM_ATTRS);
@@ -2328,6 +2327,22 @@ fc_remove_host(struct Scsi_Host *shost)
}
EXPORT_SYMBOL(fc_remove_host);
+static void fc_terminate_rport_io(struct fc_rport *rport)
+{
+ struct Scsi_Host *shost = rport_to_shost(rport);
+ struct fc_internal *i = to_fc_internal(shost->transportt);
+
+ /* Involve the LLDD if possible to terminate all io on the rport. */
+ if (i->f->terminate_rport_io)
+ i->f->terminate_rport_io(rport);
+
+ /*
+ * must unblock to flush queued IO. The caller will have set
+ * the port_state or flags, so that fc_remote_port_chkready will
+ * fail IO.
+ */
+ scsi_target_unblock(&rport->dev);
+}
/**
* fc_starget_delete - called to delete the scsi decendents of an rport
@@ -2340,13 +2355,8 @@ fc_starget_delete(struct work_struct *work)
{
struct fc_rport *rport =
container_of(work, struct fc_rport, stgt_delete_work);
- struct Scsi_Host *shost = rport_to_shost(rport);
- struct fc_internal *i = to_fc_internal(shost->transportt);
-
- /* Involve the LLDD if possible to terminate all io on the rport. */
- if (i->f->terminate_rport_io)
- i->f->terminate_rport_io(rport);
+ fc_terminate_rport_io(rport);
scsi_remove_target(&rport->dev);
}
@@ -2372,10 +2382,7 @@ fc_rport_final_delete(struct work_struct *work)
if (rport->flags & FC_RPORT_SCAN_PENDING)
scsi_flush_work(shost);
- /* involve the LLDD to terminate all pending i/o */
- if (i->f->terminate_rport_io)
- i->f->terminate_rport_io(rport);
-
+ fc_terminate_rport_io(rport);
/*
* Cancel any outstanding timers. These should really exist
* only when rmmod'ing the LLDD and we're asking for
@@ -2639,7 +2646,8 @@ fc_remote_port_add(struct Scsi_Host *shost, int channel,
spin_lock_irqsave(shost->host_lock, flags);
- rport->flags &= ~FC_RPORT_DEVLOSS_PENDING;
+ rport->flags &= ~(FC_RPORT_FAST_FAIL_TIMEDOUT |
+ FC_RPORT_DEVLOSS_PENDING);
/* if target, initiate a scan */
if (rport->scsi_target_id != -1) {
@@ -2702,6 +2710,7 @@ fc_remote_port_add(struct Scsi_Host *shost, int channel,
rport->port_id = ids->port_id;
rport->roles = ids->roles;
rport->port_state = FC_PORTSTATE_ONLINE;
+ rport->flags &= ~FC_RPORT_FAST_FAIL_TIMEDOUT;
if (fci->f->dd_fcrport_size)
memset(rport->dd_data, 0,
@@ -2784,7 +2793,6 @@ void
fc_remote_port_delete(struct fc_rport *rport)
{
struct Scsi_Host *shost = rport_to_shost(rport);
- struct fc_internal *i = to_fc_internal(shost->transportt);
int timeout = rport->dev_loss_tmo;
unsigned long flags;
@@ -2830,7 +2838,7 @@ fc_remote_port_delete(struct fc_rport *rport)
/* see if we need to kill io faster than waiting for device loss */
if ((rport->fast_io_fail_tmo != -1) &&
- (rport->fast_io_fail_tmo < timeout) && (i->f->terminate_rport_io))
+ (rport->fast_io_fail_tmo < timeout))
fc_queue_devloss_work(shost, &rport->fail_io_work,
rport->fast_io_fail_tmo * HZ);
@@ -2906,7 +2914,8 @@ fc_remote_port_rolechg(struct fc_rport *rport, u32 roles)
fc_flush_devloss(shost);
spin_lock_irqsave(shost->host_lock, flags);
- rport->flags &= ~FC_RPORT_DEVLOSS_PENDING;
+ rport->flags &= ~(FC_RPORT_FAST_FAIL_TIMEDOUT |
+ FC_RPORT_DEVLOSS_PENDING);
spin_unlock_irqrestore(shost->host_lock, flags);
/* ensure any stgt delete functions are done */
@@ -3001,6 +3010,7 @@ fc_timeout_deleted_rport(struct work_struct *work)
rport->supported_classes = FC_COS_UNSPECIFIED;
rport->roles = FC_PORT_ROLE_UNKNOWN;
rport->port_state = FC_PORTSTATE_NOTPRESENT;
+ rport->flags &= ~FC_RPORT_FAST_FAIL_TIMEDOUT;
/* remove the identifiers that aren't used in the consisting binding */
switch (fc_host->tgtid_bind_type) {
@@ -3043,13 +3053,12 @@ fc_timeout_fail_rport_io(struct work_struct *work)
{
struct fc_rport *rport =
container_of(work, struct fc_rport, fail_io_work.work);
- struct Scsi_Host *shost = rport_to_shost(rport);
- struct fc_internal *i = to_fc_internal(shost->transportt);
if (rport->port_state != FC_PORTSTATE_BLOCKED)
return;
- i->f->terminate_rport_io(rport);
+ rport->flags |= FC_RPORT_FAST_FAIL_TIMEDOUT;
+ fc_terminate_rport_io(rport);
}
/**
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 0ce5f7cdfe2..4a803ebaf50 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -138,7 +138,7 @@ static ssize_t
show_ep_handle(struct device *dev, struct device_attribute *attr, char *buf)
{
struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
- return sprintf(buf, "%u\n", ep->id);
+ return sprintf(buf, "%llu\n", (unsigned long long) ep->id);
}
static ISCSI_ATTR(ep, handle, S_IRUGO, show_ep_handle, NULL);
@@ -156,7 +156,7 @@ static struct attribute_group iscsi_endpoint_group = {
static int iscsi_match_epid(struct device *dev, void *data)
{
struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
- unsigned int *epid = (unsigned int *) data;
+ uint64_t *epid = (uint64_t *) data;
return *epid == ep->id;
}
@@ -166,7 +166,7 @@ iscsi_create_endpoint(int dd_size)
{
struct device *dev;
struct iscsi_endpoint *ep;
- unsigned int id;
+ uint64_t id;
int err;
for (id = 1; id < ISCSI_MAX_EPID; id++) {
@@ -187,7 +187,8 @@ iscsi_create_endpoint(int dd_size)
ep->id = id;
ep->dev.class = &iscsi_endpoint_class;
- snprintf(ep->dev.bus_id, BUS_ID_SIZE, "ep-%u", id);
+ snprintf(ep->dev.bus_id, BUS_ID_SIZE, "ep-%llu",
+ (unsigned long long) id);
err = device_register(&ep->dev);
if (err)
goto free_ep;
@@ -374,10 +375,10 @@ int iscsi_session_chkready(struct iscsi_cls_session *session)
err = 0;
break;
case ISCSI_SESSION_FAILED:
- err = DID_IMM_RETRY << 16;
+ err = DID_TRANSPORT_DISRUPTED << 16;
break;
case ISCSI_SESSION_FREE:
- err = DID_NO_CONNECT << 16;
+ err = DID_TRANSPORT_FAILFAST << 16;
break;
default:
err = DID_NO_CONNECT << 16;
@@ -1010,7 +1011,7 @@ int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr,
skb = alloc_skb(len, GFP_ATOMIC);
if (!skb) {
- iscsi_conn_error(conn, ISCSI_ERR_CONN_FAILED);
+ iscsi_conn_error_event(conn, ISCSI_ERR_CONN_FAILED);
iscsi_cls_conn_printk(KERN_ERR, conn, "can not deliver "
"control PDU: OOM\n");
return -ENOMEM;
@@ -1031,7 +1032,7 @@ int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr,
}
EXPORT_SYMBOL_GPL(iscsi_recv_pdu);
-void iscsi_conn_error(struct iscsi_cls_conn *conn, enum iscsi_err error)
+void iscsi_conn_error_event(struct iscsi_cls_conn *conn, enum iscsi_err error)
{
struct nlmsghdr *nlh;
struct sk_buff *skb;
@@ -1063,7 +1064,7 @@ void iscsi_conn_error(struct iscsi_cls_conn *conn, enum iscsi_err error)
iscsi_cls_conn_printk(KERN_INFO, conn, "detected conn error (%d)\n",
error);
}
-EXPORT_SYMBOL_GPL(iscsi_conn_error);
+EXPORT_SYMBOL_GPL(iscsi_conn_error_event);
static int
iscsi_if_send_reply(int pid, int seq, int type, int done, int multi,
diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c
index b29360ed0bd..7c2d28924d2 100644
--- a/drivers/scsi/scsi_transport_spi.c
+++ b/drivers/scsi/scsi_transport_spi.c
@@ -109,7 +109,9 @@ static int spi_execute(struct scsi_device *sdev, const void *cmd,
for(i = 0; i < DV_RETRIES; i++) {
result = scsi_execute(sdev, cmd, dir, buffer, bufflen,
sense, DV_TIMEOUT, /* retries */ 1,
- REQ_FAILFAST);
+ REQ_FAILFAST_DEV |
+ REQ_FAILFAST_TRANSPORT |
+ REQ_FAILFAST_DRIVER);
if (result & DRIVER_SENSE) {
struct scsi_sense_hdr sshdr_tmp;
if (!sshdr)
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index a7b53be6336..7c4d2e68df1 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -384,7 +384,7 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
sector_t block = rq->sector;
sector_t threshold;
unsigned int this_count = rq->nr_sectors;
- int ret;
+ int ret, host_dif;
if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
ret = scsi_setup_blk_pc_cmnd(sdp, rq);
@@ -515,7 +515,8 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
rq->nr_sectors));
/* Set RDPROTECT/WRPROTECT if disk is formatted with DIF */
- if (scsi_host_dif_capable(sdp->host, sdkp->protection_type))
+ host_dif = scsi_host_dif_capable(sdp->host, sdkp->protection_type);
+ if (host_dif)
SCpnt->cmnd[1] = 1 << 5;
else
SCpnt->cmnd[1] = 0;
@@ -573,8 +574,9 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
SCpnt->sdb.length = this_count * sdp->sector_size;
/* If DIF or DIX is enabled, tell HBA how to handle request */
- if (sdkp->protection_type || scsi_prot_sg_count(SCpnt))
- sd_dif_op(SCpnt, sdkp->protection_type, scsi_prot_sg_count(SCpnt));
+ if (host_dif || scsi_prot_sg_count(SCpnt))
+ sd_dif_op(SCpnt, host_dif, scsi_prot_sg_count(SCpnt),
+ sdkp->protection_type);
/*
* We shouldn't disconnect in the middle of a sector, so with a dumb
@@ -1252,14 +1254,12 @@ void sd_read_protection_type(struct scsi_disk *sdkp, unsigned char *buffer)
else
type = ((buffer[12] >> 1) & 7) + 1; /* P_TYPE 0 = Type 1 */
+ sdkp->protection_type = type;
+
switch (type) {
case SD_DIF_TYPE0_PROTECTION:
- sdkp->protection_type = 0;
- break;
-
case SD_DIF_TYPE1_PROTECTION:
case SD_DIF_TYPE3_PROTECTION:
- sdkp->protection_type = type;
break;
case SD_DIF_TYPE2_PROTECTION:
@@ -1277,7 +1277,6 @@ void sd_read_protection_type(struct scsi_disk *sdkp, unsigned char *buffer)
return;
disable:
- sdkp->protection_type = 0;
sdkp->capacity = 0;
}
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index 95b9f06534d..75638e7d3f6 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -97,19 +97,28 @@ struct sd_dif_tuple {
__be32 ref_tag; /* Target LBA or indirect LBA */
};
-#if defined(CONFIG_BLK_DEV_INTEGRITY)
+#ifdef CONFIG_BLK_DEV_INTEGRITY
-extern void sd_dif_op(struct scsi_cmnd *, unsigned int, unsigned int);
+extern void sd_dif_op(struct scsi_cmnd *, unsigned int, unsigned int, unsigned int);
extern void sd_dif_config_host(struct scsi_disk *);
extern int sd_dif_prepare(struct request *rq, sector_t, unsigned int);
extern void sd_dif_complete(struct scsi_cmnd *, unsigned int);
#else /* CONFIG_BLK_DEV_INTEGRITY */
-#define sd_dif_op(a, b, c) do { } while (0)
-#define sd_dif_config_host(a) do { } while (0)
-#define sd_dif_prepare(a, b, c) (0)
-#define sd_dif_complete(a, b) (0)
+static inline void sd_dif_op(struct scsi_cmnd *cmd, unsigned int a, unsigned int b, unsigned int c)
+{
+}
+static inline void sd_dif_config_host(struct scsi_disk *disk)
+{
+}
+static inline int sd_dif_prepare(struct request *rq, sector_t s, unsigned int a)
+{
+ return 0;
+}
+static inline void sd_dif_complete(struct scsi_cmnd *cmd, unsigned int a)
+{
+}
#endif /* CONFIG_BLK_DEV_INTEGRITY */
diff --git a/drivers/scsi/sd_dif.c b/drivers/scsi/sd_dif.c
index 4d17f3d35aa..3ebb1f28949 100644
--- a/drivers/scsi/sd_dif.c
+++ b/drivers/scsi/sd_dif.c
@@ -311,25 +311,26 @@ void sd_dif_config_host(struct scsi_disk *sdkp)
struct scsi_device *sdp = sdkp->device;
struct gendisk *disk = sdkp->disk;
u8 type = sdkp->protection_type;
+ int dif, dix;
- /* If this HBA doesn't support DIX, resort to normal I/O or DIF */
- if (scsi_host_dix_capable(sdp->host, type) == 0) {
+ dif = scsi_host_dif_capable(sdp->host, type);
+ dix = scsi_host_dix_capable(sdp->host, type);
- if (type == SD_DIF_TYPE0_PROTECTION)
- return;
-
- if (scsi_host_dif_capable(sdp->host, type) == 0) {
- sd_printk(KERN_INFO, sdkp, "Type %d protection " \
- "unsupported by HBA. Disabling DIF.\n", type);
- sdkp->protection_type = 0;
- return;
- }
+ if (!dix && scsi_host_dix_capable(sdp->host, 0)) {
+ dif = 0; dix = 1;
+ }
- sd_printk(KERN_INFO, sdkp, "Enabling DIF Type %d protection\n",
- type);
+ if (type) {
+ if (dif)
+ sd_printk(KERN_NOTICE, sdkp,
+ "Enabling DIF Type %d protection\n", type);
+ else
+ sd_printk(KERN_NOTICE, sdkp,
+ "Disabling DIF Type %d protection\n", type);
+ }
+ if (!dix)
return;
- }
/* Enable DMA of protection information */
if (scsi_host_get_guard(sdkp->device->host) & SHOST_DIX_GUARD_IP)
@@ -343,17 +344,17 @@ void sd_dif_config_host(struct scsi_disk *sdkp)
else
blk_integrity_register(disk, &dif_type1_integrity_crc);
- sd_printk(KERN_INFO, sdkp,
- "Enabling %s integrity protection\n", disk->integrity->name);
+ sd_printk(KERN_NOTICE, sdkp,
+ "Enabling DIX %s protection\n", disk->integrity->name);
/* Signal to block layer that we support sector tagging */
- if (type && sdkp->ATO) {
+ if (dif && type && sdkp->ATO) {
if (type == SD_DIF_TYPE3_PROTECTION)
disk->integrity->tag_size = sizeof(u16) + sizeof(u32);
else
disk->integrity->tag_size = sizeof(u16);
- sd_printk(KERN_INFO, sdkp, "DIF application tag size %u\n",
+ sd_printk(KERN_NOTICE, sdkp, "DIF application tag size %u\n",
disk->integrity->tag_size);
}
}
@@ -361,7 +362,7 @@ void sd_dif_config_host(struct scsi_disk *sdkp)
/*
* DIF DMA operation magic decoder ring.
*/
-void sd_dif_op(struct scsi_cmnd *scmd, unsigned int dif, unsigned int dix)
+void sd_dif_op(struct scsi_cmnd *scmd, unsigned int dif, unsigned int dix, unsigned int type)
{
int csum_convert, prot_op;
@@ -406,7 +407,8 @@ void sd_dif_op(struct scsi_cmnd *scmd, unsigned int dif, unsigned int dix)
}
scsi_set_prot_op(scmd, prot_op);
- scsi_set_prot_type(scmd, dif);
+ if (dif)
+ scsi_set_prot_type(scmd, type);
}
/*
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index ba9b9bbd4e7..93bd59a1ed7 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -1450,12 +1450,10 @@ sg_add(struct device *cl_dev, struct class_interface *cl_intf)
if (sg_sysfs_valid) {
struct device *sg_class_member;
- sg_class_member = device_create_drvdata(sg_sysfs_class,
- cl_dev->parent,
- MKDEV(SCSI_GENERIC_MAJOR,
- sdp->index),
- sdp,
- "%s", disk->disk_name);
+ sg_class_member = device_create(sg_sysfs_class, cl_dev->parent,
+ MKDEV(SCSI_GENERIC_MAJOR,
+ sdp->index),
+ sdp, "%s", disk->disk_name);
if (IS_ERR(sg_class_member)) {
printk(KERN_ERR "sg_add: "
"device_create failed\n");
diff --git a/drivers/scsi/sr_vendor.c b/drivers/scsi/sr_vendor.c
index 4eb3da996b3..4ad3e017213 100644
--- a/drivers/scsi/sr_vendor.c
+++ b/drivers/scsi/sr_vendor.c
@@ -223,9 +223,9 @@ int sr_cd_check(struct cdrom_device_info *cdi)
no_multi = 1;
break;
}
- min = BCD2BIN(buffer[15]);
- sec = BCD2BIN(buffer[16]);
- frame = BCD2BIN(buffer[17]);
+ min = bcd2bin(buffer[15]);
+ sec = bcd2bin(buffer[16]);
+ frame = bcd2bin(buffer[17]);
sector = min * CD_SECS * CD_FRAMES + sec * CD_FRAMES + frame;
break;
}
@@ -252,9 +252,9 @@ int sr_cd_check(struct cdrom_device_info *cdi)
}
if (rc != 0)
break;
- min = BCD2BIN(buffer[1]);
- sec = BCD2BIN(buffer[2]);
- frame = BCD2BIN(buffer[3]);
+ min = bcd2bin(buffer[1]);
+ sec = bcd2bin(buffer[2]);
+ frame = bcd2bin(buffer[3]);
sector = min * CD_SECS * CD_FRAMES + sec * CD_FRAMES + frame;
if (sector)
sector -= CD_MSF_OFFSET;
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index c2bb53e3d94..5c28d08f18f 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -4428,13 +4428,10 @@ static int do_create_class_files(struct scsi_tape *STp, int dev_num, int mode)
snprintf(name, 10, "%s%s%s", rew ? "n" : "",
STp->disk->disk_name, st_formats[i]);
st_class_member =
- device_create_drvdata(st_sysfs_class,
- &STp->device->sdev_gendev,
- MKDEV(SCSI_TAPE_MAJOR,
- TAPE_MINOR(dev_num,
- mode, rew)),
- &STp->modes[mode],
- "%s", name);
+ device_create(st_sysfs_class, &STp->device->sdev_gendev,
+ MKDEV(SCSI_TAPE_MAJOR,
+ TAPE_MINOR(dev_num, mode, rew)),
+ &STp->modes[mode], "%s", name);
if (IS_ERR(st_class_member)) {
printk(KERN_WARNING "st%d: device_create failed\n",
dev_num);