From 3772a99175f5378b5001e8da364341a8b8226a4a Mon Sep 17 00:00:00 2001
From: James Smart <James.Smart@Emulex.Com>
Date: Fri, 22 May 2009 14:50:54 -0400
Subject: [SCSI] lpfc 8.3.2 : Reorganization for SLI4

Preps the organization of the driver so that the bottom half, which
interacts with the hardware, can share common code sequences for
attachment, detachment, initialization, teardown, etc. with new
hardware.

For very common code sections, which become specific to the interface
type, the driver uses an indirect function call that is set at
initialization time. For less common sections, such as initialization
itself, the driver looks at the interface type and calls the routines
specific to that interface.
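
As a rough illustration of the two dispatch styles, here is a minimal,
standalone C sketch (the names hba, issue_iocb_s3/_s4, hba_setup and
hba_init_hw are hypothetical, not the driver's actual symbols):

  /*
   * Hot-path routines are bound to function pointers once at
   * initialization; cold paths branch on the interface type directly.
   */
  #include <stdio.h>

  struct hba {
  	int sli_rev;				/* interface type */
  	int (*issue_iocb)(struct hba *);	/* hot path, indirect */
  };

  static int issue_iocb_s3(struct hba *h) { return printf("SLI-3 issue\n"); }
  static int issue_iocb_s4(struct hba *h) { return printf("SLI-4 issue\n"); }

  /* Set once at initialization, per interface type */
  static void hba_setup(struct hba *h, int rev)
  {
  	h->sli_rev = rev;
  	h->issue_iocb = (rev == 4) ? issue_iocb_s4 : issue_iocb_s3;
  }

  /* Less common path: look at the interface type and branch */
  static void hba_init_hw(struct hba *h)
  {
  	printf(h->sli_rev == 4 ? "SLI-4 init\n" : "SLI-3 init\n");
  }

  int main(void)
  {
  	struct hba h;

  	hba_setup(&h, 4);
  	hba_init_hw(&h);		/* init: explicit branch */
  	return h.issue_iocb(&h) < 0;	/* I/O: jump-table call */
  }

With this split, issuing an IOCB costs a single pointer call with no
per-I/O branching; only rarely executed paths pay the cost of checking
the interface type.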

Signed-off-by: James Smart <james.smart@emulex.com>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
---
 drivers/scsi/lpfc/lpfc.h         |   72 +-
 drivers/scsi/lpfc/lpfc_ct.c      |    4 +-
 drivers/scsi/lpfc/lpfc_debugfs.c |   17 +-
 drivers/scsi/lpfc/lpfc_els.c     |   84 +-
 drivers/scsi/lpfc/lpfc_hbadisc.c |   69 +-
 drivers/scsi/lpfc/lpfc_init.c    | 1955 ++++++++++++++++++++++++++++----------
 drivers/scsi/lpfc/lpfc_scsi.c    |  446 ++++++---
 drivers/scsi/lpfc/lpfc_sli.c     | 1006 +++++++++++++-------
 8 files changed, 2541 insertions(+), 1112 deletions(-)

diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 1105f9a111b..6c24c9aabe7 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -23,6 +23,13 @@
 
 struct lpfc_sli2_slim;
 
+#define LPFC_PCI_DEV_LP		0x1
+#define LPFC_PCI_DEV_OC		0x2
+
+#define LPFC_SLI_REV2		2
+#define LPFC_SLI_REV3		3
+#define LPFC_SLI_REV4		4
+
 #define LPFC_MAX_TARGET		4096	/* max number of targets supported */
 #define LPFC_MAX_DISC_THREADS	64	/* max outstanding discovery els
 					   requests */
@@ -264,8 +271,8 @@ enum hba_state {
 };
 
 struct lpfc_vport {
-	struct list_head listentry;
 	struct lpfc_hba *phba;
+	struct list_head listentry;
 	uint8_t port_type;
 #define LPFC_PHYSICAL_PORT 1
 #define LPFC_NPIV_PORT  2
@@ -420,8 +427,66 @@ enum intr_type_t {
 };
 
 struct lpfc_hba {
+	/* SCSI interface function jump table entries */
+	int (*lpfc_new_scsi_buf)
+		(struct lpfc_vport *, int);
+	struct lpfc_scsi_buf * (*lpfc_get_scsi_buf)
+		(struct lpfc_hba *);
+	int (*lpfc_scsi_prep_dma_buf)
+		(struct lpfc_hba *, struct lpfc_scsi_buf *);
+	void (*lpfc_scsi_unprep_dma_buf)
+		(struct lpfc_hba *, struct lpfc_scsi_buf *);
+	void (*lpfc_release_scsi_buf)
+		(struct lpfc_hba *, struct lpfc_scsi_buf *);
+	void (*lpfc_rampdown_queue_depth)
+		(struct lpfc_hba *);
+	void (*lpfc_scsi_prep_cmnd)
+		(struct lpfc_vport *, struct lpfc_scsi_buf *,
+		 struct lpfc_nodelist *);
+	int (*lpfc_scsi_prep_task_mgmt_cmd)
+		(struct lpfc_vport *, struct lpfc_scsi_buf *,
+		 unsigned int, uint8_t);
+
+	/* IOCB interface function jump table entries */
+	int (*__lpfc_sli_issue_iocb)
+		(struct lpfc_hba *, uint32_t,
+		 struct lpfc_iocbq *, uint32_t);
+	void (*__lpfc_sli_release_iocbq)(struct lpfc_hba *,
+			 struct lpfc_iocbq *);
+	int (*lpfc_hba_down_post)(struct lpfc_hba *phba);
+
+
+	IOCB_t * (*lpfc_get_iocb_from_iocbq)
+		(struct lpfc_iocbq *);
+	void (*lpfc_scsi_cmd_iocb_cmpl)
+		(struct lpfc_hba *, struct lpfc_iocbq *, struct lpfc_iocbq *);
+
+	/* MBOX interface function jump table entries */
+	int (*lpfc_sli_issue_mbox)
+		(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
+	/* Slow-path IOCB process function jump table entries */
+	void (*lpfc_sli_handle_slow_ring_event)
+		(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+		 uint32_t mask);
+	/* INIT device interface function jump table entries */
+	int (*lpfc_sli_hbq_to_firmware)
+		(struct lpfc_hba *, uint32_t, struct hbq_dmabuf *);
+	int (*lpfc_sli_brdrestart)
+		(struct lpfc_hba *);
+	int (*lpfc_sli_brdready)
+		(struct lpfc_hba *, uint32_t);
+	void (*lpfc_handle_eratt)
+		(struct lpfc_hba *);
+	void (*lpfc_stop_port)
+		(struct lpfc_hba *);
+
+
+	/* SLI4 specific HBA data structure */
+	struct lpfc_sli4_hba sli4_hba;
+
 	struct lpfc_sli sli;
-	uint32_t sli_rev;		/* SLI2 or SLI3 */
+	uint8_t pci_dev_grp;	/* lpfc PCI dev group: 0x0, 0x1, 0x2,... */
+	uint32_t sli_rev;		/* SLI2, SLI3, or SLI4 */
 	uint32_t sli3_options;		/* Mask of enabled SLI3 options */
 #define LPFC_SLI3_HBQ_ENABLED		0x01
 #define LPFC_SLI3_NPIV_ENABLED		0x02
@@ -526,11 +591,12 @@ struct lpfc_hba {
 	unsigned long data_flags;
 
 	uint32_t hbq_in_use;		/* HBQs in use flag */
-	struct list_head hbqbuf_in_list;  /* in-fly hbq buffer list */
+	struct list_head rb_pend_list;  /* Received buffers to be processed */
 	uint32_t hbq_count;	        /* Count of configured HBQs */
 	struct hbq_s hbqs[LPFC_MAX_HBQS]; /* local copy of hbq indicies  */
 
 	unsigned long pci_bar0_map;     /* Physical address for PCI BAR0 */
+	unsigned long pci_bar1_map;     /* Physical address for PCI BAR1 */
 	unsigned long pci_bar2_map;     /* Physical address for PCI BAR2 */
 	void __iomem *slim_memmap_p;	/* Kernel memory mapped address for
 					   PCI BAR0 */
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 896c7b0351e..4164b935ea9 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -267,8 +267,6 @@ lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
 	     uint32_t tmo, uint8_t retry)
 {
 	struct lpfc_hba  *phba = vport->phba;
-	struct lpfc_sli  *psli = &phba->sli;
-	struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
 	IOCB_t *icmd;
 	struct lpfc_iocbq *geniocb;
 	int rc;
@@ -331,7 +329,7 @@ lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
 	geniocb->drvrTimeout = icmd->ulpTimeout + LPFC_DRVR_TIMEOUT;
 	geniocb->vport = vport;
 	geniocb->retry = retry;
-	rc = lpfc_sli_issue_iocb(phba, pring, geniocb, 0);
+	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, geniocb, 0);
 
 	if (rc == IOCB_ERROR) {
 		lpfc_sli_release_iocbq(phba, geniocb);
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 52be5644e07..5dd66925f4c 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -280,6 +280,8 @@ lpfc_debugfs_hbqinfo_data(struct lpfc_hba *phba, char *buf, int size)
 	struct lpfc_dmabuf *d_buf;
 	struct hbq_dmabuf *hbq_buf;
 
+	if (phba->sli_rev != 3)
+		return 0;
 	cnt = LPFC_HBQINFO_SIZE;
 	spin_lock_irq(&phba->hbalock);
 
@@ -489,12 +491,15 @@ lpfc_debugfs_dumpHostSlim_data(struct lpfc_hba *phba, char *buf, int size)
 				 pring->next_cmdidx, pring->local_getidx,
 				 pring->flag, pgpp->rspPutInx, pring->numRiocb);
 	}
-	word0 = readl(phba->HAregaddr);
-	word1 = readl(phba->CAregaddr);
-	word2 = readl(phba->HSregaddr);
-	word3 = readl(phba->HCregaddr);
-	len +=  snprintf(buf+len, size-len, "HA:%08x CA:%08x HS:%08x HC:%08x\n",
-	word0, word1, word2, word3);
+
+	if (phba->sli_rev <= LPFC_SLI_REV3) {
+		word0 = readl(phba->HAregaddr);
+		word1 = readl(phba->CAregaddr);
+		word2 = readl(phba->HSregaddr);
+		word3 = readl(phba->HCregaddr);
+		len +=  snprintf(buf+len, size-len, "HA:%08x CA:%08x HS:%08x "
+				 "HC:%08x\n", word0, word1, word2, word3);
+	}
 	spin_unlock_irq(&phba->hbalock);
 	return len;
 }
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index b8b34cf5c3d..8c5c3aea4a1 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -84,7 +84,8 @@ lpfc_els_chk_latt(struct lpfc_vport *vport)
 	uint32_t ha_copy;
 
 	if (vport->port_state >= LPFC_VPORT_READY ||
-	    phba->link_state == LPFC_LINK_DOWN)
+	    phba->link_state == LPFC_LINK_DOWN ||
+	    phba->sli_rev > LPFC_SLI_REV3)
 		return 0;
 
 	/* Read the HBA Host Attention Register */
@@ -305,7 +306,7 @@ els_iocb_free_pcmb_exit:
  *   0 - successfully issued fabric registration login for @vport
  *   -ENXIO -- failed to issue fabric registration login for @vport
  **/
-static int
+int
 lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
 {
 	struct lpfc_hba  *phba = vport->phba;
@@ -345,8 +346,7 @@ lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
 		err = 4;
 		goto fail;
 	}
-	rc = lpfc_reg_login(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox,
-			    0);
+	rc = lpfc_reg_rpi(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox, 0);
 	if (rc) {
 		err = 5;
 		goto fail_free_mbox;
@@ -1350,14 +1350,12 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
 	IOCB_t *icmd;
 	struct lpfc_nodelist *ndlp;
 	struct lpfc_iocbq *elsiocb;
-	struct lpfc_sli_ring *pring;
 	struct lpfc_sli *psli;
 	uint8_t *pcmd;
 	uint16_t cmdsize;
 	int ret;
 
 	psli = &phba->sli;
-	pring = &psli->ring[LPFC_ELS_RING];	/* ELS ring */
 
 	ndlp = lpfc_findnode_did(vport, did);
 	if (ndlp && !NLP_CHK_NODE_ACT(ndlp))
@@ -1391,7 +1389,7 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
 
 	phba->fc_stat.elsXmitPLOGI++;
 	elsiocb->iocb_cmpl = lpfc_cmpl_els_plogi;
-	ret = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
+	ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
 
 	if (ret == IOCB_ERROR) {
 		lpfc_els_free_iocb(phba, elsiocb);
@@ -1501,14 +1499,9 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	PRLI *npr;
 	IOCB_t *icmd;
 	struct lpfc_iocbq *elsiocb;
-	struct lpfc_sli_ring *pring;
-	struct lpfc_sli *psli;
 	uint8_t *pcmd;
 	uint16_t cmdsize;
 
-	psli = &phba->sli;
-	pring = &psli->ring[LPFC_ELS_RING];	/* ELS ring */
-
 	cmdsize = (sizeof(uint32_t) + sizeof(PRLI));
 	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
 				     ndlp->nlp_DID, ELS_CMD_PRLI);
@@ -1550,7 +1543,8 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	spin_lock_irq(shost->host_lock);
 	ndlp->nlp_flag |= NLP_PRLI_SND;
 	spin_unlock_irq(shost->host_lock);
-	if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
+	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
+	    IOCB_ERROR) {
 		spin_lock_irq(shost->host_lock);
 		ndlp->nlp_flag &= ~NLP_PRLI_SND;
 		spin_unlock_irq(shost->host_lock);
@@ -1788,8 +1782,6 @@ lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	ADISC *ap;
 	IOCB_t *icmd;
 	struct lpfc_iocbq *elsiocb;
-	struct lpfc_sli *psli = &phba->sli;
-	struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
 	uint8_t *pcmd;
 	uint16_t cmdsize;
 
@@ -1822,7 +1814,8 @@ lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	spin_lock_irq(shost->host_lock);
 	ndlp->nlp_flag |= NLP_ADISC_SND;
 	spin_unlock_irq(shost->host_lock);
-	if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
+	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
+	    IOCB_ERROR) {
 		spin_lock_irq(shost->host_lock);
 		ndlp->nlp_flag &= ~NLP_ADISC_SND;
 		spin_unlock_irq(shost->host_lock);
@@ -1937,15 +1930,10 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	struct lpfc_hba  *phba = vport->phba;
 	IOCB_t *icmd;
 	struct lpfc_iocbq *elsiocb;
-	struct lpfc_sli_ring *pring;
-	struct lpfc_sli *psli;
 	uint8_t *pcmd;
 	uint16_t cmdsize;
 	int rc;
 
-	psli = &phba->sli;
-	pring = &psli->ring[LPFC_ELS_RING];
-
 	spin_lock_irq(shost->host_lock);
 	if (ndlp->nlp_flag & NLP_LOGO_SND) {
 		spin_unlock_irq(shost->host_lock);
@@ -1978,7 +1966,7 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	spin_lock_irq(shost->host_lock);
 	ndlp->nlp_flag |= NLP_LOGO_SND;
 	spin_unlock_irq(shost->host_lock);
-	rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
+	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
 
 	if (rc == IOCB_ERROR) {
 		spin_lock_irq(shost->host_lock);
@@ -2058,14 +2046,12 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
 	struct lpfc_hba  *phba = vport->phba;
 	IOCB_t *icmd;
 	struct lpfc_iocbq *elsiocb;
-	struct lpfc_sli_ring *pring;
 	struct lpfc_sli *psli;
 	uint8_t *pcmd;
 	uint16_t cmdsize;
 	struct lpfc_nodelist *ndlp;
 
 	psli = &phba->sli;
-	pring = &psli->ring[LPFC_ELS_RING];	/* ELS ring */
 	cmdsize = (sizeof(uint32_t) + sizeof(SCR));
 
 	ndlp = lpfc_findnode_did(vport, nportid);
@@ -2108,7 +2094,8 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
 
 	phba->fc_stat.elsXmitSCR++;
 	elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
-	if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
+	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
+	    IOCB_ERROR) {
 		/* The additional lpfc_nlp_put will cause the following
 		 * lpfc_els_free_iocb routine to trigger the rlease of
 		 * the node.
@@ -2152,7 +2139,6 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
 	struct lpfc_hba  *phba = vport->phba;
 	IOCB_t *icmd;
 	struct lpfc_iocbq *elsiocb;
-	struct lpfc_sli_ring *pring;
 	struct lpfc_sli *psli;
 	FARP *fp;
 	uint8_t *pcmd;
@@ -2162,7 +2148,6 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
 	struct lpfc_nodelist *ndlp;
 
 	psli = &phba->sli;
-	pring = &psli->ring[LPFC_ELS_RING];	/* ELS ring */
 	cmdsize = (sizeof(uint32_t) + sizeof(FARP));
 
 	ndlp = lpfc_findnode_did(vport, nportid);
@@ -2219,7 +2204,8 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
 
 	phba->fc_stat.elsXmitFARPR++;
 	elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
-	if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
+	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
+	    IOCB_ERROR) {
 		/* The additional lpfc_nlp_put will cause the following
 		 * lpfc_els_free_iocb routine to trigger the release of
 		 * the node.
@@ -2961,6 +2947,7 @@ lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 		 */
 		lpfc_nlp_not_used(ndlp);
 	}
+
 	return;
 }
 
@@ -3170,7 +3157,6 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
 	IOCB_t *icmd;
 	IOCB_t *oldcmd;
 	struct lpfc_iocbq *elsiocb;
-	struct lpfc_sli_ring *pring;
 	struct lpfc_sli *psli;
 	uint8_t *pcmd;
 	uint16_t cmdsize;
@@ -3178,7 +3164,6 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
 	ELS_PKT *els_pkt_ptr;
 
 	psli = &phba->sli;
-	pring = &psli->ring[LPFC_ELS_RING];	/* ELS ring */
 	oldcmd = &oldiocb->iocb;
 
 	switch (flag) {
@@ -3266,7 +3251,7 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
 	}
 
 	phba->fc_stat.elsXmitACC++;
-	rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
+	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
 	if (rc == IOCB_ERROR) {
 		lpfc_els_free_iocb(phba, elsiocb);
 		return 1;
@@ -3305,15 +3290,12 @@ lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
 	IOCB_t *icmd;
 	IOCB_t *oldcmd;
 	struct lpfc_iocbq *elsiocb;
-	struct lpfc_sli_ring *pring;
 	struct lpfc_sli *psli;
 	uint8_t *pcmd;
 	uint16_t cmdsize;
 	int rc;
 
 	psli = &phba->sli;
-	pring = &psli->ring[LPFC_ELS_RING];	/* ELS ring */
-
 	cmdsize = 2 * sizeof(uint32_t);
 	elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
 				     ndlp->nlp_DID, ELS_CMD_LS_RJT);
@@ -3346,7 +3328,7 @@ lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
 
 	phba->fc_stat.elsXmitLSRJT++;
 	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
-	rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
+	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
 
 	if (rc == IOCB_ERROR) {
 		lpfc_els_free_iocb(phba, elsiocb);
@@ -3379,8 +3361,6 @@ lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
 		       struct lpfc_nodelist *ndlp)
 {
 	struct lpfc_hba  *phba = vport->phba;
-	struct lpfc_sli  *psli = &phba->sli;
-	struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
 	ADISC *ap;
 	IOCB_t *icmd, *oldcmd;
 	struct lpfc_iocbq *elsiocb;
@@ -3422,7 +3402,7 @@ lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
 
 	phba->fc_stat.elsXmitACC++;
 	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
-	rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
+	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
 	if (rc == IOCB_ERROR) {
 		lpfc_els_free_iocb(phba, elsiocb);
 		return 1;
@@ -3459,14 +3439,12 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
 	IOCB_t *icmd;
 	IOCB_t *oldcmd;
 	struct lpfc_iocbq *elsiocb;
-	struct lpfc_sli_ring *pring;
 	struct lpfc_sli *psli;
 	uint8_t *pcmd;
 	uint16_t cmdsize;
 	int rc;
 
 	psli = &phba->sli;
-	pring = &psli->ring[LPFC_ELS_RING];	/* ELS ring */
 
 	cmdsize = sizeof(uint32_t) + sizeof(PRLI);
 	elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
@@ -3520,7 +3498,7 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
 	phba->fc_stat.elsXmitACC++;
 	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
 
-	rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
+	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
 	if (rc == IOCB_ERROR) {
 		lpfc_els_free_iocb(phba, elsiocb);
 		return 1;
@@ -3562,15 +3540,12 @@ lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format,
 	RNID *rn;
 	IOCB_t *icmd, *oldcmd;
 	struct lpfc_iocbq *elsiocb;
-	struct lpfc_sli_ring *pring;
 	struct lpfc_sli *psli;
 	uint8_t *pcmd;
 	uint16_t cmdsize;
 	int rc;
 
 	psli = &phba->sli;
-	pring = &psli->ring[LPFC_ELS_RING];
-
 	cmdsize = sizeof(uint32_t) + sizeof(uint32_t)
 					+ (2 * sizeof(struct lpfc_name));
 	if (format)
@@ -3626,7 +3601,7 @@ lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format,
 	elsiocb->context1 = NULL;  /* Don't need ndlp for cmpl,
 				    * it could be freed */
 
-	rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
+	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
 	if (rc == IOCB_ERROR) {
 		lpfc_els_free_iocb(phba, elsiocb);
 		return 1;
@@ -4440,8 +4415,6 @@ lpfc_els_rcv_lirr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
 static void
 lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 {
-	struct lpfc_sli *psli = &phba->sli;
-	struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
 	MAILBOX_t *mb;
 	IOCB_t *icmd;
 	RPS_RSP *rps_rsp;
@@ -4507,7 +4480,7 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 			 ndlp->nlp_rpi);
 	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
 	phba->fc_stat.elsXmitACC++;
-	if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR)
+	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR)
 		lpfc_els_free_iocb(phba, elsiocb);
 	return;
 }
@@ -4616,8 +4589,6 @@ lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize,
 	IOCB_t *icmd, *oldcmd;
 	RPL_RSP rpl_rsp;
 	struct lpfc_iocbq *elsiocb;
-	struct lpfc_sli *psli = &phba->sli;
-	struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
 	uint8_t *pcmd;
 
 	elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
@@ -4654,7 +4625,8 @@ lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize,
 			 ndlp->nlp_rpi);
 	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
 	phba->fc_stat.elsXmitACC++;
-	if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
+	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
+	    IOCB_ERROR) {
 		lpfc_els_free_iocb(phba, elsiocb);
 		return 1;
 	}
@@ -6139,7 +6111,6 @@ lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 {
 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
 	struct lpfc_hba  *phba = vport->phba;
-	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
 	IOCB_t *icmd;
 	struct lpfc_iocbq *elsiocb;
 	uint8_t *pcmd;
@@ -6169,7 +6140,8 @@ lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 	spin_lock_irq(shost->host_lock);
 	ndlp->nlp_flag |= NLP_LOGO_SND;
 	spin_unlock_irq(shost->host_lock);
-	if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
+	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
+	    IOCB_ERROR) {
 		spin_lock_irq(shost->host_lock);
 		ndlp->nlp_flag &= ~NLP_LOGO_SND;
 		spin_unlock_irq(shost->host_lock);
@@ -6224,7 +6196,6 @@ lpfc_resume_fabric_iocbs(struct lpfc_hba *phba)
 	struct lpfc_iocbq *iocb;
 	unsigned long iflags;
 	int ret;
-	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
 	IOCB_t *cmd;
 
 repeat:
@@ -6248,7 +6219,7 @@ repeat:
 			"Fabric sched1:   ste:x%x",
 			iocb->vport->port_state, 0, 0);
 
-		ret = lpfc_sli_issue_iocb(phba, pring, iocb, 0);
+		ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0);
 
 		if (ret == IOCB_ERROR) {
 			iocb->iocb_cmpl = iocb->fabric_iocb_cmpl;
@@ -6394,7 +6365,6 @@ static int
 lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb)
 {
 	unsigned long iflags;
-	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
 	int ready;
 	int ret;
 
@@ -6418,7 +6388,7 @@ lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb)
 			"Fabric sched2:   ste:x%x",
 			iocb->vport->port_state, 0, 0);
 
-		ret = lpfc_sli_issue_iocb(phba, pring, iocb, 0);
+		ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0);
 
 		if (ret == IOCB_ERROR) {
 			iocb->iocb_cmpl = iocb->fabric_iocb_cmpl;
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index e764ce0bf70..25fc96c9081 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -555,23 +555,24 @@ lpfc_work_done(struct lpfc_hba *phba)
 		/*
 		 * Turn on Ring interrupts
 		 */
-		spin_lock_irq(&phba->hbalock);
-		control = readl(phba->HCregaddr);
-		if (!(control & (HC_R0INT_ENA << LPFC_ELS_RING))) {
-			lpfc_debugfs_slow_ring_trc(phba,
-				"WRK Enable ring: cntl:x%x hacopy:x%x",
-				control, ha_copy, 0);
-
-			control |= (HC_R0INT_ENA << LPFC_ELS_RING);
-			writel(control, phba->HCregaddr);
-			readl(phba->HCregaddr); /* flush */
-		}
-		else {
-			lpfc_debugfs_slow_ring_trc(phba,
-				"WRK Ring ok:     cntl:x%x hacopy:x%x",
-				control, ha_copy, 0);
+		if (phba->sli_rev <= LPFC_SLI_REV3) {
+			spin_lock_irq(&phba->hbalock);
+			control = readl(phba->HCregaddr);
+			if (!(control & (HC_R0INT_ENA << LPFC_ELS_RING))) {
+				lpfc_debugfs_slow_ring_trc(phba,
+					"WRK Enable ring: cntl:x%x hacopy:x%x",
+					control, ha_copy, 0);
+
+				control |= (HC_R0INT_ENA << LPFC_ELS_RING);
+				writel(control, phba->HCregaddr);
+				readl(phba->HCregaddr); /* flush */
+			} else {
+				lpfc_debugfs_slow_ring_trc(phba,
+					"WRK Ring ok:     cntl:x%x hacopy:x%x",
+					control, ha_copy, 0);
+			}
+			spin_unlock_irq(&phba->hbalock);
 		}
-		spin_unlock_irq(&phba->hbalock);
 	}
 	lpfc_work_list_done(phba);
 }
@@ -689,7 +690,7 @@ lpfc_port_link_failure(struct lpfc_vport *vport)
 	lpfc_can_disctmo(vport);
 }
 
-static void
+void
 lpfc_linkdown_port(struct lpfc_vport *vport)
 {
 	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
@@ -1147,10 +1148,12 @@ lpfc_enable_la(struct lpfc_hba *phba)
 	struct lpfc_sli *psli = &phba->sli;
 	spin_lock_irq(&phba->hbalock);
 	psli->sli_flag |= LPFC_PROCESS_LA;
-	control = readl(phba->HCregaddr);
-	control |= HC_LAINT_ENA;
-	writel(control, phba->HCregaddr);
-	readl(phba->HCregaddr); /* flush */
+	if (phba->sli_rev <= LPFC_SLI_REV3) {
+		control = readl(phba->HCregaddr);
+		control |= HC_LAINT_ENA;
+		writel(control, phba->HCregaddr);
+		readl(phba->HCregaddr); /* flush */
+	}
 	spin_unlock_irq(&phba->hbalock);
 }
 
@@ -2919,11 +2922,13 @@ restart_disc:
 		 * set port_state to PORT_READY if SLI2.
 		 * cmpl_reg_vpi will set port_state to READY for SLI3.
 		 */
-		if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
-			lpfc_issue_reg_vpi(phba, vport);
-		else  {	/* NPIV Not enabled */
-			lpfc_issue_clear_la(phba, vport);
-			vport->port_state = LPFC_VPORT_READY;
+		if (phba->sli_rev < LPFC_SLI_REV4) {
+			if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
+				lpfc_issue_reg_vpi(phba, vport);
+			else  {	/* NPIV Not enabled */
+				lpfc_issue_clear_la(phba, vport);
+				vport->port_state = LPFC_VPORT_READY;
+			}
 		}
 
 		/* Setup and issue mailbox INITIALIZE LINK command */
@@ -2959,11 +2964,13 @@ restart_disc:
 		 * set port_state to PORT_READY if SLI2.
 		 * cmpl_reg_vpi will set port_state to READY for SLI3.
 		 */
-		if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
-			lpfc_issue_reg_vpi(phba, vport);
-		else {	/* NPIV Not enabled */
-			lpfc_issue_clear_la(phba, vport);
-			vport->port_state = LPFC_VPORT_READY;
+		if (phba->sli_rev < LPFC_SLI_REV4) {
+			if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
+				lpfc_issue_reg_vpi(phba, vport);
+			else  {	/* NPIV Not enabled */
+				lpfc_issue_clear_la(phba, vport);
+				vport->port_state = LPFC_VPORT_READY;
+			}
 		}
 		break;
 
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 86d1bdcbf2d..3f06ce2becf 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -571,16 +571,20 @@ lpfc_hba_down_prep(struct lpfc_hba *phba)
 {
 	struct lpfc_vport **vports;
 	int i;
-	/* Disable interrupts */
-	writel(0, phba->HCregaddr);
-	readl(phba->HCregaddr); /* flush */
+
+	if (phba->sli_rev <= LPFC_SLI_REV3) {
+		/* Disable interrupts */
+		writel(0, phba->HCregaddr);
+		readl(phba->HCregaddr); /* flush */
+	}
 
 	if (phba->pport->load_flag & FC_UNLOADING)
 		lpfc_cleanup_discovery_resources(phba->pport);
 	else {
 		vports = lpfc_create_vport_work_array(phba);
 		if (vports != NULL)
-			for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++)
+			for (i = 0; i <= phba->max_vports &&
+				vports[i] != NULL; i++)
 				lpfc_cleanup_discovery_resources(vports[i]);
 		lpfc_destroy_vport_work_array(phba, vports);
 	}
@@ -588,7 +592,7 @@ lpfc_hba_down_prep(struct lpfc_hba *phba)
 }
 
 /**
- * lpfc_hba_down_post - Perform lpfc uninitialization after HBA reset
+ * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
  * @phba: pointer to lpfc HBA data structure.
  *
  * This routine will do uninitialization after the HBA is reset when bring
@@ -598,8 +602,8 @@ lpfc_hba_down_prep(struct lpfc_hba *phba)
  *   0 - sucess.
  *   Any other value - error.
  **/
-int
-lpfc_hba_down_post(struct lpfc_hba *phba)
+static int
+lpfc_hba_down_post_s3(struct lpfc_hba *phba)
 {
 	struct lpfc_sli *psli = &phba->sli;
 	struct lpfc_sli_ring *pring;
@@ -909,13 +913,30 @@ lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
 	if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
 		phba->work_hs = old_host_status & ~HS_FFER1;
 
+	spin_lock_irq(&phba->hbalock);
 	phba->hba_flag &= ~DEFER_ERATT;
+	spin_unlock_irq(&phba->hbalock);
 	phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
 	phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
 }
 
+static void
+lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
+{
+	struct lpfc_board_event_header board_event;
+	struct Scsi_Host *shost;
+
+	board_event.event_type = FC_REG_BOARD_EVENT;
+	board_event.subcategory = LPFC_EVENT_PORTINTERR;
+	shost = lpfc_shost_from_vport(phba->pport);
+	fc_host_post_vendor_event(shost, fc_get_event_number(),
+				  sizeof(board_event),
+				  (char *) &board_event,
+				  LPFC_NL_VENDOR_ID);
+}
+
 /**
- * lpfc_handle_eratt - The HBA hardware error handler
+ * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
  * @phba: pointer to lpfc hba data structure.
  *
  * This routine is invoked to handle the following HBA hardware error
@@ -924,8 +945,8 @@ lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
  * 2 - DMA ring index out of range
  * 3 - Mailbox command came back as unknown
  **/
-void
-lpfc_handle_eratt(struct lpfc_hba *phba)
+static void
+lpfc_handle_eratt_s3(struct lpfc_hba *phba)
 {
 	struct lpfc_vport *vport = phba->pport;
 	struct lpfc_sli   *psli = &phba->sli;
@@ -934,24 +955,23 @@ lpfc_handle_eratt(struct lpfc_hba *phba)
 	unsigned long temperature;
 	struct temp_event temp_event_data;
 	struct Scsi_Host  *shost;
-	struct lpfc_board_event_header board_event;
 
 	/* If the pci channel is offline, ignore possible errors,
-	 * since we cannot communicate with the pci card anyway. */
-	if (pci_channel_offline(phba->pcidev))
+	 * since we cannot communicate with the pci card anyway.
+	 */
+	if (pci_channel_offline(phba->pcidev)) {
+		spin_lock_irq(&phba->hbalock);
+		phba->hba_flag &= ~DEFER_ERATT;
+		spin_unlock_irq(&phba->hbalock);
 		return;
+	}
+
 	/* If resets are disabled then leave the HBA alone and return */
 	if (!phba->cfg_enable_hba_reset)
 		return;
 
 	/* Send an internal error event to mgmt application */
-	board_event.event_type = FC_REG_BOARD_EVENT;
-	board_event.subcategory = LPFC_EVENT_PORTINTERR;
-	shost = lpfc_shost_from_vport(phba->pport);
-	fc_host_post_vendor_event(shost, fc_get_event_number(),
-				  sizeof(board_event),
-				  (char *) &board_event,
-				  LPFC_NL_VENDOR_ID);
+	lpfc_board_errevt_to_mgmt(phba);
 
 	if (phba->hba_flag & DEFER_ERATT)
 		lpfc_handle_deferred_eratt(phba);
@@ -1137,7 +1157,7 @@ lpfc_handle_latt_err_exit:
  *   0 - pointer to the VPD passed in is NULL
  *   1 - success
  **/
-static int
+int
 lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
 {
 	uint8_t lenlo, lenhi;
@@ -1533,7 +1553,8 @@ lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
 		icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
 		icmd->ulpLe = 1;
 
-		if (lpfc_sli_issue_iocb(phba, pring, iocb, 0) == IOCB_ERROR) {
+		if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) ==
+		    IOCB_ERROR) {
 			lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
 			kfree(mp1);
 			cnt++;
@@ -1761,7 +1782,6 @@ lpfc_cleanup(struct lpfc_vport *vport)
 	 * Lets wait for this to happen, if needed.
 	 */
 	while (!list_empty(&vport->fc_nodes)) {
-
 		if (i++ > 3000) {
 			lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
 				"0233 Nodelist not empty\n");
@@ -1782,7 +1802,6 @@ lpfc_cleanup(struct lpfc_vport *vport)
 		/* Wait for any activity on ndlps to settle */
 		msleep(10);
 	}
-	return;
 }
 
 /**
@@ -1803,22 +1822,36 @@ lpfc_stop_vport_timers(struct lpfc_vport *vport)
 }
 
 /**
- * lpfc_stop_phba_timers - Stop all the timers associated with an HBA
+ * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
  * @phba: pointer to lpfc hba data structure.
  *
  * This routine stops all the timers associated with a HBA. This function is
  * invoked before either putting a HBA offline or unloading the driver.
  **/
-static void
-lpfc_stop_phba_timers(struct lpfc_hba *phba)
+void
+lpfc_stop_hba_timers(struct lpfc_hba *phba)
 {
-	del_timer_sync(&phba->fcp_poll_timer);
 	lpfc_stop_vport_timers(phba->pport);
 	del_timer_sync(&phba->sli.mbox_tmo);
 	del_timer_sync(&phba->fabric_block_timer);
-	phba->hb_outstanding = 0;
-	del_timer_sync(&phba->hb_tmofunc);
 	del_timer_sync(&phba->eratt_poll);
+	del_timer_sync(&phba->hb_tmofunc);
+	phba->hb_outstanding = 0;
+
+	switch (phba->pci_dev_grp) {
+	case LPFC_PCI_DEV_LP:
+		/* Stop any LightPulse device specific driver timers */
+		del_timer_sync(&phba->fcp_poll_timer);
+		break;
+	case LPFC_PCI_DEV_OC:
+		/* Stop any OneConnect device specific driver timers */
+		break;
+	default:
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0297 Invalid device group (x%x)\n",
+				phba->pci_dev_grp);
+		break;
+	}
 	return;
 }
 
@@ -2509,9 +2542,8 @@ lpfc_disable_msi(struct lpfc_hba *phba)
  *
  * This routine it invoked to log the currently used active interrupt mode
  * to the device.
- */
-static void
-lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
+ **/
+static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
 {
 	switch (intr_mode) {
 	case 0:
@@ -2534,293 +2566,380 @@ lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
 	return;
 }
 
-static void
-lpfc_stop_port(struct lpfc_hba *phba)
-{
-	/* Clear all interrupt enable conditions */
-	writel(0, phba->HCregaddr);
-	readl(phba->HCregaddr); /* flush */
-	/* Clear all pending interrupts */
-	writel(0xffffffff, phba->HAregaddr);
-	readl(phba->HAregaddr); /* flush */
-
-	/* Reset some HBA SLI setup states */
-	lpfc_stop_phba_timers(phba);
-	phba->pport->work_port_events = 0;
-
-	return;
-}
-
 /**
- * lpfc_enable_intr - Enable device interrupt
+ * lpfc_enable_pci_dev - Enable a generic PCI device.
  * @phba: pointer to lpfc hba data structure.
  *
- * This routine is invoked to enable device interrupt and associate driver's
- * interrupt handler(s) to interrupt vector(s). Depends on the interrupt
- * mode configured to the driver, the driver will try to fallback from the
- * configured interrupt mode to an interrupt mode which is supported by the
- * platform, kernel, and device in the order of: MSI-X -> MSI -> IRQ.
+ * This routine is invoked to enable the PCI device that is common to all
+ * PCI devices.
  *
  * Return codes
- *   0 - sucessful
- *   other values - error
+ * 	0 - successful
+ * 	other values - error
  **/
-static uint32_t
-lpfc_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
+static int
+lpfc_enable_pci_dev(struct lpfc_hba *phba)
 {
-	uint32_t intr_mode = LPFC_INTR_ERROR;
-	int retval;
+	struct pci_dev *pdev;
+	int bars;
 
-	if (cfg_mode == 2) {
-		/* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
-		retval = lpfc_sli_config_port(phba, 3);
-		if (!retval) {
-			/* Now, try to enable MSI-X interrupt mode */
-			retval = lpfc_enable_msix(phba);
-			if (!retval) {
-				/* Indicate initialization to MSI-X mode */
-				phba->intr_type = MSIX;
-				intr_mode = 2;
-			}
-		}
-	}
+	/* Obtain PCI device reference */
+	if (!phba->pcidev)
+		goto out_error;
+	else
+		pdev = phba->pcidev;
+	/* Select PCI BARs */
+	bars = pci_select_bars(pdev, IORESOURCE_MEM);
+	/* Enable PCI device */
+	if (pci_enable_device_mem(pdev))
+		goto out_error;
+	/* Request PCI resource for the device */
+	if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME))
+		goto out_disable_device;
+	/* Set up device as PCI master and save state for EEH */
+	pci_set_master(pdev);
+	pci_try_set_mwi(pdev);
+	pci_save_state(pdev);
 
-	/* Fallback to MSI if MSI-X initialization failed */
-	if (cfg_mode >= 1 && phba->intr_type == NONE) {
-		retval = lpfc_enable_msi(phba);
-		if (!retval) {
-			/* Indicate initialization to MSI mode */
-			phba->intr_type = MSI;
-			intr_mode = 1;
-		}
-	}
+	return 0;
 
-	/* Fallback to INTx if both MSI-X/MSI initalization failed */
-	if (phba->intr_type == NONE) {
-		retval = request_irq(phba->pcidev->irq, lpfc_intr_handler,
-				     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
-		if (!retval) {
-			/* Indicate initialization to INTx mode */
-			phba->intr_type = INTx;
-			intr_mode = 0;
-		}
-	}
-	return intr_mode;
+out_disable_device:
+	pci_disable_device(pdev);
+out_error:
+	return -ENODEV;
 }
 
 /**
- * lpfc_disable_intr - Disable device interrupt
+ * lpfc_disable_pci_dev - Disable a generic PCI device.
  * @phba: pointer to lpfc hba data structure.
  *
- * This routine is invoked to disable device interrupt and disassociate the
- * driver's interrupt handler(s) from interrupt vector(s). Depending on the
- * interrupt mode, the driver will release the interrupt vector(s) for the
- * message signaled interrupt.
+ * This routine is invoked to disable the PCI device that is common to all
+ * PCI devices.
  **/
 static void
-lpfc_disable_intr(struct lpfc_hba *phba)
+lpfc_disable_pci_dev(struct lpfc_hba *phba)
 {
-	/* Disable the currently initialized interrupt mode */
-	if (phba->intr_type == MSIX)
-		lpfc_disable_msix(phba);
-	else if (phba->intr_type == MSI)
-		lpfc_disable_msi(phba);
-	else if (phba->intr_type == INTx)
-		free_irq(phba->pcidev->irq, phba);
+	struct pci_dev *pdev;
+	int bars;
 
-	/* Reset interrupt management states */
-	phba->intr_type = NONE;
-	phba->sli.slistat.sli_intr = 0;
+	/* Obtain PCI device reference */
+	if (!phba->pcidev)
+		return;
+	else
+		pdev = phba->pcidev;
+	/* Select PCI BARs */
+	bars = pci_select_bars(pdev, IORESOURCE_MEM);
+	/* Release PCI resource and disable PCI device */
+	pci_release_selected_regions(pdev, bars);
+	pci_disable_device(pdev);
+	/* Null out PCI private reference to driver */
+	pci_set_drvdata(pdev, NULL);
 
 	return;
 }
 
 /**
- * lpfc_pci_probe_one - lpfc PCI probe func to register device to PCI subsystem
- * @pdev: pointer to PCI device
- * @pid: pointer to PCI device identifier
- *
- * This routine is to be registered to the kernel's PCI subsystem. When an
- * Emulex HBA is presented in PCI bus, the kernel PCI subsystem looks at
- * PCI device-specific information of the device and driver to see if the
- * driver state that it can support this kind of device. If the match is
- * successful, the driver core invokes this routine. If this routine
- * determines it can claim the HBA, it does all the initialization that it
- * needs to do to handle the HBA properly.
+ * lpfc_reset_hba - Reset an HBA
+ * @phba: pointer to lpfc hba data structure.
  *
- * Return code
- *   0 - driver can claim the device
- *   negative value - driver can not claim the device
+ * This routine is invoked to reset an HBA device. It brings the HBA
+ * offline, performs a board restart, and then brings the board back
+ * online. lpfc_offline calls lpfc_sli_hba_down, which will clean up
+ * outstanding mailbox commands.
  **/
-static int __devinit
-lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
+void
+lpfc_reset_hba(struct lpfc_hba *phba)
 {
-	struct lpfc_vport *vport = NULL;
-	struct lpfc_hba   *phba;
-	struct lpfc_sli   *psli;
-	struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
-	struct Scsi_Host  *shost = NULL;
-	void *ptr;
-	unsigned long bar0map_len, bar2map_len;
-	int error = -ENODEV, retval;
-	int  i, hbq_count;
-	uint16_t iotag;
-	uint32_t cfg_mode, intr_mode;
-	int bars = pci_select_bars(pdev, IORESOURCE_MEM);
-	struct lpfc_adapter_event_header adapter_event;
-
-	if (pci_enable_device_mem(pdev))
-		goto out;
-	if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME))
-		goto out_disable_device;
-
-	phba = kzalloc(sizeof (struct lpfc_hba), GFP_KERNEL);
-	if (!phba)
-		goto out_release_regions;
-
-	atomic_set(&phba->fast_event_count, 0);
-	spin_lock_init(&phba->hbalock);
-
-	/* Initialize ndlp management spinlock */
-	spin_lock_init(&phba->ndlp_lock);
-
-	phba->pcidev = pdev;
+	/* If resets are disabled then set error state and return. */
+	if (!phba->cfg_enable_hba_reset) {
+		phba->link_state = LPFC_HBA_ERROR;
+		return;
+	}
+	lpfc_offline_prep(phba);
+	lpfc_offline(phba);
+	lpfc_sli_brdrestart(phba);
+	lpfc_online(phba);
+	lpfc_unblock_mgmt_io(phba);
+}
 
-	/* Assign an unused board number */
-	if ((phba->brd_no = lpfc_get_instance()) < 0)
-		goto out_free_phba;
+/**
+ * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to set up the driver internal resources specific to
+ * supporting the SLI-3 HBA device it is attached to.
+ *
+ * Return codes
+ * 	0 - successful
+ * 	other values - error
+ **/
+static int
+lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
+{
+	struct lpfc_sli *psli;
 
-	INIT_LIST_HEAD(&phba->port_list);
-	init_waitqueue_head(&phba->wait_4_mlo_m_q);
 	/*
-	 * Get all the module params for configuring this host and then
-	 * establish the host.
+	 * Initialize timers used by driver
 	 */
-	lpfc_get_cfgparam(phba);
-	phba->max_vpi = LPFC_MAX_VPI;
 
-	/* Initialize timers used by driver */
+	/* Heartbeat timer */
 	init_timer(&phba->hb_tmofunc);
 	phba->hb_tmofunc.function = lpfc_hb_timeout;
 	phba->hb_tmofunc.data = (unsigned long)phba;
 
 	psli = &phba->sli;
+	/* MBOX heartbeat timer */
 	init_timer(&psli->mbox_tmo);
 	psli->mbox_tmo.function = lpfc_mbox_timeout;
 	psli->mbox_tmo.data = (unsigned long) phba;
+	/* FCP polling mode timer */
 	init_timer(&phba->fcp_poll_timer);
 	phba->fcp_poll_timer.function = lpfc_poll_timeout;
 	phba->fcp_poll_timer.data = (unsigned long) phba;
+	/* Fabric block timer */
 	init_timer(&phba->fabric_block_timer);
 	phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
 	phba->fabric_block_timer.data = (unsigned long) phba;
+	/* EA polling mode timer */
 	init_timer(&phba->eratt_poll);
 	phba->eratt_poll.function = lpfc_poll_eratt;
 	phba->eratt_poll.data = (unsigned long) phba;
 
-	pci_set_master(pdev);
-	pci_save_state(pdev);
-	pci_try_set_mwi(pdev);
-
-	if (pci_set_dma_mask(phba->pcidev, DMA_BIT_MASK(64)) != 0)
-		if (pci_set_dma_mask(phba->pcidev, DMA_BIT_MASK(32)) != 0)
-			goto out_idr_remove;
+	/* Host attention work mask setup */
+	phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
+	phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));
 
+	/* Get all the module params for configuring this host */
+	lpfc_get_cfgparam(phba);
 	/*
-	 * Get the bus address of Bar0 and Bar2 and the number of bytes
-	 * required by each mapping.
+	 * Since the sg_tablesize is module parameter, the sg_dma_buf_size
+	 * used to create the sg_dma_buf_pool must be dynamically calculated.
+	 * 2 segments are added since the IOCB needs a command and response bde.
 	 */
-	phba->pci_bar0_map = pci_resource_start(phba->pcidev, 0);
-	bar0map_len        = pci_resource_len(phba->pcidev, 0);
-
-	phba->pci_bar2_map = pci_resource_start(phba->pcidev, 2);
-	bar2map_len        = pci_resource_len(phba->pcidev, 2);
-
-	/* Map HBA SLIM to a kernel virtual address. */
-	phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
-	if (!phba->slim_memmap_p) {
-		error = -ENODEV;
-		dev_printk(KERN_ERR, &pdev->dev,
-			   "ioremap failed for SLIM memory.\n");
-		goto out_idr_remove;
-	}
-
-	/* Map HBA Control Registers to a kernel virtual address. */
-	phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
-	if (!phba->ctrl_regs_memmap_p) {
-		error = -ENODEV;
-		dev_printk(KERN_ERR, &pdev->dev,
-			   "ioremap failed for HBA control registers.\n");
-		goto out_iounmap_slim;
+	phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
+		sizeof(struct fcp_rsp) +
+			((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));
+
+	if (phba->cfg_enable_bg) {
+		phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT;
+		phba->cfg_sg_dma_buf_size +=
+			phba->cfg_prot_sg_seg_cnt * sizeof(struct ulp_bde64);
 	}
 
-	/* Allocate memory for SLI-2 structures */
-	phba->slim2p.virt = dma_alloc_coherent(&phba->pcidev->dev,
-					       SLI2_SLIM_SIZE,
-					       &phba->slim2p.phys,
-					       GFP_KERNEL);
-	if (!phba->slim2p.virt)
-		goto out_iounmap;
+	/* Also reinitialize the host templates with new values. */
+	lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
+	lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
 
-	memset(phba->slim2p.virt, 0, SLI2_SLIM_SIZE);
-	phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
-	phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
-	phba->IOCBs = (phba->slim2p.virt +
-		       offsetof(struct lpfc_sli2_slim, IOCBs));
+	phba->max_vpi = LPFC_MAX_VPI;
+	/* This will be set to correct value after config_port mbox */
+	phba->max_vports = 0;
 
-	phba->hbqslimp.virt = dma_alloc_coherent(&phba->pcidev->dev,
-						 lpfc_sli_hbq_size(),
-						 &phba->hbqslimp.phys,
-						 GFP_KERNEL);
-	if (!phba->hbqslimp.virt)
-		goto out_free_slim;
+	/*
+	 * Initialize the SLI Layer to run with lpfc HBAs.
+	 */
+	lpfc_sli_setup(phba);
+	lpfc_sli_queue_setup(phba);
 
-	hbq_count = lpfc_sli_hbq_count();
-	ptr = phba->hbqslimp.virt;
-	for (i = 0; i < hbq_count; ++i) {
-		phba->hbqs[i].hbq_virt = ptr;
-		INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
-		ptr += (lpfc_hbq_defs[i]->entry_count *
-			sizeof(struct lpfc_hbq_entry));
-	}
-	phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc;
-	phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer  = lpfc_els_hbq_free;
+	/* Allocate device driver memory */
+	if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
+		return -ENOMEM;
 
-	memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());
+	return 0;
+}
 
-	INIT_LIST_HEAD(&phba->hbqbuf_in_list);
+/**
+ * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to unset the driver internal resources set up
+ * specifically for supporting the SLI-3 HBA device it is attached to.
+ **/
+static void
+lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
+{
+	/* Free device driver memory allocated */
+	lpfc_mem_free_all(phba);
 
-	/* Initialize the SLI Layer to run with lpfc HBAs. */
-	lpfc_sli_setup(phba);
-	lpfc_sli_queue_setup(phba);
+	return;
+}
 
-	retval = lpfc_mem_alloc(phba);
-	if (retval) {
-		error = retval;
-		goto out_free_hbqslimp;
+/**
+ * lpfc_init_api_table_setup - Set up init API function jump table
+ * @phba: The hba struct for which this call is being executed.
+ * @dev_grp: The HBA PCI-Device group number.
+ *
+ * This routine sets up the device INIT interface API function jump table
+ * in @phba struct.
+ *
+ * Returns: 0 - success, -ENODEV - failure.
+ **/
+int
+lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
+{
+	switch (dev_grp) {
+	case LPFC_PCI_DEV_LP:
+		phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
+		phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
+		phba->lpfc_stop_port = lpfc_stop_port_s3;
+		break;
+	default:
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"1431 Invalid HBA PCI-device group: 0x%x\n",
+				dev_grp);
+		return -ENODEV;
+		break;
+	}
+	return 0;
+}
+
+/**
+ * lpfc_setup_driver_resource_phase1 - Phase1 setup driver internal resources.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to set up the driver internal resources before the
+ * device specific resource setup to support the HBA device it is attached to.
+ *
+ * Return codes
+ *	0 - successful
+ *	other values - error
+ **/
+static int
+lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
+{
+	/*
+	 * Driver resources common to all SLI revisions
+	 */
+	atomic_set(&phba->fast_event_count, 0);
+	spin_lock_init(&phba->hbalock);
+
+	/* Initialize ndlp management spinlock */
+	spin_lock_init(&phba->ndlp_lock);
+
+	INIT_LIST_HEAD(&phba->port_list);
+	INIT_LIST_HEAD(&phba->work_list);
+	init_waitqueue_head(&phba->wait_4_mlo_m_q);
+
+	/* Initialize the wait queue head for the kernel thread */
+	init_waitqueue_head(&phba->work_waitq);
+
+	/* Initialize the scsi buffer list used by driver for scsi IO */
+	spin_lock_init(&phba->scsi_buf_list_lock);
+	INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list);
+
+	/* Initialize the fabric iocb list */
+	INIT_LIST_HEAD(&phba->fabric_iocb_list);
+
+	/* Initialize list to save ELS buffers */
+	INIT_LIST_HEAD(&phba->elsbuf);
+
+	/* Initialize FCF connection rec list */
+	INIT_LIST_HEAD(&phba->fcf_conn_rec_list);
+
+	return 0;
+}
+
+/**
+ * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to set up the driver internal resources after the
+ * device specific resource setup to support the HBA device it is attached to.
+ *
+ * Return codes
+ * 	0 - successful
+ * 	other values - error
+ **/
+static int
+lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
+{
+	int error;
+
+	/* Startup the kernel thread for this host adapter. */
+	phba->worker_thread = kthread_run(lpfc_do_work, phba,
+					  "lpfc_worker_%d", phba->brd_no);
+	if (IS_ERR(phba->worker_thread)) {
+		error = PTR_ERR(phba->worker_thread);
+		return error;
+	}
+
+	return 0;
+}
+
+/**
+ * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to unset the driver internal resources set up after
+ * the device specific resource setup for supporting the HBA device it is
+ * attached to.
+ **/
+static void
+lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
+{
+	/* Stop kernel worker thread */
+	kthread_stop(phba->worker_thread);
+}
+
+/**
+ * lpfc_free_iocb_list - Free iocb list.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to free the driver's IOCB list and memory.
+ **/
+static void
+lpfc_free_iocb_list(struct lpfc_hba *phba)
+{
+	struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
+
+	spin_lock_irq(&phba->hbalock);
+	list_for_each_entry_safe(iocbq_entry, iocbq_next,
+				 &phba->lpfc_iocb_list, list) {
+		list_del(&iocbq_entry->list);
+		kfree(iocbq_entry);
+		phba->total_iocbq_bufs--;
 	}
+	spin_unlock_irq(&phba->hbalock);
+
+	return;
+}
+
+/**
+ * lpfc_init_iocb_list - Allocate and initialize iocb list.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to allocate and initialize the driver's IOCB
+ * list and set up the IOCB tag array accordingly.
+ *
+ * Return codes
+ *	0 - successful
+ *	other values - error
+ **/
+static int
+lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
+{
+	struct lpfc_iocbq *iocbq_entry = NULL;
+	uint16_t iotag;
+	int i;
 
 	/* Initialize and populate the iocb list per host.  */
 	INIT_LIST_HEAD(&phba->lpfc_iocb_list);
-	for (i = 0; i < LPFC_IOCB_LIST_CNT; i++) {
+	for (i = 0; i < iocb_count; i++) {
 		iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
 		if (iocbq_entry == NULL) {
 			printk(KERN_ERR "%s: only allocated %d iocbs of "
 				"expected %d count. Unloading driver.\n",
 				__func__, i, LPFC_IOCB_LIST_CNT);
-			error = -ENOMEM;
 			goto out_free_iocbq;
 		}
 
 		iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
 		if (iotag == 0) {
-			kfree (iocbq_entry);
+			kfree(iocbq_entry);
 			printk(KERN_ERR "%s: failed to allocate IOTAG. "
-			       "Unloading driver.\n",
-				__func__);
-			error = -ENOMEM;
+				"Unloading driver.\n", __func__);
 			goto out_free_iocbq;
 		}
+		iocbq_entry->sli4_xritag = NO_XRI;
 
 		spin_lock_irq(&phba->hbalock);
 		list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
@@ -2828,71 +2947,799 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
 		spin_unlock_irq(&phba->hbalock);
 	}
 
-	/* Initialize HBA structure */
-	phba->fc_edtov = FF_DEF_EDTOV;
-	phba->fc_ratov = FF_DEF_RATOV;
-	phba->fc_altov = FF_DEF_ALTOV;
-	phba->fc_arbtov = FF_DEF_ARBTOV;
+	return 0;
 
-	INIT_LIST_HEAD(&phba->work_list);
-	phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
-	phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));
+out_free_iocbq:
+	lpfc_free_iocb_list(phba);
 
-	/* Initialize the wait queue head for the kernel thread */
-	init_waitqueue_head(&phba->work_waitq);
+	return -ENOMEM;
+}
 
-	/* Startup the kernel thread for this host adapter. */
-	phba->worker_thread = kthread_run(lpfc_do_work, phba,
-				       "lpfc_worker_%d", phba->brd_no);
-	if (IS_ERR(phba->worker_thread)) {
-		error = PTR_ERR(phba->worker_thread);
-		goto out_free_iocbq;
+/**
+ * lpfc_hba_alloc - Allocate driver hba data structure for a device.
+ * @pdev: pointer to pci device data structure.
+ *
+ * This routine is invoked to allocate the driver hba data structure for an
+ * HBA device. If the allocation is successful, the phba reference to the
+ * PCI device data structure is set.
+ *
+ * Return codes
+ *      pointer to @phba - successful
+ *      NULL - error
+ **/
+static struct lpfc_hba *
+lpfc_hba_alloc(struct pci_dev *pdev)
+{
+	struct lpfc_hba *phba;
+
+	/* Allocate memory for HBA structure */
+	phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
+	if (!phba) {
+		/* Can't use lpfc_printf_log here: phba is NULL */
+		dev_err(&pdev->dev, "1417 Failed to allocate hba struct.\n");
+		return NULL;
 	}
 
-	/* Initialize the list of scsi buffers used by driver for scsi IO. */
-	spin_lock_init(&phba->scsi_buf_list_lock);
-	INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list);
+	/* Set reference to PCI device in HBA structure */
+	phba->pcidev = pdev;
 
-	/* Initialize list of fabric iocbs */
-	INIT_LIST_HEAD(&phba->fabric_iocb_list);
+	/* Assign an unused board number */
+	phba->brd_no = lpfc_get_instance();
+	if (phba->brd_no < 0) {
+		kfree(phba);
+		return NULL;
+	}
 
-	/* Initialize list to save ELS buffers */
-	INIT_LIST_HEAD(&phba->elsbuf);
+	return phba;
+}
+
+/**
+ * lpfc_hba_free - Free driver hba data structure with a device.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to free the driver hba data structure with an
+ * HBA device.
+ **/
+static void
+lpfc_hba_free(struct lpfc_hba *phba)
+{
+	/* Release the driver assigned board number */
+	idr_remove(&lpfc_hba_index, phba->brd_no);
+
+	kfree(phba);
+	return;
+}
+
+/**
+ * lpfc_create_shost - Create hba physical port with associated scsi host.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to create HBA physical port and associate a SCSI
+ * host with it.
+ *
+ * Return codes
+ *      0 - successful
+ *      other values - error
+ **/
+static int
+lpfc_create_shost(struct lpfc_hba *phba)
+{
+	struct lpfc_vport *vport;
+	struct Scsi_Host  *shost;
+
+	/* Initialize HBA FC structure */
+	phba->fc_edtov = FF_DEF_EDTOV;
+	phba->fc_ratov = FF_DEF_RATOV;
+	phba->fc_altov = FF_DEF_ALTOV;
+	phba->fc_arbtov = FF_DEF_ARBTOV;
 
 	vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
 	if (!vport)
-		goto out_kthread_stop;
+		return -ENODEV;
 
 	shost = lpfc_shost_from_vport(vport);
 	phba->pport = vport;
 	lpfc_debugfs_initialize(vport);
+	/* Put reference to SCSI host to driver's device private data */
+	pci_set_drvdata(phba->pcidev, shost);
 
-	pci_set_drvdata(pdev, shost);
+	return 0;
+}
 
-	phba->MBslimaddr = phba->slim_memmap_p;
-	phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
-	phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
-	phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
-	phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
+/**
+ * lpfc_destroy_shost - Destroy hba physical port with associated scsi host.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to destroy HBA physical port and the associated
+ * SCSI host.
+ **/
+static void
+lpfc_destroy_shost(struct lpfc_hba *phba)
+{
+	struct lpfc_vport *vport = phba->pport;
+
+	/* Destroy physical port that associated with the SCSI host */
+	destroy_port(vport);
+
+	return;
+}
+
+/**
+ * lpfc_setup_bg - Setup Block guard structures and debug areas.
+ * @phba: pointer to lpfc hba data structure.
+ * @shost: the shost to be used to detect Block guard settings.
+ *
+ * This routine sets up the local Block guard protocol settings for @shost.
+ * This routine also allocates memory for debugging bg buffers.
+ **/
+static void
+lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
+{
+	int pagecnt = 10;
+	if (lpfc_prot_mask && lpfc_prot_guard) {
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+				"1478 Registering BlockGuard with the "
+				"SCSI layer\n");
+		scsi_host_set_prot(shost, lpfc_prot_mask);
+		scsi_host_set_guard(shost, lpfc_prot_guard);
+	}
+	if (!_dump_buf_data) {
+		while (pagecnt) {
+			spin_lock_init(&_dump_buf_lock);
+			_dump_buf_data =
+				(char *) __get_free_pages(GFP_KERNEL, pagecnt);
+			if (_dump_buf_data) {
+				printk(KERN_ERR "BLKGRD allocated %d pages for "
+				       "_dump_buf_data at 0x%p\n",
+				       (1 << pagecnt), _dump_buf_data);
+				_dump_buf_data_order = pagecnt;
+				memset(_dump_buf_data, 0,
+				       ((1 << PAGE_SHIFT) << pagecnt));
+				break;
+			} else
+				--pagecnt;
+		}
+		if (!_dump_buf_data_order)
+			printk(KERN_ERR "BLKGRD ERROR unable to allocate "
+			       "memory for hexdump\n");
+	} else
+		printk(KERN_ERR "BLKGRD already allocated _dump_buf_data=0x%p"
+		       "\n", _dump_buf_data);
+	if (!_dump_buf_dif) {
+		while (pagecnt) {
+			_dump_buf_dif =
+				(char *) __get_free_pages(GFP_KERNEL, pagecnt);
+			if (_dump_buf_dif) {
+				printk(KERN_ERR "BLKGRD allocated %d pages for "
+				       "_dump_buf_dif at 0x%p\n",
+				       (1 << pagecnt), _dump_buf_dif);
+				_dump_buf_dif_order = pagecnt;
+				memset(_dump_buf_dif, 0,
+				       ((1 << PAGE_SHIFT) << pagecnt));
+				break;
+			} else
+				--pagecnt;
+		}
+		if (!_dump_buf_dif_order)
+			printk(KERN_ERR "BLKGRD ERROR unable to allocate "
+			       "memory for hexdump\n");
+	} else
+		printk(KERN_ERR "BLKGRD already allocated _dump_buf_dif=0x%p\n",
+		       _dump_buf_dif);
+}
+
+/**
+ * lpfc_post_init_setup - Perform necessary device post initialization setup.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to perform all the necessary post initialization
+ * setup for the device.
+ **/
+static void
+lpfc_post_init_setup(struct lpfc_hba *phba)
+{
+	struct Scsi_Host  *shost;
+	struct lpfc_adapter_event_header adapter_event;
+
+	/* Get the default values for Model Name and Description */
+	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
+
+	/*
+	 * hba setup may have changed the hba_queue_depth so we need to
+	 * adjust the value of can_queue.
+	 */
+	shost = pci_get_drvdata(phba->pcidev);
+	shost->can_queue = phba->cfg_hba_queue_depth - 10;
+	if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
+		lpfc_setup_bg(phba, shost);
+
+	lpfc_host_attrib_init(shost);
+
+	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
+		spin_lock_irq(shost->host_lock);
+		lpfc_poll_start_timer(phba);
+		spin_unlock_irq(shost->host_lock);
+	}
+
+	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+			"0428 Perform SCSI scan\n");
+	/* Send board arrival event to upper layer */
+	adapter_event.event_type = FC_REG_ADAPTER_EVENT;
+	adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
+	fc_host_post_vendor_event(shost, fc_get_event_number(),
+				  sizeof(adapter_event),
+				  (char *) &adapter_event,
+				  LPFC_NL_VENDOR_ID);
+	return;
+}
+
+/**
+ * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to set up the PCI device memory space for device
+ * with SLI-3 interface spec.
+ *
+ * Return codes
+ * 	0 - successful
+ * 	other values - error
+ **/
+static int
+lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
+{
+	struct pci_dev *pdev;
+	unsigned long bar0map_len, bar2map_len;
+	int i, hbq_count;
+	void *ptr;
+	int error = -ENODEV;
+
+	/* Obtain PCI device reference */
+	if (!phba->pcidev)
+		return error;
+	pdev = phba->pcidev;
+
+	/* Set the device DMA mask size */
+	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0)
+		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)
+			return error;
+
+	/* Get the bus address of Bar0 and Bar2 and the number of bytes
+	 * required by each mapping.
+	 */
+	phba->pci_bar0_map = pci_resource_start(pdev, 0);
+	bar0map_len = pci_resource_len(pdev, 0);
+
+	phba->pci_bar2_map = pci_resource_start(pdev, 2);
+	bar2map_len = pci_resource_len(pdev, 2);
+
+	/* Map HBA SLIM to a kernel virtual address. */
+	phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
+	if (!phba->slim_memmap_p) {
+		dev_printk(KERN_ERR, &pdev->dev,
+			   "ioremap failed for SLIM memory.\n");
+		goto out;
+	}
+
+	/* Map HBA Control Registers to a kernel virtual address. */
+	phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
+	if (!phba->ctrl_regs_memmap_p) {
+		dev_printk(KERN_ERR, &pdev->dev,
+			   "ioremap failed for HBA control registers.\n");
+		goto out_iounmap_slim;
+	}
+
+	/* Allocate memory for SLI-2 structures */
+	phba->slim2p.virt = dma_alloc_coherent(&pdev->dev,
+					       SLI2_SLIM_SIZE,
+					       &phba->slim2p.phys,
+					       GFP_KERNEL);
+	if (!phba->slim2p.virt)
+		goto out_iounmap;
+
+	memset(phba->slim2p.virt, 0, SLI2_SLIM_SIZE);
+	phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
+	phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
+	phba->IOCBs = (phba->slim2p.virt +
+		       offsetof(struct lpfc_sli2_slim, IOCBs));
+
+	phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev,
+						 lpfc_sli_hbq_size(),
+						 &phba->hbqslimp.phys,
+						 GFP_KERNEL);
+	if (!phba->hbqslimp.virt)
+		goto out_free_slim;
+
+	hbq_count = lpfc_sli_hbq_count();
+	ptr = phba->hbqslimp.virt;
+	for (i = 0; i < hbq_count; ++i) {
+		phba->hbqs[i].hbq_virt = ptr;
+		INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
+		ptr += (lpfc_hbq_defs[i]->entry_count *
+			sizeof(struct lpfc_hbq_entry));
+	}
+	phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc;
+	phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free;
+
+	memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());
+
+	INIT_LIST_HEAD(&phba->rb_pend_list);
+
+	phba->MBslimaddr = phba->slim_memmap_p;
+	phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
+	phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
+	phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
+	phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
+
+	return 0;
+
+out_free_slim:
+	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
+			  phba->slim2p.virt, phba->slim2p.phys);
+out_iounmap:
+	iounmap(phba->ctrl_regs_memmap_p);
+out_iounmap_slim:
+	iounmap(phba->slim_memmap_p);
+out:
+	return error;
+}
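
The error path here follows the kernel's goto-unwind convention: each label
releases exactly what was acquired before the failing step, in reverse order
of acquisition. Reduced to its shape, under hypothetical names:

static int map_bar_and_dma(struct pci_dev *pdev, void __iomem **bar,
			   void **virt, dma_addr_t *phys, size_t len)
{
	/* Step 1: map the register BAR */
	*bar = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
	if (!*bar)
		goto out;

	/* Step 2: coherent DMA buffer; on failure undo step 1 only */
	*virt = dma_alloc_coherent(&pdev->dev, len, phys, GFP_KERNEL);
	if (!*virt)
		goto out_iounmap;

	return 0;

out_iounmap:
	iounmap(*bar);
out:
	return -ENODEV;
}

Adding a third resource means adding one allocation site and one label,
keeping setup and teardown in lockstep.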
+
+/**
+ * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to unset the PCI device memory space for device
+ * with SLI-3 interface spec.
+ **/
+static void
+lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
+{
+	struct pci_dev *pdev;
+
+	/* Obtain PCI device reference */
+	if (!phba->pcidev)
+		return;
+	pdev = phba->pcidev;
+
+	/* Free coherent DMA memory allocated */
+	dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
+			  phba->hbqslimp.virt, phba->hbqslimp.phys);
+	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
+			  phba->slim2p.virt, phba->slim2p.phys);
+
+	/* I/O memory unmap */
+	iounmap(phba->ctrl_regs_memmap_p);
+	iounmap(phba->slim_memmap_p);
+
+	return;
+}
+
+/**
+ * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to enable the MSI-X interrupt vectors to device
+ * with SLI-3 interface specs. The kernel function pci_enable_msix() is
+ * called to enable the MSI-X vectors. Note that pci_enable_msix(), once
+ * invoked, enables either all or nothing, depending on the current
+ * availability of PCI vector resources. The device driver is responsible
+ * for calling the individual request_irq() to register each MSI-X vector
+ * with an interrupt handler, which is done in this function. Note that,
+ * later when the device is unloading, the driver should always call
+ * free_irq() on all MSI-X vectors it has done request_irq() on before
+ * calling pci_disable_msix(). Failure to do so results in a BUG_ON() and
+ * the device will be left with MSI-X enabled, leaking its vectors.
+ *
+ * Return codes
+ *   0 - successful
+ *   other values - error
+ **/
+static int
+lpfc_sli_enable_msix(struct lpfc_hba *phba)
+{
+	int rc, i;
+	LPFC_MBOXQ_t *pmb;
+
+	/* Set up MSI-X multi-message vectors */
+	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
+		phba->msix_entries[i].entry = i;
+
+	/* Configure MSI-X capability structure */
+	rc = pci_enable_msix(phba->pcidev, phba->msix_entries,
+				ARRAY_SIZE(phba->msix_entries));
+	if (rc) {
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+				"0420 PCI enable MSI-X failed (%d)\n", rc);
+		goto msi_fail_out;
+	}
+	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+				"0477 MSI-X entry[%d]: vector=x%x "
+				"message=%d\n", i,
+				phba->msix_entries[i].vector,
+				phba->msix_entries[i].entry);
+	/*
+	 * Assign MSI-X vectors to interrupt handlers
+	 */
+
+	/* vector-0 is associated to slow-path handler */
+	rc = request_irq(phba->msix_entries[0].vector,
+			 &lpfc_sli_sp_intr_handler, IRQF_SHARED,
+			 LPFC_SP_DRIVER_HANDLER_NAME, phba);
+	if (rc) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+				"0421 MSI-X slow-path request_irq failed "
+				"(%d)\n", rc);
+		goto msi_fail_out;
+	}
+
+	/* vector-1 is associated to fast-path handler */
+	rc = request_irq(phba->msix_entries[1].vector,
+			 &lpfc_sli_fp_intr_handler, IRQF_SHARED,
+			 LPFC_FP_DRIVER_HANDLER_NAME, phba);
+
+	if (rc) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+				"0429 MSI-X fast-path request_irq failed "
+				"(%d)\n", rc);
+		goto irq_fail_out;
+	}
+
+	/*
+	 * Configure HBA MSI-X attention conditions to messages
+	 */
+	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+
+	if (!pmb) {
+		rc = -ENOMEM;
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0474 Unable to allocate memory for issuing "
+				"MBOX_CONFIG_MSI command\n");
+		goto mem_fail_out;
+	}
+	rc = lpfc_config_msi(phba, pmb);
+	if (rc)
+		goto mbx_fail_out;
+	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
+	if (rc != MBX_SUCCESS) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
+				"0351 Config MSI mailbox command failed, "
+				"mbxCmd x%x, mbxStatus x%x\n",
+				pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus);
+		goto mbx_fail_out;
+	}
+
+	/* Free memory allocated for mailbox command */
+	mempool_free(pmb, phba->mbox_mem_pool);
+	return rc;
+
+mbx_fail_out:
+	/* Free memory allocated for mailbox command */
+	mempool_free(pmb, phba->mbox_mem_pool);
+
+mem_fail_out:
+	/* free the irq already requested */
+	free_irq(phba->msix_entries[1].vector, phba);
+
+irq_fail_out:
+	/* free the irq already requested */
+	free_irq(phba->msix_entries[0].vector, phba);
+
+msi_fail_out:
+	/* Unconfigure MSI-X capability structure */
+	pci_disable_msix(phba->pcidev);
+	return rc;
+}
+
+/**
+ * lpfc_sli_disable_msix - Disable MSI-X interrupt mode on SLI-3 device.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to release the MSI-X vectors and then disable the
+ * MSI-X interrupt mode to device with SLI-3 interface spec.
+ **/
+static void
+lpfc_sli_disable_msix(struct lpfc_hba *phba)
+{
+	int i;
+
+	/* Free up MSI-X multi-message vectors */
+	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
+		free_irq(phba->msix_entries[i].vector, phba);
+	/* Disable MSI-X */
+	pci_disable_msix(phba->pcidev);
+
+	return;
+}
+
+/**
+ * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to enable the MSI interrupt mode to device with
+ * SLI-3 interface spec. The kernel function pci_enable_msi() is called to
+ * enable the MSI vector. The device driver is responsible for calling the
+ * request_irq() to register the MSI vector with an interrupt handler,
+ * which is done in this function.
+ *
+ * Return codes
+ * 	0 - successful
+ * 	other values - error
+ **/
+static int
+lpfc_sli_enable_msi(struct lpfc_hba *phba)
+{
+	int rc;
+
+	rc = pci_enable_msi(phba->pcidev);
+	if (!rc)
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+				"0462 PCI enable MSI mode success.\n");
+	else {
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+				"0471 PCI enable MSI mode failed (%d)\n", rc);
+		return rc;
+	}
+
+	rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
+			 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
+	if (rc) {
+		pci_disable_msi(phba->pcidev);
+		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+				"0478 MSI request_irq failed (%d)\n", rc);
+	}
+	return rc;
+}
+
+/**
+ * lpfc_sli_disable_msi - Disable MSI interrupt mode to SLI-3 device.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to disable the MSI interrupt mode to device with
+ * SLI-3 interface spec. The driver calls free_irq() on the MSI vector it
+ * has done request_irq() on before calling pci_disable_msi(). Failure to do
+ * so results in a BUG_ON() and the device will be left with MSI enabled,
+ * leaking its vector.
+ **/
+static void
+lpfc_sli_disable_msi(struct lpfc_hba *phba)
+{
+	free_irq(phba->pcidev->irq, phba);
+	pci_disable_msi(phba->pcidev);
+	return;
+}
+
+/**
+ * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
+ * @phba: pointer to lpfc hba data structure.
+ * @cfg_mode: The configured interrupt mode (2 - MSI-X, 1 - MSI, 0 - INTx).
+ *
+ * This routine is invoked to enable device interrupt and associate the
+ * driver's interrupt handler(s) to interrupt vector(s) for a device with
+ * SLI-3 interface spec. Depending on the interrupt mode configured for the
+ * driver, the driver will try to fall back from the configured interrupt
+ * mode to an interrupt mode supported by the platform, kernel, and device,
+ * in the order of:
+ * MSI-X -> MSI -> IRQ.
+ *
+ * Return codes
+ *   0 - successful
+ *   other values - error
+ **/
+static uint32_t
+lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
+{
+	uint32_t intr_mode = LPFC_INTR_ERROR;
+	int retval;
+
+	if (cfg_mode == 2) {
+		/* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
+		retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
+		if (!retval) {
+			/* Now, try to enable MSI-X interrupt mode */
+			retval = lpfc_sli_enable_msix(phba);
+			if (!retval) {
+				/* Indicate initialization to MSI-X mode */
+				phba->intr_type = MSIX;
+				intr_mode = 2;
+			}
+		}
+	}
+
+	/* Fallback to MSI if MSI-X initialization failed */
+	if (cfg_mode >= 1 && phba->intr_type == NONE) {
+		retval = lpfc_sli_enable_msi(phba);
+		if (!retval) {
+			/* Indicate initialization to MSI mode */
+			phba->intr_type = MSI;
+			intr_mode = 1;
+		}
+	}
+
+	/* Fallback to INTx if both MSI-X/MSI initialization failed */
+	if (phba->intr_type == NONE) {
+		retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
+				     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
+		if (!retval) {
+			/* Indicate initialization to INTx mode */
+			phba->intr_type = INTx;
+			intr_mode = 0;
+		}
+	}
+	return intr_mode;
+}
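
The routine expresses the MSI-X -> MSI -> INTx fallback by checking
phba->intr_type == NONE after each attempt, so a weaker mode is tried only
when the stronger one failed to come up. The same control flow in a generic
form (try_msix(), try_msi(), and generic_handler are hypothetical stand-ins
for the enable helpers above):

static uint32_t enable_best_intr(struct pci_dev *pdev, uint32_t cfg_mode)
{
	if (cfg_mode == 2 && try_msix(pdev) == 0)
		return 2;			/* MSI-X came up */
	if (cfg_mode >= 1 && try_msi(pdev) == 0)
		return 1;			/* fell back to MSI */
	if (request_irq(pdev->irq, generic_handler, IRQF_SHARED,
			"drv", pdev) == 0)
		return 0;			/* legacy INTx */
	return LPFC_INTR_ERROR;			/* no mode available */
}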
+
+/**
+ * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to disable device interrupt and disassociate the
+ * driver's interrupt handler(s) from interrupt vector(s) to device with
+ * SLI-3 interface spec. Depending on the interrupt mode, the driver will
+ * release the interrupt vector(s) for the message signaled interrupt.
+ **/
+static void
+lpfc_sli_disable_intr(struct lpfc_hba *phba)
+{
+	/* Disable the currently initialized interrupt mode */
+	if (phba->intr_type == MSIX)
+		lpfc_sli_disable_msix(phba);
+	else if (phba->intr_type == MSI)
+		lpfc_sli_disable_msi(phba);
+	else if (phba->intr_type == INTx)
+		free_irq(phba->pcidev->irq, phba);
+
+	/* Reset interrupt management states */
+	phba->intr_type = NONE;
+	phba->sli.slistat.sli_intr = 0;
+
+	return;
+}
+
+/**
+ * lpfc_unset_hba - Unset SLI3 hba device initialization
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to unset the HBA device initialization steps for
+ * a device with SLI-3 interface spec.
+ **/
+static void
+lpfc_unset_hba(struct lpfc_hba *phba)
+{
+	struct lpfc_vport *vport = phba->pport;
+	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
+
+	spin_lock_irq(shost->host_lock);
+	vport->load_flag |= FC_UNLOADING;
+	spin_unlock_irq(shost->host_lock);
+
+	lpfc_stop_hba_timers(phba);
+
+	phba->pport->work_port_events = 0;
+
+	lpfc_sli_hba_down(phba);
+
+	lpfc_sli_brdrestart(phba);
+
+	lpfc_sli_disable_intr(phba);
+
+	return;
+}
+
+/**
+ * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
+ * @pdev: pointer to PCI device
+ * @pid: pointer to PCI device identifier
+ *
+ * This routine is to be called to attach a device with SLI-3 interface spec
+ * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
+ * presented on the PCI bus, the kernel PCI subsystem looks at PCI
+ * device-specific information of the device and driver to see if the driver
+ * states that it can
+ * support this kind of device. If the match is successful, the driver core
+ * invokes this routine. If this routine determines it can claim the HBA, it
+ * does all the initialization that it needs to do to handle the HBA properly.
+ *
+ * Return code
+ * 	0 - driver can claim the device
+ * 	negative value - driver can not claim the device
+ **/
+static int __devinit
+lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
+{
+	struct lpfc_hba   *phba;
+	struct lpfc_vport *vport = NULL;
+	int error;
+	uint32_t cfg_mode, intr_mode;
+
+	/* Allocate memory for HBA structure */
+	phba = lpfc_hba_alloc(pdev);
+	if (!phba)
+		return -ENOMEM;
+
+	/* Perform generic PCI device enabling operation */
+	error = lpfc_enable_pci_dev(phba);
+	if (error) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"1401 Failed to enable pci device.\n");
+		goto out_free_phba;
+	}
+
+	/* Set up SLI API function jump table for PCI-device group-0 HBAs */
+	error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
+	if (error)
+		goto out_disable_pci_dev;
+
+	/* Set up SLI-3 specific device PCI memory space */
+	error = lpfc_sli_pci_mem_setup(phba);
+	if (error) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"1402 Failed to set up pci memory space.\n");
+		goto out_disable_pci_dev;
+	}
+
+	/* Set up phase-1 common device driver resources */
+	error = lpfc_setup_driver_resource_phase1(phba);
+	if (error) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"1403 Failed to set up driver resource.\n");
+		goto out_unset_pci_mem_s3;
+	}
+
+	/* Set up SLI-3 specific device driver resources */
+	error = lpfc_sli_driver_resource_setup(phba);
+	if (error) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"1404 Failed to set up driver resource.\n");
+		goto out_unset_pci_mem_s3;
+	}
+
+	/* Initialize and populate the iocb list per host */
+	error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
+	if (error) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"1405 Failed to initialize iocb list.\n");
+		goto out_unset_driver_resource_s3;
+	}
+
+	/* Set up common device driver resources */
+	error = lpfc_setup_driver_resource_phase2(phba);
+	if (error) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"1406 Failed to set up driver resource.\n");
+		goto out_free_iocb_list;
+	}
+
+	/* Create SCSI host to the physical port */
+	error = lpfc_create_shost(phba);
+	if (error) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"1407 Failed to create scsi host.\n");
+		goto out_unset_driver_resource;
+	}
 
 	/* Configure sysfs attributes */
-	if (lpfc_alloc_sysfs_attr(vport)) {
+	vport = phba->pport;
+	error = lpfc_alloc_sysfs_attr(vport);
+	if (error) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"1476 Failed to allocate sysfs attr\n");
-		error = -ENOMEM;
-		goto out_destroy_port;
+		goto out_destroy_shost;
 	}
 
+	/* Now, try to enable the interrupt and bring up the device */
 	cfg_mode = phba->cfg_use_msi;
 	while (true) {
+		/* Put device to a known state before enabling interrupt */
+		lpfc_stop_port(phba);
 		/* Configure and enable interrupt */
-		intr_mode = lpfc_enable_intr(phba, cfg_mode);
+		intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
 		if (intr_mode == LPFC_INTR_ERROR) {
 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-					"0426 Failed to enable interrupt.\n");
+					"0431 Failed to enable interrupt.\n");
+			error = -ENODEV;
 			goto out_free_sysfs_attr;
 		}
-		/* HBA SLI setup */
+		/* SLI-3 HBA setup */
 		if (lpfc_sli_hba_setup(phba)) {
 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 					"1477 Failed to set up hba\n");
@@ -2902,185 +3749,65 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
 
 		/* Wait 50ms for the interrupts of previous mailbox commands */
 		msleep(50);
-		/* Check active interrupts received */
-		if (phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
+		/* Check active interrupts on message signaled interrupts */
+		if (intr_mode == 0 ||
+		    phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
 			/* Log the current active interrupt mode */
 			phba->intr_mode = intr_mode;
 			lpfc_log_intr_mode(phba, intr_mode);
 			break;
 		} else {
 			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-					"0451 Configure interrupt mode (%d) "
+					"0447 Configure interrupt mode (%d) "
 					"failed active interrupt test.\n",
 					intr_mode);
-			if (intr_mode == 0) {
-				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-						"0479 Failed to enable "
-						"interrupt.\n");
-				error = -ENODEV;
-				goto out_remove_device;
-			}
-			/* Stop HBA SLI setups */
-			lpfc_stop_port(phba);
 			/* Disable the current interrupt mode */
-			lpfc_disable_intr(phba);
+			lpfc_sli_disable_intr(phba);
 			/* Try next level of interrupt mode */
 			cfg_mode = --intr_mode;
 		}
 	}
 
-	/*
-	 * hba setup may have changed the hba_queue_depth so we need to adjust
-	 * the value of can_queue.
-	 */
-	shost->can_queue = phba->cfg_hba_queue_depth - 10;
-	if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
-
-		if (lpfc_prot_mask && lpfc_prot_guard) {
-			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-					"1478 Registering BlockGuard with the "
-					"SCSI layer\n");
-
-			scsi_host_set_prot(shost, lpfc_prot_mask);
-			scsi_host_set_guard(shost, lpfc_prot_guard);
-		}
-	}
-
-	if (!_dump_buf_data) {
-		int pagecnt = 10;
-		while (pagecnt) {
-			spin_lock_init(&_dump_buf_lock);
-			_dump_buf_data =
-				(char *) __get_free_pages(GFP_KERNEL, pagecnt);
-			if (_dump_buf_data) {
-				printk(KERN_ERR "BLKGRD allocated %d pages for "
-						"_dump_buf_data at 0x%p\n",
-						(1 << pagecnt), _dump_buf_data);
-				_dump_buf_data_order = pagecnt;
-				memset(_dump_buf_data, 0, ((1 << PAGE_SHIFT)
-							   << pagecnt));
-				break;
-			} else {
-				--pagecnt;
-			}
-
-		}
-
-		if (!_dump_buf_data_order)
-			printk(KERN_ERR "BLKGRD ERROR unable to allocate "
-					"memory for hexdump\n");
-
-	} else {
-		printk(KERN_ERR "BLKGRD already allocated _dump_buf_data=0x%p"
-		       "\n", _dump_buf_data);
-	}
-
-
-	if (!_dump_buf_dif) {
-		int pagecnt = 10;
-		while (pagecnt) {
-			_dump_buf_dif =
-				(char *) __get_free_pages(GFP_KERNEL, pagecnt);
-			if (_dump_buf_dif) {
-				printk(KERN_ERR "BLKGRD allocated %d pages for "
-						"_dump_buf_dif at 0x%p\n",
-						(1 << pagecnt), _dump_buf_dif);
-				_dump_buf_dif_order = pagecnt;
-				memset(_dump_buf_dif, 0, ((1 << PAGE_SHIFT)
-							  << pagecnt));
-				break;
-			} else {
-				--pagecnt;
-			}
-
-		}
-
-		if (!_dump_buf_dif_order)
-			printk(KERN_ERR "BLKGRD ERROR unable to allocate "
-					"memory for hexdump\n");
-
-	} else {
-		printk(KERN_ERR "BLKGRD already allocated _dump_buf_dif=0x%p\n",
-				_dump_buf_dif);
-	}
-
-	lpfc_host_attrib_init(shost);
-
-	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
-		spin_lock_irq(shost->host_lock);
-		lpfc_poll_start_timer(phba);
-		spin_unlock_irq(shost->host_lock);
-	}
+	/* Perform post initialization setup */
+	lpfc_post_init_setup(phba);
 
-	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-			"0428 Perform SCSI scan\n");
-	/* Send board arrival event to upper layer */
-	adapter_event.event_type = FC_REG_ADAPTER_EVENT;
-	adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
-	fc_host_post_vendor_event(shost, fc_get_event_number(),
-		sizeof(adapter_event),
-		(char *) &adapter_event,
-		LPFC_NL_VENDOR_ID);
+	/* Check if there are static vports to be created. */
+	lpfc_create_static_vport(phba);
 
 	return 0;
 
 out_remove_device:
-	spin_lock_irq(shost->host_lock);
-	vport->load_flag |= FC_UNLOADING;
-	spin_unlock_irq(shost->host_lock);
-	lpfc_stop_phba_timers(phba);
-	phba->pport->work_port_events = 0;
-	lpfc_disable_intr(phba);
-	lpfc_sli_hba_down(phba);
-	lpfc_sli_brdrestart(phba);
+	lpfc_unset_hba(phba);
 out_free_sysfs_attr:
 	lpfc_free_sysfs_attr(vport);
-out_destroy_port:
-	destroy_port(vport);
-out_kthread_stop:
-	kthread_stop(phba->worker_thread);
-out_free_iocbq:
-	list_for_each_entry_safe(iocbq_entry, iocbq_next,
-						&phba->lpfc_iocb_list, list) {
-		kfree(iocbq_entry);
-		phba->total_iocbq_bufs--;
-	}
-	lpfc_mem_free(phba);
-out_free_hbqslimp:
-	dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
-			  phba->hbqslimp.virt, phba->hbqslimp.phys);
-out_free_slim:
-	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
-			  phba->slim2p.virt, phba->slim2p.phys);
-out_iounmap:
-	iounmap(phba->ctrl_regs_memmap_p);
-out_iounmap_slim:
-	iounmap(phba->slim_memmap_p);
-out_idr_remove:
-	idr_remove(&lpfc_hba_index, phba->brd_no);
+out_destroy_shost:
+	lpfc_destroy_shost(phba);
+out_unset_driver_resource:
+	lpfc_unset_driver_resource_phase2(phba);
+out_free_iocb_list:
+	lpfc_free_iocb_list(phba);
+out_unset_driver_resource_s3:
+	lpfc_sli_driver_resource_unset(phba);
+out_unset_pci_mem_s3:
+	lpfc_sli_pci_mem_unset(phba);
+out_disable_pci_dev:
+	lpfc_disable_pci_dev(phba);
 out_free_phba:
-	kfree(phba);
-out_release_regions:
-	pci_release_selected_regions(pdev, bars);
-out_disable_device:
-	pci_disable_device(pdev);
-out:
-	pci_set_drvdata(pdev, NULL);
-	if (shost)
-		scsi_host_put(shost);
+	lpfc_hba_free(phba);
 	return error;
 }
 
 /**
- * lpfc_pci_remove_one - lpfc PCI func to unregister device from PCI subsystem
+ * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem.
  * @pdev: pointer to PCI device
  *
- * This routine is to be registered to the kernel's PCI subsystem. When an
- * Emulex HBA is removed from PCI bus, it performs all the necessary cleanup
- * for the HBA device to be removed from the PCI subsystem properly.
+ * This routine is to be called to detach a device with SLI-3 interface
+ * spec from PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
+ * removed from PCI bus, it performs all the necessary cleanup for the HBA
+ * device to be removed from the PCI subsystem properly.
  **/
 static void __devexit
-lpfc_pci_remove_one(struct pci_dev *pdev)
+lpfc_pci_remove_one_s3(struct pci_dev *pdev)
 {
 	struct Scsi_Host  *shost = pci_get_drvdata(pdev);
 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
@@ -3098,7 +3825,7 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
 	/* Release all the vports against this physical port */
 	vports = lpfc_create_vport_work_array(phba);
 	if (vports != NULL)
-		for (i = 1; i <= phba->max_vpi && vports[i] != NULL; i++)
+		for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++)
 			fc_vport_terminate(vports[i]->fc_vport);
 	lpfc_destroy_vport_work_array(phba, vports);
 
@@ -3120,7 +3847,7 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
 	/* Final cleanup of txcmplq and reset the HBA */
 	lpfc_sli_brdrestart(phba);
 
-	lpfc_stop_phba_timers(phba);
+	lpfc_stop_hba_timers(phba);
 	spin_lock_irq(&phba->hbalock);
 	list_del_init(&vport->listentry);
 	spin_unlock_irq(&phba->hbalock);
@@ -3128,7 +3855,7 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
 	lpfc_debugfs_terminate(vport);
 
 	/* Disable interrupt */
-	lpfc_disable_intr(phba);
+	lpfc_sli_disable_intr(phba);
 
 	pci_set_drvdata(pdev, NULL);
 	scsi_host_put(shost);
@@ -3138,7 +3865,7 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
 	 * corresponding pools here.
 	 */
 	lpfc_scsi_free(phba);
-	lpfc_mem_free(phba);
+	lpfc_mem_free_all(phba);
 
 	dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
 			  phba->hbqslimp.virt, phba->hbqslimp.phys);
@@ -3151,36 +3878,35 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
 	iounmap(phba->ctrl_regs_memmap_p);
 	iounmap(phba->slim_memmap_p);
 
-	idr_remove(&lpfc_hba_index, phba->brd_no);
-
-	kfree(phba);
+	lpfc_hba_free(phba);
 
 	pci_release_selected_regions(pdev, bars);
 	pci_disable_device(pdev);
 }
 
 /**
- * lpfc_pci_suspend_one - lpfc PCI func to suspend device for power management
+ * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmt
  * @pdev: pointer to PCI device
  * @msg: power management message
  *
- * This routine is to be registered to the kernel's PCI subsystem to support
- * system Power Management (PM). When PM invokes this method, it quiesces the
- * device by stopping the driver's worker thread for the device, turning off
- * device's interrupt and DMA, and bring the device offline. Note that as the
- * driver implements the minimum PM requirements to a power-aware driver's PM
- * support for suspend/resume -- all the possible PM messages (SUSPEND,
- * HIBERNATE, FREEZE) to the suspend() method call will be treated as SUSPEND
- * and the driver will fully reinitialize its device during resume() method
- * call, the driver will set device to PCI_D3hot state in PCI config space
- * instead of setting it according to the @msg provided by the PM.
+ * This routine is to be called from the kernel's PCI subsystem to support
+ * system Power Management (PM) to device with SLI-3 interface spec. When
+ * PM invokes this method, it quiesces the device by stopping the driver's
+ * worker thread for the device, turning off the device's interrupt and DMA,
+ * and bringing the device offline. Note that as the driver implements the
+ * minimum PM requirements to a power-aware driver's PM support for the
+ * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
+ * to the suspend() method call will be treated as SUSPEND and the driver will
+ * fully reinitialize its device during resume() method call, the driver will
+ * set device to PCI_D3hot state in PCI config space instead of setting it
+ * according to the @msg provided by the PM.
  *
  * Return code
- *   0 - driver suspended the device
- *   Error otherwise
+ * 	0 - driver suspended the device
+ * 	Error otherwise
  **/
 static int
-lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
+lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg)
 {
 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
@@ -3194,7 +3920,7 @@ lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
 	kthread_stop(phba->worker_thread);
 
 	/* Disable interrupt from device */
-	lpfc_disable_intr(phba);
+	lpfc_sli_disable_intr(phba);
 
 	/* Save device state to PCI config space */
 	pci_save_state(pdev);
@@ -3204,25 +3930,26 @@ lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
 }
 
 /**
- * lpfc_pci_resume_one - lpfc PCI func to resume device for power management
+ * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmt
  * @pdev: pointer to PCI device
  *
- * This routine is to be registered to the kernel's PCI subsystem to support
- * system Power Management (PM). When PM invokes this method, it restores
- * the device's PCI config space state and fully reinitializes the device
- * and brings it online. Note that as the driver implements the minimum PM
- * requirements to a power-aware driver's PM for suspend/resume -- all
- * the possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend()
- * method call will be treated as SUSPEND and the driver will fully
- * reinitialize its device during resume() method call, the device will be
- * set to PCI_D0 directly in PCI config space before restoring the state.
+ * This routine is to be called from the kernel's PCI subsystem to support
+ * system Power Management (PM) to device with SLI-3 interface spec. When PM
+ * invokes this method, it restores the device's PCI config space state and
+ * fully reinitializes the device and brings it online. Note that as the
+ * driver implements the minimum PM requirements to a power-aware driver's
+ * PM for suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE,
+ * FREEZE) to the suspend() method call will be treated as SUSPEND and the
+ * driver will fully reinitialize its device during resume() method call,
+ * the device will be set to PCI_D0 directly in PCI config space before
+ * restoring the state.
  *
  * Return code
- *   0 - driver suspended the device
- *   Error otherwise
+ * 	0 - driver resumed the device
+ * 	Error otherwise
  **/
 static int
-lpfc_pci_resume_one(struct pci_dev *pdev)
+lpfc_pci_resume_one_s3(struct pci_dev *pdev)
 {
 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
@@ -3250,7 +3977,7 @@ lpfc_pci_resume_one(struct pci_dev *pdev)
 	}
 
 	/* Configure and enable interrupt */
-	intr_mode = lpfc_enable_intr(phba, phba->intr_mode);
+	intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
 	if (intr_mode == LPFC_INTR_ERROR) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"0430 PM resume Failed to enable interrupt\n");
@@ -3269,23 +3996,24 @@ lpfc_pci_resume_one(struct pci_dev *pdev)
 }
 
 /**
- * lpfc_io_error_detected - Driver method for handling PCI I/O error detected
+ * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
  * @pdev: pointer to PCI device.
  * @state: the current PCI connection state.
  *
- * This routine is registered to the PCI subsystem for error handling. This
- * function is called by the PCI subsystem after a PCI bus error affecting
- * this device has been detected. When this function is invoked, it will
- * need to stop all the I/Os and interrupt(s) to the device. Once that is
- * done, it will return PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to
- * perform proper recovery as desired.
+ * This routine is called from the PCI subsystem for I/O error handling to
+ * device with SLI-3 interface spec. This function is called by the PCI
+ * subsystem after a PCI bus error affecting this device has been detected.
+ * When this function is invoked, it will need to stop all the I/Os and
+ * interrupt(s) to the device. Once that is done, it will return
+ * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery
+ * as desired.
  *
  * Return codes
- *   PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
- *   PCI_ERS_RESULT_DISCONNECT - device could not be recovered
+ * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
+ * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
  **/
-static pci_ers_result_t lpfc_io_error_detected(struct pci_dev *pdev,
-				pci_channel_state_t state)
+static pci_ers_result_t
+lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
 {
 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
@@ -3312,30 +4040,32 @@ static pci_ers_result_t lpfc_io_error_detected(struct pci_dev *pdev,
 	lpfc_sli_abort_iocb_ring(phba, pring);
 
 	/* Disable interrupt */
-	lpfc_disable_intr(phba);
+	lpfc_sli_disable_intr(phba);
 
 	/* Request a slot reset. */
 	return PCI_ERS_RESULT_NEED_RESET;
 }
 
 /**
- * lpfc_io_slot_reset - Restart a PCI device from scratch
+ * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch.
  * @pdev: pointer to PCI device.
  *
- * This routine is registered to the PCI subsystem for error handling. This is
- * called after PCI bus has been reset to restart the PCI card from scratch,
- * as if from a cold-boot. During the PCI subsystem error recovery, after the
- * driver returns PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform
- * proper error recovery and then call this routine before calling the .resume
- * method to recover the device. This function will initialize the HBA device,
- * enable the interrupt, but it will just put the HBA to offline state without
- * passing any I/O traffic.
+ * This routine is called from the PCI subsystem for error handling to
+ * device with SLI-3 interface spec. This is called after PCI bus has been
+ * reset to restart the PCI card from scratch, as if from a cold-boot.
+ * During the PCI subsystem error recovery, after driver returns
+ * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
+ * recovery and then call this routine before calling the .resume method
+ * to recover the device. This function will initialize the HBA device,
+ * enable the interrupt, but it will just put the HBA to offline state
+ * without passing any I/O traffic.
  *
  * Return codes
- *   PCI_ERS_RESULT_RECOVERED - the device has been recovered
- *   PCI_ERS_RESULT_DISCONNECT - device could not be recovered
+ * 	PCI_ERS_RESULT_RECOVERED - the device has been recovered
+ * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
  */
-static pci_ers_result_t lpfc_io_slot_reset(struct pci_dev *pdev)
+static pci_ers_result_t
+lpfc_io_slot_reset_s3(struct pci_dev *pdev)
 {
 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
@@ -3354,11 +4084,11 @@ static pci_ers_result_t lpfc_io_slot_reset(struct pci_dev *pdev)
 		pci_set_master(pdev);
 
 	spin_lock_irq(&phba->hbalock);
-	psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
+	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
 	spin_unlock_irq(&phba->hbalock);
 
 	/* Configure and enable interrupt */
-	intr_mode = lpfc_enable_intr(phba, phba->intr_mode);
+	intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
 	if (intr_mode == LPFC_INTR_ERROR) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"0427 Cannot re-enable interrupt after "
@@ -3378,15 +4108,17 @@ static pci_ers_result_t lpfc_io_slot_reset(struct pci_dev *pdev)
 }
 
 /**
- * lpfc_io_resume - Resume PCI I/O operation
+ * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device.
  * @pdev: pointer to PCI device
  *
- * This routine is registered to the PCI subsystem for error handling. It is
- * called when kernel error recovery tells the lpfc driver that it is ok to
- * resume normal PCI operation after PCI bus error recovery. After this call,
- * traffic can start to flow from this device again.
+ * This routine is called from the PCI subsystem for error handling to device
+ * with SLI-3 interface spec. It is called when kernel error recovery tells
+ * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
+ * error recovery. After this call, traffic can start to flow from this device
+ * again.
  */
-static void lpfc_io_resume(struct pci_dev *pdev)
+static void
+lpfc_io_resume_s3(struct pci_dev *pdev)
 {
 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
@@ -3394,6 +4126,235 @@ static void lpfc_io_resume(struct pci_dev *pdev)
 	lpfc_online(phba);
 }
 
+/**
+ * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem
+ * @pdev: pointer to PCI device
+ * @pid: pointer to PCI device identifier
+ *
+ * This routine is to be registered to the kernel's PCI subsystem. When an
+ * Emulex HBA device is presented on the PCI bus, the kernel PCI subsystem
+ * looks at PCI device-specific information of the device and driver to see
+ * if the driver states that it can support this kind of device. If the
+ * match is
+ * successful, the driver core invokes this routine. This routine dispatches
+ * the action to the proper SLI-3 or SLI-4 device probing routine, which will
+ * do all the initialization that it needs to do to handle the HBA device
+ * properly.
+ *
+ * Return code
+ * 	0 - driver can claim the device
+ * 	negative value - driver can not claim the device
+ **/
+static int __devinit
+lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
+{
+	int rc;
+	uint16_t dev_id;
+
+	if (pci_read_config_word(pdev, PCI_DEVICE_ID, &dev_id))
+		return -ENODEV;
+
+	switch (dev_id) {
+	default:
+		rc = lpfc_pci_probe_one_s3(pdev, pid);
+		break;
+	}
+	return rc;
+}
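
With only SLI-3 parts wired up, every device ID currently falls through to
the default case; the switch exists so that SLI-4 IDs can be routed once
their probe routine lands. A hypothetical sketch of the intended end state
(lpfc_pci_probe_one_s4 is not part of this patch):

	switch (dev_id) {
	case PCI_DEVICE_ID_TIGERSHARK:
	case PCI_DEVICE_ID_TIGERSHARK_S:
		rc = lpfc_pci_probe_one_s4(pdev, pid);	/* hypothetical */
		break;
	default:
		rc = lpfc_pci_probe_one_s3(pdev, pid);	/* all SLI-3 parts */
		break;
	}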
+
+/**
+ * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem
+ * @pdev: pointer to PCI device
+ *
+ * This routine is to be registered to the kernel's PCI subsystem. When an
+ * Emulex HBA is removed from PCI bus, the driver core invokes this routine.
+ * This routine dispatches the action to the proper SLI-3 or SLI-4 device
+ * remove routine, which will perform all the necessary cleanup for the
+ * device to be removed from the PCI subsystem properly.
+ **/
+static void __devexit
+lpfc_pci_remove_one(struct pci_dev *pdev)
+{
+	struct Scsi_Host *shost = pci_get_drvdata(pdev);
+	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+
+	switch (phba->pci_dev_grp) {
+	case LPFC_PCI_DEV_LP:
+		lpfc_pci_remove_one_s3(pdev);
+		break;
+	default:
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"1424 Invalid PCI device group: 0x%x\n",
+				phba->pci_dev_grp);
+		break;
+	}
+	return;
+}
+
+/**
+ * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management
+ * @pdev: pointer to PCI device
+ * @msg: power management message
+ *
+ * This routine is to be registered to the kernel's PCI subsystem to support
+ * system Power Management (PM). When PM invokes this method, it dispatches
+ * the action to the proper SLI-3 or SLI-4 device suspend routine, which will
+ * suspend the device.
+ *
+ * Return code
+ * 	0 - driver suspended the device
+ * 	Error otherwise
+ **/
+static int
+lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
+{
+	struct Scsi_Host *shost = pci_get_drvdata(pdev);
+	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+	int rc = -ENODEV;
+
+	switch (phba->pci_dev_grp) {
+	case LPFC_PCI_DEV_LP:
+		rc = lpfc_pci_suspend_one_s3(pdev, msg);
+		break;
+	default:
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"1425 Invalid PCI device group: 0x%x\n",
+				phba->pci_dev_grp);
+		break;
+	}
+	return rc;
+}
+
+/**
+ * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management
+ * @pdev: pointer to PCI device
+ *
+ * This routine is to be registered to the kernel's PCI subsystem to support
+ * system Power Management (PM). When PM invokes this method, it dispatches
+ * the action to the proper SLI-3 or SLI-4 device resume routine, which will
+ * resume the device.
+ *
+ * Return code
+ * 	0 - driver resumed the device
+ * 	Error otherwise
+ **/
+static int
+lpfc_pci_resume_one(struct pci_dev *pdev)
+{
+	struct Scsi_Host *shost = pci_get_drvdata(pdev);
+	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+	int rc = -ENODEV;
+
+	switch (phba->pci_dev_grp) {
+	case LPFC_PCI_DEV_LP:
+		rc = lpfc_pci_resume_one_s3(pdev);
+		break;
+	default:
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"1426 Invalid PCI device group: 0x%x\n",
+				phba->pci_dev_grp);
+		break;
+	}
+	return rc;
+}
+
+/**
+ * lpfc_io_error_detected - lpfc method for handling PCI I/O error
+ * @pdev: pointer to PCI device.
+ * @state: the current PCI connection state.
+ *
+ * This routine is registered to the PCI subsystem for error handling. This
+ * function is called by the PCI subsystem after a PCI bus error affecting
+ * this device has been detected. When this routine is invoked, it dispatches
+ * the action to the proper SLI-3 or SLI-4 device error detected handling
+ * routine, which will perform the proper error detected operation.
+ *
+ * Return codes
+ * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
+ * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
+ **/
+static pci_ers_result_t
+lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
+{
+	struct Scsi_Host *shost = pci_get_drvdata(pdev);
+	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
+
+	switch (phba->pci_dev_grp) {
+	case LPFC_PCI_DEV_LP:
+		rc = lpfc_io_error_detected_s3(pdev, state);
+		break;
+	default:
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"1427 Invalid PCI device group: 0x%x\n",
+				phba->pci_dev_grp);
+		break;
+	}
+	return rc;
+}
+
+/**
+ * lpfc_io_slot_reset - lpfc method for restart PCI dev from scratch
+ * @pdev: pointer to PCI device.
+ *
+ * This routine is registered to the PCI subsystem for error handling. This
+ * function is called after PCI bus has been reset to restart the PCI card
+ * from scratch, as if from a cold-boot. When this routine is invoked, it
+ * dispatches the action to the proper SLI-3 or SLI-4 device reset handling
+ * routine, which will perform the proper device reset.
+ *
+ * Return codes
+ * 	PCI_ERS_RESULT_RECOVERED - the device has been recovered
+ * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
+ **/
+static pci_ers_result_t
+lpfc_io_slot_reset(struct pci_dev *pdev)
+{
+	struct Scsi_Host *shost = pci_get_drvdata(pdev);
+	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
+
+	switch (phba->pci_dev_grp) {
+	case LPFC_PCI_DEV_LP:
+		rc = lpfc_io_slot_reset_s3(pdev);
+		break;
+	default:
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"1428 Invalid PCI device group: 0x%x\n",
+				phba->pci_dev_grp);
+		break;
+	}
+	return rc;
+}
+
+/**
+ * lpfc_io_resume - lpfc method for resuming PCI I/O operation
+ * @pdev: pointer to PCI device
+ *
+ * This routine is registered to the PCI subsystem for error handling. It
+ * is called when kernel error recovery tells the lpfc driver that it is
+ * OK to resume normal PCI operation after PCI bus error recovery. When
+ * this routine is invoked, it dispatches the action to the proper SLI-3
+ * or SLI-4 device io_resume routine, which will resume the device operation.
+ **/
+static void
+lpfc_io_resume(struct pci_dev *pdev)
+{
+	struct Scsi_Host *shost = pci_get_drvdata(pdev);
+	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+
+	switch (phba->pci_dev_grp) {
+	case LPFC_PCI_DEV_LP:
+		lpfc_io_resume_s3(pdev);
+		break;
+	default:
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"1429 Invalid PCI device group: 0x%x\n",
+				phba->pci_dev_grp);
+		break;
+	}
+	return;
+}
+
 static struct pci_device_id lpfc_id_table[] = {
 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER,
 		PCI_ANY_ID, PCI_ANY_ID, },
@@ -3469,6 +4430,10 @@ static struct pci_device_id lpfc_id_table[] = {
 		PCI_ANY_ID, PCI_ANY_ID, },
 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_S,
 		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK,
+		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK_S,
+		PCI_ANY_ID, PCI_ANY_ID, },
 	{ 0 }
 };
 
@@ -3486,7 +4451,7 @@ static struct pci_driver lpfc_driver = {
 	.probe		= lpfc_pci_probe_one,
 	.remove		= __devexit_p(lpfc_pci_remove_one),
 	.suspend        = lpfc_pci_suspend_one,
-	.resume         = lpfc_pci_resume_one,
+	.resume		= lpfc_pci_resume_one,
 	.err_handler    = &lpfc_err_handler,
 };
 
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 167b66dd34c..a226c053c0f 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -438,22 +438,23 @@ lpfc_scsi_dev_block(struct lpfc_hba *phba)
 }
 
 /**
- * lpfc_new_scsi_buf - Scsi buffer allocator
+ * lpfc_new_scsi_buf_s3 - Scsi buffer allocator for HBA with SLI3 IF spec
  * @vport: The virtual port for which this call being executed.
+ * @num_to_alloc: The requested number of buffers to allocate.
  *
- * This routine allocates a scsi buffer, which contains all the necessary
- * information needed to initiate a SCSI I/O.  The non-DMAable buffer region
- * contains information to build the IOCB.  The DMAable region contains
- * memory for the FCP CMND, FCP RSP, and the initial BPL.  In addition to
- * allocating memory, the FCP CMND and FCP RSP BDEs are setup in the BPL
- * and the BPL BDE is setup in the IOCB.
+ * This routine allocates a scsi buffer for device with SLI-3 interface spec,
+ * the scsi buffer contains all the necessary information needed to initiate
+ * a SCSI I/O. The non-DMAable buffer region contains information to build
+ * the IOCB. The DMAable region contains memory for the FCP CMND, FCP RSP,
+ * and the initial BPL. In addition to allocating memory, the FCP CMND and
+ * FCP RSP BDEs are setup in the BPL and the BPL BDE is setup in the IOCB.
  *
  * Return codes:
- *   NULL - Error
- *   Pointer to lpfc_scsi_buf data structure - Success
+ *   int - number of scsi buffers that were allocated.
+ *   0 = failure; fewer than num_to_alloc indicates a partial failure.
  **/
-static struct lpfc_scsi_buf *
-lpfc_new_scsi_buf(struct lpfc_vport *vport)
+static int
+lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
 {
 	struct lpfc_hba *phba = vport->phba;
 	struct lpfc_scsi_buf *psb;
@@ -463,107 +464,134 @@ lpfc_new_scsi_buf(struct lpfc_vport *vport)
 	dma_addr_t pdma_phys_fcp_rsp;
 	dma_addr_t pdma_phys_bpl;
 	uint16_t iotag;
+	int bcnt;
 
-	psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
-	if (!psb)
-		return NULL;
+	for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
+		psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
+		if (!psb)
+			break;
 
-	/*
-	 * Get memory from the pci pool to map the virt space to pci bus space
-	 * for an I/O.  The DMA buffer includes space for the struct fcp_cmnd,
-	 * struct fcp_rsp and the number of bde's necessary to support the
-	 * sg_tablesize.
-	 */
-	psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool, GFP_KERNEL,
-							&psb->dma_handle);
-	if (!psb->data) {
-		kfree(psb);
-		return NULL;
-	}
-
-	/* Initialize virtual ptrs to dma_buf region. */
-	memset(psb->data, 0, phba->cfg_sg_dma_buf_size);
-
-	/* Allocate iotag for psb->cur_iocbq. */
-	iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
-	if (iotag == 0) {
-		pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
-			      psb->data, psb->dma_handle);
-		kfree (psb);
-		return NULL;
-	}
-	psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;
-
-	psb->fcp_cmnd = psb->data;
-	psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
-	psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) +
-							sizeof(struct fcp_rsp);
-
-	/* Initialize local short-hand pointers. */
-	bpl = psb->fcp_bpl;
-	pdma_phys_fcp_cmd = psb->dma_handle;
-	pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd);
-	pdma_phys_bpl = psb->dma_handle + sizeof(struct fcp_cmnd) +
+		/*
+		 * Get memory from the pci pool to map the virt space to pci
+		 * bus space for an I/O.  The DMA buffer includes space for the
+		 * struct fcp_cmnd, struct fcp_rsp and the number of bde's
+		 * necessary to support the sg_tablesize.
+		 */
+		psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool,
+					GFP_KERNEL, &psb->dma_handle);
+		if (!psb->data) {
+			kfree(psb);
+			break;
+		}
+
+		/* Initialize virtual ptrs to dma_buf region. */
+		memset(psb->data, 0, phba->cfg_sg_dma_buf_size);
+
+		/* Allocate iotag for psb->cur_iocbq. */
+		iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
+		if (iotag == 0) {
+			pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
+					psb->data, psb->dma_handle);
+			kfree(psb);
+			break;
+		}
+		psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;
+
+		psb->fcp_cmnd = psb->data;
+		psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
+		psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) +
 			sizeof(struct fcp_rsp);
 
-	/*
-	 * The first two bdes are the FCP_CMD and FCP_RSP.  The balance are sg
-	 * list bdes.  Initialize the first two and leave the rest for
-	 * queuecommand.
-	 */
-	bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd));
-	bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd));
-	bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd);
-	bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
-	bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w);
-
-	/* Setup the physical region for the FCP RSP */
-	bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp));
-	bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp));
-	bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp);
-	bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
-	bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w);
+		/* Initialize local short-hand pointers. */
+		bpl = psb->fcp_bpl;
+		pdma_phys_fcp_cmd = psb->dma_handle;
+		pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd);
+		pdma_phys_bpl = psb->dma_handle + sizeof(struct fcp_cmnd) +
+			sizeof(struct fcp_rsp);
+
+		/*
+		 * The first two bdes are the FCP_CMD and FCP_RSP. The balance
+		 * are sg list bdes.  Initialize the first two and leave the
+		 * rest for queuecommand.
+		 */
+		bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd));
+		bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd));
+		bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd);
+		bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+		bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w);
+
+		/* Setup the physical region for the FCP RSP */
+		bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp));
+		bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp));
+		bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp);
+		bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+		bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w);
+
+		/*
+		 * Since the IOCB for the FCP I/O is built into this
+		 * lpfc_scsi_buf, initialize it with all known data now.
+		 */
+		iocb = &psb->cur_iocbq.iocb;
+		iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
+		if ((phba->sli_rev == 3) &&
+				!(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) {
+			/* fill in immediate fcp command BDE */
+			iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED;
+			iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
+			iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t,
+					unsli3.fcp_ext.icd);
+			iocb->un.fcpi64.bdl.addrHigh = 0;
+			iocb->ulpBdeCount = 0;
+			iocb->ulpLe = 0;
+			/* fill in response BDE */
+			iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags =
+							BUFF_TYPE_BDE_64;
+			iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize =
+				sizeof(struct fcp_rsp);
+			iocb->unsli3.fcp_ext.rbde.addrLow =
+				putPaddrLow(pdma_phys_fcp_rsp);
+			iocb->unsli3.fcp_ext.rbde.addrHigh =
+				putPaddrHigh(pdma_phys_fcp_rsp);
+		} else {
+			iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
+			iocb->un.fcpi64.bdl.bdeSize =
+					(2 * sizeof(struct ulp_bde64));
+			iocb->un.fcpi64.bdl.addrLow =
+					putPaddrLow(pdma_phys_bpl);
+			iocb->un.fcpi64.bdl.addrHigh =
+					putPaddrHigh(pdma_phys_bpl);
+			iocb->ulpBdeCount = 1;
+			iocb->ulpLe = 1;
+		}
+		iocb->ulpClass = CLASS3;
+		psb->status = IOSTAT_SUCCESS;
 
-	/*
-	 * Since the IOCB for the FCP I/O is built into this lpfc_scsi_buf,
-	 * initialize it with all known data now.
-	 */
-	iocb = &psb->cur_iocbq.iocb;
-	iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
-	if ((phba->sli_rev == 3) &&
-	    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) {
-		/* fill in immediate fcp command BDE */
-		iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED;
-		iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
-		iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t,
-						       unsli3.fcp_ext.icd);
-		iocb->un.fcpi64.bdl.addrHigh = 0;
-		iocb->ulpBdeCount = 0;
-		iocb->ulpLe = 0;
-		/* fill in responce BDE */
-		iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
-		iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize =
-						sizeof(struct fcp_rsp);
-		iocb->unsli3.fcp_ext.rbde.addrLow =
-						putPaddrLow(pdma_phys_fcp_rsp);
-		iocb->unsli3.fcp_ext.rbde.addrHigh =
-						putPaddrHigh(pdma_phys_fcp_rsp);
-	} else {
-		iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
-		iocb->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
-		iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_bpl);
-		iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_bpl);
-		iocb->ulpBdeCount = 1;
-		iocb->ulpLe = 1;
 	}
-	iocb->ulpClass = CLASS3;
 
-	return psb;
+	return bcnt;
 }
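
The new return convention (number of buffers actually allocated, possibly
short of num_to_alloc) leaves partial-failure policy to the caller. A hedged
sketch of a caller that treats a short count as acceptable degradation (the
surrounding names are hypothetical):

	int got = lpfc_new_scsi_buf(vport, want);

	if (got == 0)
		return -ENOMEM;		/* total failure */
	if (got < want)			/* partial failure: smaller pool */
		printk(KERN_WARNING "allocated %d of %d scsi bufs\n",
		       got, want);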
 
 /**
- * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list list of Hba
- * @phba: The Hba for which this call is being executed.
+ * lpfc_new_scsi_buf - Wrapper function for scsi buffer allocator
+ * @vport: The virtual port for which this call being executed.
+ * @num_to_alloc: The requested number of buffers to allocate.
+ *
+ * This routine wraps the actual SCSI buffer allocator function pointer from
+ * the lpfc_hba struct.
+ *
+ * Return codes:
+ *   int - number of scsi buffers that were allocated.
+ *   0 = failure; fewer than num_to_alloc indicates a partial failure.
+ **/
+static inline int
+lpfc_new_scsi_buf(struct lpfc_vport *vport, int num_to_alloc)
+{
+	return vport->phba->lpfc_new_scsi_buf(vport, num_to_alloc);
+}
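
An inline wrapper like this costs only the indirect call at runtime: the
pointer is installed once at attach time, so the I/O hot path never branches
on interface type. A minimal sketch of how such a table is wired, assuming a
hypothetical setup helper (the member names match the lpfc_hba struct):

static void lpfc_wire_scsi_api(struct lpfc_hba *phba, int dev_grp)
{
	switch (dev_grp) {
	case LPFC_PCI_DEV_LP:			/* SLI-3 family */
		phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s3;
		phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s3;
		phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf_s3;
		phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3;
		break;
	/* LPFC_PCI_DEV_OC would install the _s4 variants here */
	}
}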
+
+/**
+ * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
+ * @phba: The HBA for which this call is being executed.
  *
  * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list
  * and returns to caller.
@@ -591,7 +619,7 @@ lpfc_get_scsi_buf(struct lpfc_hba * phba)
 }
 
 /**
- * lpfc_release_scsi_buf - Return a scsi buffer back to hba's lpfc_scsi_buf_list
+ * lpfc_release_scsi_buf - Return a scsi buffer back to hba scsi buf list
  * @phba: The Hba for which this call is being executed.
  * @psb: The scsi buffer which is being released.
  *
@@ -599,7 +627,7 @@ lpfc_get_scsi_buf(struct lpfc_hba * phba)
  * lpfc_scsi_buf_list list.
  **/
 static void
-lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
+lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
 {
 	unsigned long iflag = 0;
 
@@ -610,21 +638,36 @@ lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
 }
 
 /**
- * lpfc_scsi_prep_dma_buf - Routine to do DMA mapping for scsi buffer
+ * lpfc_release_scsi_buf - Return a scsi buffer back to hba scsi buf list
+ * @phba: The Hba for which this call is being executed.
+ * @psb: The scsi buffer which is being released.
+ *
+ * This routine wraps the actual scsi buffer release function pointer from
+ * the lpfc_hba struct.
+ **/
+static void
+lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
+{
+	phba->lpfc_release_scsi_buf(phba, psb);
+}
+
+/**
+ * lpfc_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
  * @phba: The Hba for which this call is being executed.
  * @lpfc_cmd: The scsi buffer which is going to be mapped.
  *
  * This routine does the pci dma mapping for scatter-gather list of scsi cmnd
- * field of @lpfc_cmd. This routine scans through sg elements and format the
- * bdea. This routine also initializes all IOCB fields which are dependent on
- * scsi command request buffer.
+ * field of @lpfc_cmd for device with SLI-3 interface spec. This routine scans
+ * through sg elements and formats the BDEs. This routine also initializes all
+ * IOCB fields which are dependent on scsi command request buffer.
  *
  * Return codes:
  *   1 - Error
  *   0 - Success
  **/
 static int
-lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
+lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
 {
 	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
 	struct scatterlist *sgel = NULL;
@@ -1411,6 +1454,24 @@ out:
 	return ret;
 }
 
+/**
+ * lpfc_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer
+ * @phba: The Hba for which this call is being executed.
+ * @lpfc_cmd: The scsi buffer which is going to be mapped.
+ *
+ * This routine wraps the actual DMA mapping function pointer from the
+ * lpfc_hba struct.
+ *
+ * Return codes:
+ * 	1 - Error
+ * 	0 - Success
+ **/
+static inline int
+lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
+{
+	return phba->lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
+}
+
 /**
  * lpfc_send_scsi_error_event - Posts an event when there is SCSI error
  * @phba: Pointer to hba context object.
@@ -1504,15 +1565,15 @@ lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
 }
 
 /**
- * lpfc_scsi_unprep_dma_buf - Routine to un-map DMA mapping of scatter gather
- * @phba: The Hba for which this call is being executed.
+ * lpfc_scsi_unprep_dma_buf_s3 - Un-map DMA mapping of SG-list for SLI3 dev
+ * @phba: The HBA for which this call is being executed.
  * @psb: The scsi buffer which is going to be un-mapped.
  *
  * This routine does DMA un-mapping of scatter gather list of scsi command
- * field of @lpfc_cmd.
+ * field of @lpfc_cmd for device with SLI-3 interface spec.
  **/
 static void
-lpfc_scsi_unprep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
+lpfc_scsi_unprep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
 {
 	/*
 	 * There are only two special cases to consider.  (1) the scsi command
@@ -1528,6 +1589,20 @@ lpfc_scsi_unprep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
 				psb->pCmd->sc_data_direction);
 }
 
+/**
+ * lpfc_scsi_unprep_dma_buf - Wrapper function for unmap DMA mapping of SG-list
+ * @phba: The Hba for which this call is being executed.
+ * @psb: The scsi buffer which is going to be un-mapped.
+ *
+ * This routine wraps the actual DMA un-mapping function pointer from the
+ * lpfc_hba struct.
+ **/
+static void
+lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
+{
+	phba->lpfc_scsi_unprep_dma_buf(phba, psb);
+}
+
 /**
  * lpfc_handler_fcp_err - FCP response handler
  * @vport: The virtual port for which this call is being executed.
@@ -1676,7 +1751,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
  * lpfc_scsi_cmd_iocb_cmpl - Scsi cmnd IOCB completion routine
  * @phba: The Hba for which this call is being executed.
  * @pIocbIn: The command IOCBQ for the scsi cmnd.
- * @pIocbOut: The response IOCBQ for the scsi cmnd .
+ * @pIocbOut: The response IOCBQ for the scsi cmnd.
  *
  * This routine assigns scsi command result by looking into response IOCB
  * status field appropriately. This routine handles QUEUE FULL condition as
@@ -1957,16 +2032,16 @@ lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd)
 }
 
 /**
- * lpfc_scsi_prep_cmnd -  Routine to convert scsi cmnd to FCP information unit
+ * lpfc_scsi_prep_cmnd_s3 - Convert scsi cmnd to FCP info unit for SLI3 dev
  * @vport: The virtual port for which this call is being executed.
  * @lpfc_cmd: The scsi command which needs to send.
  * @pnode: Pointer to lpfc_nodelist.
  *
  * This routine initializes fcp_cmnd and iocb data structure from scsi command
- * to transfer.
+ * to transfer for device with SLI3 interface spec.
  **/
 static void
-lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
+lpfc_scsi_prep_cmnd_s3(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
 		    struct lpfc_nodelist *pnode)
 {
 	struct lpfc_hba *phba = vport->phba;
@@ -2013,8 +2088,11 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
 	if (scsi_sg_count(scsi_cmnd)) {
 		if (datadir == DMA_TO_DEVICE) {
 			iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
-			iocb_cmd->un.fcpi.fcpi_parm = 0;
-			iocb_cmd->ulpPU = 0;
+			if (phba->sli_rev < LPFC_SLI_REV4) {
+				iocb_cmd->un.fcpi.fcpi_parm = 0;
+				iocb_cmd->ulpPU = 0;
+			} else
+				iocb_cmd->ulpPU = PARM_READ_CHECK;
 			fcp_cmnd->fcpCntl3 = WRITE_DATA;
 			phba->fc4OutputRequests++;
 		} else {
@@ -2051,20 +2129,37 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
 }
 
 /**
- * lpfc_scsi_prep_task_mgmt_cmnd - Convert scsi TM cmnd to FCP information unit
+ * lpfc_scsi_prep_cmnd - Wrapper func to convert scsi cmnd to FCP info unit
+ * @vport: The virtual port for which this call is being executed.
+ * @lpfc_cmd: The scsi command which needs to send.
+ * @pnode: Pointer to lpfc_nodelist.
+ *
+ * This routine wraps the actual convert SCSI cmnd function pointer from
+ * the lpfc_hba struct.
+ **/
+static inline void
+lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
+		    struct lpfc_nodelist *pnode)
+{
+	vport->phba->lpfc_scsi_prep_cmnd(vport, lpfc_cmd, pnode);
+}
+
+/**
+ * lpfc_scsi_prep_task_mgmt_cmd_s3 - Convert SLI3 scsi TM cmd to FCP info unit
  * @vport: The virtual port for which this call is being executed.
  * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
  * @lun: Logical unit number.
  * @task_mgmt_cmd: SCSI task management command.
  *
- * This routine creates FCP information unit corresponding to @task_mgmt_cmd.
+ * This routine creates FCP information unit corresponding to @task_mgmt_cmd
+ * for device with SLI-3 interface spec.
  *
  * Return codes:
  *   0 - Error
  *   1 - Success
  **/
 static int
-lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
+lpfc_scsi_prep_task_mgmt_cmd_s3(struct lpfc_vport *vport,
 			     struct lpfc_scsi_buf *lpfc_cmd,
 			     unsigned int lun,
 			     uint8_t task_mgmt_cmd)
@@ -2113,6 +2208,67 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
 	return 1;
 }
 
+/**
+ * lpfc_scsi_prep_task_mgmt_cmd - Wrapper func to convert scsi TM cmd to FCP info
+ * @vport: The virtual port for which this call is being executed.
+ * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
+ * @lun: Logical unit number.
+ * @task_mgmt_cmd: SCSI task management command.
+ *
+ * This routine wraps the actual convert SCSI TM to FCP information unit
+ * function pointer from the lpfc_hba struct.
+ *
+ * Return codes:
+ * 	0 - Error
+ * 	1 - Success
+ **/
+static inline int
+lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
+			     struct lpfc_scsi_buf *lpfc_cmd,
+			     unsigned int lun,
+			     uint8_t task_mgmt_cmd)
+{
+	struct lpfc_hba *phba = vport->phba;
+
+	return phba->lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun,
+						  task_mgmt_cmd);
+}
+
+/**
+ * lpfc_scsi_api_table_setup - Set up scsi api function jump table
+ * @phba: The hba struct for which this call is being executed.
+ * @dev_grp: The HBA PCI-Device group number.
+ *
+ * This routine sets up the SCSI interface API function jump table in @phba
+ * struct.
+ * Returns: 0 - success, -ENODEV - failure.
+ **/
+int
+lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
+{
+	switch (dev_grp) {
+	case LPFC_PCI_DEV_LP:
+		phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s3;
+		phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s3;
+		phba->lpfc_scsi_prep_cmnd = lpfc_scsi_prep_cmnd_s3;
+		phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf_s3;
+		phba->lpfc_scsi_prep_task_mgmt_cmd =
+					lpfc_scsi_prep_task_mgmt_cmd_s3;
+		phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3;
+		break;
+	default:
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"1418 Invalid HBA PCI-device group: 0x%x\n",
+				dev_grp);
+		return -ENODEV;
+		break;
+	}
+	phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf;
+	phba->lpfc_rampdown_queue_depth = lpfc_rampdown_queue_depth;
+	return 0;
+}
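
lpfc_scsi_api_table_setup() must run once, early in the attach path, before
any of the wrapped entry points can be reached; otherwise the function
pointers are still NULL. A hedged sketch of a call site (the actual probe-path
caller lives in lpfc_init.c and is not shown in this hunk):

	/* Hypothetical attach-path fragment; error handling elided. */
	if (lpfc_scsi_api_table_setup(phba, LPFC_PCI_DEV_LP))
		goto out_free_phba;	/* unknown PCI device group */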
+
 /**
  * lpfc_taskmgmt_def_cmpl - IOCB completion routine for task management command
  * @phba: The Hba for which this call is being executed.
@@ -2178,9 +2334,8 @@ lpfc_scsi_tgt_reset(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_vport *vport,
 	lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
 			 "0702 Issue Target Reset to TGT %d Data: x%x x%x\n",
 			 tgt_id, rdata->pnode->nlp_rpi, rdata->pnode->nlp_flag);
-	status = lpfc_sli_issue_iocb_wait(phba,
-				       &phba->sli.ring[phba->sli.fcp_ring],
-				       iocbq, iocbqrsp, lpfc_cmd->timeout);
+	status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
+					  iocbq, iocbqrsp, lpfc_cmd->timeout);
 	if (status != IOCB_SUCCESS) {
 		if (status == IOCB_TIMEDOUT) {
 			iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
@@ -2305,7 +2460,6 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
 	struct Scsi_Host  *shost = cmnd->device->host;
 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
 	struct lpfc_hba   *phba = vport->phba;
-	struct lpfc_sli   *psli = &phba->sli;
 	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
 	struct lpfc_nodelist *ndlp = rdata->pnode;
 	struct lpfc_scsi_buf *lpfc_cmd;
@@ -2427,7 +2581,7 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
 	lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);
 
 	atomic_inc(&ndlp->cmd_pending);
-	err = lpfc_sli_issue_iocb(phba, &phba->sli.ring[psli->fcp_ring],
+	err = lpfc_sli_issue_iocb(phba, LPFC_FCP_RING,
 				  &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
 	if (err) {
 		atomic_dec(&ndlp->cmd_pending);
@@ -2490,7 +2644,6 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
 	struct Scsi_Host  *shost = cmnd->device->host;
 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
 	struct lpfc_hba   *phba = vport->phba;
-	struct lpfc_sli_ring *pring = &phba->sli.ring[phba->sli.fcp_ring];
 	struct lpfc_iocbq *iocb;
 	struct lpfc_iocbq *abtsiocb;
 	struct lpfc_scsi_buf *lpfc_cmd;
@@ -2531,7 +2684,10 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
 	icmd = &abtsiocb->iocb;
 	icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
 	icmd->un.acxri.abortContextTag = cmd->ulpContext;
-	icmd->un.acxri.abortIoTag = cmd->ulpIoTag;
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		icmd->un.acxri.abortIoTag = iocb->sli4_xritag;
+	else
+		icmd->un.acxri.abortIoTag = cmd->ulpIoTag;
 
 	icmd->ulpLe = 1;
 	icmd->ulpClass = cmd->ulpClass;
@@ -2542,7 +2698,8 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
 
 	abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
 	abtsiocb->vport = vport;
-	if (lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0) == IOCB_ERROR) {
+	if (lpfc_sli_issue_iocb(phba, LPFC_FCP_RING, abtsiocb, 0) ==
+	    IOCB_ERROR) {
 		lpfc_sli_release_iocbq(phba, abtsiocb);
 		ret = FAILED;
 		goto out;
@@ -2668,8 +2825,7 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
 			 "0703 Issue target reset to TGT %d LUN %d "
 			 "rpi x%x nlp_flag x%x\n", cmnd->device->id,
 			 cmnd->device->lun, pnode->nlp_rpi, pnode->nlp_flag);
-	status = lpfc_sli_issue_iocb_wait(phba,
-					  &phba->sli.ring[phba->sli.fcp_ring],
+	status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
 					  iocbq, iocbqrsp, lpfc_cmd->timeout);
 	if (status == IOCB_TIMEDOUT) {
 		iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
@@ -2825,11 +2981,10 @@ lpfc_slave_alloc(struct scsi_device *sdev)
 {
 	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
 	struct lpfc_hba   *phba = vport->phba;
-	struct lpfc_scsi_buf *scsi_buf = NULL;
 	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
-	uint32_t total = 0, i;
+	uint32_t total = 0;
 	uint32_t num_to_alloc = 0;
-	unsigned long flags;
+	int num_allocated = 0;
 
 	if (!rport || fc_remote_port_chkready(rport))
 		return -ENXIO;
@@ -2863,20 +3018,13 @@ lpfc_slave_alloc(struct scsi_device *sdev)
 				 (phba->cfg_hba_queue_depth - total));
 		num_to_alloc = phba->cfg_hba_queue_depth - total;
 	}
-
-	for (i = 0; i < num_to_alloc; i++) {
-		scsi_buf = lpfc_new_scsi_buf(vport);
-		if (!scsi_buf) {
-			lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
-					 "0706 Failed to allocate "
-					 "command buffer\n");
-			break;
-		}
-
-		spin_lock_irqsave(&phba->scsi_buf_list_lock, flags);
-		phba->total_scsi_bufs++;
-		list_add_tail(&scsi_buf->list, &phba->lpfc_scsi_buf_list);
-		spin_unlock_irqrestore(&phba->scsi_buf_list_lock, flags);
+	num_allocated = lpfc_new_scsi_buf(vport, num_to_alloc);
+	if (num_to_alloc != num_allocated) {
+		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
+				 "0708 Allocation request of %d "
+				 "command buffers did not succeed.  "
+				 "Allocated %d buffers.\n",
+				 num_to_alloc, num_allocated);
 	}
 	return 0;
 }
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index eb5c75c45ba..e2d07d97fa8 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -142,7 +142,7 @@ lpfc_sli_get_iocbq(struct lpfc_hba *phba)
 }
 
 /**
- * __lpfc_sli_release_iocbq - Release iocb to the iocb pool
+ * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
  * @phba: Pointer to HBA context object.
  * @iocbq: Pointer to driver iocb object.
  *
@@ -152,7 +152,7 @@ lpfc_sli_get_iocbq(struct lpfc_hba *phba)
  * clears all other fields of the iocb object when it is freed.
  **/
 static void
-__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
+__lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
 {
 	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
 
@@ -160,9 +160,26 @@ __lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
 	 * Clean all volatile data fields, preserve iotag and node struct.
 	 */
 	memset((char*)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
+	iocbq->sli4_xritag = NO_XRI;
 	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
 }
 
+/**
+ * __lpfc_sli_release_iocbq - Release iocb to the iocb pool
+ * @phba: Pointer to HBA context object.
+ * @iocbq: Pointer to driver iocb object.
+ *
+ * This function is called with hbalock held to release driver
+ * iocb object to the iocb pool. The iotag in the iocb object
+ * does not change for each use of the iocb object. This function
+ * clears all other fields of the iocb object when it is freed.
+ **/
+static void
+__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
+{
+	phba->__lpfc_sli_release_iocbq(phba, iocbq);
+}
+
 /**
  * lpfc_sli_release_iocbq - Release iocb to the iocb pool
  * @phba: Pointer to HBA context object.
@@ -779,8 +796,8 @@ lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
 		phba->hbqs[i].buffer_count = 0;
 	}
 	/* Return all HBQ buffer that are in-fly */
-	list_for_each_entry_safe(dmabuf, next_dmabuf,
-			&phba->hbqbuf_in_list, list) {
+	list_for_each_entry_safe(dmabuf, next_dmabuf, &phba->rb_pend_list,
+				 list) {
 		hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
 		list_del(&hbq_buf->dbuf.list);
 		if (hbq_buf->tag == -1) {
@@ -814,9 +831,27 @@ lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
  * pointer to the hbq entry if it successfully post the buffer
  * else it will return NULL.
  **/
-static struct lpfc_hbq_entry *
+static int
 lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
 			 struct hbq_dmabuf *hbq_buf)
+{
+	return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
+}
+
+/**
+ * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware
+ * @phba: Pointer to HBA context object.
+ * @hbqno: HBQ number.
+ * @hbq_buf: Pointer to HBQ buffer.
+ *
+ * This function is called with the hbalock held to post a hbq buffer to the
+ * firmware. If the function finds an empty slot in the HBQ, it will post the
+ * buffer and place it on the hbq_buffer_list. The function returns zero if
+ * it successfully posts the buffer, otherwise it returns an error.
+ **/
+static int
+lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
+			    struct hbq_dmabuf *hbq_buf)
 {
 	struct lpfc_hbq_entry *hbqe;
 	dma_addr_t physaddr = hbq_buf->dbuf.phys;
@@ -838,8 +873,9 @@ lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
 				/* flush */
 		readl(phba->hbq_put + hbqno);
 		list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
-	}
-	return hbqe;
+		return 0;
+	} else
+		return -ENOMEM;
 }
 
 /* HBQ for ELS and CT traffic. */
@@ -914,7 +950,7 @@ lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
 				 dbuf.list);
 		hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count |
 				      (hbqno << 16));
-		if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
+		if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
 			phba->hbqs[hbqno].buffer_count++;
 			posted++;
 		} else
@@ -964,6 +1000,25 @@ lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
 					 lpfc_hbq_defs[qno]->init_count));
 }
 
+/**
+ * lpfc_sli_hbqbuf_get - Remove the first hbq buffer off an hbq list
+ * @rb_list: Pointer to the hbq buffer list to remove a buffer from.
+ *
+ * This function removes the first hbq buffer on an hbq list and returns a
+ * pointer to that buffer. If it finds no buffers on the list it returns NULL.
+ **/
+static struct hbq_dmabuf *
+lpfc_sli_hbqbuf_get(struct list_head *rb_list)
+{
+	struct lpfc_dmabuf *d_buf;
+
+	list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list);
+	if (!d_buf)
+		return NULL;
+	return container_of(d_buf, struct hbq_dmabuf, dbuf);
+}
+
 /**
  * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag
  * @phba: Pointer to HBA context object.
@@ -985,12 +1040,15 @@ lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
 	if (hbqno >= LPFC_MAX_HBQS)
 		return NULL;
 
+	spin_lock_irq(&phba->hbalock);
 	list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
 		hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
 		if (hbq_buf->tag == tag) {
+			spin_unlock_irq(&phba->hbalock);
 			return hbq_buf;
 		}
 	}
+	spin_unlock_irq(&phba->hbalock);
 	lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT,
 			"1803 Bad hbq tag. Data: x%x x%x\n",
 			tag, phba->hbqs[tag >> 16].buffer_count);
@@ -1013,9 +1071,8 @@ lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
 
 	if (hbq_buffer) {
 		hbqno = hbq_buffer->tag >> 16;
-		if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
+		if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
 			(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
-		}
 	}
 }
 
@@ -1317,6 +1374,45 @@ lpfc_sli_get_buff(struct lpfc_hba *phba,
 	return &hbq_entry->dbuf;
 }
 
+/**
+ * lpfc_complete_unsol_iocb - Complete an unsolicited sequence
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @saveq: Pointer to the iocbq struct representing the sequence starting frame.
+ * @fch_r_ctl: the r_ctl for the first frame of the sequence.
+ * @fch_type: the type for the first frame of the sequence.
+ *
+ * This function is called with no lock held. This function uses the r_ctl and
+ * type of the received sequence to find the correct callback function to call
+ * to process the sequence.
+ **/
+static int
+lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+			 struct lpfc_iocbq *saveq, uint32_t fch_r_ctl,
+			 uint32_t fch_type)
+{
+	int i;
+
+	/* Unsolicited Responses */
+	if (pring->prt[0].profile) {
+		if (pring->prt[0].lpfc_sli_rcv_unsol_event)
+			(pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
+									saveq);
+		return 1;
+	}
+	/* We must search, based on rctl / type
+	   for the right routine */
+	for (i = 0; i < pring->num_mask; i++) {
+		if ((pring->prt[i].rctl == fch_r_ctl) &&
+		    (pring->prt[i].type == fch_type)) {
+			if (pring->prt[i].lpfc_sli_rcv_unsol_event)
+				(pring->prt[i].lpfc_sli_rcv_unsol_event)
+						(phba, pring, saveq);
+			return 1;
+		}
+	}
+	return 0;
+}
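
lpfc_complete_unsol_iocb() only finds handlers that were registered in the
ring's prt[] mask table, matching on the first frame's R_CTL and TYPE. A
sketch of how such an entry is populated during ring setup (field names as
used above; the specific values are illustrative, not part of this hunk):

	/* Illustrative prt[] registration; values are examples only. */
	pring->prt[0].profile = 0;		/* no catch-all profile */
	pring->prt[0].rctl = FC_ELS_REQ;	/* example R_CTL */
	pring->prt[0].type = FC_ELS_DATA;	/* example TYPE */
	pring->prt[0].lpfc_sli_rcv_unsol_event = lpfc_els_unsol_event;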
 
 /**
  * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler
@@ -1339,7 +1435,7 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 	IOCB_t           * irsp;
 	WORD5            * w5p;
 	uint32_t           Rctl, Type;
-	uint32_t           match, i;
+	uint32_t           match;
 	struct lpfc_iocbq *iocbq;
 	struct lpfc_dmabuf *dmzbuf;
 
@@ -1482,35 +1578,12 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 		}
 	}
 
-	/* unSolicited Responses */
-	if (pring->prt[0].profile) {
-		if (pring->prt[0].lpfc_sli_rcv_unsol_event)
-			(pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
-									saveq);
-		match = 1;
-	} else {
-		/* We must search, based on rctl / type
-		   for the right routine */
-		for (i = 0; i < pring->num_mask; i++) {
-			if ((pring->prt[i].rctl == Rctl)
-			    && (pring->prt[i].type == Type)) {
-				if (pring->prt[i].lpfc_sli_rcv_unsol_event)
-					(pring->prt[i].lpfc_sli_rcv_unsol_event)
-							(phba, pring, saveq);
-				match = 1;
-				break;
-			}
-		}
-	}
-	if (match == 0) {
-		/* Unexpected Rctl / Type received */
-		/* Ring <ringno> handler: unexpected
-		   Rctl <Rctl> Type <Type> received */
+	if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type))
 		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
 				"0313 Ring %d handler: unexpected Rctl x%x "
 				"Type x%x received\n",
 				pring->ringno, Rctl, Type);
-	}
+
 	return 1;
 }
 
@@ -1551,6 +1624,37 @@ lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
 	return NULL;
 }
 
+/**
+ * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @iotag: IOCB tag.
+ *
+ * This function looks up the iocb_lookup table to get the command iocb
+ * corresponding to the given iotag. This function is called with the
+ * hbalock held.
+ * This function returns the command iocb object if it finds the command
+ * iocb else returns NULL.
+ **/
+static struct lpfc_iocbq *
+lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
+			     struct lpfc_sli_ring *pring, uint16_t iotag)
+{
+	struct lpfc_iocbq *cmd_iocb;
+
+	if (iotag != 0 && iotag <= phba->sli.last_iotag) {
+		cmd_iocb = phba->sli.iocbq_lookup[iotag];
+		list_del_init(&cmd_iocb->list);
+		pring->txcmplq_cnt--;
+		return cmd_iocb;
+	}
+
+	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+			"0372 iotag x%x is out of range: max iotag (x%x)\n",
+			iotag, phba->sli.last_iotag);
+	return NULL;
+}
+
 /**
  * lpfc_sli_process_sol_iocb - process solicited iocb completion
  * @phba: Pointer to HBA context object.
@@ -1954,7 +2058,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
 			if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
 				(irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
 				spin_unlock_irqrestore(&phba->hbalock, iflag);
-				lpfc_rampdown_queue_depth(phba);
+				phba->lpfc_rampdown_queue_depth(phba);
 				spin_lock_irqsave(&phba->hbalock, iflag);
 			}
 
@@ -2068,39 +2172,215 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
 }
 
 /**
- * lpfc_sli_handle_slow_ring_event - Handle ring events for non-FCP rings
+ * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @rspiocbp: Pointer to driver response IOCB object.
+ *
+ * This function is called from the worker thread when there is a slow-path
+ * response IOCB to process. This function chains all the response iocbs until
+ * seeing the iocb with the LE bit set. The function will call
+ * lpfc_sli_process_sol_iocb function if the response iocb indicates a
+ * completion of a command iocb. The function will call the
+ * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb.
+ * The function frees the resources or calls the completion handler if this
+ * iocb is an abort completion. The function returns NULL when the response
+ * iocb has the LE bit set and all the chained iocbs are processed, otherwise
+ * this function shall chain the iocb on to the iocb_continueq and return the
+ * response iocb passed in.
+ **/
+static struct lpfc_iocbq *
+lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+			struct lpfc_iocbq *rspiocbp)
+{
+	struct lpfc_iocbq *saveq;
+	struct lpfc_iocbq *cmdiocbp;
+	struct lpfc_iocbq *next_iocb;
+	IOCB_t *irsp = NULL;
+	uint32_t free_saveq;
+	uint8_t iocb_cmd_type;
+	lpfc_iocb_type type;
+	unsigned long iflag;
+	int rc;
+
+	spin_lock_irqsave(&phba->hbalock, iflag);
+	/* First add the response iocb to the continueq list */
+	list_add_tail(&rspiocbp->list, &(pring->iocb_continueq));
+	pring->iocb_continueq_cnt++;
+
+	/* Now, determine whether the list is completed for processing */
+	irsp = &rspiocbp->iocb;
+	if (irsp->ulpLe) {
+		/*
+		 * By default, the driver expects to free all resources
+		 * associated with this iocb completion.
+		 */
+		free_saveq = 1;
+		saveq = list_get_first(&pring->iocb_continueq,
+				       struct lpfc_iocbq, list);
+		irsp = &(saveq->iocb);
+		list_del_init(&pring->iocb_continueq);
+		pring->iocb_continueq_cnt = 0;
+
+		pring->stats.iocb_rsp++;
+
+		/*
+		 * If resource errors reported from HBA, reduce
+		 * queuedepths of the SCSI device.
+		 */
+		if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
+		    (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
+			spin_unlock_irqrestore(&phba->hbalock, iflag);
+			phba->lpfc_rampdown_queue_depth(phba);
+			spin_lock_irqsave(&phba->hbalock, iflag);
+		}
+
+		if (irsp->ulpStatus) {
+			/* Rsp ring <ringno> error: IOCB */
+			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+					"0328 Rsp Ring %d error: "
+					"IOCB Data: "
+					"x%x x%x x%x x%x "
+					"x%x x%x x%x x%x "
+					"x%x x%x x%x x%x "
+					"x%x x%x x%x x%x\n",
+					pring->ringno,
+					irsp->un.ulpWord[0],
+					irsp->un.ulpWord[1],
+					irsp->un.ulpWord[2],
+					irsp->un.ulpWord[3],
+					irsp->un.ulpWord[4],
+					irsp->un.ulpWord[5],
+					*(((uint32_t *) irsp) + 6),
+					*(((uint32_t *) irsp) + 7),
+					*(((uint32_t *) irsp) + 8),
+					*(((uint32_t *) irsp) + 9),
+					*(((uint32_t *) irsp) + 10),
+					*(((uint32_t *) irsp) + 11),
+					*(((uint32_t *) irsp) + 12),
+					*(((uint32_t *) irsp) + 13),
+					*(((uint32_t *) irsp) + 14),
+					*(((uint32_t *) irsp) + 15));
+		}
+
+		/*
+		 * Fetch the IOCB command type and call the correct completion
+		 * routine. Solicited and Unsolicited IOCBs on the ELS ring
+		 * get freed back to the lpfc_iocb_list by the discovery
+		 * kernel thread.
+		 */
+		iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
+		type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
+		switch (type) {
+		case LPFC_SOL_IOCB:
+			spin_unlock_irqrestore(&phba->hbalock, iflag);
+			rc = lpfc_sli_process_sol_iocb(phba, pring, saveq);
+			spin_lock_irqsave(&phba->hbalock, iflag);
+			break;
+
+		case LPFC_UNSOL_IOCB:
+			spin_unlock_irqrestore(&phba->hbalock, iflag);
+			rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq);
+			spin_lock_irqsave(&phba->hbalock, iflag);
+			if (!rc)
+				free_saveq = 0;
+			break;
+
+		case LPFC_ABORT_IOCB:
+			cmdiocbp = NULL;
+			if (irsp->ulpCommand != CMD_XRI_ABORTED_CX)
+				cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring,
+								 saveq);
+			if (cmdiocbp) {
+				/* Call the specified completion routine */
+				if (cmdiocbp->iocb_cmpl) {
+					spin_unlock_irqrestore(&phba->hbalock,
+							       iflag);
+					(cmdiocbp->iocb_cmpl)(phba, cmdiocbp,
+							      saveq);
+					spin_lock_irqsave(&phba->hbalock,
+							  iflag);
+				} else
+					__lpfc_sli_release_iocbq(phba,
+								 cmdiocbp);
+			}
+			break;
+
+		case LPFC_UNKNOWN_IOCB:
+			if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
+				char adaptermsg[LPFC_MAX_ADPTMSG];
+				memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
+				memcpy(&adaptermsg[0], (uint8_t *)irsp,
+				       MAX_MSG_DATA);
+				dev_warn(&((phba->pcidev)->dev),
+					 "lpfc%d: %s\n",
+					 phba->brd_no, adaptermsg);
+			} else {
+				/* Unknown IOCB command */
+				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+						"0335 Unknown IOCB "
+						"command Data: x%x "
+						"x%x x%x x%x\n",
+						irsp->ulpCommand,
+						irsp->ulpStatus,
+						irsp->ulpIoTag,
+						irsp->ulpContext);
+			}
+			break;
+		}
+
+		if (free_saveq) {
+			list_for_each_entry_safe(rspiocbp, next_iocb,
+						 &saveq->list, list) {
+				list_del(&rspiocbp->list);
+				__lpfc_sli_release_iocbq(phba, rspiocbp);
+			}
+			__lpfc_sli_release_iocbq(phba, saveq);
+		}
+		rspiocbp = NULL;
+	}
+	spin_unlock_irqrestore(&phba->hbalock, iflag);
+	return rspiocbp;
+}
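
The return value encodes the chaining contract: a non-NULL return means the
iocb was queued on iocb_continueq (LE bit not yet seen) and the caller keeps
walking the ring; NULL means an LE-terminated chain was fully processed. A
minimal caller-side sketch, where get_next_rsp_iocb() is a hypothetical
stand-in for the ring-walk logic of the real SLI-3 consumer below:

	/* Sketch of the consumer loop around the LE-bit chaining contract. */
	while ((rspiocbp = get_next_rsp_iocb(pring)) != NULL) {
		rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp);
		/* NULL here means a complete chain was just handled. */
	}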
+
+/**
+ * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs
  * @phba: Pointer to HBA context object.
  * @pring: Pointer to driver SLI ring object.
  * @mask: Host attention register mask for this ring.
  *
- * This function is called from the worker thread when there is a ring
- * event for non-fcp rings. The caller does not hold any lock .
- * The function processes each response iocb in the response ring until it
- * finds an iocb with LE bit set and chains all the iocbs upto the iocb with
- * LE bit set. The function will call lpfc_sli_process_sol_iocb function if the
- * response iocb indicates a completion of a command iocb. The function
- * will call lpfc_sli_process_unsol_iocb function if this is an unsolicited
- * iocb. The function frees the resources or calls the completion handler if
- * this iocb is an abort completion. The function returns 0 when the allocated
- * iocbs are not freed, otherwise returns 1.
+ * This routine wraps the actual slow-path ring event handler by invoking the
+ * API jump table function pointer from the lpfc_hba struct.
  **/
-int
+void
 lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
 				struct lpfc_sli_ring *pring, uint32_t mask)
+{
+	phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask);
+}
+
+/**
+ * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @mask: Host attention register mask for this ring.
+ *
+ * This function is called from the worker thread when there is a ring event
+ * for non-fcp rings. The caller does not hold any lock. The function removes
+ * each response iocb from the response ring and calls the handle response
+ * iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
+ **/
+static void
+lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
+				   struct lpfc_sli_ring *pring, uint32_t mask)
 {
 	struct lpfc_pgp *pgp;
 	IOCB_t *entry;
 	IOCB_t *irsp = NULL;
 	struct lpfc_iocbq *rspiocbp = NULL;
-	struct lpfc_iocbq *next_iocb;
-	struct lpfc_iocbq *cmdiocbp;
-	struct lpfc_iocbq *saveq;
-	uint8_t iocb_cmd_type;
-	lpfc_iocb_type type;
-	uint32_t status, free_saveq;
 	uint32_t portRspPut, portRspMax;
-	int rc = 1;
 	unsigned long iflag;
+	uint32_t status;
 
 	pgp = &phba->port_gp[pring->ringno];
 	spin_lock_irqsave(&phba->hbalock, iflag);
@@ -2128,7 +2408,7 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
 		phba->work_hs = HS_FFER3;
 		lpfc_handle_eratt(phba);
 
-		return 1;
+		return;
 	}
 
 	rmb();
@@ -2173,138 +2453,10 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
 
 		writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx);
 
-		list_add_tail(&rspiocbp->list, &(pring->iocb_continueq));
-
-		pring->iocb_continueq_cnt++;
-		if (irsp->ulpLe) {
-			/*
-			 * By default, the driver expects to free all resources
-			 * associated with this iocb completion.
-			 */
-			free_saveq = 1;
-			saveq = list_get_first(&pring->iocb_continueq,
-					       struct lpfc_iocbq, list);
-			irsp = &(saveq->iocb);
-			list_del_init(&pring->iocb_continueq);
-			pring->iocb_continueq_cnt = 0;
-
-			pring->stats.iocb_rsp++;
-
-			/*
-			 * If resource errors reported from HBA, reduce
-			 * queuedepths of the SCSI device.
-			 */
-			if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
-			     (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
-				spin_unlock_irqrestore(&phba->hbalock, iflag);
-				lpfc_rampdown_queue_depth(phba);
-				spin_lock_irqsave(&phba->hbalock, iflag);
-			}
-
-			if (irsp->ulpStatus) {
-				/* Rsp ring <ringno> error: IOCB */
-				lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
-						"0328 Rsp Ring %d error: "
-						"IOCB Data: "
-						"x%x x%x x%x x%x "
-						"x%x x%x x%x x%x "
-						"x%x x%x x%x x%x "
-						"x%x x%x x%x x%x\n",
-						pring->ringno,
-						irsp->un.ulpWord[0],
-						irsp->un.ulpWord[1],
-						irsp->un.ulpWord[2],
-						irsp->un.ulpWord[3],
-						irsp->un.ulpWord[4],
-						irsp->un.ulpWord[5],
-						*(((uint32_t *) irsp) + 6),
-						*(((uint32_t *) irsp) + 7),
-						*(((uint32_t *) irsp) + 8),
-						*(((uint32_t *) irsp) + 9),
-						*(((uint32_t *) irsp) + 10),
-						*(((uint32_t *) irsp) + 11),
-						*(((uint32_t *) irsp) + 12),
-						*(((uint32_t *) irsp) + 13),
-						*(((uint32_t *) irsp) + 14),
-						*(((uint32_t *) irsp) + 15));
-			}
-
-			/*
-			 * Fetch the IOCB command type and call the correct
-			 * completion routine.  Solicited and Unsolicited
-			 * IOCBs on the ELS ring get freed back to the
-			 * lpfc_iocb_list by the discovery kernel thread.
-			 */
-			iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
-			type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
-			if (type == LPFC_SOL_IOCB) {
-				spin_unlock_irqrestore(&phba->hbalock, iflag);
-				rc = lpfc_sli_process_sol_iocb(phba, pring,
-							       saveq);
-				spin_lock_irqsave(&phba->hbalock, iflag);
-			} else if (type == LPFC_UNSOL_IOCB) {
-				spin_unlock_irqrestore(&phba->hbalock, iflag);
-				rc = lpfc_sli_process_unsol_iocb(phba, pring,
-								 saveq);
-				spin_lock_irqsave(&phba->hbalock, iflag);
-				if (!rc)
-					free_saveq = 0;
-			} else if (type == LPFC_ABORT_IOCB) {
-				if ((irsp->ulpCommand != CMD_XRI_ABORTED_CX) &&
-				    ((cmdiocbp =
-				      lpfc_sli_iocbq_lookup(phba, pring,
-							    saveq)))) {
-					/* Call the specified completion
-					   routine */
-					if (cmdiocbp->iocb_cmpl) {
-						spin_unlock_irqrestore(
-						       &phba->hbalock,
-						       iflag);
-						(cmdiocbp->iocb_cmpl) (phba,
-							     cmdiocbp, saveq);
-						spin_lock_irqsave(
-							  &phba->hbalock,
-							  iflag);
-					} else
-						__lpfc_sli_release_iocbq(phba,
-								      cmdiocbp);
-				}
-			} else if (type == LPFC_UNKNOWN_IOCB) {
-				if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
-
-					char adaptermsg[LPFC_MAX_ADPTMSG];
-
-					memset(adaptermsg, 0,
-					       LPFC_MAX_ADPTMSG);
-					memcpy(&adaptermsg[0], (uint8_t *) irsp,
-					       MAX_MSG_DATA);
-					dev_warn(&((phba->pcidev)->dev),
-						 "lpfc%d: %s\n",
-						 phba->brd_no, adaptermsg);
-				} else {
-					/* Unknown IOCB command */
-					lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
-							"0335 Unknown IOCB "
-							"command Data: x%x "
-							"x%x x%x x%x\n",
-							irsp->ulpCommand,
-							irsp->ulpStatus,
-							irsp->ulpIoTag,
-							irsp->ulpContext);
-				}
-			}
-
-			if (free_saveq) {
-				list_for_each_entry_safe(rspiocbp, next_iocb,
-							 &saveq->list, list) {
-					list_del(&rspiocbp->list);
-					__lpfc_sli_release_iocbq(phba,
-								 rspiocbp);
-				}
-				__lpfc_sli_release_iocbq(phba, saveq);
-			}
-			rspiocbp = NULL;
-		}
+		spin_unlock_irqrestore(&phba->hbalock, iflag);
+		/* Handle the response IOCB */
+		rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp);
+		spin_lock_irqsave(&phba->hbalock, iflag);
 
 		/*
 		 * If the port response put pointer has not been updated, sync
@@ -2338,7 +2490,7 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
 	}
 
 	spin_unlock_irqrestore(&phba->hbalock, iflag);
-	return rc;
+	return;
 }
 
 /**
@@ -2420,7 +2572,7 @@ lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
 }
 
 /**
- * lpfc_sli_brdready - Check for host status bits
+ * lpfc_sli_brdready_s3 - Check for sli3 host ready status
  * @phba: Pointer to HBA context object.
  * @mask: Bit mask to be checked.
  *
@@ -2432,8 +2584,8 @@ lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
  * function returns 1 when HBA fail to restart otherwise returns
  * zero.
  **/
-int
-lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
+static int
+lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
 {
 	uint32_t status;
 	int i = 0;
@@ -2647,7 +2799,7 @@ lpfc_sli_brdkill(struct lpfc_hba *phba)
 }
 
 /**
- * lpfc_sli_brdreset - Reset the HBA
+ * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA
  * @phba: Pointer to HBA context object.
  *
  * This function resets the HBA by writing HC_INITFF to the control
@@ -2683,7 +2835,8 @@ lpfc_sli_brdreset(struct lpfc_hba *phba)
 			      (cfg_value &
 			       ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
 
-	psli->sli_flag &= ~(LPFC_SLI2_ACTIVE | LPFC_PROCESS_LA);
+	psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA);
+
 	/* Now toggle INITFF bit in the Host Control Register */
 	writel(HC_INITFF, phba->HCregaddr);
 	mdelay(1);
@@ -3289,32 +3442,20 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
 
 	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
 			"0345 Resetting board due to mailbox timeout\n");
-	/*
-	 * lpfc_offline calls lpfc_sli_hba_down which will clean up
-	 * on oustanding mailbox commands.
-	 */
-	/* If resets are disabled then set error state and return. */
-	if (!phba->cfg_enable_hba_reset) {
-		phba->link_state = LPFC_HBA_ERROR;
-		return;
-	}
-	lpfc_offline_prep(phba);
-	lpfc_offline(phba);
-	lpfc_sli_brdrestart(phba);
-	lpfc_online(phba);
-	lpfc_unblock_mgmt_io(phba);
-	return;
+
+	/* Reset the HBA device */
+	lpfc_reset_hba(phba);
 }
 
 /**
- * lpfc_sli_issue_mbox - Issue a mailbox command to firmware
+ * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware
  * @phba: Pointer to HBA context object.
  * @pmbox: Pointer to mailbox object.
  * @flag: Flag indicating how the mailbox need to be processed.
  *
  * This function is called by discovery code and HBA management code
- * to submit a mailbox command to firmware. This function gets the
- * hbalock to protect the data structures.
+ * to submit a mailbox command to firmware with SLI-3 interface spec. This
+ * function gets the hbalock to protect the data structures.
  * The mailbox command can be submitted in polling mode, in which case
  * this function will wait in a polling loop for the completion of the
  * mailbox.
@@ -3332,8 +3473,9 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
  * return codes the caller owns the mailbox command after the return of
  * the function.
  **/
-int
-lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
+static int
+lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
+		       uint32_t flag)
 {
 	MAILBOX_t *mb;
 	struct lpfc_sli *psli = &phba->sli;
@@ -3349,6 +3491,10 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
 	spin_lock_irqsave(&phba->hbalock, drvr_flag);
 	if (!pmbox) {
 		/* processing mbox queue from intr_handler */
+		if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
+			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
+			return MBX_SUCCESS;
+		}
 		processing_queue = 1;
 		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
 		pmbox = lpfc_mbox_get(phba);
@@ -3365,7 +3511,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
 			lpfc_printf_log(phba, KERN_ERR,
 					LOG_MBOX | LOG_VPORT,
 					"1806 Mbox x%x failed. No vport\n",
-					pmbox->mb.mbxCommand);
+					pmbox->u.mb.mbxCommand);
 			dump_stack();
 			goto out_not_finished;
 		}
@@ -3385,21 +3531,29 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
 
 	psli = &phba->sli;
 
-	mb = &pmbox->mb;
+	mb = &pmbox->u.mb;
 	status = MBX_SUCCESS;
 
 	if (phba->link_state == LPFC_HBA_ERROR) {
 		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
 
 		/* Mbox command <mbxCommand> cannot issue */
-		LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag);
+		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+				"(%d):0311 Mailbox command x%x cannot "
+				"issue Data: x%x x%x\n",
+				pmbox->vport ? pmbox->vport->vpi : 0,
+				pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
 		goto out_not_finished;
 	}
 
 	if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT &&
 	    !(readl(phba->HCregaddr) & HC_MBINT_ENA)) {
 		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
-		LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag);
+		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+				"(%d):2528 Mailbox command x%x cannot "
+				"issue Data: x%x x%x\n",
+				pmbox->vport ? pmbox->vport->vpi : 0,
+				pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
 		goto out_not_finished;
 	}
 
@@ -3413,14 +3567,24 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
 			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
 
 			/* Mbox command <mbxCommand> cannot issue */
-			LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag);
+			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+					"(%d):2529 Mailbox command x%x "
+					"cannot issue Data: x%x x%x\n",
+					pmbox->vport ? pmbox->vport->vpi : 0,
+					pmbox->u.mb.mbxCommand,
+					psli->sli_flag, flag);
 			goto out_not_finished;
 		}
 
-		if (!(psli->sli_flag & LPFC_SLI2_ACTIVE)) {
+		if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) {
 			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
 			/* Mbox command <mbxCommand> cannot issue */
-			LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag);
+			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+					"(%d):2530 Mailbox command x%x "
+					"cannot issue Data: x%x x%x\n",
+					pmbox->vport ? pmbox->vport->vpi : 0,
+					pmbox->u.mb.mbxCommand,
+					psli->sli_flag, flag);
 			goto out_not_finished;
 		}
 
@@ -3462,12 +3626,17 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
 
 	/* If we are not polling, we MUST be in SLI2 mode */
 	if (flag != MBX_POLL) {
-		if (!(psli->sli_flag & LPFC_SLI2_ACTIVE) &&
+		if (!(psli->sli_flag & LPFC_SLI_ACTIVE) &&
 		    (mb->mbxCommand != MBX_KILL_BOARD)) {
 			psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
 			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
 			/* Mbox command <mbxCommand> cannot issue */
-			LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag);
+			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+					"(%d):2531 Mailbox command x%x "
+					"cannot issue Data: x%x x%x\n",
+					pmbox->vport ? pmbox->vport->vpi : 0,
+					pmbox->u.mb.mbxCommand,
+					psli->sli_flag, flag);
 			goto out_not_finished;
 		}
 		/* timeout active mbox command */
@@ -3506,7 +3675,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
 	/* next set own bit for the adapter and copy over command word */
 	mb->mbxOwner = OWN_CHIP;
 
-	if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
+	if (psli->sli_flag & LPFC_SLI_ACTIVE) {
 		/* First copy command data to host SLIM area */
 		lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE);
 	} else {
@@ -3529,7 +3698,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
 
 		if (mb->mbxCommand == MBX_CONFIG_PORT) {
 			/* switch over to host mailbox */
-			psli->sli_flag |= LPFC_SLI2_ACTIVE;
+			psli->sli_flag |= LPFC_SLI_ACTIVE;
 		}
 	}
 
@@ -3552,7 +3721,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
 		writel(CA_MBATT, phba->CAregaddr);
 		readl(phba->CAregaddr); /* flush */
 
-		if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
+		if (psli->sli_flag & LPFC_SLI_ACTIVE) {
 			/* First read mbox status word */
 			word0 = *((uint32_t *)phba->mbox);
 			word0 = le32_to_cpu(word0);
@@ -3591,7 +3760,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
 				spin_lock_irqsave(&phba->hbalock, drvr_flag);
 			}
 
-			if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
+			if (psli->sli_flag & LPFC_SLI_ACTIVE) {
 				/* First copy command data */
 				word0 = *((uint32_t *)phba->mbox);
 				word0 = le32_to_cpu(word0);
@@ -3604,7 +3773,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
 					if (((slimword0 & OWN_CHIP) != OWN_CHIP)
 					    && slimmb->mbxStatus) {
 						psli->sli_flag &=
-						    ~LPFC_SLI2_ACTIVE;
+						    ~LPFC_SLI_ACTIVE;
 						word0 = slimword0;
 					}
 				}
@@ -3616,7 +3785,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
 			ha_copy = readl(phba->HAregaddr);
 		}
 
-		if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
+		if (psli->sli_flag & LPFC_SLI_ACTIVE) {
 			/* copy results back to user */
 			lpfc_sli_pcimem_bcopy(phba->mbox, mb, MAILBOX_CMD_SIZE);
 		} else {
@@ -3701,35 +3870,34 @@ lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 }
 
 /**
- * __lpfc_sli_issue_iocb - Lockless version of lpfc_sli_issue_iocb
+ * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb
  * @phba: Pointer to HBA context object.
- * @pring: Pointer to driver SLI ring object.
+ * @ring_number: SLI ring number to issue iocb on.
  * @piocb: Pointer to command iocb.
  * @flag: Flag indicating if this command can be put into txq.
  *
- * __lpfc_sli_issue_iocb is used by other functions in the driver
- * to issue an iocb command to the HBA. If the PCI slot is recovering
- * from error state or if HBA is resetting or if LPFC_STOP_IOCB_EVENT
- * flag is turned on, the function returns IOCB_ERROR.
- * When the link is down, this function allows only iocbs for
- * posting buffers.
- * This function finds next available slot in the command ring and
- * posts the command to the available slot and writes the port
- * attention register to request HBA start processing new iocb.
- * If there is no slot available in the ring and
- * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the
- * txq, otherwise the function returns IOCB_BUSY.
+ * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue
+ * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is
+ * recovering from error state, if HBA is resetting or if LPFC_STOP_IOCB_EVENT
+ * flag is turned on, the function returns IOCB_ERROR. When the link is down,
+ * this function allows only iocbs for posting buffers. This function finds
+ * next available slot in the command ring and posts the command to the
+ * available slot and writes the port attention register to request HBA start
+ * processing new iocb. If there is no slot available in the ring and
+ * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the txq, otherwise
+ * the function returns IOCB_BUSY.
  *
- * This function is called with hbalock held.
- * The function will return success after it successfully submit the
- * iocb to firmware or after adding to the txq.
+ * This function is called with hbalock held. The function will return success
+ * after it successfully submits the iocb to firmware or after adding it to
+ * the txq.
  **/
 static int
-__lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+__lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
 		    struct lpfc_iocbq *piocb, uint32_t flag)
 {
 	struct lpfc_iocbq *nextiocb;
 	IOCB_t *iocb;
+	struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number];
 
 	if (piocb->iocb_cmpl && (!piocb->vport) &&
 	   (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
@@ -3833,6 +4001,52 @@ __lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 	return IOCB_BUSY;
 }
 
+/**
+ * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb
+ * @phba: Pointer to HBA context object.
+ * @ring_number: SLI ring number to issue iocb on.
+ * @piocb: Pointer to command iocb.
+ * @flag: Flag indicating if this command can be put into txq.
+ *
+ * This routine wraps the actual lockless version of the issue IOCB function
+ * pointer from the lpfc_hba struct.
+ *
+ * Return codes:
+ * 	IOCB_ERROR - Error
+ * 	IOCB_SUCCESS - Success
+ * 	IOCB_BUSY - Busy
+ **/
+static inline int
+__lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
+		struct lpfc_iocbq *piocb, uint32_t flag)
+{
+	return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
+}
+
+/**
+ * lpfc_sli_api_table_setup - Set up sli api function jump table
+ * @phba: The hba struct for which this call is being executed.
+ * @dev_grp: The HBA PCI-Device group number.
+ *
+ * This routine sets up the SLI interface API function jump table in @phba
+ * struct.
+ * Returns: 0 - success, -ENODEV - failure.
+ **/
+int
+lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
+{
+	switch (dev_grp) {
+	case LPFC_PCI_DEV_LP:
+		phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
+		phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
+		break;
+	default:
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"1419 Invalid HBA PCI-device group: 0x%x\n",
+				dev_grp);
+		return -ENODEV;
+		break;
+	}
+	phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq;
+	return 0;
+}
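
The switch deliberately leaves room for the other PCI device group: a
follow-on patch can add a case for LPFC_PCI_DEV_OC that points the same slots
at SLI-4 variants. A hedged sketch of that intended extension (the _s4
routines do not exist in this patch):

	case LPFC_PCI_DEV_OC:	/* hypothetical SLI-4 hookup */
		phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
		phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
		break;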
 
 /**
  * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb
@@ -3848,14 +4062,14 @@ __lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
  * functions which do not hold hbalock.
  **/
 int
-lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
 		    struct lpfc_iocbq *piocb, uint32_t flag)
 {
 	unsigned long iflags;
 	int rc;
 
 	spin_lock_irqsave(&phba->hbalock, iflags);
-	rc = __lpfc_sli_issue_iocb(phba, pring, piocb, flag);
+	rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
 	spin_unlock_irqrestore(&phba->hbalock, iflags);
 
 	return rc;
@@ -5077,53 +5291,104 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
 }
 
 /**
- * lpfc_sli_flush_mbox_queue - mailbox queue cleanup function
+ * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system
  * @phba: Pointer to HBA context.
  *
- * This function is called to cleanup any pending mailbox
- * objects in the driver queue before bringing the HBA offline.
- * This function is called while resetting the HBA.
- * The function is called without any lock held. The function
- * takes hbalock to update SLI data structure.
- * This function returns 1 when there is an active mailbox
- * command pending else returns 0.
+ * This function is called to shut down the driver's mailbox sub-system.
+ * It first marks the mailbox sub-system as blocked to prevent any
+ * asynchronous mailbox command from being issued off the pending mailbox
+ * command queue. If the mailbox command sub-system shutdown is due to
+ * HBA error conditions such as EEH or ERATT, this routine shall invoke
+ * the mailbox sub-system flush routine to forcefully bring down the
+ * mailbox sub-system. Otherwise, if it is due to a normal condition (such
+ * as with offline or HBA function reset), this routine will wait for the
+ * outstanding mailbox command to complete before invoking the mailbox
+ * sub-system flush routine to gracefully bring down mailbox sub-system.
  **/
-int
-lpfc_sli_flush_mbox_queue(struct lpfc_hba * phba)
+void
+lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba)
 {
-	struct lpfc_vport *vport = phba->pport;
-	int i = 0;
-	uint32_t ha_copy;
+	struct lpfc_sli *psli = &phba->sli;
+	uint8_t actcmd = MBX_HEARTBEAT;
+	unsigned long timeout;
 
-	while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE && !vport->stopped) {
-		if (i++ > LPFC_MBOX_TMO * 1000)
-			return 1;
+	spin_lock_irq(&phba->hbalock);
+	psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
+	spin_unlock_irq(&phba->hbalock);
 
-		/*
-		 * Call lpfc_sli_handle_mb_event only if a mailbox cmd
-		 * did finish. This way we won't get the misleading
-		 * "Stray Mailbox Interrupt" message.
-		 */
+	if (psli->sli_flag & LPFC_SLI_ACTIVE) {
 		spin_lock_irq(&phba->hbalock);
-		ha_copy = phba->work_ha;
-		phba->work_ha &= ~HA_MBATT;
+		if (phba->sli.mbox_active)
+			actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
 		spin_unlock_irq(&phba->hbalock);
+		/* Determine how long we might wait for the active mailbox
+		 * command to be gracefully completed by firmware.
+		 */
+		timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, actcmd) *
+					   1000) + jiffies;
+		while (phba->sli.mbox_active) {
+			/* Check active mailbox complete status every 2ms */
+			msleep(2);
+			if (time_after(jiffies, timeout))
+				/* Timeout, let the mailbox flush routine to
+				 * forcefully release active mailbox command
+				 */
+				break;
+		}
+	}
+	lpfc_sli_mbox_sys_flush(phba);
+}
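
The ordering is block first, then drain, then flush: new asynchronous mailbox
commands are fenced off before the routine waits, bounded by the
command-specific timeout, for the active command. A hypothetical sketch of
where this sits on a reset path (the actual call sites are not in this hunk):

	/* Hypothetical reset-path ordering; not the literal call site. */
	lpfc_sli_mbox_sys_shutdown(phba);	/* block + drain + flush mailbox */
	lpfc_sli_brdreset(phba);		/* then reset the board */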
 
-		if (ha_copy & HA_MBATT)
-			if (lpfc_sli_handle_mb_event(phba) == 0)
-				i = 0;
+/**
+ * lpfc_sli_eratt_read - read sli-3 error attention events
+ * @phba: Pointer to HBA context.
+ *
+ * This function is called to read the SLI3 device error attention registers
+ * for possible error attention events. The caller must hold the hostlock
+ * with spin_lock_irq().
+ *
+ * This function returns 1 when there is Error Attention in the Host Attention
+ * Register and returns 0 otherwise.
+ **/
+static int
+lpfc_sli_eratt_read(struct lpfc_hba *phba)
+{
+	uint32_t ha_copy;
 
-		msleep(1);
-	}
+	/* Read chip Host Attention (HA) register */
+	ha_copy = readl(phba->HAregaddr);
+	if (ha_copy & HA_ERATT) {
+		/* Read host status register to retrieve error event */
+		lpfc_sli_read_hs(phba);
 
-	return (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) ? 1 : 0;
+		/* Check if a deferred error condition is active */
+		if ((HS_FFER1 & phba->work_hs) &&
+		    ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
+		     HS_FFER6 | HS_FFER7) & phba->work_hs)) {
+			phba->hba_flag |= DEFER_ERATT;
+			/* Clear all interrupt enable conditions */
+			writel(0, phba->HCregaddr);
+			readl(phba->HCregaddr);
+		}
+
+		/* Set the driver HA work bitmap */
+		phba->work_ha |= HA_ERATT;
+		/* Indicate polling handles this ERATT */
+		phba->hba_flag |= HBA_ERATT_HANDLED;
+		return 1;
+	}
+	return 0;
 }
 
 /**
  * lpfc_sli_check_eratt - check error attention events
  * @phba: Pointer to HBA context.
  *
- * This function is called form timer soft interrupt context to check HBA's
+ * This function is called from timer soft interrupt context to check HBA's
  * error attention register bit for error attention events.
  *
  * This fucntion returns 1 when there is Error Attention in the Host Attention
@@ -5134,10 +5399,6 @@ lpfc_sli_check_eratt(struct lpfc_hba *phba)
 {
 	uint32_t ha_copy;
 
-	/* If PCI channel is offline, don't process it */
-	if (unlikely(pci_channel_offline(phba->pcidev)))
-		return 0;
-
 	/* If somebody is waiting to handle an eratt, don't process it
 	 * here. The brdkill function will do this.
 	 */
@@ -5161,56 +5422,80 @@ lpfc_sli_check_eratt(struct lpfc_hba *phba)
 		return 0;
 	}
 
-	/* Read chip Host Attention (HA) register */
-	ha_copy = readl(phba->HAregaddr);
-	if (ha_copy & HA_ERATT) {
-		/* Read host status register to retrieve error event */
-		lpfc_sli_read_hs(phba);
-
-		/* Check if there is a deferred error condition is active */
-		if ((HS_FFER1 & phba->work_hs) &&
-			((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
-			HS_FFER6 | HS_FFER7) & phba->work_hs)) {
-			phba->hba_flag |= DEFER_ERATT;
-			/* Clear all interrupt enable conditions */
-			writel(0, phba->HCregaddr);
-			readl(phba->HCregaddr);
-		}
-
-		/* Set the driver HA work bitmap */
-		phba->work_ha |= HA_ERATT;
-		/* Indicate polling handles this ERATT */
-		phba->hba_flag |= HBA_ERATT_HANDLED;
+	/* If PCI channel is offline, don't process it */
+	if (unlikely(pci_channel_offline(phba->pcidev))) {
 		spin_unlock_irq(&phba->hbalock);
-		return 1;
+		return 0;
+	}
+
+	switch (phba->sli_rev) {
+	case LPFC_SLI_REV2:
+	case LPFC_SLI_REV3:
+		/* Read chip Host Attention (HA) register */
+		ha_copy = lpfc_sli_eratt_read(phba);
+		break;
+	default:
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0299 Invalid SLI revision (%d)\n",
+				phba->sli_rev);
+		ha_copy = 0;
+		break;
 	}
 	spin_unlock_irq(&phba->hbalock);
+
+	return ha_copy;
+}
+
+/**
+ * lpfc_intr_state_check - Check device state for interrupt handling
+ * @phba: Pointer to HBA context.
+ *
+ * This inline routine checks whether a device or its PCI slot is in a state
+ * in which the interrupt should be handled.
+ *
+ * This function returns 0 if the device or the PCI slot is in a state in
+ * which the interrupt should be handled; otherwise it returns -EIO.
+ **/
+static inline int
+lpfc_intr_state_check(struct lpfc_hba *phba)
+{
+	/* If the pci channel is offline, ignore all the interrupts */
+	if (unlikely(pci_channel_offline(phba->pcidev)))
+		return -EIO;
+
+	/* Update device level interrupt statistics */
+	phba->sli.slistat.sli_intr++;
+
+	/* Ignore all interrupts during initialization. */
+	if (unlikely(phba->link_state < LPFC_LINK_DOWN))
+		return -EIO;
+
 	return 0;
 }
 
 /**
- * lpfc_sp_intr_handler - The slow-path interrupt handler of lpfc driver
+ * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device
  * @irq: Interrupt number.
  * @dev_id: The device context pointer.
  *
  * This function is directly called from the PCI layer as an interrupt
- * service routine when the device is enabled with MSI-X multi-message
- * interrupt mode and there are slow-path events in the HBA. However,
- * when the device is enabled with either MSI or Pin-IRQ interrupt mode,
- * this function is called as part of the device-level interrupt handler.
- * When the PCI slot is in error recovery or the HBA is undergoing
- * initialization, the interrupt handler will not process the interrupt.
- * The link attention and ELS ring attention events are handled by the
- * worker thread. The interrupt handler signals the worker thread and
- * and returns for these events. This function is called without any
- * lock held. It gets the hbalock to access and update SLI data
+ * service routine when a device with the SLI-3 interface spec is enabled with
+ * MSI-X multi-message interrupt mode and there are slow-path events in
+ * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
+ * interrupt mode, this function is called as part of the device-level
+ * interrupt handler. When the PCI slot is in error recovery or the HBA
+ * is undergoing initialization, the interrupt handler will not process
+ * the interrupt. The link attention and ELS ring attention events are
+ * handled by the worker thread. The interrupt handler signals the worker
+ * thread and returns for these events. This function is called without
+ * any lock held. It gets the hbalock to access and update SLI data
  * structures.
  *
  * This function returns IRQ_HANDLED when interrupt is handled else it
  * returns IRQ_NONE.
  **/
 irqreturn_t
-lpfc_sp_intr_handler(int irq, void *dev_id)
+lpfc_sli_sp_intr_handler(int irq, void *dev_id)
 {
 	struct lpfc_hba  *phba;
 	uint32_t ha_copy;
@@ -5240,13 +5525,8 @@ lpfc_sp_intr_handler(int irq, void *dev_id)
 	 * individual interrupt handler in MSI-X multi-message interrupt mode
 	 */
 	if (phba->intr_type == MSIX) {
-		/* If the pci channel is offline, ignore all the interrupts */
-		if (unlikely(pci_channel_offline(phba->pcidev)))
-			return IRQ_NONE;
-		/* Update device-level interrupt statistics */
-		phba->sli.slistat.sli_intr++;
-		/* Ignore all interrupts during initialization. */
-		if (unlikely(phba->link_state < LPFC_LINK_DOWN))
+		/* Check device state for handling interrupt */
+		if (lpfc_intr_state_check(phba))
 			return IRQ_NONE;
 		/* Need to read HA REG for slow-path events */
 		spin_lock_irqsave(&phba->hbalock, iflag);
@@ -5271,7 +5551,7 @@ lpfc_sp_intr_handler(int irq, void *dev_id)
 		 * interrupt.
 		 */
 		if (unlikely(phba->hba_flag & DEFER_ERATT)) {
-			spin_unlock_irq(&phba->hbalock);
+			spin_unlock_irqrestore(&phba->hbalock, iflag);
 			return IRQ_NONE;
 		}
 
@@ -5434,7 +5714,8 @@ lpfc_sp_intr_handler(int irq, void *dev_id)
 							LOG_MBOX | LOG_SLI,
 							"0350 rc should have"
 							"been MBX_BUSY");
-						goto send_current_mbox;
+						if (rc != MBX_NOT_FINISHED)
+							goto send_current_mbox;
 					}
 				}
 				spin_lock_irqsave(
@@ -5471,29 +5752,29 @@ send_current_mbox:
 	}
 	return IRQ_HANDLED;
 
-} /* lpfc_sp_intr_handler */
+} /* lpfc_sli_sp_intr_handler */
 
 /**
- * lpfc_fp_intr_handler - The fast-path interrupt handler of lpfc driver
+ * lpfc_sli_fp_intr_handler - Fast-path interrupt handler for SLI-3 devices.
  * @irq: Interrupt number.
  * @dev_id: The device context pointer.
  *
  * This function is directly called from the PCI layer as an interrupt
- * service routine when the device is enabled with MSI-X multi-message
- * interrupt mode and there is a fast-path FCP IOCB ring event in the
- * HBA. However, when the device is enabled with either MSI or Pin-IRQ
- * interrupt mode, this function is called as part of the device-level
- * interrupt handler. When the PCI slot is in error recovery or the HBA
- * is undergoing initialization, the interrupt handler will not process
- * the interrupt. The SCSI FCP fast-path ring event are handled in the
- * intrrupt context. This function is called without any lock held. It
- * gets the hbalock to access and update SLI data structures.
+ * service routine when a device with the SLI-3 interface spec is enabled with
+ * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
+ * ring event in the HBA. However, when the device is enabled with either
+ * MSI or Pin-IRQ interrupt mode, this function is called as part of the
+ * device-level interrupt handler. When the PCI slot is in error recovery
+ * or the HBA is undergoing initialization, the interrupt handler will not
+ * process the interrupt. The SCSI FCP fast-path ring events are handled in
+ * the interrupt context. This function is called without any lock held.
+ * It gets the hbalock to access and update SLI data structures.
  *
  * This function returns IRQ_HANDLED when interrupt is handled else it
  * returns IRQ_NONE.
  **/
 irqreturn_t
-lpfc_fp_intr_handler(int irq, void *dev_id)
+lpfc_sli_fp_intr_handler(int irq, void *dev_id)
 {
 	struct lpfc_hba  *phba;
 	uint32_t ha_copy;
@@ -5513,13 +5794,8 @@ lpfc_fp_intr_handler(int irq, void *dev_id)
 	 * individual interrupt handler in MSI-X multi-message interrupt mode
 	 */
 	if (phba->intr_type == MSIX) {
-		/* If pci channel is offline, ignore all the interrupts */
-		if (unlikely(pci_channel_offline(phba->pcidev)))
-			return IRQ_NONE;
-		/* Update device-level interrupt statistics */
-		phba->sli.slistat.sli_intr++;
-		/* Ignore all interrupts during initialization. */
-		if (unlikely(phba->link_state < LPFC_LINK_DOWN))
+		/* Check device state for handling interrupt */
+		if (lpfc_intr_state_check(phba))
 			return IRQ_NONE;
 		/* Need to read HA REG for FCP ring and other ring events */
 		ha_copy = readl(phba->HAregaddr);
@@ -5530,7 +5806,7 @@ lpfc_fp_intr_handler(int irq, void *dev_id)
 		 * any interrupt.
 		 */
 		if (unlikely(phba->hba_flag & DEFER_ERATT)) {
-			spin_unlock_irq(&phba->hbalock);
+			spin_unlock_irqrestore(&phba->hbalock, iflag);
 			return IRQ_NONE;
 		}
 		writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)),
@@ -5566,26 +5842,27 @@ lpfc_fp_intr_handler(int irq, void *dev_id)
 		}
 	}
 	return IRQ_HANDLED;
-}  /* lpfc_fp_intr_handler */
+}  /* lpfc_sli_fp_intr_handler */
 
 /**
- * lpfc_intr_handler - The device-level interrupt handler of lpfc driver
+ * lpfc_sli_intr_handler - Device-level interrupt handler for SLI-3 devices
  * @irq: Interrupt number.
  * @dev_id: The device context pointer.
  *
- * This function is the device-level interrupt handler called from the PCI
- * layer when either MSI or Pin-IRQ interrupt mode is enabled and there is
- * an event in the HBA which requires driver attention. This function
- * invokes the slow-path interrupt attention handling function and fast-path
- * interrupt attention handling function in turn to process the relevant
- * HBA attention events. This function is called without any lock held. It
- * gets the hbalock to access and update SLI data structures.
+ * This function is the HBA device-level interrupt handler for devices with
+ * the SLI-3 interface spec, called from the PCI layer when either MSI or
+ * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which
+ * requires driver attention. This function invokes the slow-path interrupt
+ * attention handling function and fast-path interrupt attention handling
+ * function in turn to process the relevant HBA attention events. This
+ * function is called without any lock held. It gets the hbalock to access
+ * and update SLI data structures.
  *
  * This function returns IRQ_HANDLED when interrupt is handled, else it
  * returns IRQ_NONE.
  **/
 irqreturn_t
-lpfc_intr_handler(int irq, void *dev_id)
+lpfc_sli_intr_handler(int irq, void *dev_id)
 {
 	struct lpfc_hba  *phba;
 	irqreturn_t sp_irq_rc, fp_irq_rc;
@@ -5600,15 +5877,8 @@ lpfc_intr_handler(int irq, void *dev_id)
 	if (unlikely(!phba))
 		return IRQ_NONE;
 
-	/* If the pci channel is offline, ignore all the interrupts. */
-	if (unlikely(pci_channel_offline(phba->pcidev)))
-		return IRQ_NONE;
-
-	/* Update device level interrupt statistics */
-	phba->sli.slistat.sli_intr++;
-
-	/* Ignore all interrupts during initialization. */
-	if (unlikely(phba->link_state < LPFC_LINK_DOWN))
+	/* Check device state for handling interrupt */
+	if (lpfc_intr_state_check(phba))
 		return IRQ_NONE;
 
 	spin_lock(&phba->hbalock);
@@ -5650,7 +5920,7 @@ lpfc_intr_handler(int irq, void *dev_id)
 	status2 >>= (4*LPFC_ELS_RING);
 
 	if (status1 || (status2 & HA_RXMASK))
-		sp_irq_rc = lpfc_sp_intr_handler(irq, dev_id);
+		sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id);
 	else
 		sp_irq_rc = IRQ_NONE;
 
@@ -5670,10 +5940,10 @@ lpfc_intr_handler(int irq, void *dev_id)
 		status2 = 0;
 
 	if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK))
-		fp_irq_rc = lpfc_fp_intr_handler(irq, dev_id);
+		fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id);
 	else
 		fp_irq_rc = IRQ_NONE;
 
 	/* Return device-level interrupt handling status */
 	return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc;
-}  /* lpfc_intr_handler */
+}  /* lpfc_sli_intr_handler */
-- 
cgit v1.2.3-70-g09d2
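
As an editorial aside between these two patches: the following minimal sketch (not part of either patch; the msix_entries bookkeeping is assumed for illustration) shows how the renamed SLI-3 handlers plug into the kernel IRQ layer. With MSI-X, the slow-path and fast-path handlers each own a vector; with MSI or Pin-IRQ, the single device-level handler demultiplexes both.

	static int lpfc_sli3_request_irqs_sketch(struct lpfc_hba *phba)
	{
		int rc;

		if (phba->intr_type == MSIX) {
			/* vector 0: slow-path (mailbox, ELS, link events) */
			rc = request_irq(phba->msix_entries[0].vector,
					 lpfc_sli_sp_intr_handler, 0,
					 "lpfc-sp", phba);
			if (rc)
				return rc;
			/* vector 1: fast-path (FCP ring events) */
			rc = request_irq(phba->msix_entries[1].vector,
					 lpfc_sli_fp_intr_handler, 0,
					 "lpfc-fp", phba);
			if (rc)
				free_irq(phba->msix_entries[0].vector, phba);
			return rc;
		}
		/* MSI or Pin-IRQ: one handler dispatches to sp/fp itself */
		return request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
				   IRQF_SHARED, "lpfc", phba);
	}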


From da0436e915a5c17ee79e72c1bf978a4ebb1cbf4d Mon Sep 17 00:00:00 2001
From: James Smart <James.Smart@Emulex.Com>
Date: Fri, 22 May 2009 14:51:39 -0400
Subject: [SCSI] lpfc 8.3.2 : Addition of SLI4 Interface - Base Support

Adds new hardware and interface definitions.

Adds new interface routines - utilizing the reorganized layout of the
driver. Adds SLI-4 specific functions for attachment, initialization,
teardown, etc.

Signed-off-by: James Smart <james.smart@emulex.com>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
---
 drivers/scsi/lpfc/lpfc.h           |   49 +-
 drivers/scsi/lpfc/lpfc_attr.c      |   83 +-
 drivers/scsi/lpfc/lpfc_crtn.h      |   59 +-
 drivers/scsi/lpfc/lpfc_ct.c        |    2 +
 drivers/scsi/lpfc/lpfc_debugfs.c   |    2 +
 drivers/scsi/lpfc/lpfc_els.c       |    4 +-
 drivers/scsi/lpfc/lpfc_hbadisc.c   |   34 +-
 drivers/scsi/lpfc/lpfc_hw.h        |  140 +-
 drivers/scsi/lpfc/lpfc_hw4.h       | 2141 ++++++++++++++++
 drivers/scsi/lpfc/lpfc_init.c      | 4978 ++++++++++++++++++++++++++++++------
 drivers/scsi/lpfc/lpfc_mbox.c      |    2 +
 drivers/scsi/lpfc/lpfc_mem.c       |  204 +-
 drivers/scsi/lpfc/lpfc_nportdisc.c |    2 +
 drivers/scsi/lpfc/lpfc_scsi.c      |  492 ++++
 drivers/scsi/lpfc/lpfc_scsi.h      |    2 +
 drivers/scsi/lpfc/lpfc_sli.c       | 1455 ++++++++++-
 drivers/scsi/lpfc/lpfc_sli.h       |   27 +-
 drivers/scsi/lpfc/lpfc_sli4.h      |  467 ++++
 drivers/scsi/lpfc/lpfc_vport.c     |   23 +
 19 files changed, 9170 insertions(+), 996 deletions(-)
 create mode 100644 drivers/scsi/lpfc/lpfc_hw4.h
 create mode 100644 drivers/scsi/lpfc/lpfc_sli4.h

(limited to 'drivers/scsi/lpfc')

diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 6c24c9aabe7..13ac108a244 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -105,9 +105,11 @@ struct lpfc_dma_pool {
 };
 
 struct hbq_dmabuf {
+	struct lpfc_dmabuf hbuf;
 	struct lpfc_dmabuf dbuf;
 	uint32_t size;
 	uint32_t tag;
+	struct lpfc_rcqe rcqe;
 };
 
 /* Priority bit.  Set value to exceed low water mark in lpfc_mem. */
@@ -141,7 +143,10 @@ typedef struct lpfc_vpd {
 	} rev;
 	struct {
 #ifdef __BIG_ENDIAN_BITFIELD
-		uint32_t rsvd2  :24;  /* Reserved                             */
+		uint32_t rsvd3  :19;  /* Reserved                             */
+		uint32_t cdss	: 1;  /* Configure Data Security SLI          */
+		uint32_t rsvd2	: 3;  /* Reserved                             */
+		uint32_t cbg	: 1;  /* Configure BlockGuard                 */
 		uint32_t cmv	: 1;  /* Configure Max VPIs                   */
 		uint32_t ccrp   : 1;  /* Config Command Ring Polling          */
 		uint32_t csah   : 1;  /* Configure Synchronous Abort Handling */
@@ -159,7 +164,10 @@ typedef struct lpfc_vpd {
 		uint32_t csah   : 1;  /* Configure Synchronous Abort Handling */
 		uint32_t ccrp   : 1;  /* Config Command Ring Polling          */
 		uint32_t cmv	: 1;  /* Configure Max VPIs                   */
-		uint32_t rsvd2  :24;  /* Reserved                             */
+		uint32_t cbg	: 1;  /* Configure BlockGuard                 */
+		uint32_t rsvd2	: 3;  /* Reserved                             */
+		uint32_t cdss	: 1;  /* Configure Data Security SLI          */
+		uint32_t rsvd3  :19;  /* Reserved                             */
 #endif
 	} sli3Feat;
 } lpfc_vpd_t;
@@ -280,6 +288,9 @@ struct lpfc_vport {
 	enum discovery_state port_state;
 
 	uint16_t vpi;
+	uint16_t vfi;
+	uint8_t vfi_state;
+#define LPFC_VFI_REGISTERED	0x1
 
 	uint32_t fc_flag;	/* FC flags */
 /* Several of these flags are HBA centric and should be moved to
@@ -392,6 +403,9 @@ struct lpfc_vport {
 #endif
 	uint8_t stat_data_enabled;
 	uint8_t stat_data_blocked;
+	struct list_head rcv_buffer_list;
+	uint32_t vport_flag;
+#define STATIC_VPORT	1
 };
 
 struct hbq_s {
@@ -494,6 +508,7 @@ struct lpfc_hba {
 #define LPFC_SLI3_CRP_ENABLED		0x08
 #define LPFC_SLI3_INB_ENABLED		0x10
 #define LPFC_SLI3_BG_ENABLED		0x20
+#define LPFC_SLI3_DSS_ENABLED		0x40
 	uint32_t iocb_cmd_size;
 	uint32_t iocb_rsp_size;
 
@@ -507,8 +522,13 @@ struct lpfc_hba {
 
 	uint32_t hba_flag;	/* hba generic flags */
 #define HBA_ERATT_HANDLED	0x1 /* This flag is set when eratt handled */
-
-#define DEFER_ERATT		0x4 /* Deferred error attention in progress */
+#define DEFER_ERATT		0x2 /* Deferred error attention in progress */
+#define HBA_FCOE_SUPPORT	0x4 /* HBA function supports FCOE */
+#define HBA_RECEIVE_BUFFER	0x8 /* Rcv buffer posted to worker thread */
+#define HBA_POST_RECEIVE_BUFFER 0x10 /* Rcv buffers need to be posted */
+#define FCP_XRI_ABORT_EVENT	0x20
+#define ELS_XRI_ABORT_EVENT	0x40
+#define ASYNC_EVENT		0x80
 	struct lpfc_dmabuf slim2p;
 
 	MAILBOX_t *mbox;
@@ -567,6 +587,9 @@ struct lpfc_hba {
 	uint32_t cfg_poll;
 	uint32_t cfg_poll_tmo;
 	uint32_t cfg_use_msi;
+	uint32_t cfg_fcp_imax;
+	uint32_t cfg_fcp_wq_count;
+	uint32_t cfg_fcp_eq_count;
 	uint32_t cfg_sg_seg_cnt;
 	uint32_t cfg_prot_sg_seg_cnt;
 	uint32_t cfg_sg_dma_buf_size;
@@ -576,6 +599,8 @@ struct lpfc_hba {
 	uint32_t cfg_enable_hba_reset;
 	uint32_t cfg_enable_hba_heartbeat;
 	uint32_t cfg_enable_bg;
+	uint32_t cfg_enable_fip;
+	uint32_t cfg_log_verbose;
 
 	lpfc_vpd_t vpd;		/* vital product data */
 
@@ -659,7 +684,8 @@ struct lpfc_hba {
 	/* pci_mem_pools */
 	struct pci_pool *lpfc_scsi_dma_buf_pool;
 	struct pci_pool *lpfc_mbuf_pool;
-	struct pci_pool *lpfc_hbq_pool;
+	struct pci_pool *lpfc_hrb_pool;	/* header receive buffer pool */
+	struct pci_pool *lpfc_drb_pool; /* data receive buffer pool */
 	struct lpfc_dma_pool lpfc_mbuf_safety_pool;
 
 	mempool_t *mbox_mem_pool;
@@ -675,6 +701,14 @@ struct lpfc_hba {
 	struct lpfc_vport *pport;	/* physical lpfc_vport pointer */
 	uint16_t max_vpi;		/* Maximum virtual nports */
 #define LPFC_MAX_VPI 0xFFFF		/* Max number of VPI supported */
+	uint16_t max_vports;            /*
+					 * For IOV HBAs max_vpi can change
+					 * after a reset. max_vports is max
+					 * number of vports present. This can
+					 * be greater than max_vpi.
+					 */
+	uint16_t vpi_base;
+	uint16_t vfi_base;
 	unsigned long *vpi_bmask;	/* vpi allocation table */
 
 	/* Data structure used by fabric iocb scheduler */
@@ -733,6 +767,11 @@ struct lpfc_hba {
 /* Maximum number of events that can be outstanding at any time*/
 #define LPFC_MAX_EVT_COUNT 512
 	atomic_t fast_event_count;
+	struct lpfc_fcf fcf;
+	uint8_t fc_map[3];
+	uint8_t valid_vlan;
+	uint16_t vlan_id;
+	struct list_head fcf_conn_rec_list;
 };
 
 static inline struct Scsi_Host *
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index c14f0cbdb12..82016fc672b 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -30,8 +30,10 @@
 #include <scsi/scsi_tcq.h>
 #include <scsi/scsi_transport_fc.h>
 
+#include "lpfc_hw4.h"
 #include "lpfc_hw.h"
 #include "lpfc_sli.h"
+#include "lpfc_sli4.h"
 #include "lpfc_nl.h"
 #include "lpfc_disc.h"
 #include "lpfc_scsi.h"
@@ -828,18 +830,37 @@ lpfc_get_hba_info(struct lpfc_hba *phba,
 		return 0;
 	}
 
-	if (mrpi)
-		*mrpi = pmb->un.varRdConfig.max_rpi;
-	if (arpi)
-		*arpi = pmb->un.varRdConfig.avail_rpi;
-	if (mxri)
-		*mxri = pmb->un.varRdConfig.max_xri;
-	if (axri)
-		*axri = pmb->un.varRdConfig.avail_xri;
-	if (mvpi)
-		*mvpi = pmb->un.varRdConfig.max_vpi;
-	if (avpi)
-		*avpi = pmb->un.varRdConfig.avail_vpi;
+	if (phba->sli_rev == LPFC_SLI_REV4) {
+		rd_config = &pmboxq->u.mqe.un.rd_config;
+		if (mrpi)
+			*mrpi = bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
+		if (arpi)
+			*arpi = bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config) -
+					phba->sli4_hba.max_cfg_param.rpi_used;
+		if (mxri)
+			*mxri = bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
+		if (axri)
+			*axri = bf_get(lpfc_mbx_rd_conf_xri_count, rd_config) -
+					phba->sli4_hba.max_cfg_param.xri_used;
+		if (mvpi)
+			*mvpi = bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
+		if (avpi)
+			*avpi = bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config) -
+					phba->sli4_hba.max_cfg_param.vpi_used;
+	} else {
+		if (mrpi)
+			*mrpi = pmb->un.varRdConfig.max_rpi;
+		if (arpi)
+			*arpi = pmb->un.varRdConfig.avail_rpi;
+		if (mxri)
+			*mxri = pmb->un.varRdConfig.max_xri;
+		if (axri)
+			*axri = pmb->un.varRdConfig.avail_xri;
+		if (mvpi)
+			*mvpi = pmb->un.varRdConfig.max_vpi;
+		if (avpi)
+			*avpi = pmb->un.varRdConfig.avail_vpi;
+	}
 
 	mempool_free(pmboxq, phba->mbox_mem_pool);
 	return 1;
@@ -2844,14 +2865,38 @@ LPFC_ATTR_RW(poll_tmo, 10, 1, 255,
 /*
 # lpfc_use_msi: Use MSI (Message Signaled Interrupts) in systems that
 #		support this feature
-#       0  = MSI disabled
+#       0  = MSI disabled (default)
 #       1  = MSI enabled
-#       2  = MSI-X enabled (default)
-# Value range is [0,2]. Default value is 2.
+#       2  = MSI-X enabled
+# Value range is [0,2]. Default value is 0.
 */
-LPFC_ATTR_R(use_msi, 2, 0, 2, "Use Message Signaled Interrupts (1) or "
+LPFC_ATTR_R(use_msi, 0, 0, 2, "Use Message Signaled Interrupts (1) or "
 	    "MSI-X (2), if possible");
 
+/*
+# lpfc_fcp_imax: Set the maximum number of fast-path FCP interrupts per second
+#
+# Value range is [636,651042]. Default value is 10000.
+*/
+LPFC_ATTR_R(fcp_imax, LPFC_FP_DEF_IMAX, LPFC_MIM_IMAX, LPFC_DMULT_CONST,
+	    "Set the maximum number of fast-path FCP interrupts per second");
+
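/*
 * Editorial note, not part of the patch: the [636,651042] bounds appear
 * to fall out of a 10-bit EQ delay multiplier -- a multiplier of 0 gives
 * the LPFC_DMULT_CONST (651042) ceiling, while the largest multiplier
 * value (1023) gives 651042 / 1024 ~= 636 as the floor.  This is an
 * inference from the constants in lpfc_hw4.h, not a statement from the
 * patch itself.
 */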
+/*
+# lpfc_fcp_wq_count: Set the number of fast-path FCP work queues
+#
+# Value range is [1,31]. Default value is 4.
+*/
+LPFC_ATTR_R(fcp_wq_count, LPFC_FP_WQN_DEF, LPFC_FP_WQN_MIN, LPFC_FP_WQN_MAX,
+	    "Set the number of fast-path FCP work queues, if possible");
+
+/*
+# lpfc_fcp_eq_count: Set the number of fast-path FCP event queues
+#
+# Value range is [1,7]. Default value is 1.
+*/
+LPFC_ATTR_R(fcp_eq_count, LPFC_FP_EQN_DEF, LPFC_FP_EQN_MIN, LPFC_FP_EQN_MAX,
+	    "Set the number of fast-path FCP event queues, if possible");
+
 /*
 # lpfc_enable_hba_reset: Allow or prevent HBA resets to the hardware.
 #       0  = HBA resets disabled
@@ -2969,6 +3014,9 @@ struct device_attribute *lpfc_hba_attrs[] = {
 	&dev_attr_lpfc_poll,
 	&dev_attr_lpfc_poll_tmo,
 	&dev_attr_lpfc_use_msi,
+	&dev_attr_lpfc_fcp_imax,
+	&dev_attr_lpfc_fcp_wq_count,
+	&dev_attr_lpfc_fcp_eq_count,
 	&dev_attr_lpfc_enable_bg,
 	&dev_attr_lpfc_soft_wwnn,
 	&dev_attr_lpfc_soft_wwpn,
@@ -4105,6 +4153,9 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
 	lpfc_poll_tmo_init(phba, lpfc_poll_tmo);
 	lpfc_enable_npiv_init(phba, lpfc_enable_npiv);
 	lpfc_use_msi_init(phba, lpfc_use_msi);
+	lpfc_fcp_imax_init(phba, lpfc_fcp_imax);
+	lpfc_fcp_wq_count_init(phba, lpfc_fcp_wq_count);
+	lpfc_fcp_eq_count_init(phba, lpfc_fcp_eq_count);
 	lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset);
 	lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat);
 	lpfc_enable_bg_init(phba, lpfc_enable_bg);
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index f88ce3f2619..3802e455734 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -35,17 +35,19 @@ int lpfc_config_msi(struct lpfc_hba *, LPFC_MBOXQ_t *);
 int lpfc_read_sparam(struct lpfc_hba *, LPFC_MBOXQ_t *, int);
 void lpfc_read_config(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_read_lnk_stat(struct lpfc_hba *, LPFC_MBOXQ_t *);
-int lpfc_reg_login(struct lpfc_hba *, uint16_t, uint32_t, uint8_t *,
-		   LPFC_MBOXQ_t *, uint32_t);
+int lpfc_reg_rpi(struct lpfc_hba *, uint16_t, uint32_t, uint8_t *,
+		 LPFC_MBOXQ_t *, uint32_t);
 void lpfc_unreg_login(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *);
 void lpfc_unreg_did(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *);
-void lpfc_reg_vpi(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *);
+void lpfc_reg_vpi(struct lpfc_vport *, LPFC_MBOXQ_t *);
 void lpfc_unreg_vpi(struct lpfc_hba *, uint16_t, LPFC_MBOXQ_t *);
 void lpfc_init_link(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t);
+void lpfc_request_features(struct lpfc_hba *, struct lpfcMboxq *);
 
 struct lpfc_vport *lpfc_find_vport_by_did(struct lpfc_hba *, uint32_t);
 void lpfc_cleanup_rpis(struct lpfc_vport *, int);
 int lpfc_linkdown(struct lpfc_hba *);
+void lpfc_linkdown_port(struct lpfc_vport *);
 void lpfc_port_link_failure(struct lpfc_vport *);
 void lpfc_mbx_cmpl_read_la(struct lpfc_hba *, LPFC_MBOXQ_t *);
 
@@ -54,6 +56,7 @@ void lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_enqueue_node(struct lpfc_vport *, struct lpfc_nodelist *);
 void lpfc_dequeue_node(struct lpfc_vport *, struct lpfc_nodelist *);
 struct lpfc_nodelist *lpfc_enable_node(struct lpfc_vport *,
@@ -149,15 +152,19 @@ int lpfc_online(struct lpfc_hba *);
 void lpfc_unblock_mgmt_io(struct lpfc_hba *);
 void lpfc_offline_prep(struct lpfc_hba *);
 void lpfc_offline(struct lpfc_hba *);
+void lpfc_reset_hba(struct lpfc_hba *);
 
 int lpfc_sli_setup(struct lpfc_hba *);
 int lpfc_sli_queue_setup(struct lpfc_hba *);
 
 void lpfc_handle_eratt(struct lpfc_hba *);
 void lpfc_handle_latt(struct lpfc_hba *);
-irqreturn_t lpfc_intr_handler(int, void *);
-irqreturn_t lpfc_sp_intr_handler(int, void *);
-irqreturn_t lpfc_fp_intr_handler(int, void *);
+irqreturn_t lpfc_sli_intr_handler(int, void *);
+irqreturn_t lpfc_sli_sp_intr_handler(int, void *);
+irqreturn_t lpfc_sli_fp_intr_handler(int, void *);
+irqreturn_t lpfc_sli4_intr_handler(int, void *);
+irqreturn_t lpfc_sli4_sp_intr_handler(int, void *);
+irqreturn_t lpfc_sli4_fp_intr_handler(int, void *);
 
 void lpfc_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_config_ring(struct lpfc_hba *, int, LPFC_MBOXQ_t *);
@@ -165,16 +172,32 @@ void lpfc_config_port(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_kill_board(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_mbox_put(struct lpfc_hba *, LPFC_MBOXQ_t *);
 LPFC_MBOXQ_t *lpfc_mbox_get(struct lpfc_hba *);
+void __lpfc_mbox_cmpl_put(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_mbox_cmpl_put(struct lpfc_hba *, LPFC_MBOXQ_t *);
+int lpfc_mbox_cmd_check(struct lpfc_hba *, LPFC_MBOXQ_t *);
+int lpfc_mbox_dev_check(struct lpfc_hba *);
 int lpfc_mbox_tmo_val(struct lpfc_hba *, int);
+void lpfc_init_vfi(struct lpfcMboxq *, struct lpfc_vport *);
+void lpfc_reg_vfi(struct lpfcMboxq *, struct lpfc_vport *, dma_addr_t);
+void lpfc_init_vpi(struct lpfcMboxq *, uint16_t);
+void lpfc_unreg_vfi(struct lpfcMboxq *, uint16_t);
+void lpfc_reg_fcfi(struct lpfc_hba *, struct lpfcMboxq *);
+void lpfc_unreg_fcfi(struct lpfcMboxq *, uint16_t);
+void lpfc_resume_rpi(struct lpfcMboxq *, struct lpfc_nodelist *);
 
 void lpfc_config_hbq(struct lpfc_hba *, uint32_t, struct lpfc_hbq_init *,
 	uint32_t , LPFC_MBOXQ_t *);
 struct hbq_dmabuf *lpfc_els_hbq_alloc(struct lpfc_hba *);
 void lpfc_els_hbq_free(struct lpfc_hba *, struct hbq_dmabuf *);
+struct hbq_dmabuf *lpfc_sli4_rb_alloc(struct lpfc_hba *);
+void lpfc_sli4_rb_free(struct lpfc_hba *, struct hbq_dmabuf *);
+void lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *, struct fcf_record *,
+			uint16_t);
+void lpfc_unregister_unused_fcf(struct lpfc_hba *);
 
-int lpfc_mem_alloc(struct lpfc_hba *);
+int lpfc_mem_alloc(struct lpfc_hba *, int align);
 void lpfc_mem_free(struct lpfc_hba *);
+void lpfc_mem_free_all(struct lpfc_hba *);
 void lpfc_stop_vport_timers(struct lpfc_vport *);
 
 void lpfc_poll_timeout(unsigned long ptr);
@@ -198,12 +221,13 @@ int lpfc_sli_host_down(struct lpfc_vport *);
 int lpfc_sli_hba_down(struct lpfc_hba *);
 int lpfc_sli_issue_mbox(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
 int lpfc_sli_handle_mb_event(struct lpfc_hba *);
-int lpfc_sli_flush_mbox_queue(struct lpfc_hba *);
+void lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *);
 int lpfc_sli_check_eratt(struct lpfc_hba *);
-int lpfc_sli_handle_slow_ring_event(struct lpfc_hba *,
+void lpfc_sli_handle_slow_ring_event(struct lpfc_hba *,
 				    struct lpfc_sli_ring *, uint32_t);
+int lpfc_sli4_handle_received_buffer(struct lpfc_hba *);
 void lpfc_sli_def_mbox_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
-int lpfc_sli_issue_iocb(struct lpfc_hba *, struct lpfc_sli_ring *,
+int lpfc_sli_issue_iocb(struct lpfc_hba *, uint32_t,
 			struct lpfc_iocbq *, uint32_t);
 void lpfc_sli_pcimem_bcopy(void *, void *, uint32_t);
 void lpfc_sli_abort_iocb_ring(struct lpfc_hba *, struct lpfc_sli_ring *);
@@ -237,7 +261,7 @@ struct lpfc_nodelist *lpfc_findnode_wwpn(struct lpfc_vport *,
 
 int lpfc_sli_issue_mbox_wait(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
 
-int lpfc_sli_issue_iocb_wait(struct lpfc_hba *, struct lpfc_sli_ring *,
+int lpfc_sli_issue_iocb_wait(struct lpfc_hba *, uint32_t,
 			     struct lpfc_iocbq *, struct lpfc_iocbq *,
 			     uint32_t);
 void lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *, struct lpfc_iocbq *,
@@ -254,6 +278,12 @@ void lpfc_in_buf_free(struct lpfc_hba *, struct lpfc_dmabuf *);
 const char* lpfc_info(struct Scsi_Host *);
 int lpfc_scan_finished(struct Scsi_Host *, unsigned long);
 
+int lpfc_init_api_table_setup(struct lpfc_hba *, uint8_t);
+int lpfc_sli_api_table_setup(struct lpfc_hba *, uint8_t);
+int lpfc_scsi_api_table_setup(struct lpfc_hba *, uint8_t);
+int lpfc_mbox_api_table_setup(struct lpfc_hba *, uint8_t);
+int lpfc_api_table_setup(struct lpfc_hba *, uint8_t);
+
 void lpfc_get_cfgparam(struct lpfc_hba *);
 void lpfc_get_vport_cfgparam(struct lpfc_vport *);
 int lpfc_alloc_sysfs_attr(struct lpfc_vport *);
@@ -314,8 +344,15 @@ lpfc_send_els_failure_event(struct lpfc_hba *, struct lpfc_iocbq *,
 				struct lpfc_iocbq *);
 struct lpfc_fast_path_event *lpfc_alloc_fast_evt(struct lpfc_hba *);
 void lpfc_free_fast_evt(struct lpfc_hba *, struct lpfc_fast_path_event *);
+void lpfc_create_static_vport(struct lpfc_hba *);
+void lpfc_stop_hba_timers(struct lpfc_hba *);
+void lpfc_stop_port(struct lpfc_hba *);
+void lpfc_parse_fcoe_conf(struct lpfc_hba *, uint8_t *, uint32_t);
+int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int);
+void lpfc_start_fdiscs(struct lpfc_hba *phba);
 
 #define ScsiResult(host_code, scsi_code) (((host_code) << 16) | scsi_code)
 #define HBA_EVENT_RSCN                   5
 #define HBA_EVENT_LINK_UP                2
 #define HBA_EVENT_LINK_DOWN              3
+
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 4164b935ea9..51990787796 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -32,8 +32,10 @@
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_transport_fc.h>
 
+#include "lpfc_hw4.h"
 #include "lpfc_hw.h"
 #include "lpfc_sli.h"
+#include "lpfc_sli4.h"
 #include "lpfc_nl.h"
 #include "lpfc_disc.h"
 #include "lpfc_scsi.h"
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 5dd66925f4c..42ef258c7d5 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -33,8 +33,10 @@
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_transport_fc.h>
 
+#include "lpfc_hw4.h"
 #include "lpfc_hw.h"
 #include "lpfc_sli.h"
+#include "lpfc_sli4.h"
 #include "lpfc_nl.h"
 #include "lpfc_disc.h"
 #include "lpfc_scsi.h"
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 8c5c3aea4a1..9fe36bf6fd1 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -28,8 +28,10 @@
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_transport_fc.h>
 
+#include "lpfc_hw4.h"
 #include "lpfc_hw.h"
 #include "lpfc_sli.h"
+#include "lpfc_sli4.h"
 #include "lpfc_nl.h"
 #include "lpfc_disc.h"
 #include "lpfc_scsi.h"
@@ -220,7 +222,7 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
 		icmd->un.elsreq64.myID = vport->fc_myDID;
 
 		/* For ELS_REQUEST64_CR, use the VPI by default */
-		icmd->ulpContext = vport->vpi;
+		icmd->ulpContext = vport->vpi + phba->vpi_base;
 		icmd->ulpCt_h = 0;
 		/* The CT field must be 0=INVALID_RPI for the ECHO cmd */
 		if (elscmd == ELS_CMD_ECHO)
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 25fc96c9081..0fc66005d54 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -29,10 +29,12 @@
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_transport_fc.h>
 
+#include "lpfc_hw4.h"
 #include "lpfc_hw.h"
 #include "lpfc_nl.h"
 #include "lpfc_disc.h"
 #include "lpfc_sli.h"
+#include "lpfc_sli4.h"
 #include "lpfc_scsi.h"
 #include "lpfc.h"
 #include "lpfc_logmsg.h"
@@ -491,6 +493,10 @@ lpfc_work_done(struct lpfc_hba *phba)
 	phba->work_ha = 0;
 	spin_unlock_irq(&phba->hbalock);
 
+	/* First, try to post the next mailbox command to SLI4 device */
+	if (phba->pci_dev_grp == LPFC_PCI_DEV_OC)
+		lpfc_sli4_post_async_mbox(phba);
+
 	if (ha_copy & HA_ERATT)
 		/* Handle the error attention event */
 		lpfc_handle_eratt(phba);
@@ -501,9 +507,27 @@ lpfc_work_done(struct lpfc_hba *phba)
 	if (ha_copy & HA_LATT)
 		lpfc_handle_latt(phba);
 
+	/* Process SLI4 events */
+	if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) {
+		if (phba->hba_flag & FCP_XRI_ABORT_EVENT)
+			lpfc_sli4_fcp_xri_abort_event_proc(phba);
+		if (phba->hba_flag & ELS_XRI_ABORT_EVENT)
+			lpfc_sli4_els_xri_abort_event_proc(phba);
+		if (phba->hba_flag & ASYNC_EVENT)
+			lpfc_sli4_async_event_proc(phba);
+		if (phba->hba_flag & HBA_POST_RECEIVE_BUFFER) {
+			spin_lock_irq(&phba->hbalock);
+			phba->hba_flag &= ~HBA_POST_RECEIVE_BUFFER;
+			spin_unlock_irq(&phba->hbalock);
+			lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
+		}
+		if (phba->hba_flag & HBA_RECEIVE_BUFFER)
+			lpfc_sli4_handle_received_buffer(phba);
+	}
+
 	vports = lpfc_create_vport_work_array(phba);
 	if (vports != NULL)
-		for(i = 0; i <= phba->max_vpi; i++) {
+		for (i = 0; i <= phba->max_vports; i++) {
 			/*
 			 * We could have no vports in array if unloading, so if
 			 * this happens then just use the pport
@@ -2556,7 +2580,8 @@ lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport)
 	 * clear_la then don't send it.
 	 */
 	if ((phba->link_state >= LPFC_CLEAR_LA) ||
-	    (vport->port_type != LPFC_PHYSICAL_PORT))
+	    (vport->port_type != LPFC_PHYSICAL_PORT) ||
+		(phba->sli_rev == LPFC_SLI_REV4))
 		return;
 
 			/* Link up discovery */
@@ -2585,7 +2610,7 @@ lpfc_issue_reg_vpi(struct lpfc_hba *phba, struct lpfc_vport *vport)
 
 	regvpimbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 	if (regvpimbox) {
-		lpfc_reg_vpi(phba, vport->vpi, vport->fc_myDID, regvpimbox);
+		lpfc_reg_vpi(vport, regvpimbox);
 		regvpimbox->mbox_cmpl = lpfc_mbx_cmpl_reg_vpi;
 		regvpimbox->vport = vport;
 		if (lpfc_sli_issue_mbox(phba, regvpimbox, MBX_NOWAIT)
@@ -2645,7 +2670,8 @@ lpfc_disc_start(struct lpfc_vport *vport)
 	 */
 	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
 	    !(vport->fc_flag & FC_PT2PT) &&
-	    !(vport->fc_flag & FC_RSCN_MODE)) {
+	    !(vport->fc_flag & FC_RSCN_MODE) &&
+	    (phba->sli_rev < LPFC_SLI_REV4)) {
 		lpfc_issue_reg_vpi(phba, vport);
 		return;
 	}
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 4168c7b498b..a9d64cfbe5c 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -470,6 +470,35 @@ struct serv_parm {	/* Structure is in Big Endian format */
 	uint8_t vendorVersion[16];
 };
 
+/*
+ * Virtual Fabric Tagging Header
+ */
+struct fc_vft_header {
+	 uint32_t word0;
+#define fc_vft_hdr_r_ctl_SHIFT		24
+#define fc_vft_hdr_r_ctl_MASK		0xFF
+#define fc_vft_hdr_r_ctl_WORD		word0
+#define fc_vft_hdr_ver_SHIFT		22
+#define fc_vft_hdr_ver_MASK		0x3
+#define fc_vft_hdr_ver_WORD		word0
+#define fc_vft_hdr_type_SHIFT		18
+#define fc_vft_hdr_type_MASK		0xF
+#define fc_vft_hdr_type_WORD		word0
+#define fc_vft_hdr_e_SHIFT		16
+#define fc_vft_hdr_e_MASK		0x1
+#define fc_vft_hdr_e_WORD		word0
+#define fc_vft_hdr_priority_SHIFT	13
+#define fc_vft_hdr_priority_MASK	0x7
+#define fc_vft_hdr_priority_WORD	word0
+#define fc_vft_hdr_vf_id_SHIFT		1
+#define fc_vft_hdr_vf_id_MASK		0xFFF
+#define fc_vft_hdr_vf_id_WORD		word0
+	uint32_t word1;
+#define fc_vft_hdr_hopct_SHIFT		24
+#define fc_vft_hdr_hopct_MASK		0xFF
+#define fc_vft_hdr_hopct_WORD		word1
+};
+
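/*
 * Editorial sketch, not part of the patch: decoding the VF_ID from a
 * received VFT header word using the bf_get() accessor that this series
 * adds in lpfc_hw4.h.  The byte swap is an assumption for illustration;
 * FC headers arrive in big-endian wire order.
 */
static inline uint16_t vft_vf_id_sketch(uint32_t raw_word0)
{
	struct fc_vft_header vft;

	vft.word0 = be32_to_cpu(raw_word0);
	return bf_get(fc_vft_hdr_vf_id, &vft);	/* bits 12:1 of word0 */
}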
 /*
  *  Extended Link Service LS_COMMAND codes (Payload Word 0)
  */
@@ -1152,6 +1181,9 @@ typedef struct {
 #define PCI_DEVICE_ID_HORNET        0xfe05
 #define PCI_DEVICE_ID_ZEPHYR_SCSP   0xfe11
 #define PCI_DEVICE_ID_ZEPHYR_DCSP   0xfe12
+#define PCI_VENDOR_ID_SERVERENGINE  0x19a2
+#define PCI_DEVICE_ID_TIGERSHARK    0x0704
+#define PCI_DEVICE_ID_TIGERSHARK_S  0x0705
 
 #define JEDEC_ID_ADDRESS            0x0080001c
 #define FIREFLY_JEDEC_ID            0x1ACC
@@ -1342,15 +1374,21 @@ typedef struct {		/* FireFly BIU registers */
 #define MBX_READ_LA64       0x95
 #define MBX_REG_VPI	    0x96
 #define MBX_UNREG_VPI	    0x97
-#define MBX_REG_VNPID	    0x96
-#define MBX_UNREG_VNPID	    0x97
 
 #define MBX_WRITE_WWN       0x98
 #define MBX_SET_DEBUG       0x99
 #define MBX_LOAD_EXP_ROM    0x9C
-
-#define MBX_MAX_CMDS        0x9D
+#define MBX_SLI4_CONFIG	    0x9B
+#define MBX_SLI4_REQ_FTRS   0x9D
+#define MBX_MAX_CMDS        0x9E
+#define MBX_RESUME_RPI      0x9E
 #define MBX_SLI2_CMD_MASK   0x80
+#define MBX_REG_VFI         0x9F
+#define MBX_REG_FCFI        0xA0
+#define MBX_UNREG_VFI       0xA1
+#define MBX_UNREG_FCFI	    0xA2
+#define MBX_INIT_VFI        0xA3
+#define MBX_INIT_VPI        0xA4
 
 /* IOCB Commands */
 
@@ -1440,6 +1478,16 @@ typedef struct {		/* FireFly BIU registers */
 #define CMD_IOCB_LOGENTRY_CN		0x94
 #define CMD_IOCB_LOGENTRY_ASYNC_CN	0x96
 
+/* Unhandled Data Security SLI Commands */
+#define DSSCMD_IWRITE64_CR 		0xD8
+#define DSSCMD_IWRITE64_CX		0xD9
+#define DSSCMD_IREAD64_CR		0xDA
+#define DSSCMD_IREAD64_CX		0xDB
+#define DSSCMD_INVALIDATE_DEK		0xDC
+#define DSSCMD_SET_KEK			0xDD
+#define DSSCMD_GET_KEK_ID		0xDE
+#define DSSCMD_GEN_XFER			0xDF
+
 #define CMD_MAX_IOCB_CMD        0xE6
 #define CMD_IOCB_MASK           0xff
 
@@ -1466,6 +1514,7 @@ typedef struct {		/* FireFly BIU registers */
 #define MBXERR_BAD_RCV_LENGTH       14
 #define MBXERR_DMA_ERROR            15
 #define MBXERR_ERROR                16
+#define MBXERR_LINK_DOWN            0x33
 #define MBX_NOT_FINISHED           255
 
 #define MBX_BUSY                   0xffffff /* Attempted cmd to busy Mailbox */
@@ -1504,32 +1553,6 @@ struct ulp_bde {
 #endif
 };
 
-struct ulp_bde64 {	/* SLI-2 */
-	union ULP_BDE_TUS {
-		uint32_t w;
-		struct {
-#ifdef __BIG_ENDIAN_BITFIELD
-			uint32_t bdeFlags:8;	/* BDE Flags 0 IS A SUPPORTED
-						   VALUE !! */
-			uint32_t bdeSize:24;	/* Size of buffer (in bytes) */
-#else	/*  __LITTLE_ENDIAN_BITFIELD */
-			uint32_t bdeSize:24;	/* Size of buffer (in bytes) */
-			uint32_t bdeFlags:8;	/* BDE Flags 0 IS A SUPPORTED
-						   VALUE !! */
-#endif
-#define BUFF_TYPE_BDE_64    0x00	/* BDE (Host_resident) */
-#define BUFF_TYPE_BDE_IMMED 0x01	/* Immediate Data BDE */
-#define BUFF_TYPE_BDE_64P   0x02	/* BDE (Port-resident) */
-#define BUFF_TYPE_BDE_64I   0x08	/* Input BDE (Host-resident) */
-#define BUFF_TYPE_BDE_64IP  0x0A	/* Input BDE (Port-resident) */
-#define BUFF_TYPE_BLP_64    0x40	/* BLP (Host-resident) */
-#define BUFF_TYPE_BLP_64P   0x42	/* BLP (Port-resident) */
-		} f;
-	} tus;
-	uint32_t addrLow;
-	uint32_t addrHigh;
-};
-
 typedef struct ULP_BDL {	/* SLI-2 */
 #ifdef __BIG_ENDIAN_BITFIELD
 	uint32_t bdeFlags:8;	/* BDL Flags */
@@ -2287,7 +2310,7 @@ typedef struct {
 	uint32_t rsvd3;
 	uint32_t rsvd4;
 	uint32_t rsvd5;
-	uint16_t rsvd6;
+	uint16_t vfi;
 	uint16_t vpi;
 #else	/*  __LITTLE_ENDIAN */
 	uint32_t rsvd1;
@@ -2297,7 +2320,7 @@ typedef struct {
 	uint32_t rsvd4;
 	uint32_t rsvd5;
 	uint16_t vpi;
-	uint16_t rsvd6;
+	uint16_t vfi;
 #endif
 } REG_VPI_VAR;
 
@@ -2457,7 +2480,7 @@ typedef struct {
 	uint32_t entry_index:16;
 #endif
 
-	uint32_t rsvd1;
+	uint32_t sli4_length;
 	uint32_t word_cnt;
 	uint32_t resp_offset;
 } DUMP_VAR;
@@ -2470,9 +2493,32 @@ typedef struct {
 #define  DMP_RSP_OFFSET          0x14   /* word 5 contains first word of rsp */
 #define  DMP_RSP_SIZE            0x6C   /* maximum of 27 words of rsp data */
 
+#define  DMP_REGION_VPORT	 0x16   /* VPort info region */
+#define  DMP_VPORT_REGION_SIZE	 0x200
+#define  DMP_MBOX_OFFSET_WORD	 0x5
+
+#define  DMP_REGION_FCOEPARAM	 0x17   /* fcoe param region */
+#define  DMP_FCOEPARAM_RGN_SIZE	 0x400
+
 #define  WAKE_UP_PARMS_REGION_ID    4
 #define  WAKE_UP_PARMS_WORD_SIZE   15
 
+struct vport_rec {
+	uint8_t wwpn[8];
+	uint8_t wwnn[8];
+};
+
+#define VPORT_INFO_SIG 0x32324752
+#define VPORT_INFO_REV_MASK 0xff
+#define VPORT_INFO_REV 0x1
+#define MAX_STATIC_VPORT_COUNT 16
+struct static_vport_info {
+	uint32_t 		signature;
+	uint32_t		rev;
+	struct vport_rec 	vport_list[MAX_STATIC_VPORT_COUNT];
+	uint32_t		resvd[66];
+};
+
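/*
 * Editorial sketch, not part of the patch: sanity-checking a dumped
 * static vport region before walking vport_list.  The caller is assumed
 * to have already copied the DMP_REGION_VPORT dump into "info".
 */
static inline bool static_vport_valid_sketch(const struct static_vport_info *info)
{
	return info->signature == VPORT_INFO_SIG &&
	       (info->rev & VPORT_INFO_REV_MASK) == VPORT_INFO_REV;
}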
 /* Option rom version structure */
 struct prog_id {
 #ifdef __BIG_ENDIAN_BITFIELD
@@ -2697,7 +2743,9 @@ typedef struct {
 #endif
 
 #ifdef __BIG_ENDIAN_BITFIELD
-	uint32_t rsvd1     : 23;  /* Reserved                             */
+	uint32_t rsvd1     : 19;  /* Reserved                             */
+	uint32_t cdss      :  1;  /* Configure Data Security SLI          */
+	uint32_t rsvd2     :  3;  /* Reserved                             */
 	uint32_t cbg       :  1;  /* Configure BlockGuard                 */
 	uint32_t cmv       :  1;  /* Configure Max VPIs                   */
 	uint32_t ccrp      :  1;  /* Config Command Ring Polling          */
@@ -2717,10 +2765,14 @@ typedef struct {
 	uint32_t ccrp      :  1;  /* Config Command Ring Polling          */
 	uint32_t cmv	   :  1;  /* Configure Max VPIs                   */
 	uint32_t cbg       :  1;  /* Configure BlockGuard                 */
-	uint32_t rsvd1     : 23;  /* Reserved                             */
+	uint32_t rsvd2     :  3;  /* Reserved                             */
+	uint32_t cdss      :  1;  /* Configure Data Security SLI          */
+	uint32_t rsvd1     : 19;  /* Reserved                             */
 #endif
 #ifdef __BIG_ENDIAN_BITFIELD
-	uint32_t rsvd2     : 23;  /* Reserved                             */
+	uint32_t rsvd3     : 19;  /* Reserved                             */
+	uint32_t gdss      :  1;  /* Grant Data Security SLI              */
+	uint32_t rsvd4     :  3;  /* Reserved                             */
 	uint32_t gbg       :  1;  /* Grant BlockGuard                     */
 	uint32_t gmv	   :  1;  /* Grant Max VPIs                       */
 	uint32_t gcrp	   :  1;  /* Grant Command Ring Polling           */
@@ -2740,7 +2792,9 @@ typedef struct {
 	uint32_t gcrp	   :  1;  /* Grant Command Ring Polling           */
 	uint32_t gmv	   :  1;  /* Grant Max VPIs                       */
 	uint32_t gbg       :  1;  /* Grant BlockGuard                     */
-	uint32_t rsvd2     : 23;  /* Reserved                             */
+	uint32_t rsvd4     :  3;  /* Reserved                             */
+	uint32_t gdss      :  1;  /* Grant Data Security SLI              */
+	uint32_t rsvd3     : 19;  /* Reserved                             */
 #endif
 
 #ifdef __BIG_ENDIAN_BITFIELD
@@ -2753,20 +2807,20 @@ typedef struct {
 
 #ifdef __BIG_ENDIAN_BITFIELD
 	uint32_t max_hbq   : 16;  /* Max HBQs Host expect to configure    */
-	uint32_t rsvd3     : 16;  /* Max HBQs Host expect to configure    */
+	uint32_t rsvd5     : 16;  /* Max HBQs Host expect to configure    */
 #else	/*  __LITTLE_ENDIAN */
-	uint32_t rsvd3     : 16;  /* Max HBQs Host expect to configure    */
+	uint32_t rsvd5     : 16;  /* Max HBQs Host expect to configure    */
 	uint32_t max_hbq   : 16;  /* Max HBQs Host expect to configure    */
 #endif
 
-	uint32_t rsvd4;           /* Reserved                             */
+	uint32_t rsvd6;           /* Reserved                             */
 
 #ifdef __BIG_ENDIAN_BITFIELD
-	uint32_t rsvd5      : 16;  /* Reserved                             */
+	uint32_t rsvd7      : 16;  /* Reserved                             */
 	uint32_t max_vpi    : 16;  /* Max number of virt N-Ports           */
 #else	/*  __LITTLE_ENDIAN */
 	uint32_t max_vpi    : 16;  /* Max number of virt N-Ports           */
-	uint32_t rsvd5      : 16;  /* Reserved                             */
+	uint32_t rsvd7      : 16;  /* Reserved                             */
 #endif
 
 } CONFIG_PORT_VAR;
@@ -3666,3 +3720,5 @@ lpfc_error_lost_link(IOCB_t *iocbp)
 #define MENLO_TIMEOUT 30
 #define SETVAR_MLOMNT 0x103107
 #define SETVAR_MLORST 0x103007
+
+#define BPL_ALIGN_SZ 8 /* 8 byte alignment for bpl and mbufs */
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
new file mode 100644
index 00000000000..39c34b3ad29
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -0,0 +1,2141 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for         *
+ * Fibre Channel Host Bus Adapters.                                *
+ * Copyright (C) 2009 Emulex.  All rights reserved.                *
+ * EMULEX and SLI are trademarks of Emulex.                        *
+ * www.emulex.com                                                  *
+ *                                                                 *
+ * This program is free software; you can redistribute it and/or   *
+ * modify it under the terms of version 2 of the GNU General       *
+ * Public License as published by the Free Software Foundation.    *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
+ * more details, a copy of which can be found in the file COPYING  *
+ * included with this package.                                     *
+ *******************************************************************/
+
+/* Macros to deal with bit fields. Each bit field must have 3 #defines
+ * associated with it (_SHIFT, _MASK, and _WORD).
+ * E.g., for a bit field that is in the 7th bit of the "field4" field of a
+ * structure and is 2 bits in size, the following #defines must exist:
+ *	struct temp {
+ *		uint32_t	field1;
+ *		uint32_t	field2;
+ *		uint32_t	field3;
+ *		uint32_t	field4;
+ *	#define example_bit_field_SHIFT		7
+ *	#define example_bit_field_MASK		0x03
+ *	#define example_bit_field_WORD		field4
+ *		uint32_t	field5;
+ *	};
+ * Then the macros below may be used to get or set the value of that field.
+ * E.g., to get the value of the bit field from the above example:
+ *	struct temp t1;
+ *	value = bf_get(example_bit_field, &t1);
+ * And then to set that bit field:
+ *	bf_set(example_bit_field, &t1, 2);
+ * Or clear that bit field:
+ *	bf_set(example_bit_field, &t1, 0);
+ */
+#define bf_get(name, ptr) \
+	(((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)
+#define bf_set(name, ptr, value) \
+	((ptr)->name##_WORD = ((((value) & name##_MASK) << name##_SHIFT) | \
+		 ((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT))))
+
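/*
 * Editorial sketch, not part of the patch: a self-contained instance of
 * the accessor pattern documented above.  "demo_ie" is a made-up 2-bit
 * field occupying bits 8:7 of word0.
 */
struct bf_demo {
	uint32_t word0;
#define demo_ie_SHIFT	7
#define demo_ie_MASK	0x00000003
#define demo_ie_WORD	word0
};

static inline int bf_demo_roundtrip(void)
{
	struct bf_demo d = { .word0 = 0 };

	bf_set(demo_ie, &d, 2);		/* word0 becomes 0x00000100 */
	return bf_get(demo_ie, &d);	/* returns 2 */
}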
+struct dma_address {
+	uint32_t addr_lo;
+	uint32_t addr_hi;
+};
+
+#define LPFC_SLI4_BAR0		1
+#define LPFC_SLI4_BAR1		2
+#define LPFC_SLI4_BAR2		4
+
+#define LPFC_SLI4_MBX_EMBED	true
+#define LPFC_SLI4_MBX_NEMBED	false
+
+#define LPFC_SLI4_MB_WORD_COUNT		64
+#define LPFC_MAX_MQ_PAGE		8
+#define LPFC_MAX_WQ_PAGE		8
+#define LPFC_MAX_CQ_PAGE		4
+#define LPFC_MAX_EQ_PAGE		8
+
+#define LPFC_VIR_FUNC_MAX       32 /* Maximum number of virtual functions */
+#define LPFC_PCI_FUNC_MAX        5 /* Maximum number of PCI functions */
+#define LPFC_VFR_PAGE_SIZE	0x1000 /* 4KB BAR2 per-VF register page size */
+
+/* Define SLI4 Alignment requirements. */
+#define LPFC_ALIGN_16_BYTE	16
+#define LPFC_ALIGN_64_BYTE	64
+
+/* Define SLI4 specific definitions. */
+#define LPFC_MQ_CQE_BYTE_OFFSET	256
+#define LPFC_MBX_CMD_HDR_LENGTH 16
+#define LPFC_MBX_ERROR_RANGE	0x4000
+#define LPFC_BMBX_BIT1_ADDR_HI	0x2
+#define LPFC_BMBX_BIT1_ADDR_LO	0
+#define LPFC_RPI_HDR_COUNT	64
+#define LPFC_HDR_TEMPLATE_SIZE	4096
+#define LPFC_RPI_ALLOC_ERROR 	0xFFFF
+#define LPFC_FCF_RECORD_WD_CNT	132
+#define LPFC_ENTIRE_FCF_DATABASE 0
+#define LPFC_DFLT_FCF_INDEX	 0
+
+/* Virtual function numbers */
+#define LPFC_VF0		0
+#define LPFC_VF1		1
+#define LPFC_VF2		2
+#define LPFC_VF3		3
+#define LPFC_VF4		4
+#define LPFC_VF5		5
+#define LPFC_VF6		6
+#define LPFC_VF7		7
+#define LPFC_VF8		8
+#define LPFC_VF9		9
+#define LPFC_VF10		10
+#define LPFC_VF11		11
+#define LPFC_VF12		12
+#define LPFC_VF13		13
+#define LPFC_VF14		14
+#define LPFC_VF15		15
+#define LPFC_VF16		16
+#define LPFC_VF17		17
+#define LPFC_VF18		18
+#define LPFC_VF19		19
+#define LPFC_VF20		20
+#define LPFC_VF21		21
+#define LPFC_VF22		22
+#define LPFC_VF23		23
+#define LPFC_VF24		24
+#define LPFC_VF25		25
+#define LPFC_VF26		26
+#define LPFC_VF27		27
+#define LPFC_VF28		28
+#define LPFC_VF29		29
+#define LPFC_VF30		30
+#define LPFC_VF31		31
+
+/* PCI function numbers */
+#define LPFC_PCI_FUNC0		0
+#define LPFC_PCI_FUNC1		1
+#define LPFC_PCI_FUNC2		2
+#define LPFC_PCI_FUNC3		3
+#define LPFC_PCI_FUNC4		4
+
+/* Active interrupt test count */
+#define LPFC_ACT_INTR_CNT	4
+
+/* Delay Multiplier constant */
+#define LPFC_DMULT_CONST       651042
+#define LPFC_MIM_IMAX          636
+#define LPFC_FP_DEF_IMAX       10000
+#define LPFC_SP_DEF_IMAX       10000
+
+struct ulp_bde64 {
+	union ULP_BDE_TUS {
+		uint32_t w;
+		struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+			uint32_t bdeFlags:8;	/* BDE Flags 0 IS A SUPPORTED
+						   VALUE !! */
+			uint32_t bdeSize:24;	/* Size of buffer (in bytes) */
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+			uint32_t bdeSize:24;	/* Size of buffer (in bytes) */
+			uint32_t bdeFlags:8;	/* BDE Flags 0 IS A SUPPORTED
+						   VALUE !! */
+#endif
+#define BUFF_TYPE_BDE_64    0x00	/* BDE (Host_resident) */
+#define BUFF_TYPE_BDE_IMMED 0x01	/* Immediate Data BDE */
+#define BUFF_TYPE_BDE_64P   0x02	/* BDE (Port-resident) */
+#define BUFF_TYPE_BDE_64I   0x08	/* Input BDE (Host-resident) */
+#define BUFF_TYPE_BDE_64IP  0x0A	/* Input BDE (Port-resident) */
+#define BUFF_TYPE_BLP_64    0x40	/* BLP (Host-resident) */
+#define BUFF_TYPE_BLP_64P   0x42	/* BLP (Port-resident) */
+		} f;
+	} tus;
+	uint32_t addrLow;
+	uint32_t addrHigh;
+};
+
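/*
 * Editorial sketch, not part of the patch: filling a host-resident
 * 64-bit BDE for a mapped buffer.  Splitting the dma_addr_t by hand is
 * for illustration only; the driver has putPaddr helpers for this.
 */
static inline void fill_bde64_sketch(struct ulp_bde64 *bde,
				     dma_addr_t pa, uint32_t len)
{
	bde->addrLow = (uint32_t)(pa & 0xffffffff);
	bde->addrHigh = (uint32_t)((uint64_t)pa >> 32);
	bde->tus.f.bdeSize = len;		/* buffer size in bytes */
	bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;	/* host-resident BDE */
}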
+struct lpfc_sli4_flags {
+	uint32_t word0;
+#define lpfc_fip_flag_SHIFT 0
+#define lpfc_fip_flag_MASK 0x00000001
+#define lpfc_fip_flag_WORD word0
+};
+
+/* event queue entry structure */
+struct lpfc_eqe {
+	uint32_t word0;
+#define lpfc_eqe_resource_id_SHIFT	16
+#define lpfc_eqe_resource_id_MASK	0x000000FF
+#define lpfc_eqe_resource_id_WORD	word0
+#define lpfc_eqe_minor_code_SHIFT	4
+#define lpfc_eqe_minor_code_MASK	0x00000FFF
+#define lpfc_eqe_minor_code_WORD	word0
+#define lpfc_eqe_major_code_SHIFT	1
+#define lpfc_eqe_major_code_MASK	0x00000007
+#define lpfc_eqe_major_code_WORD	word0
+#define lpfc_eqe_valid_SHIFT		0
+#define lpfc_eqe_valid_MASK		0x00000001
+#define lpfc_eqe_valid_WORD		word0
+};
+
+/* completion queue entry structure (common fields for all cqe types) */
+struct lpfc_cqe {
+	uint32_t reserved0;
+	uint32_t reserved1;
+	uint32_t reserved2;
+	uint32_t word3;
+#define lpfc_cqe_valid_SHIFT		31
+#define lpfc_cqe_valid_MASK		0x00000001
+#define lpfc_cqe_valid_WORD		word3
+#define lpfc_cqe_code_SHIFT		16
+#define lpfc_cqe_code_MASK		0x000000FF
+#define lpfc_cqe_code_WORD		word3
+};
+
+/* Completion Queue Entry Status Codes */
+#define CQE_STATUS_SUCCESS		0x0
+#define CQE_STATUS_FCP_RSP_FAILURE	0x1
+#define CQE_STATUS_REMOTE_STOP		0x2
+#define CQE_STATUS_LOCAL_REJECT		0x3
+#define CQE_STATUS_NPORT_RJT		0x4
+#define CQE_STATUS_FABRIC_RJT		0x5
+#define CQE_STATUS_NPORT_BSY		0x6
+#define CQE_STATUS_FABRIC_BSY		0x7
+#define CQE_STATUS_INTERMED_RSP		0x8
+#define CQE_STATUS_LS_RJT		0x9
+#define CQE_STATUS_CMD_REJECT		0xb
+#define CQE_STATUS_FCP_TGT_LENCHECK	0xc
+#define CQE_STATUS_NEED_BUFF_ENTRY	0xf
+
+/* Status returned by hardware (valid only if status = CQE_STATUS_SUCCESS). */
+#define CQE_HW_STATUS_NO_ERR		0x0
+#define CQE_HW_STATUS_UNDERRUN		0x1
+#define CQE_HW_STATUS_OVERRUN		0x2
+
+/* Completion Queue Entry Codes */
+#define CQE_CODE_COMPL_WQE		0x1
+#define CQE_CODE_RELEASE_WQE		0x2
+#define CQE_CODE_RECEIVE		0x4
+#define CQE_CODE_XRI_ABORTED		0x5
+
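/*
 * Editorial sketch, not part of the patch: the consumer-side pattern the
 * common CQE layout enables -- test the valid bit, then dispatch on the
 * entry code.  The per-code handlers are left as stubs here.
 */
static inline bool handle_cqe_sketch(struct lpfc_cqe *cqe)
{
	if (!bf_get(lpfc_cqe_valid, cqe))
		return false;	/* slot not yet produced by hardware */

	switch (bf_get(lpfc_cqe_code, cqe)) {
	case CQE_CODE_COMPL_WQE:	/* WQE completion */
	case CQE_CODE_RELEASE_WQE:	/* WQ index release */
	case CQE_CODE_RECEIVE:		/* unsolicited receive */
	case CQE_CODE_XRI_ABORTED:	/* XRI aborted notification */
	default:
		break;
	}
	return true;
}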
+/* completion queue entry for wqe completions */
+struct lpfc_wcqe_complete {
+	uint32_t word0;
+#define lpfc_wcqe_c_request_tag_SHIFT	16
+#define lpfc_wcqe_c_request_tag_MASK	0x0000FFFF
+#define lpfc_wcqe_c_request_tag_WORD	word0
+#define lpfc_wcqe_c_status_SHIFT	8
+#define lpfc_wcqe_c_status_MASK		0x000000FF
+#define lpfc_wcqe_c_status_WORD		word0
+#define lpfc_wcqe_c_hw_status_SHIFT	0
+#define lpfc_wcqe_c_hw_status_MASK	0x000000FF
+#define lpfc_wcqe_c_hw_status_WORD	word0
+	uint32_t total_data_placed;
+	uint32_t parameter;
+	uint32_t word3;
+#define lpfc_wcqe_c_valid_SHIFT		lpfc_cqe_valid_SHIFT
+#define lpfc_wcqe_c_valid_MASK		lpfc_cqe_valid_MASK
+#define lpfc_wcqe_c_valid_WORD		lpfc_cqe_valid_WORD
+#define lpfc_wcqe_c_xb_SHIFT		28
+#define lpfc_wcqe_c_xb_MASK		0x00000001
+#define lpfc_wcqe_c_xb_WORD		word3
+#define lpfc_wcqe_c_pv_SHIFT		27
+#define lpfc_wcqe_c_pv_MASK		0x00000001
+#define lpfc_wcqe_c_pv_WORD		word3
+#define lpfc_wcqe_c_priority_SHIFT	24
+#define lpfc_wcqe_c_priority_MASK		0x00000007
+#define lpfc_wcqe_c_priority_WORD		word3
+#define lpfc_wcqe_c_code_SHIFT		lpfc_cqe_code_SHIFT
+#define lpfc_wcqe_c_code_MASK		lpfc_cqe_code_MASK
+#define lpfc_wcqe_c_code_WORD		lpfc_cqe_code_WORD
+};
+
+/* completion queue entry for wqe release */
+struct lpfc_wcqe_release {
+	uint32_t reserved0;
+	uint32_t reserved1;
+	uint32_t word2;
+#define lpfc_wcqe_r_wq_id_SHIFT		16
+#define lpfc_wcqe_r_wq_id_MASK		0x0000FFFF
+#define lpfc_wcqe_r_wq_id_WORD		word2
+#define lpfc_wcqe_r_wqe_index_SHIFT	0
+#define lpfc_wcqe_r_wqe_index_MASK	0x0000FFFF
+#define lpfc_wcqe_r_wqe_index_WORD	word2
+	uint32_t word3;
+#define lpfc_wcqe_r_valid_SHIFT		lpfc_cqe_valid_SHIFT
+#define lpfc_wcqe_r_valid_MASK		lpfc_cqe_valid_MASK
+#define lpfc_wcqe_r_valid_WORD		lpfc_cqe_valid_WORD
+#define lpfc_wcqe_r_code_SHIFT		lpfc_cqe_code_SHIFT
+#define lpfc_wcqe_r_code_MASK		lpfc_cqe_code_MASK
+#define lpfc_wcqe_r_code_WORD		lpfc_cqe_code_WORD
+};
+
+struct sli4_wcqe_xri_aborted {
+	uint32_t word0;
+#define lpfc_wcqe_xa_status_SHIFT		8
+#define lpfc_wcqe_xa_status_MASK		0x000000FF
+#define lpfc_wcqe_xa_status_WORD		word0
+	uint32_t parameter;
+	uint32_t word2;
+#define lpfc_wcqe_xa_remote_xid_SHIFT	16
+#define lpfc_wcqe_xa_remote_xid_MASK	0x0000FFFF
+#define lpfc_wcqe_xa_remote_xid_WORD	word2
+#define lpfc_wcqe_xa_xri_SHIFT		0
+#define lpfc_wcqe_xa_xri_MASK		0x0000FFFF
+#define lpfc_wcqe_xa_xri_WORD		word2
+	uint32_t word3;
+#define lpfc_wcqe_xa_valid_SHIFT	lpfc_cqe_valid_SHIFT
+#define lpfc_wcqe_xa_valid_MASK		lpfc_cqe_valid_MASK
+#define lpfc_wcqe_xa_valid_WORD		lpfc_cqe_valid_WORD
+#define lpfc_wcqe_xa_ia_SHIFT		30
+#define lpfc_wcqe_xa_ia_MASK		0x00000001
+#define lpfc_wcqe_xa_ia_WORD		word3
+#define CQE_XRI_ABORTED_IA_REMOTE	0
+#define CQE_XRI_ABORTED_IA_LOCAL	1
+#define lpfc_wcqe_xa_br_SHIFT		29
+#define lpfc_wcqe_xa_br_MASK		0x00000001
+#define lpfc_wcqe_xa_br_WORD		word3
+#define CQE_XRI_ABORTED_BR_BA_ACC	0
+#define CQE_XRI_ABORTED_BR_BA_RJT	1
+#define lpfc_wcqe_xa_eo_SHIFT		28
+#define lpfc_wcqe_xa_eo_MASK		0x00000001
+#define lpfc_wcqe_xa_eo_WORD		word3
+#define CQE_XRI_ABORTED_EO_REMOTE	0
+#define CQE_XRI_ABORTED_EO_LOCAL	1
+#define lpfc_wcqe_xa_code_SHIFT		lpfc_cqe_code_SHIFT
+#define lpfc_wcqe_xa_code_MASK		lpfc_cqe_code_MASK
+#define lpfc_wcqe_xa_code_WORD		lpfc_cqe_code_WORD
+};
+
+/* completion queue entry structure for rqe completion */
+struct lpfc_rcqe {
+	uint32_t word0;
+#define lpfc_rcqe_bindex_SHIFT		16
+#define lpfc_rcqe_bindex_MASK		0x0000FFF
+#define lpfc_rcqe_bindex_WORD		word0
+#define lpfc_rcqe_status_SHIFT		8
+#define lpfc_rcqe_status_MASK		0x000000FF
+#define lpfc_rcqe_status_WORD		word0
+#define FC_STATUS_RQ_SUCCESS		0x10 /* Async receive successful */
+#define FC_STATUS_RQ_BUF_LEN_EXCEEDED 	0x11 /* payload truncated */
+#define FC_STATUS_INSUFF_BUF_NEED_BUF 	0x12 /* Insufficient buffers */
+#define FC_STATUS_INSUFF_BUF_FRM_DISC 	0x13 /* Frame Discard */
+	uint32_t reserved1;
+	uint32_t word2;
+#define lpfc_rcqe_length_SHIFT		16
+#define lpfc_rcqe_length_MASK		0x0000FFFF
+#define lpfc_rcqe_length_WORD		word2
+#define lpfc_rcqe_rq_id_SHIFT		6
+#define lpfc_rcqe_rq_id_MASK		0x000003FF
+#define lpfc_rcqe_rq_id_WORD		word2
+#define lpfc_rcqe_fcf_id_SHIFT		0
+#define lpfc_rcqe_fcf_id_MASK		0x0000003F
+#define lpfc_rcqe_fcf_id_WORD		word2
+	uint32_t word3;
+#define lpfc_rcqe_valid_SHIFT		lpfc_cqe_valid_SHIFT
+#define lpfc_rcqe_valid_MASK		lpfc_cqe_valid_MASK
+#define lpfc_rcqe_valid_WORD		lpfc_cqe_valid_WORD
+#define lpfc_rcqe_port_SHIFT		30
+#define lpfc_rcqe_port_MASK		0x00000001
+#define lpfc_rcqe_port_WORD		word3
+#define lpfc_rcqe_hdr_length_SHIFT	24
+#define lpfc_rcqe_hdr_length_MASK	0x0000001F
+#define lpfc_rcqe_hdr_length_WORD	word3
+#define lpfc_rcqe_code_SHIFT		lpfc_cqe_code_SHIFT
+#define lpfc_rcqe_code_MASK		lpfc_cqe_code_MASK
+#define lpfc_rcqe_code_WORD		lpfc_cqe_code_WORD
+#define lpfc_rcqe_eof_SHIFT		8
+#define lpfc_rcqe_eof_MASK		0x000000FF
+#define lpfc_rcqe_eof_WORD		word3
+#define FCOE_EOFn	0x41
+#define FCOE_EOFt	0x42
+#define FCOE_EOFni	0x49
+#define FCOE_EOFa	0x50
+#define lpfc_rcqe_sof_SHIFT		0
+#define lpfc_rcqe_sof_MASK		0x000000FF
+#define lpfc_rcqe_sof_WORD		word3
+#define FCOE_SOFi2	0x2d
+#define FCOE_SOFi3	0x2e
+#define FCOE_SOFn2	0x35
+#define FCOE_SOFn3	0x36
+};
+
+struct lpfc_wqe_generic {
+	struct ulp_bde64 bde;
+	uint32_t word3;
+	uint32_t word4;
+	uint32_t word5;
+	uint32_t word6;
+#define lpfc_wqe_gen_context_SHIFT	16
+#define lpfc_wqe_gen_context_MASK	0x0000FFFF
+#define lpfc_wqe_gen_context_WORD	word6
+#define lpfc_wqe_gen_xri_SHIFT		0
+#define lpfc_wqe_gen_xri_MASK		0x0000FFFF
+#define lpfc_wqe_gen_xri_WORD		word6
+	uint32_t word7;
+#define lpfc_wqe_gen_lnk_SHIFT		23
+#define lpfc_wqe_gen_lnk_MASK		0x00000001
+#define lpfc_wqe_gen_lnk_WORD		word7
+#define lpfc_wqe_gen_erp_SHIFT		22
+#define lpfc_wqe_gen_erp_MASK		0x00000001
+#define lpfc_wqe_gen_erp_WORD		word7
+#define lpfc_wqe_gen_pu_SHIFT		20
+#define lpfc_wqe_gen_pu_MASK		0x00000003
+#define lpfc_wqe_gen_pu_WORD		word7
+#define lpfc_wqe_gen_class_SHIFT	16
+#define lpfc_wqe_gen_class_MASK		0x00000007
+#define lpfc_wqe_gen_class_WORD		word7
+#define lpfc_wqe_gen_command_SHIFT	8
+#define lpfc_wqe_gen_command_MASK	0x000000FF
+#define lpfc_wqe_gen_command_WORD	word7
+#define lpfc_wqe_gen_status_SHIFT	4
+#define lpfc_wqe_gen_status_MASK	0x0000000F
+#define lpfc_wqe_gen_status_WORD	word7
+#define lpfc_wqe_gen_ct_SHIFT		2
+#define lpfc_wqe_gen_ct_MASK		0x00000007
+#define lpfc_wqe_gen_ct_WORD		word7
+	uint32_t abort_tag;
+	uint32_t word9;
+#define lpfc_wqe_gen_request_tag_SHIFT	0
+#define lpfc_wqe_gen_request_tag_MASK	0x0000FFFF
+#define lpfc_wqe_gen_request_tag_WORD	word9
+	uint32_t word10;
+#define lpfc_wqe_gen_ccp_SHIFT		24
+#define lpfc_wqe_gen_ccp_MASK		0x000000FF
+#define lpfc_wqe_gen_ccp_WORD		word10
+#define lpfc_wqe_gen_ccpe_SHIFT		23
+#define lpfc_wqe_gen_ccpe_MASK		0x00000001
+#define lpfc_wqe_gen_ccpe_WORD		word10
+#define lpfc_wqe_gen_pv_SHIFT		19
+#define lpfc_wqe_gen_pv_MASK		0x00000001
+#define lpfc_wqe_gen_pv_WORD		word10
+#define lpfc_wqe_gen_pri_SHIFT		16
+#define lpfc_wqe_gen_pri_MASK		0x00000007
+#define lpfc_wqe_gen_pri_WORD		word10
+	uint32_t word11;
+#define lpfc_wqe_gen_cq_id_SHIFT	16
+#define lpfc_wqe_gen_cq_id_MASK		0x000003FF
+#define lpfc_wqe_gen_cq_id_WORD		word11
+#define LPFC_WQE_CQ_ID_DEFAULT	0x3ff
+#define lpfc_wqe_gen_wqec_SHIFT		7
+#define lpfc_wqe_gen_wqec_MASK		0x00000001
+#define lpfc_wqe_gen_wqec_WORD		word11
+#define lpfc_wqe_gen_cmd_type_SHIFT	0
+#define lpfc_wqe_gen_cmd_type_MASK	0x0000000F
+#define lpfc_wqe_gen_cmd_type_WORD	word11
+	uint32_t payload[4];
+};
+
+struct lpfc_rqe {
+	uint32_t address_hi;
+	uint32_t address_lo;
+};
+
+/* buffer descriptors */
+struct lpfc_bde4 {
+	uint32_t addr_hi;
+	uint32_t addr_lo;
+	uint32_t word2;
+#define lpfc_bde4_last_SHIFT		31
+#define lpfc_bde4_last_MASK		0x00000001
+#define lpfc_bde4_last_WORD		word2
+#define lpfc_bde4_sge_offset_SHIFT	0
+#define lpfc_bde4_sge_offset_MASK	0x000003FF
+#define lpfc_bde4_sge_offset_WORD	word2
+	uint32_t word3;
+#define lpfc_bde4_length_SHIFT		0
+#define lpfc_bde4_length_MASK		0x000000FF
+#define lpfc_bde4_length_WORD		word3
+};
+
+struct lpfc_register {
+	uint32_t word0;
+};
+
+#define LPFC_UERR_STATUS_HI		0x00A4
+#define LPFC_UERR_STATUS_LO		0x00A0
+#define LPFC_ONLINE0			0x00B0
+#define LPFC_ONLINE1			0x00B4
+#define LPFC_SCRATCHPAD			0x0058
+
+/* BAR0 Registers */
+#define LPFC_HST_STATE			0x00AC
+#define lpfc_hst_state_perr_SHIFT	31
+#define lpfc_hst_state_perr_MASK	0x1
+#define lpfc_hst_state_perr_WORD	word0
+#define lpfc_hst_state_sfi_SHIFT	30
+#define lpfc_hst_state_sfi_MASK		0x1
+#define lpfc_hst_state_sfi_WORD		word0
+#define lpfc_hst_state_nip_SHIFT	29
+#define lpfc_hst_state_nip_MASK		0x1
+#define lpfc_hst_state_nip_WORD		word0
+#define lpfc_hst_state_ipc_SHIFT	28
+#define lpfc_hst_state_ipc_MASK		0x1
+#define lpfc_hst_state_ipc_WORD		word0
+#define lpfc_hst_state_xrom_SHIFT	27
+#define lpfc_hst_state_xrom_MASK	0x1
+#define lpfc_hst_state_xrom_WORD	word0
+#define lpfc_hst_state_dl_SHIFT		26
+#define lpfc_hst_state_dl_MASK		0x1
+#define lpfc_hst_state_dl_WORD		word0
+#define lpfc_hst_state_port_status_SHIFT	0
+#define lpfc_hst_state_port_status_MASK		0xFFFF
+#define lpfc_hst_state_port_status_WORD		word0
+
+#define LPFC_POST_STAGE_POWER_ON_RESET			0x0000
+#define LPFC_POST_STAGE_AWAITING_HOST_RDY		0x0001
+#define LPFC_POST_STAGE_HOST_RDY			0x0002
+#define LPFC_POST_STAGE_BE_RESET			0x0003
+#define LPFC_POST_STAGE_SEEPROM_CS_START		0x0100
+#define LPFC_POST_STAGE_SEEPROM_CS_DONE			0x0101
+#define LPFC_POST_STAGE_DDR_CONFIG_START		0x0200
+#define LPFC_POST_STAGE_DDR_CONFIG_DONE			0x0201
+#define LPFC_POST_STAGE_DDR_CALIBRATE_START		0x0300
+#define LPFC_POST_STAGE_DDR_CALIBRATE_DONE		0x0301
+#define LPFC_POST_STAGE_DDR_TEST_START			0x0400
+#define LPFC_POST_STAGE_DDR_TEST_DONE			0x0401
+#define LPFC_POST_STAGE_REDBOOT_INIT_START		0x0600
+#define LPFC_POST_STAGE_REDBOOT_INIT_DONE		0x0601
+#define LPFC_POST_STAGE_FW_IMAGE_LOAD_START		0x0700
+#define LPFC_POST_STAGE_FW_IMAGE_LOAD_DONE		0x0701
+#define LPFC_POST_STAGE_ARMFW_START			0x0800
+#define LPFC_POST_STAGE_DHCP_QUERY_START		0x0900
+#define LPFC_POST_STAGE_DHCP_QUERY_DONE			0x0901
+#define LPFC_POST_STAGE_BOOT_TARGET_DISCOVERY_START	0x0A00
+#define LPFC_POST_STAGE_BOOT_TARGET_DISCOVERY_DONE	0x0A01
+#define LPFC_POST_STAGE_RC_OPTION_SET			0x0B00
+#define LPFC_POST_STAGE_SWITCH_LINK			0x0B01
+#define LPFC_POST_STAGE_SEND_ICDS_MESSAGE		0x0B02
+#define LPFC_POST_STAGE_PERFORM_TFTP			0x0B03
+#define LPFC_POST_STAGE_PARSE_XML			0x0B04
+#define LPFC_POST_STAGE_DOWNLOAD_IMAGE			0x0B05
+#define LPFC_POST_STAGE_FLASH_IMAGE			0x0B06
+#define LPFC_POST_STAGE_RC_DONE				0x0B07
+#define LPFC_POST_STAGE_REBOOT_SYSTEM			0x0B08
+#define LPFC_POST_STAGE_MAC_ADDRESS			0x0C00
+#define LPFC_POST_STAGE_ARMFW_READY			0xC000
+#define LPFC_POST_STAGE_ARMFW_UE			0xF000
+
+#define lpfc_scratchpad_slirev_SHIFT			4
+#define lpfc_scratchpad_slirev_MASK			0xF
+#define lpfc_scratchpad_slirev_WORD			word0
+#define lpfc_scratchpad_chiptype_SHIFT			8
+#define lpfc_scratchpad_chiptype_MASK			0xFF
+#define lpfc_scratchpad_chiptype_WORD			word0
+#define lpfc_scratchpad_featurelevel1_SHIFT		16
+#define lpfc_scratchpad_featurelevel1_MASK		0xFF
+#define lpfc_scratchpad_featurelevel1_WORD		word0
+#define lpfc_scratchpad_featurelevel2_SHIFT		24
+#define lpfc_scratchpad_featurelevel2_MASK		0xFF
+#define lpfc_scratchpad_featurelevel2_WORD		word0
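+
+/*
+ * Illustrative sketch (not a driver entry point): the SHIFT/MASK/WORD
+ * triples above are meant to be consumed by the generic bf_get()/bf_set()
+ * accessors defined at the top of this header.  Decoding a raw SCRATCHPAD
+ * word read from BAR0 might look like this:
+ */
+static inline uint32_t sketch_scratchpad_slirev(uint32_t raw_word0)
+{
+	struct lpfc_register reg;
+
+	reg.word0 = raw_word0;	/* e.g. readl(bar0 + LPFC_SCRATCHPAD) */
+	return bf_get(lpfc_scratchpad_slirev, &reg);
+}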
+
+/* BAR1 Registers */
+#define LPFC_IMR_MASK_ALL	0xFFFFFFFF
+#define LPFC_ISCR_CLEAR_ALL	0xFFFFFFFF
+
+#define LPFC_HST_ISR0		0x0C18
+#define LPFC_HST_ISR1		0x0C1C
+#define LPFC_HST_ISR2		0x0C20
+#define LPFC_HST_ISR3		0x0C24
+#define LPFC_HST_ISR4		0x0C28
+
+#define LPFC_HST_IMR0		0x0C48
+#define LPFC_HST_IMR1		0x0C4C
+#define LPFC_HST_IMR2		0x0C50
+#define LPFC_HST_IMR3		0x0C54
+#define LPFC_HST_IMR4		0x0C58
+
+#define LPFC_HST_ISCR0		0x0C78
+#define LPFC_HST_ISCR1		0x0C7C
+#define LPFC_HST_ISCR2		0x0C80
+#define LPFC_HST_ISCR3		0x0C84
+#define LPFC_HST_ISCR4		0x0C88
+
+#define LPFC_SLI4_INTR0			BIT0
+#define LPFC_SLI4_INTR1			BIT1
+#define LPFC_SLI4_INTR2			BIT2
+#define LPFC_SLI4_INTR3			BIT3
+#define LPFC_SLI4_INTR4			BIT4
+#define LPFC_SLI4_INTR5			BIT5
+#define LPFC_SLI4_INTR6			BIT6
+#define LPFC_SLI4_INTR7			BIT7
+#define LPFC_SLI4_INTR8			BIT8
+#define LPFC_SLI4_INTR9			BIT9
+#define LPFC_SLI4_INTR10		BIT10
+#define LPFC_SLI4_INTR11		BIT11
+#define LPFC_SLI4_INTR12		BIT12
+#define LPFC_SLI4_INTR13		BIT13
+#define LPFC_SLI4_INTR14		BIT14
+#define LPFC_SLI4_INTR15		BIT15
+#define LPFC_SLI4_INTR16		BIT16
+#define LPFC_SLI4_INTR17		BIT17
+#define LPFC_SLI4_INTR18		BIT18
+#define LPFC_SLI4_INTR19		BIT19
+#define LPFC_SLI4_INTR20		BIT20
+#define LPFC_SLI4_INTR21		BIT21
+#define LPFC_SLI4_INTR22		BIT22
+#define LPFC_SLI4_INTR23		BIT23
+#define LPFC_SLI4_INTR24		BIT24
+#define LPFC_SLI4_INTR25		BIT25
+#define LPFC_SLI4_INTR26		BIT26
+#define LPFC_SLI4_INTR27		BIT27
+#define LPFC_SLI4_INTR28		BIT28
+#define LPFC_SLI4_INTR29		BIT29
+#define LPFC_SLI4_INTR30		BIT30
+#define LPFC_SLI4_INTR31		BIT31
+
+/* BAR2 Registers */
+#define LPFC_RQ_DOORBELL		0x00A0
+#define lpfc_rq_doorbell_num_posted_SHIFT	16
+#define lpfc_rq_doorbell_num_posted_MASK	0x3FFF
+#define lpfc_rq_doorbell_num_posted_WORD	word0
+#define LPFC_RQ_POST_BATCH		8	/* RQEs to post at one time */
+#define lpfc_rq_doorbell_id_SHIFT		0
+#define lpfc_rq_doorbell_id_MASK		0x03FF
+#define lpfc_rq_doorbell_id_WORD		word0
+
+#define LPFC_WQ_DOORBELL		0x0040
+#define lpfc_wq_doorbell_num_posted_SHIFT	24
+#define lpfc_wq_doorbell_num_posted_MASK	0x00FF
+#define lpfc_wq_doorbell_num_posted_WORD	word0
+#define lpfc_wq_doorbell_index_SHIFT		16
+#define lpfc_wq_doorbell_index_MASK		0x00FF
+#define lpfc_wq_doorbell_index_WORD		word0
+#define lpfc_wq_doorbell_id_SHIFT		0
+#define lpfc_wq_doorbell_id_MASK		0xFFFF
+#define lpfc_wq_doorbell_id_WORD		word0
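+
+/*
+ * Illustrative sketch: building the doorbell word that tells work queue
+ * 'qid' one WQE was posted at 'host_index'.  Field packing is via the
+ * bf_set() helper from the top of this header; a caller would writel()
+ * the result to BAR2 + LPFC_WQ_DOORBELL.
+ */
+static inline uint32_t sketch_wq_doorbell(uint16_t qid, uint16_t host_index)
+{
+	struct lpfc_register doorbell;
+
+	doorbell.word0 = 0;
+	bf_set(lpfc_wq_doorbell_num_posted, &doorbell, 1);
+	bf_set(lpfc_wq_doorbell_index, &doorbell, host_index);
+	bf_set(lpfc_wq_doorbell_id, &doorbell, qid);
+	return doorbell.word0;
+}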
+
+#define LPFC_EQCQ_DOORBELL		0x0120
+#define lpfc_eqcq_doorbell_arm_SHIFT		29
+#define lpfc_eqcq_doorbell_arm_MASK		0x0001
+#define lpfc_eqcq_doorbell_arm_WORD		word0
+#define lpfc_eqcq_doorbell_num_released_SHIFT	16
+#define lpfc_eqcq_doorbell_num_released_MASK	0x1FFF
+#define lpfc_eqcq_doorbell_num_released_WORD	word0
+#define lpfc_eqcq_doorbell_qt_SHIFT		10
+#define lpfc_eqcq_doorbell_qt_MASK		0x0001
+#define lpfc_eqcq_doorbell_qt_WORD		word0
+#define LPFC_QUEUE_TYPE_COMPLETION	0
+#define LPFC_QUEUE_TYPE_EVENT		1
+#define lpfc_eqcq_doorbell_eqci_SHIFT		9
+#define lpfc_eqcq_doorbell_eqci_MASK		0x0001
+#define lpfc_eqcq_doorbell_eqci_WORD		word0
+#define lpfc_eqcq_doorbell_cqid_SHIFT		0
+#define lpfc_eqcq_doorbell_cqid_MASK		0x03FF
+#define lpfc_eqcq_doorbell_cqid_WORD		word0
+#define lpfc_eqcq_doorbell_eqid_SHIFT		0
+#define lpfc_eqcq_doorbell_eqid_MASK		0x01FF
+#define lpfc_eqcq_doorbell_eqid_WORD		word0
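+
+/*
+ * Illustrative sketch: building the EQ/CQ doorbell word that releases
+ * 'count' consumed entries from completion queue 'cqid' and optionally
+ * re-arms it, using bf_set() from the top of this header.  A caller
+ * would writel() the result to BAR2 + LPFC_EQCQ_DOORBELL.
+ */
+static inline uint32_t sketch_cq_doorbell(uint16_t cqid, uint32_t count,
+					  int arm)
+{
+	struct lpfc_register doorbell;
+
+	doorbell.word0 = 0;
+	if (arm)
+		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
+	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
+	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
+	bf_set(lpfc_eqcq_doorbell_cqid, &doorbell, cqid);
+	return doorbell.word0;
+}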
+
+#define LPFC_BMBX			0x0160
+#define lpfc_bmbx_addr_SHIFT		2
+#define lpfc_bmbx_addr_MASK		0x3FFFFFFF
+#define lpfc_bmbx_addr_WORD		word0
+#define lpfc_bmbx_hi_SHIFT		1
+#define lpfc_bmbx_hi_MASK		0x0001
+#define lpfc_bmbx_hi_WORD		word0
+#define lpfc_bmbx_rdy_SHIFT		0
+#define lpfc_bmbx_rdy_MASK		0x0001
+#define lpfc_bmbx_rdy_WORD		word0
+
+#define LPFC_MQ_DOORBELL			0x0140
+#define lpfc_mq_doorbell_num_posted_SHIFT	16
+#define lpfc_mq_doorbell_num_posted_MASK	0x3FFF
+#define lpfc_mq_doorbell_num_posted_WORD	word0
+#define lpfc_mq_doorbell_id_SHIFT		0
+#define lpfc_mq_doorbell_id_MASK		0x03FF
+#define lpfc_mq_doorbell_id_WORD		word0
+
+struct lpfc_sli4_cfg_mhdr {
+	uint32_t word1;
+#define lpfc_mbox_hdr_emb_SHIFT		0
+#define lpfc_mbox_hdr_emb_MASK		0x00000001
+#define lpfc_mbox_hdr_emb_WORD		word1
+#define lpfc_mbox_hdr_sge_cnt_SHIFT	3
+#define lpfc_mbox_hdr_sge_cnt_MASK	0x0000001F
+#define lpfc_mbox_hdr_sge_cnt_WORD	word1
+	uint32_t payload_length;
+	uint32_t tag_lo;
+	uint32_t tag_hi;
+	uint32_t reserved5;
+};
+
+union lpfc_sli4_cfg_shdr {
+	struct {
+		uint32_t word6;
+#define lpfc_mbox_hdr_opcode_SHIFT		0
+#define lpfc_mbox_hdr_opcode_MASK		0x000000FF
+#define lpfc_mbox_hdr_opcode_WORD		word6
+#define lpfc_mbox_hdr_subsystem_SHIFT		8
+#define lpfc_mbox_hdr_subsystem_MASK		0x000000FF
+#define lpfc_mbox_hdr_subsystem_WORD		word6
+#define lpfc_mbox_hdr_port_number_SHIFT		16
+#define lpfc_mbox_hdr_port_number_MASK		0x000000FF
+#define lpfc_mbox_hdr_port_number_WORD		word6
+#define lpfc_mbox_hdr_domain_SHIFT		24
+#define lpfc_mbox_hdr_domain_MASK		0x000000FF
+#define lpfc_mbox_hdr_domain_WORD		word6
+		uint32_t timeout;
+		uint32_t request_length;
+		uint32_t reserved9;
+	} request;
+	struct {
+		uint32_t word6;
+#define lpfc_mbox_hdr_opcode_SHIFT		0
+#define lpfc_mbox_hdr_opcode_MASK		0x000000FF
+#define lpfc_mbox_hdr_opcode_WORD		word6
+#define lpfc_mbox_hdr_subsystem_SHIFT		8
+#define lpfc_mbox_hdr_subsystem_MASK		0x000000FF
+#define lpfc_mbox_hdr_subsystem_WORD		word6
+#define lpfc_mbox_hdr_domain_SHIFT		24
+#define lpfc_mbox_hdr_domain_MASK		0x000000FF
+#define lpfc_mbox_hdr_domain_WORD		word6
+		uint32_t word7;
+#define lpfc_mbox_hdr_status_SHIFT		0
+#define lpfc_mbox_hdr_status_MASK		0x000000FF
+#define lpfc_mbox_hdr_status_WORD		word7
+#define lpfc_mbox_hdr_add_status_SHIFT		8
+#define lpfc_mbox_hdr_add_status_MASK		0x000000FF
+#define lpfc_mbox_hdr_add_status_WORD		word7
+		uint32_t response_length;
+		uint32_t actual_response_length;
+	} response;
+};
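+
+/*
+ * Illustrative sketch: once a SLI_CONFIG mailbox completes, the status
+ * and additional-status fields in the response half of the shared header
+ * tell whether the command succeeded.  A caller-side check (the boolean
+ * return convention is an assumption, not driver policy):
+ */
+static inline int sketch_cfg_shdr_ok(union lpfc_sli4_cfg_shdr *shdr)
+{
+	return (bf_get(lpfc_mbox_hdr_status, &shdr->response) == 0 &&
+		bf_get(lpfc_mbox_hdr_add_status, &shdr->response) == 0);
+}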
+
+/* Mailbox structures */
+struct mbox_header {
+	struct lpfc_sli4_cfg_mhdr cfg_mhdr;
+	union  lpfc_sli4_cfg_shdr cfg_shdr;
+};
+
+/* Subsystem Definitions */
+#define LPFC_MBOX_SUBSYSTEM_COMMON	0x1
+#define LPFC_MBOX_SUBSYSTEM_FCOE	0xC
+
+/* Device Specific Definitions */
+
+/* The HOST ENDIAN defines are in Big Endian format. */
+#define HOST_ENDIAN_LOW_WORD0   0xFF3412FF
+#define HOST_ENDIAN_HIGH_WORD1	0xFF7856FF
+
+/* Common Opcodes */
+#define LPFC_MBOX_OPCODE_CQ_CREATE		0x0C
+#define LPFC_MBOX_OPCODE_EQ_CREATE		0x0D
+#define LPFC_MBOX_OPCODE_MQ_CREATE		0x15
+#define LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES	0x20
+#define LPFC_MBOX_OPCODE_NOP			0x21
+#define LPFC_MBOX_OPCODE_MQ_DESTROY		0x35
+#define LPFC_MBOX_OPCODE_CQ_DESTROY		0x36
+#define LPFC_MBOX_OPCODE_EQ_DESTROY		0x37
+#define LPFC_MBOX_OPCODE_FUNCTION_RESET		0x3D
+
+/* FCoE Opcodes */
+#define LPFC_MBOX_OPCODE_FCOE_WQ_CREATE			0x01
+#define LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY		0x02
+#define LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES		0x03
+#define LPFC_MBOX_OPCODE_FCOE_REMOVE_SGL_PAGES		0x04
+#define LPFC_MBOX_OPCODE_FCOE_RQ_CREATE			0x05
+#define LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY		0x06
+#define LPFC_MBOX_OPCODE_FCOE_READ_FCF_TABLE		0x08
+#define LPFC_MBOX_OPCODE_FCOE_ADD_FCF			0x09
+#define LPFC_MBOX_OPCODE_FCOE_DELETE_FCF		0x0A
+#define LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE		0x0B
+
+/* Mailbox command structures */
+struct eq_context {
+	uint32_t word0;
+#define lpfc_eq_context_size_SHIFT	31
+#define lpfc_eq_context_size_MASK	0x00000001
+#define lpfc_eq_context_size_WORD	word0
+#define LPFC_EQE_SIZE_4			0x0
+#define LPFC_EQE_SIZE_16		0x1
+#define lpfc_eq_context_valid_SHIFT	29
+#define lpfc_eq_context_valid_MASK	0x00000001
+#define lpfc_eq_context_valid_WORD	word0
+	uint32_t word1;
+#define lpfc_eq_context_count_SHIFT	26
+#define lpfc_eq_context_count_MASK	0x00000003
+#define lpfc_eq_context_count_WORD	word1
+#define LPFC_EQ_CNT_256		0x0
+#define LPFC_EQ_CNT_512		0x1
+#define LPFC_EQ_CNT_1024	0x2
+#define LPFC_EQ_CNT_2048	0x3
+#define LPFC_EQ_CNT_4096	0x4
+	uint32_t word2;
+#define lpfc_eq_context_delay_multi_SHIFT	13
+#define lpfc_eq_context_delay_multi_MASK	0x000003FF
+#define lpfc_eq_context_delay_multi_WORD	word2
+	uint32_t reserved3;
+};
+
+struct sgl_page_pairs {
+	uint32_t sgl_pg0_addr_lo;
+	uint32_t sgl_pg0_addr_hi;
+	uint32_t sgl_pg1_addr_lo;
+	uint32_t sgl_pg1_addr_hi;
+};
+
+struct lpfc_mbx_post_sgl_pages {
+	struct mbox_header header;
+	uint32_t word0;
+#define lpfc_post_sgl_pages_xri_SHIFT	0
+#define lpfc_post_sgl_pages_xri_MASK	0x0000FFFF
+#define lpfc_post_sgl_pages_xri_WORD	word0
+#define lpfc_post_sgl_pages_xricnt_SHIFT	16
+#define lpfc_post_sgl_pages_xricnt_MASK	0x0000FFFF
+#define lpfc_post_sgl_pages_xricnt_WORD	word0
+	struct sgl_page_pairs  sgl_pg_pairs[1];
+};
+
+/* word0 of page-1 struct shares the same SHIFT/MASK/WORD defines as above */
+struct lpfc_mbx_post_uembed_sgl_page1 {
+	union  lpfc_sli4_cfg_shdr cfg_shdr;
+	uint32_t word0;
+	struct sgl_page_pairs sgl_pg_pairs;
+};
+
+struct lpfc_mbx_sge {
+	uint32_t pa_lo;
+	uint32_t pa_hi;
+	uint32_t length;
+};
+
+struct lpfc_mbx_nembed_cmd {
+	struct lpfc_sli4_cfg_mhdr cfg_mhdr;
+#define LPFC_SLI4_MBX_SGE_MAX_PAGES	19
+	struct lpfc_mbx_sge sge[LPFC_SLI4_MBX_SGE_MAX_PAGES];
+};
+
+struct lpfc_mbx_nembed_sge_virt {
+	void *addr[LPFC_SLI4_MBX_SGE_MAX_PAGES];
+};
+
+struct lpfc_mbx_eq_create {
+	struct mbox_header header;
+	union {
+		struct {
+			uint32_t word0;
+#define lpfc_mbx_eq_create_num_pages_SHIFT	0
+#define lpfc_mbx_eq_create_num_pages_MASK	0x0000FFFF
+#define lpfc_mbx_eq_create_num_pages_WORD	word0
+			struct eq_context context;
+			struct dma_address page[LPFC_MAX_EQ_PAGE];
+		} request;
+		struct {
+			uint32_t word0;
+#define lpfc_mbx_eq_create_q_id_SHIFT	0
+#define lpfc_mbx_eq_create_q_id_MASK	0x0000FFFF
+#define lpfc_mbx_eq_create_q_id_WORD	word0
+		} response;
+	} u;
+};
+
+struct lpfc_mbx_eq_destroy {
+	struct mbox_header header;
+	union {
+		struct {
+			uint32_t word0;
+#define lpfc_mbx_eq_destroy_q_id_SHIFT	0
+#define lpfc_mbx_eq_destroy_q_id_MASK	0x0000FFFF
+#define lpfc_mbx_eq_destroy_q_id_WORD	word0
+		} request;
+		struct {
+			uint32_t word0;
+		} response;
+	} u;
+};
+
+struct lpfc_mbx_nop {
+	struct mbox_header header;
+	uint32_t context[2];
+};
+
+struct cq_context {
+	uint32_t word0;
+#define lpfc_cq_context_event_SHIFT	31
+#define lpfc_cq_context_event_MASK	0x00000001
+#define lpfc_cq_context_event_WORD	word0
+#define lpfc_cq_context_valid_SHIFT	29
+#define lpfc_cq_context_valid_MASK	0x00000001
+#define lpfc_cq_context_valid_WORD	word0
+#define lpfc_cq_context_count_SHIFT	27
+#define lpfc_cq_context_count_MASK	0x00000003
+#define lpfc_cq_context_count_WORD	word0
+#define LPFC_CQ_CNT_256		0x0
+#define LPFC_CQ_CNT_512		0x1
+#define LPFC_CQ_CNT_1024	0x2
+	uint32_t word1;
+#define lpfc_cq_eq_id_SHIFT		22
+#define lpfc_cq_eq_id_MASK		0x000000FF
+#define lpfc_cq_eq_id_WORD		word1
+	uint32_t reserved0;
+	uint32_t reserved1;
+};
+
+struct lpfc_mbx_cq_create {
+	struct mbox_header header;
+	union {
+		struct {
+			uint32_t word0;
+#define lpfc_mbx_cq_create_num_pages_SHIFT	0
+#define lpfc_mbx_cq_create_num_pages_MASK	0x0000FFFF
+#define lpfc_mbx_cq_create_num_pages_WORD	word0
+			struct cq_context context;
+			struct dma_address page[LPFC_MAX_CQ_PAGE];
+		} request;
+		struct {
+			uint32_t word0;
+#define lpfc_mbx_cq_create_q_id_SHIFT	0
+#define lpfc_mbx_cq_create_q_id_MASK	0x0000FFFF
+#define lpfc_mbx_cq_create_q_id_WORD	word0
+		} response;
+	} u;
+};
+
+struct lpfc_mbx_cq_destroy {
+	struct mbox_header header;
+	union {
+		struct {
+			uint32_t word0;
+#define lpfc_mbx_cq_destroy_q_id_SHIFT	0
+#define lpfc_mbx_cq_destroy_q_id_MASK	0x0000FFFF
+#define lpfc_mbx_cq_destroy_q_id_WORD	word0
+		} request;
+		struct {
+			uint32_t word0;
+		} response;
+	} u;
+};
+
+struct wq_context {
+	uint32_t reserved0;
+	uint32_t reserved1;
+	uint32_t reserved2;
+	uint32_t reserved3;
+};
+
+struct lpfc_mbx_wq_create {
+	struct mbox_header header;
+	union {
+		struct {
+			uint32_t word0;
+#define lpfc_mbx_wq_create_num_pages_SHIFT	0
+#define lpfc_mbx_wq_create_num_pages_MASK	0x0000FFFF
+#define lpfc_mbx_wq_create_num_pages_WORD	word0
+#define lpfc_mbx_wq_create_cq_id_SHIFT		16
+#define lpfc_mbx_wq_create_cq_id_MASK		0x0000FFFF
+#define lpfc_mbx_wq_create_cq_id_WORD		word0
+			struct dma_address page[LPFC_MAX_WQ_PAGE];
+		} request;
+		struct {
+			uint32_t word0;
+#define lpfc_mbx_wq_create_q_id_SHIFT	0
+#define lpfc_mbx_wq_create_q_id_MASK	0x0000FFFF
+#define lpfc_mbx_wq_create_q_id_WORD	word0
+		} response;
+	} u;
+};
+
+struct lpfc_mbx_wq_destroy {
+	struct mbox_header header;
+	union {
+		struct {
+			uint32_t word0;
+#define lpfc_mbx_wq_destroy_q_id_SHIFT	0
+#define lpfc_mbx_wq_destroy_q_id_MASK	0x0000FFFF
+#define lpfc_mbx_wq_destroy_q_id_WORD	word0
+		} request;
+		struct {
+			uint32_t word0;
+		} response;
+	} u;
+};
+
+#define LPFC_HDR_BUF_SIZE 128
+#define LPFC_DATA_BUF_SIZE 4096
+struct rq_context {
+	uint32_t word0;
+#define lpfc_rq_context_rq_size_SHIFT	16
+#define lpfc_rq_context_rq_size_MASK	0x0000000F
+#define lpfc_rq_context_rq_size_WORD	word0
+#define LPFC_RQ_RING_SIZE_512		9	/* 512 entries */
+#define LPFC_RQ_RING_SIZE_1024		10	/* 1024 entries */
+#define LPFC_RQ_RING_SIZE_2048		11	/* 2048 entries */
+#define LPFC_RQ_RING_SIZE_4096		12	/* 4096 entries */
+	uint32_t reserved1;
+	uint32_t word2;
+#define lpfc_rq_context_cq_id_SHIFT	16
+#define lpfc_rq_context_cq_id_MASK	0x000003FF
+#define lpfc_rq_context_cq_id_WORD	word2
+#define lpfc_rq_context_buf_size_SHIFT	0
+#define lpfc_rq_context_buf_size_MASK	0x0000FFFF
+#define lpfc_rq_context_buf_size_WORD	word2
+	uint32_t reserved3;
+};
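+
+/*
+ * Illustrative sketch: the rq_size field carries log2 of the entry
+ * count, per the LPFC_RQ_RING_SIZE_* encodings above.  Mapping a
+ * requested depth to its encoding (the -1 error convention is an
+ * assumption for illustration):
+ */
+static inline int sketch_rq_size_encode(uint32_t entries)
+{
+	switch (entries) {
+	case  512: return LPFC_RQ_RING_SIZE_512;
+	case 1024: return LPFC_RQ_RING_SIZE_1024;
+	case 2048: return LPFC_RQ_RING_SIZE_2048;
+	case 4096: return LPFC_RQ_RING_SIZE_4096;
+	default:   return -1;	/* unsupported ring depth */
+	}
+}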
+
+struct lpfc_mbx_rq_create {
+	struct mbox_header header;
+	union {
+		struct {
+			uint32_t word0;
+#define lpfc_mbx_rq_create_num_pages_SHIFT	0
+#define lpfc_mbx_rq_create_num_pages_MASK	0x0000FFFF
+#define lpfc_mbx_rq_create_num_pages_WORD	word0
+			struct rq_context context;
+			struct dma_address page[LPFC_MAX_WQ_PAGE];
+		} request;
+		struct {
+			uint32_t word0;
+#define lpfc_mbx_rq_create_q_id_SHIFT	0
+#define lpfc_mbx_rq_create_q_id_MASK	0x0000FFFF
+#define lpfc_mbx_rq_create_q_id_WORD	word0
+		} response;
+	} u;
+};
+
+struct lpfc_mbx_rq_destroy {
+	struct mbox_header header;
+	union {
+		struct {
+			uint32_t word0;
+#define lpfc_mbx_rq_destroy_q_id_SHIFT	0
+#define lpfc_mbx_rq_destroy_q_id_MASK	0x0000FFFF
+#define lpfc_mbx_rq_destroy_q_id_WORD	word0
+		} request;
+		struct {
+			uint32_t word0;
+		} response;
+	} u;
+};
+
+struct mq_context {
+	uint32_t word0;
+#define lpfc_mq_context_cq_id_SHIFT	22
+#define lpfc_mq_context_cq_id_MASK	0x000003FF
+#define lpfc_mq_context_cq_id_WORD	word0
+#define lpfc_mq_context_count_SHIFT	16
+#define lpfc_mq_context_count_MASK	0x0000000F
+#define lpfc_mq_context_count_WORD	word0
+#define LPFC_MQ_CNT_16		0x5
+#define LPFC_MQ_CNT_32		0x6
+#define LPFC_MQ_CNT_64		0x7
+#define LPFC_MQ_CNT_128		0x8
+	uint32_t word1;
+#define lpfc_mq_context_valid_SHIFT	31
+#define lpfc_mq_context_valid_MASK	0x00000001
+#define lpfc_mq_context_valid_WORD	word1
+	uint32_t reserved2;
+	uint32_t reserved3;
+};
+
+struct lpfc_mbx_mq_create {
+	struct mbox_header header;
+	union {
+		struct {
+			uint32_t word0;
+#define lpfc_mbx_mq_create_num_pages_SHIFT	0
+#define lpfc_mbx_mq_create_num_pages_MASK	0x0000FFFF
+#define lpfc_mbx_mq_create_num_pages_WORD	word0
+			struct mq_context context;
+			struct dma_address page[LPFC_MAX_MQ_PAGE];
+		} request;
+		struct {
+			uint32_t word0;
+#define lpfc_mbx_mq_create_q_id_SHIFT	0
+#define lpfc_mbx_mq_create_q_id_MASK	0x0000FFFF
+#define lpfc_mbx_mq_create_q_id_WORD	word0
+		} response;
+	} u;
+};
+
+struct lpfc_mbx_mq_destroy {
+	struct mbox_header header;
+	union {
+		struct {
+			uint32_t word0;
+#define lpfc_mbx_mq_destroy_q_id_SHIFT	0
+#define lpfc_mbx_mq_destroy_q_id_MASK	0x0000FFFF
+#define lpfc_mbx_mq_destroy_q_id_WORD	word0
+		} request;
+		struct {
+			uint32_t word0;
+		} response;
+	} u;
+};
+
+struct lpfc_mbx_post_hdr_tmpl {
+	struct mbox_header header;
+	uint32_t word10;
+#define lpfc_mbx_post_hdr_tmpl_rpi_offset_SHIFT  0
+#define lpfc_mbx_post_hdr_tmpl_rpi_offset_MASK   0x0000FFFF
+#define lpfc_mbx_post_hdr_tmpl_rpi_offset_WORD   word10
+#define lpfc_mbx_post_hdr_tmpl_page_cnt_SHIFT   16
+#define lpfc_mbx_post_hdr_tmpl_page_cnt_MASK    0x0000FFFF
+#define lpfc_mbx_post_hdr_tmpl_page_cnt_WORD    word10
+	uint32_t rpi_paddr_lo;
+	uint32_t rpi_paddr_hi;
+};
+
+struct sli4_sge {	/* SLI-4 */
+	uint32_t addr_hi;
+	uint32_t addr_lo;
+
+	uint32_t word2;
+#define lpfc_sli4_sge_offset_SHIFT	0 /* Offset of buffer - Not used*/
+#define lpfc_sli4_sge_offset_MASK	0x00FFFFFF
+#define lpfc_sli4_sge_offset_WORD	word2
+#define lpfc_sli4_sge_last_SHIFT	31 /* Last SGE in the SGL sets this flag */
+#define lpfc_sli4_sge_last_MASK		0x00000001
+#define lpfc_sli4_sge_last_WORD		word2
+	uint32_t word3;
+#define lpfc_sli4_sge_len_SHIFT		0
+#define lpfc_sli4_sge_len_MASK		0x0001FFFF
+#define lpfc_sli4_sge_len_WORD		word3
+};
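+
+/*
+ * Illustrative sketch: filling one SLI-4 SGE for a DMA segment.  SGEs
+ * are little-endian on the wire, so words built with bf_set() (defined
+ * atop this header) are byte-swapped (cpu_to_le32() from the usual
+ * byteorder headers) before being handed to hardware.
+ */
+static inline void sketch_sge_fill(struct sli4_sge *sge, uint64_t paddr,
+				   uint32_t len, int last)
+{
+	sge->addr_hi = cpu_to_le32((uint32_t)(paddr >> 32));
+	sge->addr_lo = cpu_to_le32((uint32_t)paddr);
+	sge->word2 = 0;
+	bf_set(lpfc_sli4_sge_last, sge, last ? 1 : 0);
+	sge->word2 = cpu_to_le32(sge->word2);
+	sge->word3 = cpu_to_le32(len & lpfc_sli4_sge_len_MASK);
+}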
+
+struct fcf_record {
+	uint32_t max_rcv_size;
+	uint32_t fka_adv_period;
+	uint32_t fip_priority;
+	uint32_t word3;
+#define lpfc_fcf_record_mac_0_SHIFT		0
+#define lpfc_fcf_record_mac_0_MASK		0x000000FF
+#define lpfc_fcf_record_mac_0_WORD		word3
+#define lpfc_fcf_record_mac_1_SHIFT		8
+#define lpfc_fcf_record_mac_1_MASK		0x000000FF
+#define lpfc_fcf_record_mac_1_WORD		word3
+#define lpfc_fcf_record_mac_2_SHIFT		16
+#define lpfc_fcf_record_mac_2_MASK		0x000000FF
+#define lpfc_fcf_record_mac_2_WORD		word3
+#define lpfc_fcf_record_mac_3_SHIFT		24
+#define lpfc_fcf_record_mac_3_MASK		0x000000FF
+#define lpfc_fcf_record_mac_3_WORD		word3
+	uint32_t word4;
+#define lpfc_fcf_record_mac_4_SHIFT		0
+#define lpfc_fcf_record_mac_4_MASK		0x000000FF
+#define lpfc_fcf_record_mac_4_WORD		word4
+#define lpfc_fcf_record_mac_5_SHIFT		8
+#define lpfc_fcf_record_mac_5_MASK		0x000000FF
+#define lpfc_fcf_record_mac_5_WORD		word4
+#define lpfc_fcf_record_fcf_avail_SHIFT		16
+#define lpfc_fcf_record_fcf_avail_MASK		0x000000FF
+#define lpfc_fcf_record_fcf_avail_WORD		word4
+#define lpfc_fcf_record_mac_addr_prov_SHIFT	24
+#define lpfc_fcf_record_mac_addr_prov_MASK	0x000000FF
+#define lpfc_fcf_record_mac_addr_prov_WORD	word4
+#define LPFC_FCF_FPMA		1	/* Fabric Provided MAC Address */
+#define LPFC_FCF_SPMA		2	/* Server Provided MAC Address */
+	uint32_t word5;
+#define lpfc_fcf_record_fab_name_0_SHIFT	0
+#define lpfc_fcf_record_fab_name_0_MASK		0x000000FF
+#define lpfc_fcf_record_fab_name_0_WORD		word5
+#define lpfc_fcf_record_fab_name_1_SHIFT	8
+#define lpfc_fcf_record_fab_name_1_MASK		0x000000FF
+#define lpfc_fcf_record_fab_name_1_WORD		word5
+#define lpfc_fcf_record_fab_name_2_SHIFT	16
+#define lpfc_fcf_record_fab_name_2_MASK		0x000000FF
+#define lpfc_fcf_record_fab_name_2_WORD		word5
+#define lpfc_fcf_record_fab_name_3_SHIFT	24
+#define lpfc_fcf_record_fab_name_3_MASK		0x000000FF
+#define lpfc_fcf_record_fab_name_3_WORD		word5
+	uint32_t word6;
+#define lpfc_fcf_record_fab_name_4_SHIFT	0
+#define lpfc_fcf_record_fab_name_4_MASK		0x000000FF
+#define lpfc_fcf_record_fab_name_4_WORD		word6
+#define lpfc_fcf_record_fab_name_5_SHIFT	8
+#define lpfc_fcf_record_fab_name_5_MASK		0x000000FF
+#define lpfc_fcf_record_fab_name_5_WORD		word6
+#define lpfc_fcf_record_fab_name_6_SHIFT	16
+#define lpfc_fcf_record_fab_name_6_MASK		0x000000FF
+#define lpfc_fcf_record_fab_name_6_WORD		word6
+#define lpfc_fcf_record_fab_name_7_SHIFT	24
+#define lpfc_fcf_record_fab_name_7_MASK		0x000000FF
+#define lpfc_fcf_record_fab_name_7_WORD		word6
+	uint32_t word7;
+#define lpfc_fcf_record_fc_map_0_SHIFT		0
+#define lpfc_fcf_record_fc_map_0_MASK		0x000000FF
+#define lpfc_fcf_record_fc_map_0_WORD		word7
+#define lpfc_fcf_record_fc_map_1_SHIFT		8
+#define lpfc_fcf_record_fc_map_1_MASK		0x000000FF
+#define lpfc_fcf_record_fc_map_1_WORD		word7
+#define lpfc_fcf_record_fc_map_2_SHIFT		16
+#define lpfc_fcf_record_fc_map_2_MASK		0x000000FF
+#define lpfc_fcf_record_fc_map_2_WORD		word7
+#define lpfc_fcf_record_fcf_valid_SHIFT		24
+#define lpfc_fcf_record_fcf_valid_MASK		0x000000FF
+#define lpfc_fcf_record_fcf_valid_WORD		word7
+	uint32_t word8;
+#define lpfc_fcf_record_fcf_index_SHIFT		0
+#define lpfc_fcf_record_fcf_index_MASK		0x0000FFFF
+#define lpfc_fcf_record_fcf_index_WORD		word8
+#define lpfc_fcf_record_fcf_state_SHIFT		16
+#define lpfc_fcf_record_fcf_state_MASK		0x0000FFFF
+#define lpfc_fcf_record_fcf_state_WORD		word8
+	uint8_t vlan_bitmap[512];
+};
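+
+/*
+ * Illustrative sketch: the FCF MAC address is scattered one byte per
+ * field across words 3-4 of the fcf_record; reassembling it with the
+ * bf_get() accessor from the top of this header:
+ */
+static inline void sketch_fcf_record_mac(struct fcf_record *rec,
+					 uint8_t mac[6])
+{
+	mac[0] = bf_get(lpfc_fcf_record_mac_0, rec);
+	mac[1] = bf_get(lpfc_fcf_record_mac_1, rec);
+	mac[2] = bf_get(lpfc_fcf_record_mac_2, rec);
+	mac[3] = bf_get(lpfc_fcf_record_mac_3, rec);
+	mac[4] = bf_get(lpfc_fcf_record_mac_4, rec);
+	mac[5] = bf_get(lpfc_fcf_record_mac_5, rec);
+}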
+
+struct lpfc_mbx_read_fcf_tbl {
+	union lpfc_sli4_cfg_shdr cfg_shdr;
+	union {
+		struct {
+			uint32_t word10;
+#define lpfc_mbx_read_fcf_tbl_indx_SHIFT	0
+#define lpfc_mbx_read_fcf_tbl_indx_MASK		0x0000FFFF
+#define lpfc_mbx_read_fcf_tbl_indx_WORD		word10
+		} request;
+		struct {
+			uint32_t eventag;
+		} response;
+	} u;
+	uint32_t word11;
+#define lpfc_mbx_read_fcf_tbl_nxt_vindx_SHIFT	0
+#define lpfc_mbx_read_fcf_tbl_nxt_vindx_MASK	0x0000FFFF
+#define lpfc_mbx_read_fcf_tbl_nxt_vindx_WORD	word11
+};
+
+struct lpfc_mbx_add_fcf_tbl_entry {
+	union lpfc_sli4_cfg_shdr cfg_shdr;
+	uint32_t word10;
+#define lpfc_mbx_add_fcf_tbl_fcfi_SHIFT        0
+#define lpfc_mbx_add_fcf_tbl_fcfi_MASK         0x0000FFFF
+#define lpfc_mbx_add_fcf_tbl_fcfi_WORD         word10
+	struct lpfc_mbx_sge fcf_sge;
+};
+
+struct lpfc_mbx_del_fcf_tbl_entry {
+	struct mbox_header header;
+	uint32_t word10;
+#define lpfc_mbx_del_fcf_tbl_count_SHIFT	0
+#define lpfc_mbx_del_fcf_tbl_count_MASK		0x0000FFFF
+#define lpfc_mbx_del_fcf_tbl_count_WORD		word10
+#define lpfc_mbx_del_fcf_tbl_index_SHIFT	16
+#define lpfc_mbx_del_fcf_tbl_index_MASK		0x0000FFFF
+#define lpfc_mbx_del_fcf_tbl_index_WORD		word10
+};
+
+/* Status field for embedded SLI_CONFIG mailbox command */
+#define STATUS_SUCCESS					0x0
+#define STATUS_FAILED					0x1
+#define STATUS_ILLEGAL_REQUEST				0x2
+#define STATUS_ILLEGAL_FIELD				0x3
+#define STATUS_INSUFFICIENT_BUFFER			0x4
+#define STATUS_UNAUTHORIZED_REQUEST			0x5
+#define STATUS_FLASHROM_SAVE_FAILED			0x17
+#define STATUS_FLASHROM_RESTORE_FAILED			0x18
+#define STATUS_ICCBINDEX_ALLOC_FAILED			0x1a
+#define STATUS_IOCTLHANDLE_ALLOC_FAILED			0x1b
+#define STATUS_INVALID_PHY_ADDR_FROM_OSM		0x1c
+#define STATUS_INVALID_PHY_ADDR_LEN_FROM_OSM		0x1d
+#define STATUS_ASSERT_FAILED				0x1e
+#define STATUS_INVALID_SESSION				0x1f
+#define STATUS_INVALID_CONNECTION			0x20
+#define STATUS_BTL_PATH_EXCEEDS_OSM_LIMIT		0x21
+#define STATUS_BTL_NO_FREE_SLOT_PATH			0x24
+#define STATUS_BTL_NO_FREE_SLOT_TGTID			0x25
+#define STATUS_OSM_DEVSLOT_NOT_FOUND			0x26
+#define STATUS_FLASHROM_READ_FAILED			0x27
+#define STATUS_POLL_IOCTL_TIMEOUT			0x28
+#define STATUS_ERROR_ACITMAIN				0x2a
+#define STATUS_REBOOT_REQUIRED				0x2c
+#define STATUS_FCF_IN_USE				0x3a
+
+struct lpfc_mbx_sli4_config {
+	struct mbox_header header;
+};
+
+struct lpfc_mbx_init_vfi {
+	uint32_t word1;
+#define lpfc_init_vfi_vr_SHIFT		31
+#define lpfc_init_vfi_vr_MASK		0x00000001
+#define lpfc_init_vfi_vr_WORD		word1
+#define lpfc_init_vfi_vt_SHIFT		30
+#define lpfc_init_vfi_vt_MASK		0x00000001
+#define lpfc_init_vfi_vt_WORD		word1
+#define lpfc_init_vfi_vf_SHIFT		29
+#define lpfc_init_vfi_vf_MASK		0x00000001
+#define lpfc_init_vfi_vf_WORD		word1
+#define lpfc_init_vfi_vfi_SHIFT		0
+#define lpfc_init_vfi_vfi_MASK		0x0000FFFF
+#define lpfc_init_vfi_vfi_WORD		word1
+	uint32_t word2;
+#define lpfc_init_vfi_fcfi_SHIFT	0
+#define lpfc_init_vfi_fcfi_MASK		0x0000FFFF
+#define lpfc_init_vfi_fcfi_WORD		word2
+	uint32_t word3;
+#define lpfc_init_vfi_pri_SHIFT		13
+#define lpfc_init_vfi_pri_MASK		0x00000007
+#define lpfc_init_vfi_pri_WORD		word3
+#define lpfc_init_vfi_vf_id_SHIFT	1
+#define lpfc_init_vfi_vf_id_MASK	0x00000FFF
+#define lpfc_init_vfi_vf_id_WORD	word3
+	uint32_t word4;
+#define lpfc_init_vfi_hop_count_SHIFT	24
+#define lpfc_init_vfi_hop_count_MASK	0x000000FF
+#define lpfc_init_vfi_hop_count_WORD	word4
+};
+
+struct lpfc_mbx_reg_vfi {
+	uint32_t word1;
+#define lpfc_reg_vfi_vp_SHIFT		28
+#define lpfc_reg_vfi_vp_MASK		0x00000001
+#define lpfc_reg_vfi_vp_WORD		word1
+#define lpfc_reg_vfi_vfi_SHIFT		0
+#define lpfc_reg_vfi_vfi_MASK		0x0000FFFF
+#define lpfc_reg_vfi_vfi_WORD		word1
+	uint32_t word2;
+#define lpfc_reg_vfi_vpi_SHIFT		16
+#define lpfc_reg_vfi_vpi_MASK		0x0000FFFF
+#define lpfc_reg_vfi_vpi_WORD		word2
+#define lpfc_reg_vfi_fcfi_SHIFT		0
+#define lpfc_reg_vfi_fcfi_MASK		0x0000FFFF
+#define lpfc_reg_vfi_fcfi_WORD		word2
+	uint32_t word3_rsvd;
+	uint32_t word4_rsvd;
+	struct ulp_bde64 bde;
+	uint32_t word8_rsvd;
+	uint32_t word9_rsvd;
+	uint32_t word10;
+#define lpfc_reg_vfi_nport_id_SHIFT		0
+#define lpfc_reg_vfi_nport_id_MASK		0x00FFFFFF
+#define lpfc_reg_vfi_nport_id_WORD		word10
+};
+
+struct lpfc_mbx_init_vpi {
+	uint32_t word1;
+#define lpfc_init_vpi_vfi_SHIFT		16
+#define lpfc_init_vpi_vfi_MASK		0x0000FFFF
+#define lpfc_init_vpi_vfi_WORD		word1
+#define lpfc_init_vpi_vpi_SHIFT		0
+#define lpfc_init_vpi_vpi_MASK		0x0000FFFF
+#define lpfc_init_vpi_vpi_WORD		word1
+};
+
+struct lpfc_mbx_read_vpi {
+	uint32_t word1_rsvd;
+	uint32_t word2;
+#define lpfc_mbx_read_vpi_vnportid_SHIFT	0
+#define lpfc_mbx_read_vpi_vnportid_MASK		0x00FFFFFF
+#define lpfc_mbx_read_vpi_vnportid_WORD		word2
+	uint32_t word3_rsvd;
+	uint32_t word4;
+#define lpfc_mbx_read_vpi_acq_alpa_SHIFT	0
+#define lpfc_mbx_read_vpi_acq_alpa_MASK		0x000000FF
+#define lpfc_mbx_read_vpi_acq_alpa_WORD		word4
+#define lpfc_mbx_read_vpi_pb_SHIFT		15
+#define lpfc_mbx_read_vpi_pb_MASK		0x00000001
+#define lpfc_mbx_read_vpi_pb_WORD		word4
+#define lpfc_mbx_read_vpi_spec_alpa_SHIFT	16
+#define lpfc_mbx_read_vpi_spec_alpa_MASK	0x000000FF
+#define lpfc_mbx_read_vpi_spec_alpa_WORD	word4
+#define lpfc_mbx_read_vpi_ns_SHIFT		30
+#define lpfc_mbx_read_vpi_ns_MASK		0x00000001
+#define lpfc_mbx_read_vpi_ns_WORD		word4
+#define lpfc_mbx_read_vpi_hl_SHIFT		31
+#define lpfc_mbx_read_vpi_hl_MASK		0x00000001
+#define lpfc_mbx_read_vpi_hl_WORD		word4
+	uint32_t word5_rsvd;
+	uint32_t word6;
+#define lpfc_mbx_read_vpi_vpi_SHIFT		0
+#define lpfc_mbx_read_vpi_vpi_MASK		0x0000FFFF
+#define lpfc_mbx_read_vpi_vpi_WORD		word6
+	uint32_t word7;
+#define lpfc_mbx_read_vpi_mac_0_SHIFT		0
+#define lpfc_mbx_read_vpi_mac_0_MASK		0x000000FF
+#define lpfc_mbx_read_vpi_mac_0_WORD		word7
+#define lpfc_mbx_read_vpi_mac_1_SHIFT		8
+#define lpfc_mbx_read_vpi_mac_1_MASK		0x000000FF
+#define lpfc_mbx_read_vpi_mac_1_WORD		word7
+#define lpfc_mbx_read_vpi_mac_2_SHIFT		16
+#define lpfc_mbx_read_vpi_mac_2_MASK		0x000000FF
+#define lpfc_mbx_read_vpi_mac_2_WORD		word7
+#define lpfc_mbx_read_vpi_mac_3_SHIFT		24
+#define lpfc_mbx_read_vpi_mac_3_MASK		0x000000FF
+#define lpfc_mbx_read_vpi_mac_3_WORD		word7
+	uint32_t word8;
+#define lpfc_mbx_read_vpi_mac_4_SHIFT		0
+#define lpfc_mbx_read_vpi_mac_4_MASK		0x000000FF
+#define lpfc_mbx_read_vpi_mac_4_WORD		word8
+#define lpfc_mbx_read_vpi_mac_5_SHIFT		8
+#define lpfc_mbx_read_vpi_mac_5_MASK		0x000000FF
+#define lpfc_mbx_read_vpi_mac_5_WORD		word8
+#define lpfc_mbx_read_vpi_vlan_tag_SHIFT	16
+#define lpfc_mbx_read_vpi_vlan_tag_MASK		0x00000FFF
+#define lpfc_mbx_read_vpi_vlan_tag_WORD		word8
+#define lpfc_mbx_read_vpi_vv_SHIFT		28
+#define lpfc_mbx_read_vpi_vv_MASK		0x0000001
+#define lpfc_mbx_read_vpi_vv_WORD		word8
+};
+
+struct lpfc_mbx_unreg_vfi {
+	uint32_t word1_rsvd;
+	uint32_t word2;
+#define lpfc_unreg_vfi_vfi_SHIFT	0
+#define lpfc_unreg_vfi_vfi_MASK		0x0000FFFF
+#define lpfc_unreg_vfi_vfi_WORD		word2
+};
+
+struct lpfc_mbx_resume_rpi {
+	uint32_t word1;
+#define lpfc_resume_rpi_rpi_SHIFT	0
+#define lpfc_resume_rpi_rpi_MASK	0x0000FFFF
+#define lpfc_resume_rpi_rpi_WORD	word1
+	uint32_t event_tag;
+	uint32_t word3_rsvd;
+	uint32_t word4_rsvd;
+	uint32_t word5_rsvd;
+	uint32_t word6;
+#define lpfc_resume_rpi_vpi_SHIFT	0
+#define lpfc_resume_rpi_vpi_MASK	0x0000FFFF
+#define lpfc_resume_rpi_vpi_WORD	word6
+#define lpfc_resume_rpi_vfi_SHIFT	16
+#define lpfc_resume_rpi_vfi_MASK	0x0000FFFF
+#define lpfc_resume_rpi_vfi_WORD	word6
+};
+
+#define REG_FCF_INVALID_QID	0xFFFF
+struct lpfc_mbx_reg_fcfi {
+	uint32_t word1;
+#define lpfc_reg_fcfi_info_index_SHIFT	0
+#define lpfc_reg_fcfi_info_index_MASK	0x0000FFFF
+#define lpfc_reg_fcfi_info_index_WORD	word1
+#define lpfc_reg_fcfi_fcfi_SHIFT	16
+#define lpfc_reg_fcfi_fcfi_MASK		0x0000FFFF
+#define lpfc_reg_fcfi_fcfi_WORD		word1
+	uint32_t word2;
+#define lpfc_reg_fcfi_rq_id1_SHIFT	0
+#define lpfc_reg_fcfi_rq_id1_MASK	0x0000FFFF
+#define lpfc_reg_fcfi_rq_id1_WORD	word2
+#define lpfc_reg_fcfi_rq_id0_SHIFT	16
+#define lpfc_reg_fcfi_rq_id0_MASK	0x0000FFFF
+#define lpfc_reg_fcfi_rq_id0_WORD	word2
+	uint32_t word3;
+#define lpfc_reg_fcfi_rq_id3_SHIFT	0
+#define lpfc_reg_fcfi_rq_id3_MASK	0x0000FFFF
+#define lpfc_reg_fcfi_rq_id3_WORD	word3
+#define lpfc_reg_fcfi_rq_id2_SHIFT	16
+#define lpfc_reg_fcfi_rq_id2_MASK	0x0000FFFF
+#define lpfc_reg_fcfi_rq_id2_WORD	word3
+	uint32_t word4;
+#define lpfc_reg_fcfi_type_match0_SHIFT	24
+#define lpfc_reg_fcfi_type_match0_MASK	0x000000FF
+#define lpfc_reg_fcfi_type_match0_WORD	word4
+#define lpfc_reg_fcfi_type_mask0_SHIFT	16
+#define lpfc_reg_fcfi_type_mask0_MASK	0x000000FF
+#define lpfc_reg_fcfi_type_mask0_WORD	word4
+#define lpfc_reg_fcfi_rctl_match0_SHIFT	8
+#define lpfc_reg_fcfi_rctl_match0_MASK	0x000000FF
+#define lpfc_reg_fcfi_rctl_match0_WORD	word4
+#define lpfc_reg_fcfi_rctl_mask0_SHIFT	0
+#define lpfc_reg_fcfi_rctl_mask0_MASK	0x000000FF
+#define lpfc_reg_fcfi_rctl_mask0_WORD	word4
+	uint32_t word5;
+#define lpfc_reg_fcfi_type_match1_SHIFT	24
+#define lpfc_reg_fcfi_type_match1_MASK	0x000000FF
+#define lpfc_reg_fcfi_type_match1_WORD	word5
+#define lpfc_reg_fcfi_type_mask1_SHIFT	16
+#define lpfc_reg_fcfi_type_mask1_MASK	0x000000FF
+#define lpfc_reg_fcfi_type_mask1_WORD	word5
+#define lpfc_reg_fcfi_rctl_match1_SHIFT	8
+#define lpfc_reg_fcfi_rctl_match1_MASK	0x000000FF
+#define lpfc_reg_fcfi_rctl_match1_WORD	word5
+#define lpfc_reg_fcfi_rctl_mask1_SHIFT	0
+#define lpfc_reg_fcfi_rctl_mask1_MASK	0x000000FF
+#define lpfc_reg_fcfi_rctl_mask1_WORD	word5
+	uint32_t word6;
+#define lpfc_reg_fcfi_type_match2_SHIFT	24
+#define lpfc_reg_fcfi_type_match2_MASK	0x000000FF
+#define lpfc_reg_fcfi_type_match2_WORD	word6
+#define lpfc_reg_fcfi_type_mask2_SHIFT	16
+#define lpfc_reg_fcfi_type_mask2_MASK	0x000000FF
+#define lpfc_reg_fcfi_type_mask2_WORD	word6
+#define lpfc_reg_fcfi_rctl_match2_SHIFT	8
+#define lpfc_reg_fcfi_rctl_match2_MASK	0x000000FF
+#define lpfc_reg_fcfi_rctl_match2_WORD	word6
+#define lpfc_reg_fcfi_rctl_mask2_SHIFT	0
+#define lpfc_reg_fcfi_rctl_mask2_MASK	0x000000FF
+#define lpfc_reg_fcfi_rctl_mask2_WORD	word6
+	uint32_t word7;
+#define lpfc_reg_fcfi_type_match3_SHIFT	24
+#define lpfc_reg_fcfi_type_match3_MASK	0x000000FF
+#define lpfc_reg_fcfi_type_match3_WORD	word7
+#define lpfc_reg_fcfi_type_mask3_SHIFT	16
+#define lpfc_reg_fcfi_type_mask3_MASK	0x000000FF
+#define lpfc_reg_fcfi_type_mask3_WORD	word7
+#define lpfc_reg_fcfi_rctl_match3_SHIFT	8
+#define lpfc_reg_fcfi_rctl_match3_MASK	0x000000FF
+#define lpfc_reg_fcfi_rctl_match3_WORD	word7
+#define lpfc_reg_fcfi_rctl_mask3_SHIFT	0
+#define lpfc_reg_fcfi_rctl_mask3_MASK	0x000000FF
+#define lpfc_reg_fcfi_rctl_mask3_WORD	word7
+	uint32_t word8;
+#define lpfc_reg_fcfi_mam_SHIFT		13
+#define lpfc_reg_fcfi_mam_MASK		0x00000003
+#define lpfc_reg_fcfi_mam_WORD		word8
+#define LPFC_MAM_BOTH		0	/* Both SPMA and FPMA */
+#define LPFC_MAM_SPMA		1	/* Server Provided MAC Address */
+#define LPFC_MAM_FPMA		2	/* Fabric Provided MAC Address */
+#define lpfc_reg_fcfi_vv_SHIFT		12
+#define lpfc_reg_fcfi_vv_MASK		0x00000001
+#define lpfc_reg_fcfi_vv_WORD		word8
+#define lpfc_reg_fcfi_vlan_tag_SHIFT	0
+#define lpfc_reg_fcfi_vlan_tag_MASK	0x00000FFF
+#define lpfc_reg_fcfi_vlan_tag_WORD	word8
+};
+
+struct lpfc_mbx_unreg_fcfi {
+	uint32_t word1_rsv;
+	uint32_t word2;
+#define lpfc_unreg_fcfi_SHIFT		0
+#define lpfc_unreg_fcfi_MASK		0x0000FFFF
+#define lpfc_unreg_fcfi_WORD		word2
+};
+
+struct lpfc_mbx_read_rev {
+	uint32_t word1;
+#define lpfc_mbx_rd_rev_sli_lvl_SHIFT		16
+#define lpfc_mbx_rd_rev_sli_lvl_MASK		0x0000000F
+#define lpfc_mbx_rd_rev_sli_lvl_WORD		word1
+#define lpfc_mbx_rd_rev_fcoe_SHIFT		20
+#define lpfc_mbx_rd_rev_fcoe_MASK		0x00000001
+#define lpfc_mbx_rd_rev_fcoe_WORD		word1
+#define lpfc_mbx_rd_rev_vpd_SHIFT		29
+#define lpfc_mbx_rd_rev_vpd_MASK		0x00000001
+#define lpfc_mbx_rd_rev_vpd_WORD		word1
+	uint32_t first_hw_rev;
+	uint32_t second_hw_rev;
+	uint32_t word4_rsvd;
+	uint32_t third_hw_rev;
+	uint32_t word6;
+#define lpfc_mbx_rd_rev_fcph_low_SHIFT		0
+#define lpfc_mbx_rd_rev_fcph_low_MASK		0x000000FF
+#define lpfc_mbx_rd_rev_fcph_low_WORD		word6
+#define lpfc_mbx_rd_rev_fcph_high_SHIFT		8
+#define lpfc_mbx_rd_rev_fcph_high_MASK		0x000000FF
+#define lpfc_mbx_rd_rev_fcph_high_WORD		word6
+#define lpfc_mbx_rd_rev_ftr_lvl_low_SHIFT	16
+#define lpfc_mbx_rd_rev_ftr_lvl_low_MASK	0x000000FF
+#define lpfc_mbx_rd_rev_ftr_lvl_low_WORD	word6
+#define lpfc_mbx_rd_rev_ftr_lvl_high_SHIFT	24
+#define lpfc_mbx_rd_rev_ftr_lvl_high_MASK	0x000000FF
+#define lpfc_mbx_rd_rev_ftr_lvl_high_WORD	word6
+	uint32_t word7_rsvd;
+	uint32_t fw_id_rev;
+	uint8_t  fw_name[16];
+	uint32_t ulp_fw_id_rev;
+	uint8_t  ulp_fw_name[16];
+	uint32_t word18_47_rsvd[30];
+	uint32_t word48;
+#define lpfc_mbx_rd_rev_avail_len_SHIFT		0
+#define lpfc_mbx_rd_rev_avail_len_MASK		0x00FFFFFF
+#define lpfc_mbx_rd_rev_avail_len_WORD		word48
+	uint32_t vpd_paddr_low;
+	uint32_t vpd_paddr_high;
+	uint32_t avail_vpd_len;
+	uint32_t rsvd_52_63[12];
+};
+
+struct lpfc_mbx_read_config {
+	uint32_t word1;
+#define lpfc_mbx_rd_conf_max_bbc_SHIFT		0
+#define lpfc_mbx_rd_conf_max_bbc_MASK		0x000000FF
+#define lpfc_mbx_rd_conf_max_bbc_WORD		word1
+#define lpfc_mbx_rd_conf_init_bbc_SHIFT		8
+#define lpfc_mbx_rd_conf_init_bbc_MASK		0x000000FF
+#define lpfc_mbx_rd_conf_init_bbc_WORD		word1
+	uint32_t word2;
+#define lpfc_mbx_rd_conf_nport_did_SHIFT	0
+#define lpfc_mbx_rd_conf_nport_did_MASK		0x00FFFFFF
+#define lpfc_mbx_rd_conf_nport_did_WORD		word2
+#define lpfc_mbx_rd_conf_topology_SHIFT		24
+#define lpfc_mbx_rd_conf_topology_MASK		0x000000FF
+#define lpfc_mbx_rd_conf_topology_WORD		word2
+	uint32_t word3;
+#define lpfc_mbx_rd_conf_ao_SHIFT		0
+#define lpfc_mbx_rd_conf_ao_MASK		0x00000001
+#define lpfc_mbx_rd_conf_ao_WORD		word3
+#define lpfc_mbx_rd_conf_bb_scn_SHIFT		8
+#define lpfc_mbx_rd_conf_bb_scn_MASK		0x0000000F
+#define lpfc_mbx_rd_conf_bb_scn_WORD		word3
+#define lpfc_mbx_rd_conf_cbb_scn_SHIFT		12
+#define lpfc_mbx_rd_conf_cbb_scn_MASK		0x0000000F
+#define lpfc_mbx_rd_conf_cbb_scn_WORD		word3
+#define lpfc_mbx_rd_conf_mc_SHIFT		29
+#define lpfc_mbx_rd_conf_mc_MASK		0x00000001
+#define lpfc_mbx_rd_conf_mc_WORD		word3
+	uint32_t word4;
+#define lpfc_mbx_rd_conf_e_d_tov_SHIFT		0
+#define lpfc_mbx_rd_conf_e_d_tov_MASK		0x0000FFFF
+#define lpfc_mbx_rd_conf_e_d_tov_WORD		word4
+	uint32_t word5;
+#define lpfc_mbx_rd_conf_lp_tov_SHIFT		0
+#define lpfc_mbx_rd_conf_lp_tov_MASK		0x0000FFFF
+#define lpfc_mbx_rd_conf_lp_tov_WORD		word5
+	uint32_t word6;
+#define lpfc_mbx_rd_conf_r_a_tov_SHIFT		0
+#define lpfc_mbx_rd_conf_r_a_tov_MASK		0x0000FFFF
+#define lpfc_mbx_rd_conf_r_a_tov_WORD		word6
+	uint32_t word7;
+#define lpfc_mbx_rd_conf_r_t_tov_SHIFT		0
+#define lpfc_mbx_rd_conf_r_t_tov_MASK		0x000000FF
+#define lpfc_mbx_rd_conf_r_t_tov_WORD		word7
+	uint32_t word8;
+#define lpfc_mbx_rd_conf_al_tov_SHIFT		0
+#define lpfc_mbx_rd_conf_al_tov_MASK		0x0000000F
+#define lpfc_mbx_rd_conf_al_tov_WORD		word8
+	uint32_t word9;
+#define lpfc_mbx_rd_conf_lmt_SHIFT		0
+#define lpfc_mbx_rd_conf_lmt_MASK		0x0000FFFF
+#define lpfc_mbx_rd_conf_lmt_WORD		word9
+	uint32_t word10;
+#define lpfc_mbx_rd_conf_max_alpa_SHIFT		0
+#define lpfc_mbx_rd_conf_max_alpa_MASK		0x000000FF
+#define lpfc_mbx_rd_conf_max_alpa_WORD		word10
+	uint32_t word11_rsvd;
+	uint32_t word12;
+#define lpfc_mbx_rd_conf_xri_base_SHIFT		0
+#define lpfc_mbx_rd_conf_xri_base_MASK		0x0000FFFF
+#define lpfc_mbx_rd_conf_xri_base_WORD		word12
+#define lpfc_mbx_rd_conf_xri_count_SHIFT	16
+#define lpfc_mbx_rd_conf_xri_count_MASK		0x0000FFFF
+#define lpfc_mbx_rd_conf_xri_count_WORD		word12
+	uint32_t word13;
+#define lpfc_mbx_rd_conf_rpi_base_SHIFT		0
+#define lpfc_mbx_rd_conf_rpi_base_MASK		0x0000FFFF
+#define lpfc_mbx_rd_conf_rpi_base_WORD		word13
+#define lpfc_mbx_rd_conf_rpi_count_SHIFT	16
+#define lpfc_mbx_rd_conf_rpi_count_MASK		0x0000FFFF
+#define lpfc_mbx_rd_conf_rpi_count_WORD		word13
+	uint32_t word14;
+#define lpfc_mbx_rd_conf_vpi_base_SHIFT		0
+#define lpfc_mbx_rd_conf_vpi_base_MASK		0x0000FFFF
+#define lpfc_mbx_rd_conf_vpi_base_WORD		word14
+#define lpfc_mbx_rd_conf_vpi_count_SHIFT	16
+#define lpfc_mbx_rd_conf_vpi_count_MASK		0x0000FFFF
+#define lpfc_mbx_rd_conf_vpi_count_WORD		word14
+	uint32_t word15;
+#define lpfc_mbx_rd_conf_vfi_base_SHIFT         0
+#define lpfc_mbx_rd_conf_vfi_base_MASK          0x0000FFFF
+#define lpfc_mbx_rd_conf_vfi_base_WORD          word15
+#define lpfc_mbx_rd_conf_vfi_count_SHIFT        16
+#define lpfc_mbx_rd_conf_vfi_count_MASK         0x0000FFFF
+#define lpfc_mbx_rd_conf_vfi_count_WORD         word15
+	uint32_t word16;
+#define lpfc_mbx_rd_conf_fcfi_base_SHIFT	0
+#define lpfc_mbx_rd_conf_fcfi_base_MASK		0x0000FFFF
+#define lpfc_mbx_rd_conf_fcfi_base_WORD		word16
+#define lpfc_mbx_rd_conf_fcfi_count_SHIFT	16
+#define lpfc_mbx_rd_conf_fcfi_count_MASK	0x0000FFFF
+#define lpfc_mbx_rd_conf_fcfi_count_WORD	word16
+	uint32_t word17;
+#define lpfc_mbx_rd_conf_rq_count_SHIFT		0
+#define lpfc_mbx_rd_conf_rq_count_MASK		0x0000FFFF
+#define lpfc_mbx_rd_conf_rq_count_WORD		word17
+#define lpfc_mbx_rd_conf_eq_count_SHIFT		16
+#define lpfc_mbx_rd_conf_eq_count_MASK		0x0000FFFF
+#define lpfc_mbx_rd_conf_eq_count_WORD		word17
+	uint32_t word18;
+#define lpfc_mbx_rd_conf_wq_count_SHIFT		0
+#define lpfc_mbx_rd_conf_wq_count_MASK		0x0000FFFF
+#define lpfc_mbx_rd_conf_wq_count_WORD		word18
+#define lpfc_mbx_rd_conf_cq_count_SHIFT		16
+#define lpfc_mbx_rd_conf_cq_count_MASK		0x0000FFFF
+#define lpfc_mbx_rd_conf_cq_count_WORD		word18
+};
+
+struct lpfc_mbx_request_features {
+	uint32_t word1;
+#define lpfc_mbx_rq_ftr_qry_SHIFT		0
+#define lpfc_mbx_rq_ftr_qry_MASK		0x00000001
+#define lpfc_mbx_rq_ftr_qry_WORD		word1
+	uint32_t word2;
+#define lpfc_mbx_rq_ftr_rq_iaab_SHIFT		0
+#define lpfc_mbx_rq_ftr_rq_iaab_MASK		0x00000001
+#define lpfc_mbx_rq_ftr_rq_iaab_WORD		word2
+#define lpfc_mbx_rq_ftr_rq_npiv_SHIFT		1
+#define lpfc_mbx_rq_ftr_rq_npiv_MASK		0x00000001
+#define lpfc_mbx_rq_ftr_rq_npiv_WORD		word2
+#define lpfc_mbx_rq_ftr_rq_dif_SHIFT		2
+#define lpfc_mbx_rq_ftr_rq_dif_MASK		0x00000001
+#define lpfc_mbx_rq_ftr_rq_dif_WORD		word2
+#define lpfc_mbx_rq_ftr_rq_vf_SHIFT		3
+#define lpfc_mbx_rq_ftr_rq_vf_MASK		0x00000001
+#define lpfc_mbx_rq_ftr_rq_vf_WORD		word2
+#define lpfc_mbx_rq_ftr_rq_fcpi_SHIFT		4
+#define lpfc_mbx_rq_ftr_rq_fcpi_MASK		0x00000001
+#define lpfc_mbx_rq_ftr_rq_fcpi_WORD		word2
+#define lpfc_mbx_rq_ftr_rq_fcpt_SHIFT		5
+#define lpfc_mbx_rq_ftr_rq_fcpt_MASK		0x00000001
+#define lpfc_mbx_rq_ftr_rq_fcpt_WORD		word2
+#define lpfc_mbx_rq_ftr_rq_fcpc_SHIFT		6
+#define lpfc_mbx_rq_ftr_rq_fcpc_MASK		0x00000001
+#define lpfc_mbx_rq_ftr_rq_fcpc_WORD		word2
+#define lpfc_mbx_rq_ftr_rq_ifip_SHIFT		7
+#define lpfc_mbx_rq_ftr_rq_ifip_MASK		0x00000001
+#define lpfc_mbx_rq_ftr_rq_ifip_WORD		word2
+	uint32_t word3;
+#define lpfc_mbx_rq_ftr_rsp_iaab_SHIFT		0
+#define lpfc_mbx_rq_ftr_rsp_iaab_MASK		0x00000001
+#define lpfc_mbx_rq_ftr_rsp_iaab_WORD		word3
+#define lpfc_mbx_rq_ftr_rsp_npiv_SHIFT		1
+#define lpfc_mbx_rq_ftr_rsp_npiv_MASK		0x00000001
+#define lpfc_mbx_rq_ftr_rsp_npiv_WORD		word3
+#define lpfc_mbx_rq_ftr_rsp_dif_SHIFT		2
+#define lpfc_mbx_rq_ftr_rsp_dif_MASK		0x00000001
+#define lpfc_mbx_rq_ftr_rsp_dif_WORD		word3
+#define lpfc_mbx_rq_ftr_rsp_vf_SHIFT		3
+#define lpfc_mbx_rq_ftr_rsp_vf_MASK		0x00000001
+#define lpfc_mbx_rq_ftr_rsp_vf_WORD		word3
+#define lpfc_mbx_rq_ftr_rsp_fcpi_SHIFT		4
+#define lpfc_mbx_rq_ftr_rsp_fcpi_MASK		0x00000001
+#define lpfc_mbx_rq_ftr_rsp_fcpi_WORD		word3
+#define lpfc_mbx_rq_ftr_rsp_fcpt_SHIFT		5
+#define lpfc_mbx_rq_ftr_rsp_fcpt_MASK		0x00000001
+#define lpfc_mbx_rq_ftr_rsp_fcpt_WORD		word3
+#define lpfc_mbx_rq_ftr_rsp_fcpc_SHIFT		6
+#define lpfc_mbx_rq_ftr_rsp_fcpc_MASK		0x00000001
+#define lpfc_mbx_rq_ftr_rsp_fcpc_WORD		word3
+#define lpfc_mbx_rq_ftr_rsp_ifip_SHIFT		7
+#define lpfc_mbx_rq_ftr_rsp_ifip_MASK		0x00000001
+#define lpfc_mbx_rq_ftr_rsp_ifip_WORD		word3
+};
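+
+/*
+ * Illustrative sketch: the rq_* bits request features and the rsp_*
+ * bits report what the port actually granted.  Asking for FCP
+ * initiator mode, with bf_set() from the top of this header:
+ */
+static inline void sketch_request_fcpi(struct lpfc_mbx_request_features *f)
+{
+	f->word1 = 0;
+	f->word2 = 0;
+	f->word3 = 0;
+	bf_set(lpfc_mbx_rq_ftr_rq_fcpi, f, 1);
+	/* after completion, bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, f) == 1
+	 * would mean the port granted the feature */
+}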
+
+/* Mailbox Completion Queue Error Messages */
+#define MB_CQE_STATUS_SUCCESS 			0x0
+#define MB_CQE_STATUS_INSUFFICIENT_PRIVILEGES	0x1
+#define MB_CQE_STATUS_INVALID_PARAMETER		0x2
+#define MB_CQE_STATUS_INSUFFICIENT_RESOURCES	0x3
+#define MB_CEQ_STATUS_QUEUE_FLUSHING		0x4
+#define MB_CQE_STATUS_DMA_FAILED		0x5
+
+/* mailbox queue entry structure */
+struct lpfc_mqe {
+	uint32_t word0;
+#define lpfc_mqe_status_SHIFT		16
+#define lpfc_mqe_status_MASK		0x0000FFFF
+#define lpfc_mqe_status_WORD		word0
+#define lpfc_mqe_command_SHIFT		8
+#define lpfc_mqe_command_MASK		0x000000FF
+#define lpfc_mqe_command_WORD		word0
+	union {
+		uint32_t mb_words[LPFC_SLI4_MB_WORD_COUNT - 1];
+		/* sli4 mailbox commands */
+		struct lpfc_mbx_sli4_config sli4_config;
+		struct lpfc_mbx_init_vfi init_vfi;
+		struct lpfc_mbx_reg_vfi reg_vfi;
+		struct lpfc_mbx_reg_vfi unreg_vfi;
+		struct lpfc_mbx_init_vpi init_vpi;
+		struct lpfc_mbx_resume_rpi resume_rpi;
+		struct lpfc_mbx_read_fcf_tbl read_fcf_tbl;
+		struct lpfc_mbx_add_fcf_tbl_entry add_fcf_entry;
+		struct lpfc_mbx_del_fcf_tbl_entry del_fcf_entry;
+		struct lpfc_mbx_reg_fcfi reg_fcfi;
+		struct lpfc_mbx_unreg_fcfi unreg_fcfi;
+		struct lpfc_mbx_mq_create mq_create;
+		struct lpfc_mbx_eq_create eq_create;
+		struct lpfc_mbx_cq_create cq_create;
+		struct lpfc_mbx_wq_create wq_create;
+		struct lpfc_mbx_rq_create rq_create;
+		struct lpfc_mbx_mq_destroy mq_destroy;
+		struct lpfc_mbx_eq_destroy eq_destroy;
+		struct lpfc_mbx_cq_destroy cq_destroy;
+		struct lpfc_mbx_wq_destroy wq_destroy;
+		struct lpfc_mbx_rq_destroy rq_destroy;
+		struct lpfc_mbx_post_sgl_pages post_sgl_pages;
+		struct lpfc_mbx_nembed_cmd nembed_cmd;
+		struct lpfc_mbx_read_rev read_rev;
+		struct lpfc_mbx_read_vpi read_vpi;
+		struct lpfc_mbx_read_config rd_config;
+		struct lpfc_mbx_request_features req_ftrs;
+		struct lpfc_mbx_post_hdr_tmpl hdr_tmpl;
+		struct lpfc_mbx_nop nop;
+	} un;
+};
+
+struct lpfc_mcqe {
+	uint32_t word0;
+#define lpfc_mcqe_status_SHIFT		0
+#define lpfc_mcqe_status_MASK		0x0000FFFF
+#define lpfc_mcqe_status_WORD		word0
+#define lpfc_mcqe_ext_status_SHIFT	16
+#define lpfc_mcqe_ext_status_MASK	0x0000FFFF
+#define lpfc_mcqe_ext_status_WORD	word0
+	uint32_t mcqe_tag0;
+	uint32_t mcqe_tag1;
+	uint32_t trailer;
+#define lpfc_trailer_valid_SHIFT	31
+#define lpfc_trailer_valid_MASK		0x00000001
+#define lpfc_trailer_valid_WORD		trailer
+#define lpfc_trailer_async_SHIFT	30
+#define lpfc_trailer_async_MASK		0x00000001
+#define lpfc_trailer_async_WORD		trailer
+#define lpfc_trailer_hpi_SHIFT		29
+#define lpfc_trailer_hpi_MASK		0x00000001
+#define lpfc_trailer_hpi_WORD		trailer
+#define lpfc_trailer_completed_SHIFT	28
+#define lpfc_trailer_completed_MASK	0x00000001
+#define lpfc_trailer_completed_WORD	trailer
+#define lpfc_trailer_consumed_SHIFT	27
+#define lpfc_trailer_consumed_MASK	0x00000001
+#define lpfc_trailer_consumed_WORD	trailer
+#define lpfc_trailer_type_SHIFT		16
+#define lpfc_trailer_type_MASK		0x000000FF
+#define lpfc_trailer_type_WORD		trailer
+#define lpfc_trailer_code_SHIFT		8
+#define lpfc_trailer_code_MASK		0x000000FF
+#define lpfc_trailer_code_WORD		trailer
+#define LPFC_TRAILER_CODE_LINK	0x1
+#define LPFC_TRAILER_CODE_FCOE	0x2
+#define LPFC_TRAILER_CODE_DCBX	0x3
+};
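+
+/*
+ * Illustrative sketch: routing a mailbox CQE by its trailer, using
+ * bf_get() from the top of this header.  The handler bodies are
+ * placeholders, not driver entry points.
+ */
+static inline void sketch_mcqe_dispatch(struct lpfc_mcqe *mcqe)
+{
+	if (!bf_get(lpfc_trailer_async, mcqe))
+		return;	/* ordinary mailbox completion, not an event */
+
+	switch (bf_get(lpfc_trailer_code, mcqe)) {
+	case LPFC_TRAILER_CODE_LINK:
+		/* payload is a struct lpfc_acqe_link (defined below) */
+		break;
+	case LPFC_TRAILER_CODE_FCOE:
+		/* payload is a struct lpfc_acqe_fcoe (defined below) */
+		break;
+	case LPFC_TRAILER_CODE_DCBX:
+		/* payload is a struct lpfc_acqe_dcbx (defined below) */
+		break;
+	}
+}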
+
+struct lpfc_acqe_link {
+	uint32_t word0;
+#define lpfc_acqe_link_speed_SHIFT		24
+#define lpfc_acqe_link_speed_MASK		0x000000FF
+#define lpfc_acqe_link_speed_WORD		word0
+#define LPFC_ASYNC_LINK_SPEED_ZERO		0x0
+#define LPFC_ASYNC_LINK_SPEED_10MBPS		0x1
+#define LPFC_ASYNC_LINK_SPEED_100MBPS		0x2
+#define LPFC_ASYNC_LINK_SPEED_1GBPS		0x3
+#define LPFC_ASYNC_LINK_SPEED_10GBPS		0x4
+#define lpfc_acqe_link_duplex_SHIFT		16
+#define lpfc_acqe_link_duplex_MASK		0x000000FF
+#define lpfc_acqe_link_duplex_WORD		word0
+#define LPFC_ASYNC_LINK_DUPLEX_NONE		0x0
+#define LPFC_ASYNC_LINK_DUPLEX_HALF		0x1
+#define LPFC_ASYNC_LINK_DUPLEX_FULL		0x2
+#define lpfc_acqe_link_status_SHIFT		8
+#define lpfc_acqe_link_status_MASK		0x000000FF
+#define lpfc_acqe_link_status_WORD		word0
+#define LPFC_ASYNC_LINK_STATUS_DOWN		0x0
+#define LPFC_ASYNC_LINK_STATUS_UP		0x1
+#define LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN	0x2
+#define LPFC_ASYNC_LINK_STATUS_LOGICAL_UP	0x3
+#define lpfc_acqe_link_physical_SHIFT		0
+#define lpfc_acqe_link_physical_MASK		0x000000FF
+#define lpfc_acqe_link_physical_WORD		word0
+#define LPFC_ASYNC_LINK_PORT_A			0x0
+#define LPFC_ASYNC_LINK_PORT_B			0x1
+	uint32_t word1;
+#define lpfc_acqe_link_fault_SHIFT	0
+#define lpfc_acqe_link_fault_MASK	0x000000FF
+#define lpfc_acqe_link_fault_WORD	word1
+#define LPFC_ASYNC_LINK_FAULT_NONE	0x0
+#define LPFC_ASYNC_LINK_FAULT_LOCAL	0x1
+#define LPFC_ASYNC_LINK_FAULT_REMOTE	0x2
+	uint32_t event_tag;
+	uint32_t trailer;
+};
+
+struct lpfc_acqe_fcoe {
+	uint32_t fcf_index;
+	uint32_t word1;
+#define lpfc_acqe_fcoe_fcf_count_SHIFT		0
+#define lpfc_acqe_fcoe_fcf_count_MASK		0x0000FFFF
+#define lpfc_acqe_fcoe_fcf_count_WORD		word1
+#define lpfc_acqe_fcoe_event_type_SHIFT		16
+#define lpfc_acqe_fcoe_event_type_MASK		0x0000FFFF
+#define lpfc_acqe_fcoe_event_type_WORD		word1
+#define LPFC_FCOE_EVENT_TYPE_NEW_FCF		0x1
+#define LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL	0x2
+#define LPFC_FCOE_EVENT_TYPE_FCF_DEAD		0x3
+	uint32_t event_tag;
+	uint32_t trailer;
+};
+
+struct lpfc_acqe_dcbx {
+	uint32_t tlv_ttl;
+	uint32_t reserved;
+	uint32_t event_tag;
+	uint32_t trailer;
+};
+
+/*
+ * Define the bootstrap mailbox (bmbx) region used to communicate
+ * mailbox commands between the host and port.  The region consists
+ * of a 256-byte mailbox payload followed by a 16-byte completion
+ * queue entry.
+ */
+struct lpfc_bmbx_create {
+	struct lpfc_mqe mqe;
+	struct lpfc_mcqe mcqe;
+};
+
+#define SGL_ALIGN_SZ 64
+#define SGL_PAGE_SIZE 4096
+/* align SGL addr on a size boundary - adjust address up */
+#define NO_XRI ((uint16_t)-1)
+struct wqe_common {
+	uint32_t word6;
+#define wqe_xri_SHIFT         0
+#define wqe_xri_MASK          0x0000FFFF
+#define wqe_xri_WORD          word6
+#define wqe_ctxt_tag_SHIFT    16
+#define wqe_ctxt_tag_MASK     0x0000FFFF
+#define wqe_ctxt_tag_WORD     word6
+	uint32_t word7;
+#define wqe_ct_SHIFT          2
+#define wqe_ct_MASK           0x00000003
+#define wqe_ct_WORD           word7
+#define wqe_status_SHIFT      4
+#define wqe_status_MASK       0x0000000f
+#define wqe_status_WORD       word7
+#define wqe_cmnd_SHIFT        8
+#define wqe_cmnd_MASK         0x000000ff
+#define wqe_cmnd_WORD         word7
+#define wqe_class_SHIFT       16
+#define wqe_class_MASK        0x00000007
+#define wqe_class_WORD        word7
+#define wqe_pu_SHIFT          20
+#define wqe_pu_MASK           0x00000003
+#define wqe_pu_WORD           word7
+#define wqe_erp_SHIFT         22
+#define wqe_erp_MASK          0x00000001
+#define wqe_erp_WORD          word7
+#define wqe_lnk_SHIFT         23
+#define wqe_lnk_MASK          0x00000001
+#define wqe_lnk_WORD          word7
+#define wqe_tmo_SHIFT         24
+#define wqe_tmo_MASK          0x000000ff
+#define wqe_tmo_WORD          word7
+	uint32_t abort_tag; /* word 8 in WQE */
+	uint32_t word9;
+#define wqe_reqtag_SHIFT      0
+#define wqe_reqtag_MASK       0x0000FFFF
+#define wqe_reqtag_WORD       word9
+#define wqe_rcvoxid_SHIFT     16
+#define wqe_rcvoxid_MASK       0x0000FFFF
+#define wqe_rcvoxid_WORD       word9
+	uint32_t word10;
+#define wqe_pri_SHIFT         16
+#define wqe_pri_MASK          0x00000007
+#define wqe_pri_WORD          word10
+#define wqe_pv_SHIFT          19
+#define wqe_pv_MASK           0x00000001
+#define wqe_pv_WORD           word10
+#define wqe_xc_SHIFT          21
+#define wqe_xc_MASK           0x00000001
+#define wqe_xc_WORD           word10
+#define wqe_ccpe_SHIFT        23
+#define wqe_ccpe_MASK         0x00000001
+#define wqe_ccpe_WORD         word10
+#define wqe_ccp_SHIFT         24
+#define wqe_ccp_MASK         0x000000ff
+#define wqe_ccp_WORD         word10
+	uint32_t word11;
+#define wqe_cmd_type_SHIFT  0
+#define wqe_cmd_type_MASK   0x0000000f
+#define wqe_cmd_type_WORD   word11
+#define wqe_wqec_SHIFT      7
+#define wqe_wqec_MASK       0x00000001
+#define wqe_wqec_WORD       word11
+#define wqe_cqid_SHIFT      16
+#define wqe_cqid_MASK       0x000003ff
+#define wqe_cqid_WORD       word11
+};
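+
+/*
+ * Illustrative sketch: stamping the fields every WQE type shares.
+ * The WQE is assumed to have been zeroed by the caller; accessors
+ * are the bf_set() helpers from the top of this header.
+ */
+static inline void sketch_wqe_common_init(struct wqe_common *com,
+					  uint16_t xri, uint16_t reqtag,
+					  uint8_t cmnd, uint8_t tmo)
+{
+	bf_set(wqe_xri, com, xri);
+	bf_set(wqe_reqtag, com, reqtag);
+	bf_set(wqe_cmnd, com, cmnd);
+	bf_set(wqe_tmo, com, tmo);
+	bf_set(wqe_cqid, com, LPFC_WQE_CQ_ID_DEFAULT);
+}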
+
+struct wqe_did {
+	uint32_t word5;
+#define wqe_els_did_SHIFT         0
+#define wqe_els_did_MASK          0x00FFFFFF
+#define wqe_els_did_WORD          word5
+#define wqe_xmit_bls_ar_SHIFT         30
+#define wqe_xmit_bls_ar_MASK          0x00000001
+#define wqe_xmit_bls_ar_WORD          word5
+#define wqe_xmit_bls_xo_SHIFT         31
+#define wqe_xmit_bls_xo_MASK          0x00000001
+#define wqe_xmit_bls_xo_WORD          word5
+};
+
+struct els_request64_wqe {
+	struct ulp_bde64 bde;
+	uint32_t payload_len;
+	uint32_t word4;
+#define els_req64_sid_SHIFT         0
+#define els_req64_sid_MASK          0x00FFFFFF
+#define els_req64_sid_WORD          word4
+#define els_req64_sp_SHIFT          24
+#define els_req64_sp_MASK           0x00000001
+#define els_req64_sp_WORD           word4
+#define els_req64_vf_SHIFT          25
+#define els_req64_vf_MASK           0x00000001
+#define els_req64_vf_WORD           word4
+	struct wqe_did	wqe_dest;
+	struct wqe_common wqe_com; /* words 6-11 */
+	uint32_t word12;
+#define els_req64_vfid_SHIFT        1
+#define els_req64_vfid_MASK         0x00000FFF
+#define els_req64_vfid_WORD         word12
+#define els_req64_pri_SHIFT         13
+#define els_req64_pri_MASK          0x00000007
+#define els_req64_pri_WORD          word12
+	uint32_t word13;
+#define els_req64_hopcnt_SHIFT      24
+#define els_req64_hopcnt_MASK       0x000000ff
+#define els_req64_hopcnt_WORD       word13
+	uint32_t reserved[2];
+};
+
+struct xmit_els_rsp64_wqe {
+	struct ulp_bde64 bde;
+	uint32_t rsvd3;
+	uint32_t rsvd4;
+	struct wqe_did	wqe_dest;
+	struct wqe_common wqe_com; /* words 6-11 */
+	uint32_t rsvd_12_15[4];
+};
+
+struct xmit_bls_rsp64_wqe {
+	uint32_t payload0;
+	uint32_t word1;
+#define xmit_bls_rsp64_rxid_SHIFT  0
+#define xmit_bls_rsp64_rxid_MASK   0x0000ffff
+#define xmit_bls_rsp64_rxid_WORD   word1
+#define xmit_bls_rsp64_oxid_SHIFT  16
+#define xmit_bls_rsp64_oxid_MASK   0x0000ffff
+#define xmit_bls_rsp64_oxid_WORD   word1
+	uint32_t word2;
+#define xmit_bls_rsp64_seqcntlo_SHIFT  0
+#define xmit_bls_rsp64_seqcntlo_MASK   0x0000ffff
+#define xmit_bls_rsp64_seqcntlo_WORD   word2
+#define xmit_bls_rsp64_seqcnthi_SHIFT  16
+#define xmit_bls_rsp64_seqcnthi_MASK   0x0000ffff
+#define xmit_bls_rsp64_seqcnthi_WORD   word2
+	uint32_t rsrvd3;
+	uint32_t rsrvd4;
+	struct wqe_did	wqe_dest;
+	struct wqe_common wqe_com; /* words 6-11 */
+	uint32_t rsvd_12_15[4];
+};
+struct wqe_rctl_dfctl {
+	uint32_t word5;
+#define wqe_si_SHIFT 2
+#define wqe_si_MASK  0x00000001
+#define wqe_si_WORD  word5
+#define wqe_la_SHIFT 3
+#define wqe_la_MASK  0x00000001
+#define wqe_la_WORD  word5
+#define wqe_ls_SHIFT 7
+#define wqe_ls_MASK  0x00000001
+#define wqe_ls_WORD  word5
+#define wqe_dfctl_SHIFT 8
+#define wqe_dfctl_MASK  0x000000ff
+#define wqe_dfctl_WORD  word5
+#define wqe_type_SHIFT 16
+#define wqe_type_MASK  0x000000ff
+#define wqe_type_WORD  word5
+#define wqe_rctl_SHIFT 24
+#define wqe_rctl_MASK  0x000000ff
+#define wqe_rctl_WORD  word5
+};
+
+struct xmit_seq64_wqe {
+	struct ulp_bde64 bde;
+	uint32_t payload_offset;
+	uint32_t relative_offset;
+	struct wqe_rctl_dfctl wge_ctl;
+	struct wqe_common wqe_com; /* words 6-11 */
+	/* Note: word10 different REVISIT */
+	uint32_t xmit_len;
+	uint32_t rsvd_12_15[3];
+};
+struct xmit_bcast64_wqe {
+	struct ulp_bde64 bde;
+	uint32_t payload_len;
+	uint32_t rsvd4;
+	struct wqe_rctl_dfctl wge_ctl; /* word 5 */
+	struct wqe_common wqe_com;     /* words 6-11 */
+	uint32_t rsvd_12_15[4];
+};
+
+struct gen_req64_wqe {
+	struct ulp_bde64 bde;
+	uint32_t command_len;
+	uint32_t payload_len;
+	struct wqe_rctl_dfctl wge_ctl; /* word 5 */
+	struct wqe_common wqe_com;     /* words 6-11 */
+	uint32_t rsvd_12_15[4];
+};
+
+struct create_xri_wqe {
+	uint32_t rsrvd[5];           /* words 0-4 */
+	struct wqe_did	wqe_dest;  /* word 5 */
+	struct wqe_common wqe_com; /* words 6-11 */
+	uint32_t rsvd_12_15[4];         /* word 12-15 */
+};
+
+#define T_REQUEST_TAG 3
+#define T_XRI_TAG 1
+
+struct abort_cmd_wqe {
+	uint32_t rsrvd[3];
+	uint32_t word3;
+#define	abort_cmd_ia_SHIFT  0
+#define	abort_cmd_ia_MASK  0x00000001
+#define	abort_cmd_ia_WORD  word3
+#define	abort_cmd_criteria_SHIFT  8
+#define	abort_cmd_criteria_MASK  0x000000ff
+#define	abort_cmd_criteria_WORD  word3
+	uint32_t rsrvd4;
+	uint32_t rsrvd5;
+	struct wqe_common wqe_com;     /* words 6-11 */
+	uint32_t rsvd_12_15[4];         /* word 12-15 */
+};
+
+struct fcp_iwrite64_wqe {
+	struct ulp_bde64 bde;
+	uint32_t payload_len;
+	uint32_t total_xfer_len;
+	uint32_t initial_xfer_len;
+	struct wqe_common wqe_com;     /* words 6-11 */
+	uint32_t rsvd_12_15[4];         /* word 12-15 */
+};
+
+struct fcp_iread64_wqe {
+	struct ulp_bde64 bde;
+	uint32_t payload_len;          /* word 3 */
+	uint32_t total_xfer_len;       /* word 4 */
+	uint32_t rsrvd5;               /* word 5 */
+	struct wqe_common wqe_com;     /* words 6-11 */
+	uint32_t rsvd_12_15[4];         /* word 12-15 */
+};
+
+struct fcp_icmnd64_wqe {
+	struct ulp_bde64 bde;	 /* words 0-2 */
+	uint32_t rsrvd[3];             /* words 3-5 */
+	struct wqe_common wqe_com;     /* words 6-11 */
+	uint32_t rsvd_12_15[4];         /* word 12-15 */
+};
+
+
+union lpfc_wqe {
+	uint32_t words[16];
+	struct lpfc_wqe_generic generic;
+	struct fcp_icmnd64_wqe fcp_icmd;
+	struct fcp_iread64_wqe fcp_iread;
+	struct fcp_iwrite64_wqe fcp_iwrite;
+	struct abort_cmd_wqe abort_cmd;
+	struct create_xri_wqe create_xri;
+	struct xmit_bcast64_wqe xmit_bcast64;
+	struct xmit_seq64_wqe xmit_sequence;
+	struct xmit_bls_rsp64_wqe xmit_bls_rsp;
+	struct xmit_els_rsp64_wqe xmit_els_rsp;
+	struct els_request64_wqe els_req;
+	struct gen_req64_wqe gen_req;
+};
+
+#define FCP_COMMAND 0x0
+#define FCP_COMMAND_DATA_OUT 0x1
+#define ELS_COMMAND_NON_FIP 0xC
+#define ELS_COMMAND_FIP 0xD
+#define OTHER_COMMAND 0x8
+
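
The _SHIFT/_MASK/_WORD triplets above follow the driver's bit-field
accessor convention: a generic macro token-pastes the field name to find
the containing word plus the shift and mask needed to read or write the
field. A minimal sketch of that pattern follows; it mirrors the shape of
the driver's bf_set()/bf_get() accessors in lpfc_hw4.h, but the sketch_*
names are illustrative stand-ins, not driver code:

	#define sketch_bf_set(name, ptr, value) \
		((ptr)->name##_WORD = (((ptr)->name##_WORD & \
			~(name##_MASK << name##_SHIFT)) | \
			(((value) & name##_MASK) << name##_SHIFT)))
	#define sketch_bf_get(name, ptr) \
		(((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)

	/* Example: stamp OX_ID/RX_ID into word1 of a BLS response WQE */
	static inline void sketch_set_bls_ids(struct xmit_bls_rsp64_wqe *wqe,
					      uint16_t oxid, uint16_t rxid)
	{
		sketch_bf_set(xmit_bls_rsp64_oxid, wqe, oxid);
		sketch_bf_set(xmit_bls_rsp64_rxid, wqe, rxid);
	}
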
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 3f06ce2becf..e9e4a1df898 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -34,8 +34,10 @@
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_transport_fc.h>
 
+#include "lpfc_hw4.h"
 #include "lpfc_hw.h"
 #include "lpfc_sli.h"
+#include "lpfc_sli4.h"
 #include "lpfc_nl.h"
 #include "lpfc_disc.h"
 #include "lpfc_scsi.h"
@@ -51,9 +53,23 @@ char *_dump_buf_dif;
 unsigned long _dump_buf_dif_order;
 spinlock_t _dump_buf_lock;
 
-static int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int);
 static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
 static int lpfc_post_rcv_buf(struct lpfc_hba *);
+static int lpfc_sli4_queue_create(struct lpfc_hba *);
+static void lpfc_sli4_queue_destroy(struct lpfc_hba *);
+static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
+static int lpfc_setup_endian_order(struct lpfc_hba *);
+static int lpfc_sli4_read_config(struct lpfc_hba *);
+static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
+static void lpfc_free_sgl_list(struct lpfc_hba *);
+static int lpfc_init_sgl_list(struct lpfc_hba *);
+static int lpfc_init_active_sgl_array(struct lpfc_hba *);
+static void lpfc_free_active_sgl(struct lpfc_hba *);
+static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
+static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
+static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
+static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
+static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);
 
 static struct scsi_transport_template *lpfc_transport_template = NULL;
 static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
@@ -646,6 +662,77 @@ lpfc_hba_down_post_s3(struct lpfc_hba *phba)
 
 	return 0;
 }
+/**
+ * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
+ * @phba: pointer to lpfc HBA data structure.
+ *
+ * This routine will do uninitialization after the HBA is reset when bringing
+ * down the SLI Layer.
+ *
+ * Return codes
+ *   0 - success.
+ *   Any other value - error.
+ **/
+static int
+lpfc_hba_down_post_s4(struct lpfc_hba *phba)
+{
+	struct lpfc_scsi_buf *psb, *psb_next;
+	LIST_HEAD(aborts);
+	int ret;
+	unsigned long iflag = 0;
+	ret = lpfc_hba_down_post_s3(phba);
+	if (ret)
+		return ret;
+	/* At this point in time the HBA is either reset or DOA. Either
+	 * way, nothing should be on lpfc_abts_els_sgl_list; it needs to be
+	 * on the lpfc_sgl_list so that it can either be freed if the
+	 * driver is unloading or reposted if the driver is restarting
+	 * the port.
+	 */
+	spin_lock_irq(&phba->hbalock);  /* required for lpfc_sgl_list and */
+					/* scsi_buf_list */
+	/* abts_sgl_list_lock required because worker thread uses this
+	 * list.
+	 */
+	spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
+	list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
+			&phba->sli4_hba.lpfc_sgl_list);
+	spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
+	/* abts_scsi_buf_list_lock required because worker thread uses this
+	 * list.
+	 */
+	spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
+	list_splice_init(&phba->sli4_hba.lpfc_abts_scsi_buf_list,
+			&aborts);
+	spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
+	spin_unlock_irq(&phba->hbalock);
+
+	list_for_each_entry_safe(psb, psb_next, &aborts, list) {
+		psb->pCmd = NULL;
+		psb->status = IOSTAT_SUCCESS;
+	}
+	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
+	list_splice(&aborts, &phba->lpfc_scsi_buf_list);
+	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
+	return 0;
+}
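
The routine above relies on a splice-under-nested-locks idiom: take the
broad hbalock, take the narrow per-list lock, move the whole shared list
onto a private head with list_splice_init(), drop both locks, then walk
the now-private entries lock-free. A generic, self-contained sketch of
the pattern with hypothetical names (the lock pairing mirrors hbalock
plus the per-list abts locks used above):

	#include <linux/list.h>
	#include <linux/spinlock.h>

	struct item { struct list_head list; };

	static void drain_shared_list(spinlock_t *outer, spinlock_t *inner,
				      struct list_head *shared)
	{
		struct item *pos, *next;
		LIST_HEAD(local);

		spin_lock_irq(outer);		/* e.g. phba->hbalock */
		spin_lock(inner);		/* e.g. abts_scsi_buf_list_lock */
		list_splice_init(shared, &local); /* 'shared' is left empty */
		spin_unlock(inner);
		spin_unlock_irq(outer);

		/* entries on 'local' are private now; walk without any lock */
		list_for_each_entry_safe(pos, next, &local, list)
			list_del(&pos->list);	/* fix up / requeue here */
	}
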
+
+/**
+ * lpfc_hba_down_post - Wrapper func for hba down post routine
+ * @phba: pointer to lpfc HBA data structure.
+ *
+ * This routine wraps the actual SLI3 or SLI4 routine for performing
+ * uninitialization after the HBA is reset when bringing down the SLI Layer.
+ *
+ * Return codes
+ *   0 - success.
+ *   Any other value - error.
+ **/
+int
+lpfc_hba_down_post(struct lpfc_hba *phba)
+{
+	return (*phba->lpfc_hba_down_post)(phba);
+}
 
 /**
  * lpfc_hb_timeout - The HBA-timer timeout handler
@@ -852,6 +939,25 @@ lpfc_offline_eratt(struct lpfc_hba *phba)
 	return;
 }
 
+/**
+ * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is called to bring a SLI4 HBA offline when HBA hardware error
+ * other than Port Error 6 has been detected.
+ **/
+static void
+lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
+{
+	lpfc_offline_prep(phba);
+	lpfc_offline(phba);
+	lpfc_sli4_brdreset(phba);
+	lpfc_hba_down_post(phba);
+	lpfc_sli4_post_status_check(phba);
+	lpfc_unblock_mgmt_io(phba);
+	phba->link_state = LPFC_HBA_ERROR;
+}
+
 /**
  * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
  * @phba: pointer to lpfc hba data structure.
@@ -1056,6 +1162,65 @@ lpfc_handle_eratt_s3(struct lpfc_hba *phba)
 	return;
 }
 
+/**
+ * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to handle the SLI4 HBA hardware error attention
+ * conditions.
+ **/
+static void
+lpfc_handle_eratt_s4(struct lpfc_hba *phba)
+{
+	struct lpfc_vport *vport = phba->pport;
+	uint32_t event_data;
+	struct Scsi_Host *shost;
+
+	/* If the pci channel is offline, ignore possible errors, since
+	 * we cannot communicate with the pci card anyway.
+	 */
+	if (pci_channel_offline(phba->pcidev))
+		return;
+	/* If resets are disabled then leave the HBA alone and return */
+	if (!phba->cfg_enable_hba_reset)
+		return;
+
+	/* Send an internal error event to mgmt application */
+	lpfc_board_errevt_to_mgmt(phba);
+
+	/* For now, the actual action for SLI4 device handling is not
+	 * yet specified, so just treat it as an adapter hardware failure
+	 */
+	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"0143 SLI4 Adapter Hardware Error Data: x%x x%x\n",
+			phba->work_status[0], phba->work_status[1]);
+
+	event_data = FC_REG_DUMP_EVENT;
+	shost = lpfc_shost_from_vport(vport);
+	fc_host_post_vendor_event(shost, fc_get_event_number(),
+				  sizeof(event_data), (char *) &event_data,
+				  SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
+
+	lpfc_sli4_offline_eratt(phba);
+}
+
+/**
+ * lpfc_handle_eratt - Wrapper func for handling hba error attention
+ * @phba: pointer to lpfc HBA data structure.
+ *
+ * This routine wraps the actual SLI3 or SLI4 hba error attention handling
+ * routine from the API jump table function pointer from the lpfc_hba struct.
+ **/
+void
+lpfc_handle_eratt(struct lpfc_hba *phba)
+{
+	(*phba->lpfc_handle_eratt)(phba);
+}
+
 /**
  * lpfc_handle_latt - The HBA link event handler
  * @phba: pointer to lpfc hba data structure.
@@ -1312,6 +1477,7 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
 	uint16_t dev_id = phba->pcidev->device;
 	int max_speed;
 	int GE = 0;
+	int oneConnect = 0; /* default is not a oneConnect */
 	struct {
 		char * name;
 		int    max_speed;
@@ -1457,6 +1623,14 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
 	case PCI_DEVICE_ID_PROTEUS_S:
 		m = (typeof(m)) {"LPemv12002-S", max_speed, "PCIe IOV"};
 		break;
+	case PCI_DEVICE_ID_TIGERSHARK:
+		oneConnect = 1;
+		m = (typeof(m)) {"OCe10100-F", max_speed, "PCIe"};
+		break;
+	case PCI_DEVICE_ID_TIGERSHARK_S:
+		oneConnect = 1;
+		m = (typeof(m)) {"OCe10100-F-S", max_speed, "PCIe"};
+		break;
 	default:
 		m = (typeof(m)){ NULL };
 		break;
@@ -1464,13 +1638,24 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
 
 	if (mdp && mdp[0] == '\0')
 		snprintf(mdp, 79,"%s", m.name);
-	if (descp && descp[0] == '\0')
-		snprintf(descp, 255,
-			"Emulex %s %d%s %s %s",
-			m.name, m.max_speed,
-			(GE) ? "GE" : "Gb",
-			m.bus,
-			(GE) ? "FCoE Adapter" : "Fibre Channel Adapter");
+	/* oneConnect HBAs require special processing; they are all
+	 * initiators and we put the port number on the end
+	 */
+	if (descp && descp[0] == '\0') {
+		if (oneConnect)
+			snprintf(descp, 255,
+				"Emulex OneConnect %s, FCoE Initiator, Port %s",
+				m.name,
+				phba->Port);
+		else
+			snprintf(descp, 255,
+				"Emulex %s %d%s %s %s",
+				m.name, m.max_speed,
+				(GE) ? "GE" : "Gb",
+				m.bus,
+				(GE) ? "FCoE Adapter" :
+					"Fibre Channel Adapter");
+	}
 }
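
For illustration, the two branches above produce description strings of
the following shape; the model names and port value here are hypothetical
examples, not output captured from hardware:

	/*
	 *   OneConnect: "Emulex OneConnect OCe10100-F, FCoE Initiator, Port 0"
	 *   FC HBA:     "Emulex LPe12000 8Gb PCIe Fibre Channel Adapter"
	 */
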
 
 /**
@@ -1911,14 +2096,21 @@ lpfc_online(struct lpfc_hba *phba)
 		return 1;
 	}
 
-	if (lpfc_sli_hba_setup(phba)) {	/* Initialize the HBA */
-		lpfc_unblock_mgmt_io(phba);
-		return 1;
+	if (phba->sli_rev == LPFC_SLI_REV4) {
+		if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */
+			lpfc_unblock_mgmt_io(phba);
+			return 1;
+		}
+	} else {
+		if (lpfc_sli_hba_setup(phba)) {	/* Initialize SLI2/SLI3 HBA */
+			lpfc_unblock_mgmt_io(phba);
+			return 1;
+		}
 	}
 
 	vports = lpfc_create_vport_work_array(phba);
 	if (vports != NULL)
-		for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
+		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
 			struct Scsi_Host *shost;
 			shost = lpfc_shost_from_vport(vports[i]);
 			spin_lock_irq(shost->host_lock);
@@ -1980,11 +2172,12 @@ lpfc_offline_prep(struct lpfc_hba * phba)
 	/* Issue an unreg_login to all nodes on all vports */
 	vports = lpfc_create_vport_work_array(phba);
 	if (vports != NULL) {
-		for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
+		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
 			struct Scsi_Host *shost;
 
 			if (vports[i]->load_flag & FC_UNLOADING)
 				continue;
+			vports[i]->vfi_state &= ~LPFC_VFI_REGISTERED;
 			shost =	lpfc_shost_from_vport(vports[i]);
 			list_for_each_entry_safe(ndlp, next_ndlp,
 						 &vports[i]->fc_nodes,
@@ -2029,11 +2222,11 @@ lpfc_offline(struct lpfc_hba *phba)
 	if (phba->pport->fc_flag & FC_OFFLINE_MODE)
 		return;
 
-	/* stop all timers associated with this hba */
-	lpfc_stop_phba_timers(phba);
+	/* stop port and all timers associated with this hba */
+	lpfc_stop_port(phba);
 	vports = lpfc_create_vport_work_array(phba);
 	if (vports != NULL)
-		for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++)
+		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
 			lpfc_stop_vport_timers(vports[i]);
 	lpfc_destroy_vport_work_array(phba, vports);
 	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
@@ -2046,7 +2239,7 @@ lpfc_offline(struct lpfc_hba *phba)
 	spin_unlock_irq(&phba->hbalock);
 	vports = lpfc_create_vport_work_array(phba);
 	if (vports != NULL)
-		for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
+		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
 			shost = lpfc_shost_from_vport(vports[i]);
 			spin_lock_irq(shost->host_lock);
 			vports[i]->work_port_events = 0;
@@ -2139,6 +2332,10 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
 	shost->max_lun = vport->cfg_max_luns;
 	shost->this_id = -1;
 	shost->max_cmd_len = 16;
+	if (phba->sli_rev == LPFC_SLI_REV4) {
+		shost->dma_boundary = LPFC_SLI4_MAX_SEGMENT_SIZE;
+		shost->sg_tablesize = phba->cfg_sg_seg_cnt;
+	}
 
 	/*
 	 * Set initial can_queue value since 0 is no longer supported and
@@ -2156,6 +2353,7 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
 
 	/* Initialize all internally managed lists. */
 	INIT_LIST_HEAD(&vport->fc_nodes);
+	INIT_LIST_HEAD(&vport->rcv_buffer_list);
 	spin_lock_init(&vport->work_port_lock);
 
 	init_timer(&vport->fc_disctmo);
@@ -2347,192 +2545,501 @@ void lpfc_host_attrib_init(struct Scsi_Host *shost)
 }
 
 /**
- * lpfc_enable_msix - Enable MSI-X interrupt mode
+ * lpfc_stop_port_s3 - Stop SLI3 device port
  * @phba: pointer to lpfc hba data structure.
  *
- * This routine is invoked to enable the MSI-X interrupt vectors. The kernel
- * function pci_enable_msix() is called to enable the MSI-X vectors. Note that
- * pci_enable_msix(), once invoked, enables either all or nothing, depending
- * on the current availability of PCI vector resources. The device driver is
- * responsible for calling the individual request_irq() to register each MSI-X
- * vector with a interrupt handler, which is done in this function. Note that
- * later when device is unloading, the driver should always call free_irq()
- * on all MSI-X vectors it has done request_irq() on before calling
- * pci_disable_msix(). Failure to do so results in a BUG_ON() and a device
- * will be left with MSI-X enabled and leaks its vectors.
- *
- * Return codes
- *   0 - sucessful
- *   other values - error
+ * This routine is invoked to stop an SLI3 device port; it stops the device
+ * from generating interrupts and stops the device driver's timers for the
+ * device.
  **/
-static int
-lpfc_enable_msix(struct lpfc_hba *phba)
+static void
+lpfc_stop_port_s3(struct lpfc_hba *phba)
 {
-	int rc, i;
-	LPFC_MBOXQ_t *pmb;
+	/* Clear all interrupt enable conditions */
+	writel(0, phba->HCregaddr);
+	readl(phba->HCregaddr); /* flush */
+	/* Clear all pending interrupts */
+	writel(0xffffffff, phba->HAregaddr);
+	readl(phba->HAregaddr); /* flush */
 
-	/* Set up MSI-X multi-message vectors */
-	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
-		phba->msix_entries[i].entry = i;
+	/* Reset some HBA SLI setup states */
+	lpfc_stop_hba_timers(phba);
+	phba->pport->work_port_events = 0;
+}
 
-	/* Configure MSI-X capability structure */
-	rc = pci_enable_msix(phba->pcidev, phba->msix_entries,
-				ARRAY_SIZE(phba->msix_entries));
-	if (rc) {
-		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-				"0420 PCI enable MSI-X failed (%d)\n", rc);
-		goto msi_fail_out;
-	} else
-		for (i = 0; i < LPFC_MSIX_VECTORS; i++)
-			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-					"0477 MSI-X entry[%d]: vector=x%x "
-					"message=%d\n", i,
-					phba->msix_entries[i].vector,
-					phba->msix_entries[i].entry);
-	/*
-	 * Assign MSI-X vectors to interrupt handlers
-	 */
+/**
+ * lpfc_stop_port_s4 - Stop SLI4 device port
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to stop an SLI4 device port; it stops the device
+ * from generating interrupts and stops the device driver's timers for the
+ * device.
+ **/
+static void
+lpfc_stop_port_s4(struct lpfc_hba *phba)
+{
+	/* Reset some HBA SLI4 setup states */
+	lpfc_stop_hba_timers(phba);
+	phba->pport->work_port_events = 0;
+	phba->sli4_hba.intr_enable = 0;
+	/* Hard clear it for now, shall have more graceful way to wait later */
+	phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+}
 
-	/* vector-0 is associated to slow-path handler */
-	rc = request_irq(phba->msix_entries[0].vector, &lpfc_sp_intr_handler,
-			 IRQF_SHARED, LPFC_SP_DRIVER_HANDLER_NAME, phba);
-	if (rc) {
-		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
-				"0421 MSI-X slow-path request_irq failed "
-				"(%d)\n", rc);
-		goto msi_fail_out;
-	}
+/**
+ * lpfc_stop_port - Wrapper function for stopping hba port
+ * @phba: Pointer to HBA context object.
+ *
+ * This routine wraps the actual SLI3 or SLI4 hba stop port routine from
+ * the API jump table function pointer from the lpfc_hba struct.
+ **/
+void
+lpfc_stop_port(struct lpfc_hba *phba)
+{
+	phba->lpfc_stop_port(phba);
+}
 
-	/* vector-1 is associated to fast-path handler */
-	rc = request_irq(phba->msix_entries[1].vector, &lpfc_fp_intr_handler,
-			 IRQF_SHARED, LPFC_FP_DRIVER_HANDLER_NAME, phba);
+/**
+ * lpfc_sli_remove_dflt_fcf - Remove the driver default fcf record from the port.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to remove the driver default fcf record from
+ * the port.  This routine currently acts on FCF Index 0.
+ *
+ **/
+void
+lpfc_sli_remove_dflt_fcf(struct lpfc_hba *phba)
+{
+	int rc = 0;
+	LPFC_MBOXQ_t *mboxq;
+	struct lpfc_mbx_del_fcf_tbl_entry *del_fcf_record;
+	uint32_t mbox_tmo, req_len;
+	uint32_t shdr_status, shdr_add_status;
 
-	if (rc) {
-		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
-				"0429 MSI-X fast-path request_irq failed "
-				"(%d)\n", rc);
-		goto irq_fail_out;
+	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mboxq) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"2020 Failed to allocate mbox for DEL_FCF cmd\n");
+		return;
 	}
 
+	req_len = sizeof(struct lpfc_mbx_del_fcf_tbl_entry) -
+		  sizeof(struct lpfc_sli4_cfg_mhdr);
+	rc = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
+			      LPFC_MBOX_OPCODE_FCOE_DELETE_FCF,
+			      req_len, LPFC_SLI4_MBX_EMBED);
 	/*
-	 * Configure HBA MSI-X attention conditions to messages
+	 * In phase 1, there is a single FCF index, 0.  In phase 2, the driver
+	 * supports multiple FCF indices.
 	 */
-	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	del_fcf_record = &mboxq->u.mqe.un.del_fcf_entry;
+	bf_set(lpfc_mbx_del_fcf_tbl_count, del_fcf_record, 1);
+	bf_set(lpfc_mbx_del_fcf_tbl_index, del_fcf_record,
+	       phba->fcf.fcf_indx);
 
-	if (!pmb) {
-		rc = -ENOMEM;
-		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-				"0474 Unable to allocate memory for issuing "
-				"MBOX_CONFIG_MSI command\n");
-		goto mem_fail_out;
+	if (!phba->sli4_hba.intr_enable)
+		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+	else {
+		mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
+		rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
 	}
-	rc = lpfc_config_msi(phba, pmb);
-	if (rc)
-		goto mbx_fail_out;
-	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
-	if (rc != MBX_SUCCESS) {
-		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
-				"0351 Config MSI mailbox command failed, "
-				"mbxCmd x%x, mbxStatus x%x\n",
-				pmb->mb.mbxCommand, pmb->mb.mbxStatus);
-		goto mbx_fail_out;
+	/* The IOCTL status is embedded in the mailbox subheader. */
+	shdr_status = bf_get(lpfc_mbox_hdr_status,
+			     &del_fcf_record->header.cfg_shdr.response);
+	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
+				 &del_fcf_record->header.cfg_shdr.response);
+	if (shdr_status || shdr_add_status || rc != MBX_SUCCESS) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"2516 DEL FCF of default FCF Index failed "
+				"mbx status x%x, status x%x add_status x%x\n",
+				rc, shdr_status, shdr_add_status);
 	}
-
-	/* Free memory allocated for mailbox command */
-	mempool_free(pmb, phba->mbox_mem_pool);
-	return rc;
-
-mbx_fail_out:
-	/* Free memory allocated for mailbox command */
-	mempool_free(pmb, phba->mbox_mem_pool);
-
-mem_fail_out:
-	/* free the irq already requested */
-	free_irq(phba->msix_entries[1].vector, phba);
-
-irq_fail_out:
-	/* free the irq already requested */
-	free_irq(phba->msix_entries[0].vector, phba);
-
-msi_fail_out:
-	/* Unconfigure MSI-X capability structure */
-	pci_disable_msix(phba->pcidev);
-	return rc;
+	if (rc != MBX_TIMEOUT)
+		mempool_free(mboxq, phba->mbox_mem_pool);
 }
 
 /**
- * lpfc_disable_msix - Disable MSI-X interrupt mode
+ * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
  * @phba: pointer to lpfc hba data structure.
+ * @acqe_link: pointer to the async link completion queue entry.
  *
- * This routine is invoked to release the MSI-X vectors and then disable the
- * MSI-X interrupt mode.
+ * This routine is to parse the SLI4 link-attention link fault code and
+ * translate it into the base driver's read link attention mailbox command
+ * status.
+ *
+ * Return: Link-attention status in terms of base driver's coding.
  **/
-static void
-lpfc_disable_msix(struct lpfc_hba *phba)
+static uint16_t
+lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
+			   struct lpfc_acqe_link *acqe_link)
 {
-	int i;
+	uint16_t latt_fault;
 
-	/* Free up MSI-X multi-message vectors */
-	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
-		free_irq(phba->msix_entries[i].vector, phba);
-	/* Disable MSI-X */
-	pci_disable_msix(phba->pcidev);
+	switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
+	case LPFC_ASYNC_LINK_FAULT_NONE:
+	case LPFC_ASYNC_LINK_FAULT_LOCAL:
+	case LPFC_ASYNC_LINK_FAULT_REMOTE:
+		latt_fault = 0;
+		break;
+	default:
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0398 Invalid link fault code: x%x\n",
+				bf_get(lpfc_acqe_link_fault, acqe_link));
+		latt_fault = MBXERR_ERROR;
+		break;
+	}
+	return latt_fault;
 }
 
 /**
- * lpfc_enable_msi - Enable MSI interrupt mode
+ * lpfc_sli4_parse_latt_type - Parse sli4 link attention type
  * @phba: pointer to lpfc hba data structure.
+ * @acqe_link: pointer to the async link completion queue entry.
  *
- * This routine is invoked to enable the MSI interrupt mode. The kernel
- * function pci_enable_msi() is called to enable the MSI vector. The
- * device driver is responsible for calling the request_irq() to register
- * MSI vector with a interrupt the handler, which is done in this function.
+ * This routine is to parse the SLI4 link attention type and translate it
+ * into the base driver's link attention type coding.
  *
- * Return codes
- * 	0 - sucessful
- * 	other values - error
- */
-static int
-lpfc_enable_msi(struct lpfc_hba *phba)
+ * Return: Link attention type in terms of base driver's coding.
+ **/
+static uint8_t
+lpfc_sli4_parse_latt_type(struct lpfc_hba *phba,
+			  struct lpfc_acqe_link *acqe_link)
 {
-	int rc;
-
-	rc = pci_enable_msi(phba->pcidev);
-	if (!rc)
-		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-				"0462 PCI enable MSI mode success.\n");
-	else {
-		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-				"0471 PCI enable MSI mode failed (%d)\n", rc);
-		return rc;
-	}
+	uint8_t att_type;
 
-	rc = request_irq(phba->pcidev->irq, lpfc_intr_handler,
-			 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
-	if (rc) {
-		pci_disable_msi(phba->pcidev);
-		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
-				"0478 MSI request_irq failed (%d)\n", rc);
+	switch (bf_get(lpfc_acqe_link_status, acqe_link)) {
+	case LPFC_ASYNC_LINK_STATUS_DOWN:
+	case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN:
+		att_type = AT_LINK_DOWN;
+		break;
+	case LPFC_ASYNC_LINK_STATUS_UP:
+		/* Ignore physical link up events - wait for logical link up */
+		att_type = AT_RESERVED;
+		break;
+	case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP:
+		att_type = AT_LINK_UP;
+		break;
+	default:
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0399 Invalid link attention type: x%x\n",
+				bf_get(lpfc_acqe_link_status, acqe_link));
+		att_type = AT_RESERVED;
+		break;
 	}
-	return rc;
+	return att_type;
 }
 
 /**
- * lpfc_disable_msi - Disable MSI interrupt mode
+ * lpfc_sli4_parse_latt_link_speed - Parse sli4 link-attention link speed
  * @phba: pointer to lpfc hba data structure.
+ * @acqe_link: pointer to the async link completion queue entry.
  *
- * This routine is invoked to disable the MSI interrupt mode. The driver
- * calls free_irq() on MSI vector it has done request_irq() on before
- * calling pci_disable_msi(). Failure to do so results in a BUG_ON() and
- * a device will be left with MSI enabled and leaks its vector.
- */
-
-static void
-lpfc_disable_msi(struct lpfc_hba *phba)
+ * This routine is to parse the SLI4 link-attention link speed and translate
+ * it into the base driver's link-attention link speed coding.
+ *
+ * Return: Link-attention link speed in terms of base driver's coding.
+ **/
+static uint8_t
+lpfc_sli4_parse_latt_link_speed(struct lpfc_hba *phba,
+				struct lpfc_acqe_link *acqe_link)
 {
-	free_irq(phba->pcidev->irq, phba);
-	pci_disable_msi(phba->pcidev);
+	uint8_t link_speed;
+
+	switch (bf_get(lpfc_acqe_link_speed, acqe_link)) {
+	case LPFC_ASYNC_LINK_SPEED_ZERO:
+		link_speed = LA_UNKNW_LINK;
+		break;
+	case LPFC_ASYNC_LINK_SPEED_10MBPS:
+		link_speed = LA_UNKNW_LINK;
+		break;
+	case LPFC_ASYNC_LINK_SPEED_100MBPS:
+		link_speed = LA_UNKNW_LINK;
+		break;
+	case LPFC_ASYNC_LINK_SPEED_1GBPS:
+		link_speed = LA_1GHZ_LINK;
+		break;
+	case LPFC_ASYNC_LINK_SPEED_10GBPS:
+		link_speed = LA_10GHZ_LINK;
+		break;
+	default:
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0483 Invalid link-attention link speed: x%x\n",
+				bf_get(lpfc_acqe_link_speed, acqe_link));
+		link_speed = LA_UNKNW_LINK;
+		break;
+	}
+	return link_speed;
+}
+
+/**
+ * lpfc_sli4_async_link_evt - Process the asynchronous link event
+ * @phba: pointer to lpfc hba data structure.
+ * @acqe_link: pointer to the async link completion queue entry.
+ *
+ * This routine is to handle the SLI4 asynchronous link event.
+ **/
+static void
+lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
+			 struct lpfc_acqe_link *acqe_link)
+{
+	struct lpfc_dmabuf *mp;
+	LPFC_MBOXQ_t *pmb;
+	MAILBOX_t *mb;
+	READ_LA_VAR *la;
+	uint8_t att_type;
+
+	att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
+	if (att_type != AT_LINK_DOWN && att_type != AT_LINK_UP)
+		return;
+	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!pmb) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"0395 The mboxq allocation failed\n");
+		return;
+	}
+	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+	if (!mp) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"0396 The lpfc_dmabuf allocation failed\n");
+		goto out_free_pmb;
+	}
+	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
+	if (!mp->virt) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"0397 The mbuf allocation failed\n");
+		goto out_free_dmabuf;
+	}
+
+	/* Cleanup any outstanding ELS commands */
+	lpfc_els_flush_all_cmd(phba);
+
+	/* Block ELS IOCBs until we have done process link event */
+	phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
+
+	/* Update link event statistics */
+	phba->sli.slistat.link_event++;
+
+	/* Create pseudo lpfc_handle_latt mailbox command from link ACQE */
+	lpfc_read_la(phba, pmb, mp);
+	pmb->vport = phba->pport;
+
+	/* Parse and translate status field */
+	mb = &pmb->u.mb;
+	mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, acqe_link);
+
+	/* Parse and translate link attention fields */
+	la = (READ_LA_VAR *) &pmb->u.mb.un.varReadLA;
+	la->eventTag = acqe_link->event_tag;
+	la->attType = att_type;
+	la->UlnkSpeed = lpfc_sli4_parse_latt_link_speed(phba, acqe_link);
+
+	/* Fake the following irrelevant fields */
+	la->topology = TOPOLOGY_PT_PT;
+	la->granted_AL_PA = 0;
+	la->il = 0;
+	la->pb = 0;
+	la->fa = 0;
+	la->mm = 0;
+
+	/* Keep the link status for extra SLI4 state machine reference */
+	phba->sli4_hba.link_state.speed =
+				bf_get(lpfc_acqe_link_speed, acqe_link);
+	phba->sli4_hba.link_state.duplex =
+				bf_get(lpfc_acqe_link_duplex, acqe_link);
+	phba->sli4_hba.link_state.status =
+				bf_get(lpfc_acqe_link_status, acqe_link);
+	phba->sli4_hba.link_state.physical =
+				bf_get(lpfc_acqe_link_physical, acqe_link);
+	phba->sli4_hba.link_state.fault =
+				bf_get(lpfc_acqe_link_fault, acqe_link);
+
+	/* Invoke the lpfc_handle_latt mailbox command callback function */
+	lpfc_mbx_cmpl_read_la(phba, pmb);
+
 	return;
+
+out_free_dmabuf:
+	kfree(mp);
+out_free_pmb:
+	mempool_free(pmb, phba->mbox_mem_pool);
+}
+
+/**
+ * lpfc_sli4_async_fcoe_evt - Process the asynchronous fcoe event
+ * @phba: pointer to lpfc hba data structure.
+ * @acqe_fcoe: pointer to the async fcoe completion queue entry.
+ *
+ * This routine is to handle the SLI4 asynchronous fcoe event.
+ **/
+static void
+lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
+			 struct lpfc_acqe_fcoe *acqe_fcoe)
+{
+	uint8_t event_type = bf_get(lpfc_acqe_fcoe_event_type, acqe_fcoe);
+	int rc;
+
+	switch (event_type) {
+	case LPFC_FCOE_EVENT_TYPE_NEW_FCF:
+		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+			"2546 New FCF found index 0x%x tag 0x%x\n",
+			acqe_fcoe->fcf_index,
+			acqe_fcoe->event_tag);
+		/*
+		 * If the current FCF is in discovered state,
+		 * do nothing.
+		 */
+		spin_lock_irq(&phba->hbalock);
+		if (phba->fcf.fcf_flag & FCF_DISCOVERED) {
+			spin_unlock_irq(&phba->hbalock);
+			break;
+		}
+		spin_unlock_irq(&phba->hbalock);
+
+		/* Read the FCF table and re-discover SAN. */
+		rc = lpfc_sli4_read_fcf_record(phba,
+			LPFC_FCOE_FCF_GET_FIRST);
+		if (rc)
+			lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+				"2547 Read FCF record failed 0x%x\n",
+				rc);
+		break;
+
+	case LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL:
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+			"2548 FCF Table full count 0x%x tag 0x%x\n",
+			bf_get(lpfc_acqe_fcoe_fcf_count, acqe_fcoe),
+			acqe_fcoe->event_tag);
+		break;
+
+	case LPFC_FCOE_EVENT_TYPE_FCF_DEAD:
+		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+			"2549 FCF disconnected from network index 0x%x"
+			" tag 0x%x\n", acqe_fcoe->fcf_index,
+			acqe_fcoe->event_tag);
+		/* If the event is not for the currently used FCF, do nothing */
+		if (phba->fcf.fcf_indx != acqe_fcoe->fcf_index)
+			break;
+		/*
+		 * Currently, the driver supports only one FCF - so treat
+		 * this as a link down.
+		 */
+		lpfc_linkdown(phba);
+		/* Unregister FCF if no devices connected to it */
+		lpfc_unregister_unused_fcf(phba);
+		break;
+
+	default:
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+			"0288 Unknown FCoE event type 0x%x event tag "
+			"0x%x\n", event_type, acqe_fcoe->event_tag);
+		break;
+	}
+}
+
+/**
+ * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
+ * @phba: pointer to lpfc hba data structure.
+ * @acqe_dcbx: pointer to the async dcbx completion queue entry.
+ *
+ * This routine is to handle the SLI4 asynchronous dcbx event.
+ **/
+static void
+lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
+			 struct lpfc_acqe_dcbx *acqe_dcbx)
+{
+	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+			"0290 The SLI4 DCBX asynchronous event is not "
+			"handled yet\n");
+}
+
+/**
+ * lpfc_sli4_async_event_proc - Process all the pending asynchronous events
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked by the worker thread to process all the pending
+ * SLI4 asynchronous events.
+ **/
+void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
+{
+	struct lpfc_cq_event *cq_event;
+
+	/* First, declare the async event has been handled */
+	spin_lock_irq(&phba->hbalock);
+	phba->hba_flag &= ~ASYNC_EVENT;
+	spin_unlock_irq(&phba->hbalock);
+	/* Now, handle all the async events */
+	while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
+		/* Get the first event from the head of the event queue */
+		spin_lock_irq(&phba->hbalock);
+		list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
+				 cq_event, struct lpfc_cq_event, list);
+		spin_unlock_irq(&phba->hbalock);
+		/* Process the asynchronous event */
+		switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
+		case LPFC_TRAILER_CODE_LINK:
+			lpfc_sli4_async_link_evt(phba,
+						 &cq_event->cqe.acqe_link);
+			break;
+		case LPFC_TRAILER_CODE_FCOE:
+			lpfc_sli4_async_fcoe_evt(phba,
+						 &cq_event->cqe.acqe_fcoe);
+			break;
+		case LPFC_TRAILER_CODE_DCBX:
+			lpfc_sli4_async_dcbx_evt(phba,
+						 &cq_event->cqe.acqe_dcbx);
+			break;
+		default:
+			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+					"1804 Invalid asynchronous event code: "
+					"x%x\n", bf_get(lpfc_trailer_code,
+					&cq_event->cqe.mcqe_cmpl));
+			break;
+		}
+		/* Free the completion event processed to the free pool */
+		lpfc_sli4_cq_event_release(phba, cq_event);
+	}
+}
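
The enqueue side of this producer/consumer pair lives in the SLI4
interrupt path (lpfc_sli.c) and is not part of this hunk; a hedged sketch
of its shape, assuming 'cq_event' was already taken from the driver's
CQ-event free pool:

	static void sketch_post_async_event(struct lpfc_hba *phba,
					    struct lpfc_cq_event *cq_event)
	{
		unsigned long iflags;

		spin_lock_irqsave(&phba->hbalock, iflags);
		list_add_tail(&cq_event->list,
			      &phba->sli4_hba.sp_asynce_work_queue);
		phba->hba_flag |= ASYNC_EVENT; /* cleared by the drain loop above */
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		/* then wake the worker thread so it runs the drain loop above */
	}
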
+
+/**
+ * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
+ * @phba: pointer to lpfc hba data structure.
+ * @dev_grp: The HBA PCI-Device group number.
+ *
+ * This routine is invoked to set up the per HBA PCI-Device group function
+ * API jump table entries.
+ *
+ * Return: 0 if success, otherwise -ENODEV
+ **/
+int
+lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
+{
+	int rc;
+
+	/* Set up lpfc PCI-device group */
+	phba->pci_dev_grp = dev_grp;
+
+	/* The LPFC_PCI_DEV_OC uses SLI4 */
+	if (dev_grp == LPFC_PCI_DEV_OC)
+		phba->sli_rev = LPFC_SLI_REV4;
+
+	/* Set up device INIT API function jump table */
+	rc = lpfc_init_api_table_setup(phba, dev_grp);
+	if (rc)
+		return -ENODEV;
+	/* Set up SCSI API function jump table */
+	rc = lpfc_scsi_api_table_setup(phba, dev_grp);
+	if (rc)
+		return -ENODEV;
+	/* Set up SLI API function jump table */
+	rc = lpfc_sli_api_table_setup(phba, dev_grp);
+	if (rc)
+		return -ENODEV;
+	/* Set up MBOX API function jump table */
+	rc = lpfc_mbox_api_table_setup(phba, dev_grp);
+	if (rc)
+		return -ENODEV;
+
+	return 0;
 }
 
 /**
@@ -2764,102 +3271,393 @@ lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
 }
 
 /**
- * lpfc_init_api_table_setup - Set up init api fucntion jump table
- * @phba: The hba struct for which this call is being executed.
- * @dev_grp: The HBA PCI-Device group number.
- *
- * This routine sets up the device INIT interface API function jump table
- * in @phba struct.
- *
- * Returns: 0 - success, -ENODEV - failure.
- **/
-int
-lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
-{
-	switch (dev_grp) {
-	case LPFC_PCI_DEV_LP:
-		phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
-		phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
-		phba->lpfc_stop_port = lpfc_stop_port_s3;
-		break;
-	default:
-		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-				"1431 Invalid HBA PCI-device group: 0x%x\n",
-				dev_grp);
-		return -ENODEV;
-		break;
-	}
-	return 0;
-}
-
-/**
- * lpfc_setup_driver_resource_phase1 - Phase1 etup driver internal resources.
+ * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev
  * @phba: pointer to lpfc hba data structure.
  *
- * This routine is invoked to set up the driver internal resources before the
- * device specific resource setup to support the HBA device it attached to.
+ * This routine is invoked to set up the driver internal resources specific to
+ * support the SLI-4 HBA device it attached to.
  *
  * Return codes
- *	0 - sucessful
- *	other values - error
+ * 	0 - successful
+ * 	other values - error
  **/
 static int
-lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
+lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 {
+	struct lpfc_sli *psli;
+	int rc;
+	int i, hbq_count;
+
+	/* Before proceeding, wait for POST done and device ready */
+	rc = lpfc_sli4_post_status_check(phba);
+	if (rc)
+		return -ENODEV;
+
 	/*
-	 * Driver resources common to all SLI revisions
+	 * Initialize timers used by driver
 	 */
-	atomic_set(&phba->fast_event_count, 0);
-	spin_lock_init(&phba->hbalock);
 
-	/* Initialize ndlp management spinlock */
-	spin_lock_init(&phba->ndlp_lock);
+	/* Heartbeat timer */
+	init_timer(&phba->hb_tmofunc);
+	phba->hb_tmofunc.function = lpfc_hb_timeout;
+	phba->hb_tmofunc.data = (unsigned long)phba;
 
-	INIT_LIST_HEAD(&phba->port_list);
-	INIT_LIST_HEAD(&phba->work_list);
-	init_waitqueue_head(&phba->wait_4_mlo_m_q);
+	psli = &phba->sli;
+	/* MBOX heartbeat timer */
+	init_timer(&psli->mbox_tmo);
+	psli->mbox_tmo.function = lpfc_mbox_timeout;
+	psli->mbox_tmo.data = (unsigned long) phba;
+	/* Fabric block timer */
+	init_timer(&phba->fabric_block_timer);
+	phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
+	phba->fabric_block_timer.data = (unsigned long) phba;
+	/* EA polling mode timer */
+	init_timer(&phba->eratt_poll);
+	phba->eratt_poll.function = lpfc_poll_eratt;
+	phba->eratt_poll.data = (unsigned long) phba;
+	/*
+	 * We need to do a READ_CONFIG mailbox command here before
+	 * calling lpfc_get_cfgparam. For VFs this will report the
+	 * MAX_XRI, MAX_VPI, MAX_RPI, MAX_IOCB, and MAX_VFI settings.
+	 * All of the resources allocated for this Port are tied to
+	 * these values.
+	 */
+	/* Get all the module params for configuring this host */
+	lpfc_get_cfgparam(phba);
+	phba->max_vpi = LPFC_MAX_VPI;
+	/* This will be set to the correct value after the read_config mbox */
+	phba->max_vports = 0;
 
-	/* Initialize the wait queue head for the kernel thread */
-	init_waitqueue_head(&phba->work_waitq);
+	/* Program the default value of vlan_id and fc_map */
+	phba->valid_vlan = 0;
+	phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
+	phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
+	phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
 
-	/* Initialize the scsi buffer list used by driver for scsi IO */
-	spin_lock_init(&phba->scsi_buf_list_lock);
-	INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list);
+	/*
+	 * Since the sg_tablesize is a module parameter, the sg_dma_buf_size
+	 * used to create the sg_dma_buf_pool must be dynamically calculated.
+	 * 2 segments are added since the IOCB needs a command and response bde.
+	 * To ensure that the scsi sgl does not cross a 4k page boundary, only
+	 * sgl sizes of 1k, 2k, 4k, and 8k are supported.
+	 * Table of sgl sizes and seg_cnt:
+	 * sgl size	sg_seg_cnt	total seg
+	 * 1k		50		52
+	 * 2k		114		116
+	 * 4k		242		244
+	 * 8k		498		500
+	 * cmd(32) + rsp(160) + (52 * sizeof(sli4_sge)) = 1024
+	 * cmd(32) + rsp(160) + (116 * sizeof(sli4_sge)) = 2048
+	 * cmd(32) + rsp(160) + (244 * sizeof(sli4_sge)) = 4096
+	 * cmd(32) + rsp(160) + (500 * sizeof(sli4_sge)) = 8192
+	 */
+	if (phba->cfg_sg_seg_cnt <= LPFC_DEFAULT_SG_SEG_CNT)
+		phba->cfg_sg_seg_cnt = 50;
+	else if (phba->cfg_sg_seg_cnt <= 114)
+		phba->cfg_sg_seg_cnt = 114;
+	else if (phba->cfg_sg_seg_cnt <= 242)
+		phba->cfg_sg_seg_cnt = 242;
+	else
+		phba->cfg_sg_seg_cnt = 498;
 
-	/* Initialize the fabric iocb list */
-	INIT_LIST_HEAD(&phba->fabric_iocb_list);
+	phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd)
+					+ sizeof(struct fcp_rsp);
+	phba->cfg_sg_dma_buf_size +=
+		((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge));
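
As a quick check of the table in the comment above, assuming the struct
sizes it implies (fcp_cmnd 32 bytes, fcp_rsp 160 bytes, sli4_sge 16
bytes), the buffer size works out as follows; SKETCH_SGL_BUF_SIZE is an
illustrative macro, not driver code:

	#define SKETCH_SGL_BUF_SIZE(seg_cnt)	(32 + 160 + ((seg_cnt) + 2) * 16)
	/*
	 * SKETCH_SGL_BUF_SIZE(50)  == 1024
	 * SKETCH_SGL_BUF_SIZE(114) == 2048
	 * SKETCH_SGL_BUF_SIZE(242) == 4096
	 * SKETCH_SGL_BUF_SIZE(498) == 8192
	 */
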
 
-	/* Initialize list to save ELS buffers */
-	INIT_LIST_HEAD(&phba->elsbuf);
+	/* Initialize buffer queue management fields */
+	hbq_count = lpfc_sli_hbq_count();
+	for (i = 0; i < hbq_count; ++i)
+		INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
+	INIT_LIST_HEAD(&phba->rb_pend_list);
+	phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
+	phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;
 
-	/* Initialize FCF connection rec list */
-	INIT_LIST_HEAD(&phba->fcf_conn_rec_list);
+	/*
+	 * Initialize the SLI Layer to run with lpfc SLI4 HBAs.
+	 */
+	/* Initialize the Abort scsi buffer list used by driver */
+	spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock);
+	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
+	/* This abort list is used by the worker thread */
+	spin_lock_init(&phba->sli4_hba.abts_sgl_list_lock);
 
-	return 0;
-}
+	/*
+	 * Initialize driver internal slow-path work queues
+	 */
 
-/**
- * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.
- * @phba: pointer to lpfc hba data structure.
- *
- * This routine is invoked to set up the driver internal resources after the
- * device specific resource setup to support the HBA device it attached to.
- *
- * Return codes
- * 	0 - sucessful
- * 	other values - error
- **/
-static int
-lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
-{
-	int error;
+	/* Driver internal slow-path CQ Event pool */
+	INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
+	/* Response IOCB work queue list */
+	INIT_LIST_HEAD(&phba->sli4_hba.sp_rspiocb_work_queue);
+	/* Asynchronous event CQ Event work queue list */
+	INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
+	/* Fast-path XRI aborted CQ Event work queue list */
+	INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
+	/* Slow-path XRI aborted CQ Event work queue list */
+	INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
+	/* Receive queue CQ Event work queue list */
+	INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);
+
+	/* Initialize the driver internal SLI layer lists. */
+	lpfc_sli_setup(phba);
+	lpfc_sli_queue_setup(phba);
 
-	/* Startup the kernel thread for this host adapter. */
-	phba->worker_thread = kthread_run(lpfc_do_work, phba,
-					  "lpfc_worker_%d", phba->brd_no);
-	if (IS_ERR(phba->worker_thread)) {
-		error = PTR_ERR(phba->worker_thread);
-		return error;
+	/* Allocate device driver memory */
+	rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
+	if (rc)
+		return -ENOMEM;
+
+	/* Create the bootstrap mailbox command */
+	rc = lpfc_create_bootstrap_mbox(phba);
+	if (unlikely(rc))
+		goto out_free_mem;
+
+	/* Set up the host's endian order with the device. */
+	rc = lpfc_setup_endian_order(phba);
+	if (unlikely(rc))
+		goto out_free_bsmbx;
+
+	/* Set up the hba's configuration parameters. */
+	rc = lpfc_sli4_read_config(phba);
+	if (unlikely(rc))
+		goto out_free_bsmbx;
+
+	/* Perform a function reset */
+	rc = lpfc_pci_function_reset(phba);
+	if (unlikely(rc))
+		goto out_free_bsmbx;
+
+	/* Create all the SLI4 queues */
+	rc = lpfc_sli4_queue_create(phba);
+	if (rc)
+		goto out_free_bsmbx;
+
+	/* Create driver internal CQE event pool */
+	rc = lpfc_sli4_cq_event_pool_create(phba);
+	if (rc)
+		goto out_destroy_queue;
+
+	/* Initialize and populate the sgl list per host */
+	rc = lpfc_init_sgl_list(phba);
+	if (rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"1400 Failed to initialize sgl list.\n");
+		goto out_destroy_cq_event_pool;
+	}
+	rc = lpfc_init_active_sgl_array(phba);
+	if (rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"1430 Failed to initialize active sgl array.\n");
+		goto out_free_sgl_list;
+	}
+
+	rc = lpfc_sli4_init_rpi_hdrs(phba);
+	if (rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"1432 Failed to initialize rpi headers.\n");
+		goto out_free_active_sgl;
+	}
+
+	phba->sli4_hba.fcp_eq_hdl = kzalloc((sizeof(struct lpfc_fcp_eq_hdl) *
+				    phba->cfg_fcp_eq_count), GFP_KERNEL);
+	if (!phba->sli4_hba.fcp_eq_hdl) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2572 Failed allocate memory for fast-path "
+				"per-EQ handle array\n");
+		goto out_remove_rpi_hdrs;
+	}
+
+	phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) *
+				      phba->sli4_hba.cfg_eqn), GFP_KERNEL);
+	if (!phba->sli4_hba.msix_entries) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2573 Failed allocate memory for msi-x "
+				"interrupt vector entries\n");
+		goto out_free_fcp_eq_hdl;
+	}
+
+	return rc;
+
+out_free_fcp_eq_hdl:
+	kfree(phba->sli4_hba.fcp_eq_hdl);
+out_remove_rpi_hdrs:
+	lpfc_sli4_remove_rpi_hdrs(phba);
+out_free_active_sgl:
+	lpfc_free_active_sgl(phba);
+out_free_sgl_list:
+	lpfc_free_sgl_list(phba);
+out_destroy_cq_event_pool:
+	lpfc_sli4_cq_event_pool_destroy(phba);
+out_destroy_queue:
+	lpfc_sli4_queue_destroy(phba);
+out_free_bsmbx:
+	lpfc_destroy_bootstrap_mbox(phba);
+out_free_mem:
+	lpfc_mem_free(phba);
+	return rc;
+}
+
+/**
+ * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to unset the driver internal resources set up
+ * specific for supporting the SLI-4 HBA device it attached to.
+ **/
+static void
+lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
+{
+	struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
+
+	/* unregister default FCFI from the HBA */
+	lpfc_sli4_fcfi_unreg(phba, phba->fcf.fcfi);
+
+	/* Free the default FCR table */
+	lpfc_sli_remove_dflt_fcf(phba);
+
+	/* Free memory allocated for msi-x interrupt vector entries */
+	kfree(phba->sli4_hba.msix_entries);
+
+	/* Free memory allocated for fast-path work queue handles */
+	kfree(phba->sli4_hba.fcp_eq_hdl);
+
+	/* Free the allocated rpi headers. */
+	lpfc_sli4_remove_rpi_hdrs(phba);
+
+	/* Free the ELS sgl list */
+	lpfc_free_active_sgl(phba);
+	lpfc_free_sgl_list(phba);
+
+	/* Free the SCSI sgl management array */
+	kfree(phba->sli4_hba.lpfc_scsi_psb_array);
+
+	/* Free the SLI4 queues */
+	lpfc_sli4_queue_destroy(phba);
+
+	/* Free the completion queue EQ event pool */
+	lpfc_sli4_cq_event_release_all(phba);
+	lpfc_sli4_cq_event_pool_destroy(phba);
+
+	/* Reset SLI4 HBA FCoE function */
+	lpfc_pci_function_reset(phba);
+
+	/* Free the bsmbx region. */
+	lpfc_destroy_bootstrap_mbox(phba);
+
+	/* Free the SLI Layer memory with SLI4 HBAs */
+	lpfc_mem_free_all(phba);
+
+	/* Free the current connect table */
+	list_for_each_entry_safe(conn_entry, next_conn_entry,
+		&phba->fcf_conn_rec_list, list)
+		kfree(conn_entry);
+
+	return;
+}
+
+/**
+ * lpfc_init_api_table_setup - Set up init api function jump table
+ * @phba: The hba struct for which this call is being executed.
+ * @dev_grp: The HBA PCI-Device group number.
+ *
+ * This routine sets up the device INIT interface API function jump table
+ * in @phba struct.
+ *
+ * Returns: 0 - success, -ENODEV - failure.
+ **/
+int
+lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
+{
+	switch (dev_grp) {
+	case LPFC_PCI_DEV_LP:
+		phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
+		phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
+		phba->lpfc_stop_port = lpfc_stop_port_s3;
+		break;
+	case LPFC_PCI_DEV_OC:
+		phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
+		phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
+		phba->lpfc_stop_port = lpfc_stop_port_s4;
+		break;
+	default:
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"1431 Invalid HBA PCI-device group: 0x%x\n",
+				dev_grp);
+		return -ENODEV;
+		break;
+	}
+	return 0;
+}
+
+/**
+ * lpfc_setup_driver_resource_phase1 - Phase1 setup driver internal resources.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to set up the driver internal resources before the
+ * device specific resource setup to support the HBA device it attached to.
+ *
+ * Return codes
+ *	0 - successful
+ *	other values - error
+ **/
+static int
+lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
+{
+	/*
+	 * Driver resources common to all SLI revisions
+	 */
+	atomic_set(&phba->fast_event_count, 0);
+	spin_lock_init(&phba->hbalock);
+
+	/* Initialize ndlp management spinlock */
+	spin_lock_init(&phba->ndlp_lock);
+
+	INIT_LIST_HEAD(&phba->port_list);
+	INIT_LIST_HEAD(&phba->work_list);
+	init_waitqueue_head(&phba->wait_4_mlo_m_q);
+
+	/* Initialize the wait queue head for the kernel thread */
+	init_waitqueue_head(&phba->work_waitq);
+
+	/* Initialize the scsi buffer list used by driver for scsi IO */
+	spin_lock_init(&phba->scsi_buf_list_lock);
+	INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list);
+
+	/* Initialize the fabric iocb list */
+	INIT_LIST_HEAD(&phba->fabric_iocb_list);
+
+	/* Initialize list to save ELS buffers */
+	INIT_LIST_HEAD(&phba->elsbuf);
+
+	/* Initialize FCF connection rec list */
+	INIT_LIST_HEAD(&phba->fcf_conn_rec_list);
+
+	return 0;
+}
+
+/**
+ * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to set up the driver internal resources after the
+ * device specific resource setup to support the HBA device it attached to.
+ *
+ * Return codes
+ * 	0 - successful
+ * 	other values - error
+ **/
+static int
+lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
+{
+	int error;
+
+	/* Startup the kernel thread for this host adapter. */
+	phba->worker_thread = kthread_run(lpfc_do_work, phba,
+					  "lpfc_worker_%d", phba->brd_no);
+	if (IS_ERR(phba->worker_thread)) {
+		error = PTR_ERR(phba->worker_thread);
+		return error;
 	}
 
 	return 0;
@@ -2956,92 +3754,432 @@ out_free_iocbq:
 }
 
 /**
- * lpfc_hba_alloc - Allocate driver hba data structure for a device.
- * @pdev: pointer to pci device data structure.
- *
- * This routine is invoked to allocate the driver hba data structure for an
- * HBA device. If the allocation is successful, the phba reference to the
- * PCI device data structure is set.
+ * lpfc_free_sgl_list - Free sgl list.
+ * @phba: pointer to lpfc hba data structure.
  *
- * Return codes
- *      pointer to @phba - sucessful
- *      NULL - error
+ * This routine is invoked to free the driver's sgl list and memory.
  **/
-static struct lpfc_hba *
-lpfc_hba_alloc(struct pci_dev *pdev)
+static void
+lpfc_free_sgl_list(struct lpfc_hba *phba)
 {
-	struct lpfc_hba *phba;
-
-	/* Allocate memory for HBA structure */
-	phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
-	if (!phba) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-				"1417 Failed to allocate hba struct.\n");
-		return NULL;
-	}
+	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
+	LIST_HEAD(sglq_list);
+	int rc = 0;
 
-	/* Set reference to PCI device in HBA structure */
-	phba->pcidev = pdev;
+	spin_lock_irq(&phba->hbalock);
+	list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &sglq_list);
+	spin_unlock_irq(&phba->hbalock);
 
-	/* Assign an unused board number */
-	phba->brd_no = lpfc_get_instance();
-	if (phba->brd_no < 0) {
-		kfree(phba);
-		return NULL;
+	list_for_each_entry_safe(sglq_entry, sglq_next,
+				 &sglq_list, list) {
+		list_del(&sglq_entry->list);
+		lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
+		kfree(sglq_entry);
+		phba->sli4_hba.total_sglq_bufs--;
+	}
+	rc = lpfc_sli4_remove_all_sgl_pages(phba);
+	if (rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+			"2005 Unable to deregister pages from HBA: %x", rc);
 	}
+	kfree(phba->sli4_hba.lpfc_els_sgl_array);
+}
 
-	return phba;
+/**
+ * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to allocate the driver's active sgl memory.
+ * This array will hold the sglq_entry's for active IOs.
+ **/
+static int
+lpfc_init_active_sgl_array(struct lpfc_hba *phba)
+{
+	int size;
+	size = sizeof(struct lpfc_sglq *);
+	size *= phba->sli4_hba.max_cfg_param.max_xri;
+
+	phba->sli4_hba.lpfc_sglq_active_list =
+		kzalloc(size, GFP_KERNEL);
+	if (!phba->sli4_hba.lpfc_sglq_active_list)
+		return -ENOMEM;
+	return 0;
 }
 
 /**
- * lpfc_hba_free - Free driver hba data structure with a device.
+ * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs.
  * @phba: pointer to lpfc hba data structure.
  *
- * This routine is invoked to free the driver hba data structure with an
- * HBA device.
+ * This routine is invoked to walk through the array of active sglq entries
+ * and free all of the resources.
+ * This is just a place holder for now.
  **/
 static void
-lpfc_hba_free(struct lpfc_hba *phba)
+lpfc_free_active_sgl(struct lpfc_hba *phba)
 {
-	/* Release the driver assigned board number */
-	idr_remove(&lpfc_hba_index, phba->brd_no);
-
-	kfree(phba);
-	return;
+	kfree(phba->sli4_hba.lpfc_sglq_active_list);
 }
 
 /**
- * lpfc_create_shost - Create hba physical port with associated scsi host.
+ * lpfc_init_sgl_list - Allocate and initialize sgl list.
  * @phba: pointer to lpfc hba data structure.
  *
- * This routine is invoked to create HBA physical port and associate a SCSI
- * host with it.
+ * This routine is invoked to allocate and initialize the driver's sgl
+ * list and set up the sgl xritag tag array accordingly.
  *
  * Return codes
- *      0 - sucessful
- *      other values - error
+ *	0 - successful
+ *	other values - error
  **/
 static int
-lpfc_create_shost(struct lpfc_hba *phba)
+lpfc_init_sgl_list(struct lpfc_hba *phba)
 {
-	struct lpfc_vport *vport;
-	struct Scsi_Host  *shost;
+	struct lpfc_sglq *sglq_entry = NULL;
+	int i;
+	int els_xri_cnt;
+
+	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
+	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+				"2400 lpfc_init_sgl_list els %d.\n",
+				els_xri_cnt);
+	/* Initialize and populate the sglq list per host/VF. */
+	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list);
+	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
+
+	/* Sanity check on XRI management */
+	if (phba->sli4_hba.max_cfg_param.max_xri <= els_xri_cnt) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"2562 No room left for SCSI XRI allocation: "
+				"max_xri=%d, els_xri=%d\n",
+				phba->sli4_hba.max_cfg_param.max_xri,
+				els_xri_cnt);
+		return -ENOMEM;
+	}
 
-	/* Initialize HBA FC structure */
-	phba->fc_edtov = FF_DEF_EDTOV;
-	phba->fc_ratov = FF_DEF_RATOV;
-	phba->fc_altov = FF_DEF_ALTOV;
-	phba->fc_arbtov = FF_DEF_ARBTOV;
+	/* Allocate memory for the ELS XRI management array */
+	phba->sli4_hba.lpfc_els_sgl_array =
+			kzalloc((sizeof(struct lpfc_sglq *) * els_xri_cnt),
+			GFP_KERNEL);
 
-	vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
-	if (!vport)
-		return -ENODEV;
+	if (!phba->sli4_hba.lpfc_els_sgl_array) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"2401 Failed to allocate memory for ELS "
+				"XRI management array of size %d.\n",
+				els_xri_cnt);
+		return -ENOMEM;
+	}
 
-	shost = lpfc_shost_from_vport(vport);
-	phba->pport = vport;
-	lpfc_debugfs_initialize(vport);
-	/* Put reference to SCSI host to driver's device private data */
-	pci_set_drvdata(phba->pcidev, shost);
+	/* Keep the SCSI XRI into the XRI management array */
+	phba->sli4_hba.scsi_xri_max =
+			phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
+	phba->sli4_hba.scsi_xri_cnt = 0;
+
+	phba->sli4_hba.lpfc_scsi_psb_array =
+			kzalloc((sizeof(struct lpfc_scsi_buf *) *
+			phba->sli4_hba.scsi_xri_max), GFP_KERNEL);
+
+	if (!phba->sli4_hba.lpfc_scsi_psb_array) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"2563 Failed to allocate memory for SCSI "
+				"XRI management array of size %d.\n",
+				phba->sli4_hba.scsi_xri_max);
+		kfree(phba->sli4_hba.lpfc_els_sgl_array);
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < els_xri_cnt; i++) {
+		sglq_entry = kzalloc(sizeof(struct lpfc_sglq), GFP_KERNEL);
+		if (sglq_entry == NULL) {
+			printk(KERN_ERR "%s: only allocated %d sgls of "
+				"expected %d count. Unloading driver.\n",
+				__func__, i, els_xri_cnt);
+			goto out_free_mem;
+		}
+
+		sglq_entry->sli4_xritag = lpfc_sli4_next_xritag(phba);
+		if (sglq_entry->sli4_xritag == NO_XRI) {
+			kfree(sglq_entry);
+			printk(KERN_ERR "%s: failed to allocate XRI.\n"
+				"Unloading driver.\n", __func__);
+			goto out_free_mem;
+		}
+		sglq_entry->buff_type = GEN_BUFF_TYPE;
+		sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, &sglq_entry->phys);
+		if (sglq_entry->virt == NULL) {
+			kfree(sglq_entry);
+			printk(KERN_ERR "%s: failed to allocate mbuf.\n"
+				"Unloading driver.\n", __func__);
+			goto out_free_mem;
+		}
+		sglq_entry->sgl = sglq_entry->virt;
+		memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
+
+		/* The list order is used by later block SGL registration */
+		spin_lock_irq(&phba->hbalock);
+		list_add_tail(&sglq_entry->list, &phba->sli4_hba.lpfc_sgl_list);
+		phba->sli4_hba.lpfc_els_sgl_array[i] = sglq_entry;
+		phba->sli4_hba.total_sglq_bufs++;
+		spin_unlock_irq(&phba->hbalock);
+	}
+	return 0;
+
+out_free_mem:
+	kfree(phba->sli4_hba.lpfc_scsi_psb_array);
+	lpfc_free_sgl_list(phba);
+	return -ENOMEM;
+}
+
+/**
+ * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to post rpi header templates to the
+ * HBA consistent with the SLI-4 interface spec.  This routine
+ * posts a PAGE_SIZE memory region to the port to hold up to
+ * 64 (LPFC_RPI_HDR_COUNT) rpi context headers.
+ * No locks are held here because this is an initialization routine
+ * called only from probe or lpfc_online when interrupts are not
+ * enabled and the driver is reinitializing the device.
+ *
+ * Return codes
+ * 	0 - successful
+ * 	ENOMEM - No available memory
+ *      EIO - The mailbox failed to complete successfully.
+ **/
+int
+lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
+{
+	int rc = 0;
+	int longs;
+	uint16_t rpi_count;
+	struct lpfc_rpi_hdr *rpi_hdr;
+
+	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
+
+	/*
+	 * Provision an rpi bitmask range for discovery. The total count
+	 * is the difference between max and base + 1.
+	 */
+	rpi_count = phba->sli4_hba.max_cfg_param.rpi_base +
+		    phba->sli4_hba.max_cfg_param.max_rpi - 1;
+
+	longs = ((rpi_count) + BITS_PER_LONG - 1) / BITS_PER_LONG;
+	phba->sli4_hba.rpi_bmask = kzalloc(longs * sizeof(unsigned long),
+					   GFP_KERNEL);
+	if (!phba->sli4_hba.rpi_bmask)
+		return -ENOMEM;
+
+	rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
+	if (!rpi_hdr) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+				"0391 Error during rpi post operation\n");
+		lpfc_sli4_remove_rpis(phba);
+		rc = -ENODEV;
+	}
+
+	return rc;
+}
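
The bitmask sizing above is the usual round-up-to-longs computation; a
worked example with a hypothetical rpi_count of 127 on a 64-bit kernel:

	/*
	 * longs = (127 + 64 - 1) / 64 = 2
	 * kzalloc(2 * sizeof(unsigned long)) -> a 128-bit discovery bitmap
	 */
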
+
+/**
+ * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to allocate a single 4KB memory region to
+ * support rpis and store it in the phba.  This single region
+ * provides support for up to 64 rpis.  The region is used globally
+ * by the device.
+ *
+ * Returns:
+ *   A valid rpi hdr on success.
+ *   A NULL pointer on any failure.
+ **/
+struct lpfc_rpi_hdr *
+lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
+{
+	uint16_t rpi_limit, curr_rpi_range;
+	struct lpfc_dmabuf *dmabuf;
+	struct lpfc_rpi_hdr *rpi_hdr;
+
+	rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base +
+		    phba->sli4_hba.max_cfg_param.max_rpi - 1;
+
+	spin_lock_irq(&phba->hbalock);
+	curr_rpi_range = phba->sli4_hba.next_rpi;
+	spin_unlock_irq(&phba->hbalock);
+
+	/*
+	 * The port has a limited number of rpis. The increment here
+	 * is LPFC_RPI_HDR_COUNT - 1 to account for the starting value
+	 * and to allow the full max_rpi range per port.
+	 */
+	if ((curr_rpi_range + (LPFC_RPI_HDR_COUNT - 1)) > rpi_limit)
+		return NULL;
+
+	/*
+	 * First allocate the protocol header region for the port.  The
+	 * port expects a 4KB DMA-mapped memory region that is 4K aligned.
+	 */
+	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+	if (!dmabuf)
+		return NULL;
+
+	dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
+					  LPFC_HDR_TEMPLATE_SIZE,
+					  &dmabuf->phys,
+					  GFP_KERNEL);
+	if (!dmabuf->virt) {
+		rpi_hdr = NULL;
+		goto err_free_dmabuf;
+	}
+
+	memset(dmabuf->virt, 0, LPFC_HDR_TEMPLATE_SIZE);
+	if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) {
+		rpi_hdr = NULL;
+		goto err_free_coherent;
+	}
+
+	/* Save the rpi header data for cleanup later. */
+	rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL);
+	if (!rpi_hdr)
+		goto err_free_coherent;
+
+	rpi_hdr->dmabuf = dmabuf;
+	rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
+	rpi_hdr->page_count = 1;
+	spin_lock_irq(&phba->hbalock);
+	rpi_hdr->start_rpi = phba->sli4_hba.next_rpi;
+	list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);
+
+	/*
+	 * The next_rpi stores the next modulo-64 rpi value to post
+	 * in any subsequent rpi memory region postings.
+	 */
+	phba->sli4_hba.next_rpi += LPFC_RPI_HDR_COUNT;
+	spin_unlock_irq(&phba->hbalock);
+	return rpi_hdr;
+
+ err_free_coherent:
+	dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE,
+			  dmabuf->virt, dmabuf->phys);
+ err_free_dmabuf:
+	kfree(dmabuf);
+	return NULL;
+}
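
The port requires the 4KB header region to start on a boundary of its own size, hence the IS_ALIGNED() test on the DMA address above. A user-space sketch of that power-of-two alignment test; the 4096 constant stands in for LPFC_HDR_TEMPLATE_SIZE and the addresses are made up:

	#include <stdint.h>
	#include <stdio.h>

	#define HDR_TEMPLATE_SIZE 4096u		/* assumed 4KB region size */
	#define IS_ALIGNED(x, a) (((x) & ((uintptr_t)(a) - 1)) == 0)

	int main(void)
	{
		uintptr_t ok  = 0x7f3a1000;	/* 4K-aligned: accepted */
		uintptr_t bad = 0x7f3a1010;	/* misaligned: rejected */

		printf("0x%lx aligned? %d\n", (unsigned long)ok,
		       (int)IS_ALIGNED(ok, HDR_TEMPLATE_SIZE));
		printf("0x%lx aligned? %d\n", (unsigned long)bad,
		       (int)IS_ALIGNED(bad, HDR_TEMPLATE_SIZE));
		return 0;
	}
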
+
+/**
+ * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to remove all memory resources allocated
+ * to support rpis. This routine presumes the caller has released all
+ * rpis consumed by fabric or port logins and is prepared to have
+ * the header pages removed.
+ **/
+void
+lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
+{
+	struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;
+
+	list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
+				 &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
+		list_del(&rpi_hdr->list);
+		dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len,
+				  rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys);
+		kfree(rpi_hdr->dmabuf);
+		kfree(rpi_hdr);
+	}
+
+	phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base;
+	memset(phba->sli4_hba.rpi_bmask, 0, sizeof(*phba->sli4_hba.rpi_bmask));
+}
+
+/**
+ * lpfc_hba_alloc - Allocate driver hba data structure for a device.
+ * @pdev: pointer to pci device data structure.
+ *
+ * This routine is invoked to allocate the driver hba data structure for an
+ * HBA device. If the allocation is successful, the phba reference to the
+ * PCI device data structure is set.
+ *
+ * Return codes
+ *      pointer to @phba - successful
+ *      NULL - error
+ **/
+static struct lpfc_hba *
+lpfc_hba_alloc(struct pci_dev *pdev)
+{
+	struct lpfc_hba *phba;
+
+	/* Allocate memory for HBA structure */
+	phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
+	if (!phba) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"1417 Failed to allocate hba struct.\n");
+		return NULL;
+	}
+
+	/* Set reference to PCI device in HBA structure */
+	phba->pcidev = pdev;
+
+	/* Assign an unused board number */
+	phba->brd_no = lpfc_get_instance();
+	if (phba->brd_no < 0) {
+		kfree(phba);
+		return NULL;
+	}
+
+	return phba;
+}
+
+/**
+ * lpfc_hba_free - Free driver hba data structure for a device.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to free the driver hba data structure for an
+ * HBA device.
+ **/
+static void
+lpfc_hba_free(struct lpfc_hba *phba)
+{
+	/* Release the driver assigned board number */
+	idr_remove(&lpfc_hba_index, phba->brd_no);
+
+	kfree(phba);
+	return;
+}
+
+/**
+ * lpfc_create_shost - Create hba physical port with associated scsi host.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to create HBA physical port and associate a SCSI
+ * host with it.
+ *
+ * Return codes
+ *      0 - successful
+ *      other values - error
+ **/
+static int
+lpfc_create_shost(struct lpfc_hba *phba)
+{
+	struct lpfc_vport *vport;
+	struct Scsi_Host  *shost;
+
+	/* Initialize HBA FC structure */
+	phba->fc_edtov = FF_DEF_EDTOV;
+	phba->fc_ratov = FF_DEF_RATOV;
+	phba->fc_altov = FF_DEF_ALTOV;
+	phba->fc_arbtov = FF_DEF_ARBTOV;
+
+	vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
+	if (!vport)
+		return -ENODEV;
+
+	shost = lpfc_shost_from_vport(vport);
+	phba->pport = vport;
+	lpfc_debugfs_initialize(vport);
+	/* Put reference to SCSI host to driver's device private data */
+	pci_set_drvdata(phba->pcidev, shost);
 
 	return 0;
 }
@@ -3316,340 +4454,2755 @@ lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
 }
 
 /**
- * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device
+ * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status
  * @phba: pointer to lpfc hba data structure.
  *
- * This routine is invoked to enable the MSI-X interrupt vectors to device
- * with SLI-3 interface specs. The kernel function pci_enable_msix() is
- * called to enable the MSI-X vectors. Note that pci_enable_msix(), once
- * invoked, enables either all or nothing, depending on the current
- * availability of PCI vector resources. The device driver is responsible
- * for calling the individual request_irq() to register each MSI-X vector
- * with a interrupt handler, which is done in this function. Note that
- * later when device is unloading, the driver should always call free_irq()
- * on all MSI-X vectors it has done request_irq() on before calling
- * pci_disable_msix(). Failure to do so results in a BUG_ON() and a device
- * will be left with MSI-X enabled and leaks its vectors.
+ * This routine is invoked to wait for the SLI4 device Power On Self Test
+ * (POST) to complete and to check the status.
  *
- * Return codes
- *   0 - sucessful
- *   other values - error
+ * Return 0 if successful, otherwise -ENODEV.
  **/
-static int
-lpfc_sli_enable_msix(struct lpfc_hba *phba)
+int
+lpfc_sli4_post_status_check(struct lpfc_hba *phba)
 {
-	int rc, i;
-	LPFC_MBOXQ_t *pmb;
+	struct lpfc_register sta_reg, uerrlo_reg, uerrhi_reg, scratchpad;
+	uint32_t onlnreg0, onlnreg1;
+	int i, port_error = -ENODEV;
 
-	/* Set up MSI-X multi-message vectors */
-	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
-		phba->msix_entries[i].entry = i;
+	if (!phba->sli4_hba.STAregaddr)
+		return -ENODEV;
 
-	/* Configure MSI-X capability structure */
-	rc = pci_enable_msix(phba->pcidev, phba->msix_entries,
-				ARRAY_SIZE(phba->msix_entries));
-	if (rc) {
-		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-				"0420 PCI enable MSI-X failed (%d)\n", rc);
-		goto msi_fail_out;
+	/* On an unrecoverable error, log the error message and return error */
+	onlnreg0 = readl(phba->sli4_hba.ONLINE0regaddr);
+	onlnreg1 = readl(phba->sli4_hba.ONLINE1regaddr);
+	if ((onlnreg0 != LPFC_ONLINE_NERR) || (onlnreg1 != LPFC_ONLINE_NERR)) {
+		uerrlo_reg.word0 = readl(phba->sli4_hba.UERRLOregaddr);
+		uerrhi_reg.word0 = readl(phba->sli4_hba.UERRHIregaddr);
+		if (uerrlo_reg.word0 || uerrhi_reg.word0) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"1422 HBA Unrecoverable error: "
+					"uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
+					"online0_reg=0x%x, online1_reg=0x%x\n",
+					uerrlo_reg.word0, uerrhi_reg.word0,
+					onlnreg0, onlnreg1);
+		}
+		return -ENODEV;
 	}
-	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
-		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-				"0477 MSI-X entry[%d]: vector=x%x "
-				"message=%d\n", i,
-				phba->msix_entries[i].vector,
-				phba->msix_entries[i].entry);
-	/*
-	 * Assign MSI-X vectors to interrupt handlers
-	 */
 
-	/* vector-0 is associated to slow-path handler */
-	rc = request_irq(phba->msix_entries[0].vector,
-			 &lpfc_sli_sp_intr_handler, IRQF_SHARED,
-			 LPFC_SP_DRIVER_HANDLER_NAME, phba);
-	if (rc) {
-		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
-				"0421 MSI-X slow-path request_irq failed "
-				"(%d)\n", rc);
-		goto msi_fail_out;
+	/* Wait up to 30 seconds for the SLI port POST to complete and be ready */
+	for (i = 0; i < 3000; i++) {
+		sta_reg.word0 = readl(phba->sli4_hba.STAregaddr);
+		/* Encounter fatal POST error, break out */
+		if (bf_get(lpfc_hst_state_perr, &sta_reg)) {
+			port_error = -ENODEV;
+			break;
+		}
+		if (LPFC_POST_STAGE_ARMFW_READY ==
+		    bf_get(lpfc_hst_state_port_status, &sta_reg)) {
+			port_error = 0;
+			break;
+		}
+		msleep(10);
 	}
 
-	/* vector-1 is associated to fast-path handler */
-	rc = request_irq(phba->msix_entries[1].vector,
-			 &lpfc_sli_fp_intr_handler, IRQF_SHARED,
-			 LPFC_FP_DRIVER_HANDLER_NAME, phba);
+	if (port_error)
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"1408 Failure HBA POST Status: sta_reg=0x%x, "
+			"perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, xrom=x%x, "
+			"dl=x%x, pstatus=x%x\n", sta_reg.word0,
+			bf_get(lpfc_hst_state_perr, &sta_reg),
+			bf_get(lpfc_hst_state_sfi, &sta_reg),
+			bf_get(lpfc_hst_state_nip, &sta_reg),
+			bf_get(lpfc_hst_state_ipc, &sta_reg),
+			bf_get(lpfc_hst_state_xrom, &sta_reg),
+			bf_get(lpfc_hst_state_dl, &sta_reg),
+			bf_get(lpfc_hst_state_port_status, &sta_reg));
+
+	/* Log device information */
+	scratchpad.word0 = readl(phba->sli4_hba.SCRATCHPADregaddr);
+	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+			"2534 Device Info: ChipType=0x%x, SliRev=0x%x, "
+			"FeatureL1=0x%x, FeatureL2=0x%x\n",
+			bf_get(lpfc_scratchpad_chiptype, &scratchpad),
+			bf_get(lpfc_scratchpad_slirev, &scratchpad),
+			bf_get(lpfc_scratchpad_featurelevel1, &scratchpad),
+			bf_get(lpfc_scratchpad_featurelevel2, &scratchpad));
+
+	return port_error;
+}
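
The wait loop above is a bounded poll: 3000 iterations of msleep(10) give the 30-second POST budget. A minimal user-space sketch of the same pattern; port_ready() is a hypothetical stand-in for the readl() of the host state register:

	#include <stdio.h>
	#include <unistd.h>

	static int port_ready(void)
	{
		static int polls;	/* pretend the port is ready on poll 5 */

		return ++polls >= 5;	/* stand-in for the register read */
	}

	int main(void)
	{
		int i, port_error = -1;

		for (i = 0; i < 3000; i++) {	/* 3000 * 10ms = 30s budget */
			if (port_ready()) {
				port_error = 0;
				break;
			}
			usleep(10 * 1000);	/* user-space msleep(10) */
		}
		printf("port_error=%d after %d polls\n", port_error, i);
		return port_error ? 1 : 0;
	}
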
 
-	if (rc) {
-		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
-				"0429 MSI-X fast-path request_irq failed "
-				"(%d)\n", rc);
-		goto irq_fail_out;
-	}
+/**
+ * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to set up SLI4 BAR0 PCI config space register
+ * memory map.
+ **/
+static void
+lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba)
+{
+	phba->sli4_hba.UERRLOregaddr = phba->sli4_hba.conf_regs_memmap_p +
+					LPFC_UERR_STATUS_LO;
+	phba->sli4_hba.UERRHIregaddr = phba->sli4_hba.conf_regs_memmap_p +
+					LPFC_UERR_STATUS_HI;
+	phba->sli4_hba.ONLINE0regaddr = phba->sli4_hba.conf_regs_memmap_p +
+					LPFC_ONLINE0;
+	phba->sli4_hba.ONLINE1regaddr = phba->sli4_hba.conf_regs_memmap_p +
+					LPFC_ONLINE1;
+	phba->sli4_hba.SCRATCHPADregaddr = phba->sli4_hba.conf_regs_memmap_p +
+					LPFC_SCRATCHPAD;
+}
 
-	/*
-	 * Configure HBA MSI-X attention conditions to messages
-	 */
-	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+/**
+ * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to set up SLI4 BAR1 control status register (CSR)
+ * memory map.
+ **/
+static void
+lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba)
+{
 
-	if (!pmb) {
-		rc = -ENOMEM;
-		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-				"0474 Unable to allocate memory for issuing "
-				"MBOX_CONFIG_MSI command\n");
-		goto mem_fail_out;
-	}
-	rc = lpfc_config_msi(phba, pmb);
-	if (rc)
-		goto mbx_fail_out;
-	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
-	if (rc != MBX_SUCCESS) {
-		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
-				"0351 Config MSI mailbox command failed, "
-				"mbxCmd x%x, mbxStatus x%x\n",
-				pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus);
-		goto mbx_fail_out;
-	}
-
-	/* Free memory allocated for mailbox command */
-	mempool_free(pmb, phba->mbox_mem_pool);
-	return rc;
-
-mbx_fail_out:
-	/* Free memory allocated for mailbox command */
-	mempool_free(pmb, phba->mbox_mem_pool);
-
-mem_fail_out:
-	/* free the irq already requested */
-	free_irq(phba->msix_entries[1].vector, phba);
-
-irq_fail_out:
-	/* free the irq already requested */
-	free_irq(phba->msix_entries[0].vector, phba);
-
-msi_fail_out:
-	/* Unconfigure MSI-X capability structure */
-	pci_disable_msix(phba->pcidev);
-	return rc;
+	phba->sli4_hba.STAregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
+				    LPFC_HST_STATE;
+	phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
+				    LPFC_HST_ISR0;
+	phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
+				    LPFC_HST_IMR0;
+	phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
+				     LPFC_HST_ISCR0;
+	return;
 }
 
 /**
- * lpfc_sli_disable_msix - Disable MSI-X interrupt mode on SLI-3 device.
+ * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
  * @phba: pointer to lpfc hba data structure.
+ * @vf: virtual function number
  *
- * This routine is invoked to release the MSI-X vectors and then disable the
- * MSI-X interrupt mode to device with SLI-3 interface spec.
+ * This routine is invoked to set up SLI4 BAR2 doorbell register memory map
+ * based on the given virtual function number, @vf.
+ *
+ * Return 0 if successful, otherwise -ENODEV.
  **/
-static void
-lpfc_sli_disable_msix(struct lpfc_hba *phba)
+static int
+lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
 {
-	int i;
-
-	/* Free up MSI-X multi-message vectors */
-	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
-		free_irq(phba->msix_entries[i].vector, phba);
-	/* Disable MSI-X */
-	pci_disable_msix(phba->pcidev);
+	if (vf > LPFC_VIR_FUNC_MAX)
+		return -ENODEV;
 
-	return;
+	phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
+				vf * LPFC_VFR_PAGE_SIZE + LPFC_RQ_DOORBELL);
+	phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
+				vf * LPFC_VFR_PAGE_SIZE + LPFC_WQ_DOORBELL);
+	phba->sli4_hba.EQCQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
+				vf * LPFC_VFR_PAGE_SIZE + LPFC_EQCQ_DOORBELL);
+	phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
+				vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
+	phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
+				vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
+	return 0;
 }
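
Each virtual function gets its own page of doorbell registers, so every address computed above is base + vf * page-stride + register-offset. A sketch of that layout arithmetic; the stride, offset, and base here are assumptions for illustration, not the driver's real constants:

	#include <stdint.h>
	#include <stdio.h>

	#define VFR_PAGE_SIZE 4096u	/* assumed per-VF doorbell stride */
	#define RQ_DOORBELL   0x00a0u	/* assumed register offset */

	int main(void)
	{
		uintptr_t drbl_base = 0x100000;	/* made-up mapped BAR2 base */
		uint32_t vf;

		for (vf = 0; vf < 3; vf++)
			printf("vf%u RQ doorbell at 0x%lx\n", vf,
			       (unsigned long)(drbl_base +
					       vf * VFR_PAGE_SIZE +
					       RQ_DOORBELL));
		return 0;
	}
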
 
 /**
- * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
+ * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
  * @phba: pointer to lpfc hba data structure.
  *
- * This routine is invoked to enable the MSI interrupt mode to device with
- * SLI-3 interface spec. The kernel function pci_enable_msi() is called to
- * enable the MSI vector. The device driver is responsible for calling the
- * request_irq() to register MSI vector with a interrupt the handler, which
- * is done in this function.
+ * This routine is invoked to create the bootstrap mailbox
+ * region consistent with the SLI-4 interface spec.  This
+ * routine allocates all memory necessary to communicate
+ * mailbox commands to the port and sets up all alignment
+ * needs.  No locks are expected to be held when calling
+ * this routine.
  *
  * Return codes
  * 	0 - sucessful
- * 	other values - error
- */
+ * 	ENOMEM - could not allocate memory.
+ **/
 static int
-lpfc_sli_enable_msi(struct lpfc_hba *phba)
+lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
 {
-	int rc;
+	uint32_t bmbx_size;
+	struct lpfc_dmabuf *dmabuf;
+	struct dma_address *dma_address;
+	uint32_t pa_addr;
+	uint64_t phys_addr;
+
+	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+	if (!dmabuf)
+		return -ENOMEM;
 
-	rc = pci_enable_msi(phba->pcidev);
-	if (!rc)
-		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-				"0462 PCI enable MSI mode success.\n");
-	else {
-		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-				"0471 PCI enable MSI mode failed (%d)\n", rc);
-		return rc;
+	/*
+	 * The bootstrap mailbox region comprises two parts
+	 * plus an alignment restriction of 16 bytes.
+	 */
+	bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
+	dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
+					  bmbx_size,
+					  &dmabuf->phys,
+					  GFP_KERNEL);
+	if (!dmabuf->virt) {
+		kfree(dmabuf);
+		return -ENOMEM;
 	}
+	memset(dmabuf->virt, 0, bmbx_size);
 
-	rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
-			 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
-	if (rc) {
-		pci_disable_msi(phba->pcidev);
-		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
-				"0478 MSI request_irq failed (%d)\n", rc);
-	}
-	return rc;
+	/*
+	 * Initialize the bootstrap mailbox pointers now so that the register
+	 * operations are simple later.  The mailbox dma address is required
+	 * to be 16-byte aligned.  Also align the virtual memory as each
+	 * mailbox is copied into the bmbx mailbox region before issuing the
+	 * command to the port.
+	 */
+	phba->sli4_hba.bmbx.dmabuf = dmabuf;
+	phba->sli4_hba.bmbx.bmbx_size = bmbx_size;
+
+	phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
+					      LPFC_ALIGN_16_BYTE);
+	phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
+					      LPFC_ALIGN_16_BYTE);
+
+	/*
+	 * Set the high and low physical addresses now.  The SLI4 alignment
+	 * requirement is 16 bytes and the mailbox is posted to the port
+	 * as two 30-bit addresses.  The other data is a bit marking whether
+	 * the 30-bit address is the high or low address.
+	 * Upcast bmbx aphys to 64bits so shift instruction compiles
+	 * clean on 32 bit machines.
+	 */
+	dma_address = &phba->sli4_hba.bmbx.dma_address;
+	phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
+	pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
+	dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
+					   LPFC_BMBX_BIT1_ADDR_HI);
+
+	pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
+	dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
+					   LPFC_BMBX_BIT1_ADDR_LO);
+	return 0;
 }
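
The hi/lo split above packs a 16-byte-aligned 64-bit DMA address into two 30-bit fields: bits 63:34 go into addr_hi and bits 33:4 into addr_lo, each shifted left by two with a marker bit OR'd in. A standalone sketch of that arithmetic; the marker values below are assumptions, not the real LPFC_BMBX_BIT1_ADDR_* definitions:

	#include <stdint.h>
	#include <stdio.h>

	#define BIT1_ADDR_HI 0x1	/* assumed marker, illustration only */
	#define BIT1_ADDR_LO 0x0	/* assumed marker, illustration only */

	int main(void)
	{
		uint64_t aphys = 0x123456789ab0ULL;	/* 16-byte aligned */
		uint32_t hi = (uint32_t)((aphys >> 34) & 0x3fffffff);
		uint32_t lo = (uint32_t)((aphys >> 4) & 0x3fffffff);

		printf("addr_hi=0x%08x addr_lo=0x%08x\n",
		       (hi << 2) | BIT1_ADDR_HI, (lo << 2) | BIT1_ADDR_LO);
		return 0;
	}
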
 
 /**
- * lpfc_sli_disable_msi - Disable MSI interrupt mode to SLI-3 device.
+ * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
  * @phba: pointer to lpfc hba data structure.
  *
- * This routine is invoked to disable the MSI interrupt mode to device with
- * SLI-3 interface spec. The driver calls free_irq() on MSI vector it has
- * done request_irq() on before calling pci_disable_msi(). Failure to do so
- * results in a BUG_ON() and a device will be left with MSI enabled and leaks
- * its vector.
- */
+ * This routine is invoked to tear down the bootstrap mailbox
+ * region and release all host resources. This routine requires
+ * the caller to ensure all mailbox commands have been recovered, no
+ * additional mailbox commands are sent, and interrupts are disabled
+ * before calling this routine.
+ *
+ **/
 static void
-lpfc_sli_disable_msi(struct lpfc_hba *phba)
+lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
 {
-	free_irq(phba->pcidev->irq, phba);
-	pci_disable_msi(phba->pcidev);
-	return;
+	dma_free_coherent(&phba->pcidev->dev,
+			  phba->sli4_hba.bmbx.bmbx_size,
+			  phba->sli4_hba.bmbx.dmabuf->virt,
+			  phba->sli4_hba.bmbx.dmabuf->phys);
+
+	kfree(phba->sli4_hba.bmbx.dmabuf);
+	memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
 }
 
 /**
- * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
+ * lpfc_sli4_read_config - Get the config parameters.
  * @phba: pointer to lpfc hba data structure.
  *
- * This routine is invoked to enable device interrupt and associate driver's
- * interrupt handler(s) to interrupt vector(s) to device with SLI-3 interface
- * spec. Depends on the interrupt mode configured to the driver, the driver
- * will try to fallback from the configured interrupt mode to an interrupt
- * mode which is supported by the platform, kernel, and device in the order
- * of:
- * MSI-X -> MSI -> IRQ.
+ * This routine is invoked to read the configuration parameters from the HBA.
+ * The configuration parameters are used to set the base and maximum values
+ * for RPIs, XRIs, VPIs, VFIs and FCFIs. These values also affect the resource
+ * allocation for the port.
  *
  * Return codes
- *   0 - sucessful
- *   other values - error
+ * 	0 - successful
+ * 	ENOMEM - No available memory
+ *      EIO - The mailbox failed to complete successfully.
  **/
-static uint32_t
-lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
+static int
+lpfc_sli4_read_config(struct lpfc_hba *phba)
 {
-	uint32_t intr_mode = LPFC_INTR_ERROR;
-	int retval;
+	LPFC_MBOXQ_t *pmb;
+	struct lpfc_mbx_read_config *rd_config;
+	uint32_t rc = 0;
 
-	if (cfg_mode == 2) {
-		/* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
-		retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
-		if (!retval) {
-			/* Now, try to enable MSI-X interrupt mode */
-			retval = lpfc_sli_enable_msix(phba);
-			if (!retval) {
-				/* Indicate initialization to MSI-X mode */
-				phba->intr_type = MSIX;
-				intr_mode = 2;
-			}
-		}
+	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!pmb) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"2011 Unable to allocate memory for issuing "
+				"SLI_CONFIG_SPECIAL mailbox command\n");
+		return -ENOMEM;
 	}
 
-	/* Fallback to MSI if MSI-X initialization failed */
-	if (cfg_mode >= 1 && phba->intr_type == NONE) {
-		retval = lpfc_sli_enable_msi(phba);
-		if (!retval) {
-			/* Indicate initialization to MSI mode */
-			phba->intr_type = MSI;
-			intr_mode = 1;
-		}
-	}
+	lpfc_read_config(phba, pmb);
 
-	/* Fallback to INTx if both MSI-X/MSI initalization failed */
-	if (phba->intr_type == NONE) {
-		retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
-				     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
-		if (!retval) {
-			/* Indicate initialization to INTx mode */
-			phba->intr_type = INTx;
-			intr_mode = 0;
-		}
+	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
+	if (rc != MBX_SUCCESS) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+			"2012 Mailbox failed , mbxCmd x%x "
+			"READ_CONFIG, mbxStatus x%x\n",
+			bf_get(lpfc_mqe_command, &pmb->u.mqe),
+			bf_get(lpfc_mqe_status, &pmb->u.mqe));
+		rc = -EIO;
+	} else {
+		rd_config = &pmb->u.mqe.un.rd_config;
+		phba->sli4_hba.max_cfg_param.max_xri =
+			bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
+		phba->sli4_hba.max_cfg_param.xri_base =
+			bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
+		phba->sli4_hba.max_cfg_param.max_vpi =
+			bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
+		phba->sli4_hba.max_cfg_param.vpi_base =
+			bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
+		phba->sli4_hba.max_cfg_param.max_rpi =
+			bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
+		phba->sli4_hba.max_cfg_param.rpi_base =
+			bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config);
+		phba->sli4_hba.max_cfg_param.max_vfi =
+			bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config);
+		phba->sli4_hba.max_cfg_param.vfi_base =
+			bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
+		phba->sli4_hba.max_cfg_param.max_fcfi =
+			bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
+		phba->sli4_hba.max_cfg_param.fcfi_base =
+			bf_get(lpfc_mbx_rd_conf_fcfi_base, rd_config);
+		phba->sli4_hba.max_cfg_param.max_eq =
+			bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
+		phba->sli4_hba.max_cfg_param.max_rq =
+			bf_get(lpfc_mbx_rd_conf_rq_count, rd_config);
+		phba->sli4_hba.max_cfg_param.max_wq =
+			bf_get(lpfc_mbx_rd_conf_wq_count, rd_config);
+		phba->sli4_hba.max_cfg_param.max_cq =
+			bf_get(lpfc_mbx_rd_conf_cq_count, rd_config);
+		phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config);
+		phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
+		phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
+		phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
+		phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base;
+		phba->max_vpi = phba->sli4_hba.max_cfg_param.max_vpi;
+		phba->max_vports = phba->max_vpi;
+		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+				"2003 cfg params XRI(B:%d M:%d), "
+				"VPI(B:%d M:%d) "
+				"VFI(B:%d M:%d) "
+				"RPI(B:%d M:%d) "
+				"FCFI(B:%d M:%d)\n",
+				phba->sli4_hba.max_cfg_param.xri_base,
+				phba->sli4_hba.max_cfg_param.max_xri,
+				phba->sli4_hba.max_cfg_param.vpi_base,
+				phba->sli4_hba.max_cfg_param.max_vpi,
+				phba->sli4_hba.max_cfg_param.vfi_base,
+				phba->sli4_hba.max_cfg_param.max_vfi,
+				phba->sli4_hba.max_cfg_param.rpi_base,
+				phba->sli4_hba.max_cfg_param.max_rpi,
+				phba->sli4_hba.max_cfg_param.fcfi_base,
+				phba->sli4_hba.max_cfg_param.max_fcfi);
 	}
-	return intr_mode;
+	mempool_free(pmb, phba->mbox_mem_pool);
+
+	/* Reset the DFT_HBA_Q_DEPTH to the max xri */
+	if (phba->cfg_hba_queue_depth > (phba->sli4_hba.max_cfg_param.max_xri))
+		phba->cfg_hba_queue_depth =
+				phba->sli4_hba.max_cfg_param.max_xri;
+	return rc;
 }
 
 /**
- * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device.
+ * lpfc_setup_endian_order - Notify the port of the host's endian order.
  * @phba: pointer to lpfc hba data structure.
  *
- * This routine is invoked to disable device interrupt and disassociate the
- * driver's interrupt handler(s) from interrupt vector(s) to device with
- * SLI-3 interface spec. Depending on the interrupt mode, the driver will
- * release the interrupt vector(s) for the message signaled interrupt.
+ * This routine is invoked to set up the host-side endian order to the
+ * HBA consistent with the SLI-4 interface spec.
+ *
+ * Return codes
+ * 	0 - successful
+ * 	ENOMEM - No available memory
+ *      EIO - The mailbox failed to complete successfully.
  **/
-static void
-lpfc_sli_disable_intr(struct lpfc_hba *phba)
+static int
+lpfc_setup_endian_order(struct lpfc_hba *phba)
 {
-	/* Disable the currently initialized interrupt mode */
-	if (phba->intr_type == MSIX)
-		lpfc_sli_disable_msix(phba);
-	else if (phba->intr_type == MSI)
-		lpfc_sli_disable_msi(phba);
-	else if (phba->intr_type == INTx)
-		free_irq(phba->pcidev->irq, phba);
+	LPFC_MBOXQ_t *mboxq;
+	uint32_t rc = 0;
+	uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
+				      HOST_ENDIAN_HIGH_WORD1};
 
-	/* Reset interrupt management states */
-	phba->intr_type = NONE;
-	phba->sli.slistat.sli_intr = 0;
+	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mboxq) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0492 Unable to allocate memory for issuing "
+				"SLI_CONFIG_SPECIAL mailbox command\n");
+		return -ENOMEM;
+	}
 
-	return;
+	/*
+	 * The SLI4_CONFIG_SPECIAL mailbox command requires the first two
+	 * words to contain special data values and no other data.
+	 */
+	memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
+	memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
+	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+	if (rc != MBX_SUCCESS) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0493 SLI_CONFIG_SPECIAL mailbox failed with "
+				"status x%x\n",
+				rc);
+		rc = -EIO;
+	}
+
+	mempool_free(mboxq, phba->mbox_mem_pool);
+	return rc;
 }
 
 /**
- * lpfc_unset_hba - Unset SLI3 hba device initialization
+ * lpfc_sli4_queue_create - Create all the SLI4 queues
  * @phba: pointer to lpfc hba data structure.
  *
- * This routine is invoked to unset the HBA device initialization steps to
- * a device with SLI-3 interface spec.
+ * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA
+ * operation. For each SLI4 queue type, the parameters such as queue entry
+ * count (queue depth) shall be taken from the module parameter. For now,
+ * we just use some constant number as a placeholder.
+ *
+ * Return codes
+ *      0 - successful
+ *      ENOMEM - No available memory
+ *      EIO - The mailbox failed to complete successfully.
  **/
-static void
-lpfc_unset_hba(struct lpfc_hba *phba)
+static int
+lpfc_sli4_queue_create(struct lpfc_hba *phba)
 {
-	struct lpfc_vport *vport = phba->pport;
-	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_queue *qdesc;
+	int fcp_eqidx, fcp_cqidx, fcp_wqidx;
+	int cfg_fcp_wq_count;
+	int cfg_fcp_eq_count;
 
-	spin_lock_irq(shost->host_lock);
-	vport->load_flag |= FC_UNLOADING;
-	spin_unlock_irq(shost->host_lock);
+	/*
+	 * Sanity check for configured queue parameters against the run-time
+	 * device parameters
+	 */
 
-	lpfc_stop_hba_timers(phba);
+	/* Sanity check on FCP fast-path WQ parameters */
+	cfg_fcp_wq_count = phba->cfg_fcp_wq_count;
+	if (cfg_fcp_wq_count >
+	    (phba->sli4_hba.max_cfg_param.max_wq - LPFC_SP_WQN_DEF)) {
+		cfg_fcp_wq_count = phba->sli4_hba.max_cfg_param.max_wq -
+				   LPFC_SP_WQN_DEF;
+		if (cfg_fcp_wq_count < LPFC_FP_WQN_MIN) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"2581 Not enough WQs (%d) from "
+					"the pci function for supporting "
+					"FCP WQs (%d)\n",
+					phba->sli4_hba.max_cfg_param.max_wq,
+					phba->cfg_fcp_wq_count);
+			goto out_error;
+		}
+		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+				"2582 Not enough WQs (%d) from the pci "
+				"function for supporting the requested "
+				"FCP WQs (%d), the actual FCP WQs can "
+				"be supported: %d\n",
+				phba->sli4_hba.max_cfg_param.max_wq,
+				phba->cfg_fcp_wq_count, cfg_fcp_wq_count);
+	}
+	/* The actual number of FCP work queues adopted */
+	phba->cfg_fcp_wq_count = cfg_fcp_wq_count;
+
+	/* Sanity check on FCP fast-path EQ parameters */
+	cfg_fcp_eq_count = phba->cfg_fcp_eq_count;
+	if (cfg_fcp_eq_count >
+	    (phba->sli4_hba.max_cfg_param.max_eq - LPFC_SP_EQN_DEF)) {
+		cfg_fcp_eq_count = phba->sli4_hba.max_cfg_param.max_eq -
+				   LPFC_SP_EQN_DEF;
+		if (cfg_fcp_eq_count < LPFC_FP_EQN_MIN) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"2574 Not enough EQs (%d) from the "
+					"pci function for supporting FCP "
+					"EQs (%d)\n",
+					phba->sli4_hba.max_cfg_param.max_eq,
+					phba->cfg_fcp_eq_count);
+			goto out_error;
+		}
+		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+				"2575 Not enough EQs (%d) from the pci "
+				"function for supporting the requested "
+				"FCP EQs (%d), the actual FCP EQs can "
+				"be supported: %d\n",
+				phba->sli4_hba.max_cfg_param.max_eq,
+				phba->cfg_fcp_eq_count, cfg_fcp_eq_count);
+	}
+	/* It does not make sense to have more EQs than WQs */
+	if (cfg_fcp_eq_count > phba->cfg_fcp_wq_count) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+				"2593 The number of FCP EQs (%d) is more "
+				"than the number of FCP WQs (%d), take "
+				"the number of FCP EQs same as than of "
+				"WQs (%d)\n", cfg_fcp_eq_count,
+				phba->cfg_fcp_wq_count,
+				phba->cfg_fcp_wq_count);
+		cfg_fcp_eq_count = phba->cfg_fcp_wq_count;
+	}
+	/* The actual number of FCP event queues adopted */
+	phba->cfg_fcp_eq_count = cfg_fcp_eq_count;
+	/* The overall number of event queues used */
+	phba->sli4_hba.cfg_eqn = phba->cfg_fcp_eq_count + LPFC_SP_EQN_DEF;
 
-	phba->pport->work_port_events = 0;
+	/*
+	 * Create Event Queues (EQs)
+	 */
 
-	lpfc_sli_hba_down(phba);
+	/* Get EQ depth from module parameter, fake the default for now */
+	phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
+	phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
 
-	lpfc_sli_brdrestart(phba);
+	/* Create slow path event queue */
+	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
+				      phba->sli4_hba.eq_ecount);
+	if (!qdesc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0496 Failed allocate slow-path EQ\n");
+		goto out_error;
+	}
+	phba->sli4_hba.sp_eq = qdesc;
+
+	/* Create fast-path FCP Event Queue(s) */
+	phba->sli4_hba.fp_eq = kzalloc((sizeof(struct lpfc_queue *) *
+			       phba->cfg_fcp_eq_count), GFP_KERNEL);
+	if (!phba->sli4_hba.fp_eq) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2576 Failed allocate memory for fast-path "
+				"EQ record array\n");
+		goto out_free_sp_eq;
+	}
+	for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
+		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
+					      phba->sli4_hba.eq_ecount);
+		if (!qdesc) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"0497 Failed allocate fast-path EQ\n");
+			goto out_free_fp_eq;
+		}
+		phba->sli4_hba.fp_eq[fcp_eqidx] = qdesc;
+	}
+
+	/*
+	 * Create Complete Queues (CQs)
+	 */
+
+	/* Get CQ depth from module parameter, fake the default for now */
+	phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
+	phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
+
+	/* Create slow-path Mailbox Command Complete Queue */
+	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
+				      phba->sli4_hba.cq_ecount);
+	if (!qdesc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0500 Failed allocate slow-path mailbox CQ\n");
+		goto out_free_fp_eq;
+	}
+	phba->sli4_hba.mbx_cq = qdesc;
+
+	/* Create slow-path ELS Complete Queue */
+	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
+				      phba->sli4_hba.cq_ecount);
+	if (!qdesc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0501 Failed allocate slow-path ELS CQ\n");
+		goto out_free_mbx_cq;
+	}
+	phba->sli4_hba.els_cq = qdesc;
+
+	/* Create slow-path Unsolicited Receive Complete Queue */
+	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
+				      phba->sli4_hba.cq_ecount);
+	if (!qdesc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0502 Failed allocate slow-path USOL RX CQ\n");
+		goto out_free_els_cq;
+	}
+	phba->sli4_hba.rxq_cq = qdesc;
+
+	/* Create fast-path FCP Completion Queue(s), one-to-one with EQs */
+	phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) *
+				phba->cfg_fcp_eq_count), GFP_KERNEL);
+	if (!phba->sli4_hba.fcp_cq) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2577 Failed allocate memory for fast-path "
+				"CQ record array\n");
+		goto out_free_rxq_cq;
+	}
+	for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) {
+		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
+					      phba->sli4_hba.cq_ecount);
+		if (!qdesc) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"0499 Failed allocate fast-path FCP "
+					"CQ (%d)\n", fcp_cqidx);
+			goto out_free_fcp_cq;
+		}
+		phba->sli4_hba.fcp_cq[fcp_cqidx] = qdesc;
+	}
+
+	/* Create Mailbox Command Queue */
+	phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
+	phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
+
+	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize,
+				      phba->sli4_hba.mq_ecount);
+	if (!qdesc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0505 Failed allocate slow-path MQ\n");
+		goto out_free_fcp_cq;
+	}
+	phba->sli4_hba.mbx_wq = qdesc;
+
+	/*
+	 * Create all the Work Queues (WQs)
+	 */
+	phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
+	phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;
+
+	/* Create slow-path ELS Work Queue */
+	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
+				      phba->sli4_hba.wq_ecount);
+	if (!qdesc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0504 Failed allocate slow-path ELS WQ\n");
+		goto out_free_mbx_wq;
+	}
+	phba->sli4_hba.els_wq = qdesc;
+
+	/* Create fast-path FCP Work Queue(s) */
+	phba->sli4_hba.fcp_wq = kzalloc((sizeof(struct lpfc_queue *) *
+				phba->cfg_fcp_wq_count), GFP_KERNEL);
+	if (!phba->sli4_hba.fcp_wq) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2578 Failed allocate memory for fast-path "
+				"WQ record array\n");
+		goto out_free_els_wq;
+	}
+	for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
+		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
+					      phba->sli4_hba.wq_ecount);
+		if (!qdesc) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"0503 Failed allocate fast-path FCP "
+					"WQ (%d)\n", fcp_wqidx);
+			goto out_free_fcp_wq;
+		}
+		phba->sli4_hba.fcp_wq[fcp_wqidx] = qdesc;
+	}
+
+	/*
+	 * Create Receive Queue (RQ)
+	 */
+	phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
+	phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;
+
+	/* Create Receive Queue for header */
+	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
+				      phba->sli4_hba.rq_ecount);
+	if (!qdesc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0506 Failed allocate receive HRQ\n");
+		goto out_free_fcp_wq;
+	}
+	phba->sli4_hba.hdr_rq = qdesc;
+
+	/* Create Receive Queue for data */
+	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
+				      phba->sli4_hba.rq_ecount);
+	if (!qdesc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0507 Failed allocate receive DRQ\n");
+		goto out_free_hdr_rq;
+	}
+	phba->sli4_hba.dat_rq = qdesc;
+
+	return 0;
+
+out_free_hdr_rq:
+	lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
+	phba->sli4_hba.hdr_rq = NULL;
+out_free_fcp_wq:
+	for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--) {
+		lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_wqidx]);
+		phba->sli4_hba.fcp_wq[fcp_wqidx] = NULL;
+	}
+	kfree(phba->sli4_hba.fcp_wq);
+out_free_els_wq:
+	lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
+	phba->sli4_hba.els_wq = NULL;
+out_free_mbx_wq:
+	lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
+	phba->sli4_hba.mbx_wq = NULL;
+out_free_fcp_cq:
+	for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) {
+		lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_cqidx]);
+		phba->sli4_hba.fcp_cq[fcp_cqidx] = NULL;
+	}
+	kfree(phba->sli4_hba.fcp_cq);
+out_free_rxq_cq:
+	lpfc_sli4_queue_free(phba->sli4_hba.rxq_cq);
+	phba->sli4_hba.rxq_cq = NULL;
+out_free_els_cq:
+	lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
+	phba->sli4_hba.els_cq = NULL;
+out_free_mbx_cq:
+	lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
+	phba->sli4_hba.mbx_cq = NULL;
+out_free_fp_eq:
+	for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--) {
+		lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_eqidx]);
+		phba->sli4_hba.fp_eq[fcp_eqidx] = NULL;
+	}
+	kfree(phba->sli4_hba.fp_eq);
+out_free_sp_eq:
+	lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
+	phba->sli4_hba.sp_eq = NULL;
+out_error:
+	return -ENOMEM;
+}
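
The error path above is the classic reverse-order goto ladder: each successful allocation adds one more label to fall through on a later failure, so resources are torn down newest-first. A compact sketch of the idiom (the real function hands the queues to phba on success; the sketch just frees them again):

	#include <stdlib.h>

	static int create_queues(void)
	{
		void *eq, *cq, *wq;

		eq = malloc(64);
		if (!eq)
			goto out_error;
		cq = malloc(64);
		if (!cq)
			goto out_free_eq;
		wq = malloc(64);
		if (!wq)
			goto out_free_cq;

		/* success path: release what a real driver would keep */
		free(wq);
		free(cq);
		free(eq);
		return 0;

	out_free_cq:
		free(cq);
	out_free_eq:
		free(eq);
	out_error:
		return -1;
	}

	int main(void)
	{
		return create_queues();
	}
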
+
+/**
+ * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to release all the SLI4 queues used by the
+ * FCoE HBA operation.
+ *
+ * Return codes
+ *      0 - successful
+ *      ENOMEM - No available memory
+ *      EIO - The mailbox failed to complete successfully.
+ **/
+static void
+lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
+{
+	int fcp_qidx;
+
+	/* Release mailbox command work queue */
+	lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
+	phba->sli4_hba.mbx_wq = NULL;
+
+	/* Release ELS work queue */
+	lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
+	phba->sli4_hba.els_wq = NULL;
+
+	/* Release FCP work queue */
+	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++)
+		lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_qidx]);
+	kfree(phba->sli4_hba.fcp_wq);
+	phba->sli4_hba.fcp_wq = NULL;
+
+	/* Release unsolicited receive queue */
+	lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
+	phba->sli4_hba.hdr_rq = NULL;
+	lpfc_sli4_queue_free(phba->sli4_hba.dat_rq);
+	phba->sli4_hba.dat_rq = NULL;
+
+	/* Release unsolicited receive complete queue */
+	lpfc_sli4_queue_free(phba->sli4_hba.rxq_cq);
+	phba->sli4_hba.rxq_cq = NULL;
+
+	/* Release ELS complete queue */
+	lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
+	phba->sli4_hba.els_cq = NULL;
+
+	/* Release mailbox command complete queue */
+	lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
+	phba->sli4_hba.mbx_cq = NULL;
+
+	/* Release FCP response complete queue */
+	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
+		lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]);
+	kfree(phba->sli4_hba.fcp_cq);
+	phba->sli4_hba.fcp_cq = NULL;
+
+	/* Release fast-path event queue */
+	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
+		lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]);
+	kfree(phba->sli4_hba.fp_eq);
+	phba->sli4_hba.fp_eq = NULL;
+
+	/* Release slow-path event queue */
+	lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
+	phba->sli4_hba.sp_eq = NULL;
+
+	return;
+}
+
+/**
+ * lpfc_sli4_queue_setup - Set up all the SLI4 queues
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to set up all the SLI4 queues for the FCoE HBA
+ * operation.
+ *
+ * Return codes
+ *      0 - successful
+ *      ENOMEM - No available memory
+ *      EIO - The mailbox failed to complete successfully.
+ **/
+int
+lpfc_sli4_queue_setup(struct lpfc_hba *phba)
+{
+	int rc = -ENOMEM;
+	int fcp_eqidx, fcp_cqidx, fcp_wqidx;
+	int fcp_cq_index = 0;
+
+	/*
+	 * Set up Event Queues (EQs)
+	 */
+
+	/* Set up slow-path event queue */
+	if (!phba->sli4_hba.sp_eq) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0520 Slow-path EQ not allocated\n");
+		goto out_error;
+	}
+	rc = lpfc_eq_create(phba, phba->sli4_hba.sp_eq,
+			    LPFC_SP_DEF_IMAX);
+	if (rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0521 Failed setup of slow-path EQ: "
+				"rc = 0x%x\n", rc);
+		goto out_error;
+	}
+	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+			"2583 Slow-path EQ setup: queue-id=%d\n",
+			phba->sli4_hba.sp_eq->queue_id);
+
+	/* Set up fast-path event queue */
+	for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
+		if (!phba->sli4_hba.fp_eq[fcp_eqidx]) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"0522 Fast-path EQ (%d) not "
+					"allocated\n", fcp_eqidx);
+			goto out_destroy_fp_eq;
+		}
+		rc = lpfc_eq_create(phba, phba->sli4_hba.fp_eq[fcp_eqidx],
+				    phba->cfg_fcp_imax);
+		if (rc) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"0523 Failed setup of fast-path EQ "
+					"(%d), rc = 0x%x\n", fcp_eqidx, rc);
+			goto out_destroy_fp_eq;
+		}
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+				"2584 Fast-path EQ setup: "
+				"queue[%d]-id=%d\n", fcp_eqidx,
+				phba->sli4_hba.fp_eq[fcp_eqidx]->queue_id);
+	}
+
+	/*
+	 * Set up Complete Queues (CQs)
+	 */
+
+	/* Set up slow-path MBOX Complete Queue as the first CQ */
+	if (!phba->sli4_hba.mbx_cq) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0528 Mailbox CQ not allocated\n");
+		goto out_destroy_fp_eq;
+	}
+	rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq, phba->sli4_hba.sp_eq,
+			    LPFC_MCQ, LPFC_MBOX);
+	if (rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0529 Failed setup of slow-path mailbox CQ: "
+				"rc = 0x%x\n", rc);
+		goto out_destroy_fp_eq;
+	}
+	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+			"2585 MBX CQ setup: cq-id=%d, parent eq-id=%d\n",
+			phba->sli4_hba.mbx_cq->queue_id,
+			phba->sli4_hba.sp_eq->queue_id);
+
+	/* Set up slow-path ELS Complete Queue */
+	if (!phba->sli4_hba.els_cq) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0530 ELS CQ not allocated\n");
+		goto out_destroy_mbx_cq;
+	}
+	rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq, phba->sli4_hba.sp_eq,
+			    LPFC_WCQ, LPFC_ELS);
+	if (rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0531 Failed setup of slow-path ELS CQ: "
+				"rc = 0x%x\n", rc);
+		goto out_destroy_mbx_cq;
+	}
+	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+			"2586 ELS CQ setup: cq-id=%d, parent eq-id=%d\n",
+			phba->sli4_hba.els_cq->queue_id,
+			phba->sli4_hba.sp_eq->queue_id);
+
+	/* Set up slow-path Unsolicited Receive Complete Queue */
+	if (!phba->sli4_hba.rxq_cq) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0532 USOL RX CQ not allocated\n");
+		goto out_destroy_els_cq;
+	}
+	rc = lpfc_cq_create(phba, phba->sli4_hba.rxq_cq, phba->sli4_hba.sp_eq,
+			    LPFC_RCQ, LPFC_USOL);
+	if (rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0533 Failed setup of slow-path USOL RX CQ: "
+				"rc = 0x%x\n", rc);
+		goto out_destroy_els_cq;
+	}
+	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+			"2587 USL CQ setup: cq-id=%d, parent eq-id=%d\n",
+			phba->sli4_hba.rxq_cq->queue_id,
+			phba->sli4_hba.sp_eq->queue_id);
+
+	/* Set up fast-path FCP Response Complete Queue */
+	for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) {
+		if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"0526 Fast-path FCP CQ (%d) not "
+					"allocated\n", fcp_cqidx);
+			goto out_destroy_fcp_cq;
+		}
+		rc = lpfc_cq_create(phba, phba->sli4_hba.fcp_cq[fcp_cqidx],
+				    phba->sli4_hba.fp_eq[fcp_cqidx],
+				    LPFC_WCQ, LPFC_FCP);
+		if (rc) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"0527 Failed setup of fast-path FCP "
+					"CQ (%d), rc = 0x%x\n", fcp_cqidx, rc);
+			goto out_destroy_fcp_cq;
+		}
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+				"2588 FCP CQ setup: cq[%d]-id=%d, "
+				"parent eq[%d]-id=%d\n",
+				fcp_cqidx,
+				phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id,
+				fcp_cqidx,
+				phba->sli4_hba.fp_eq[fcp_cqidx]->queue_id);
+	}
+
+	/*
+	 * Set up all the Work Queues (WQs)
+	 */
+
+	/* Set up Mailbox Command Queue */
+	if (!phba->sli4_hba.mbx_wq) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0538 Slow-path MQ not allocated\n");
+		goto out_destroy_fcp_cq;
+	}
+	rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq,
+			    phba->sli4_hba.mbx_cq, LPFC_MBOX);
+	if (rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0539 Failed setup of slow-path MQ: "
+				"rc = 0x%x\n", rc);
+		goto out_destroy_fcp_cq;
+	}
+	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+			"2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n",
+			phba->sli4_hba.mbx_wq->queue_id,
+			phba->sli4_hba.mbx_cq->queue_id);
+
+	/* Set up slow-path ELS Work Queue */
+	if (!phba->sli4_hba.els_wq) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0536 Slow-path ELS WQ not allocated\n");
+		goto out_destroy_mbx_wq;
+	}
+	rc = lpfc_wq_create(phba, phba->sli4_hba.els_wq,
+			    phba->sli4_hba.els_cq, LPFC_ELS);
+	if (rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0537 Failed setup of slow-path ELS WQ: "
+				"rc = 0x%x\n", rc);
+		goto out_destroy_mbx_wq;
+	}
+	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+			"2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n",
+			phba->sli4_hba.els_wq->queue_id,
+			phba->sli4_hba.els_cq->queue_id);
+
+	/* Set up fast-path FCP Work Queue */
+	for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
+		if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"0534 Fast-path FCP WQ (%d) not "
+					"allocated\n", fcp_wqidx);
+			goto out_destroy_fcp_wq;
+		}
+		rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx],
+				    phba->sli4_hba.fcp_cq[fcp_cq_index],
+				    LPFC_FCP);
+		if (rc) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"0535 Failed setup of fast-path FCP "
+					"WQ (%d), rc = 0x%x\n", fcp_wqidx, rc);
+			goto out_destroy_fcp_wq;
+		}
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+				"2591 FCP WQ setup: wq[%d]-id=%d, "
+				"parent cq[%d]-id=%d\n",
+				fcp_wqidx,
+				phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id,
+				fcp_cq_index,
+				phba->sli4_hba.fcp_cq[fcp_cq_index]->queue_id);
+		/* Round robin FCP Work Queue's Completion Queue assignment */
+		fcp_cq_index = ((fcp_cq_index + 1) % phba->cfg_fcp_eq_count);
+	}
+
+	/*
+	 * Create Receive Queue (RQ)
+	 */
+	if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0540 Receive Queue not allocated\n");
+		goto out_destroy_fcp_wq;
+	}
+	rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
+			    phba->sli4_hba.rxq_cq, LPFC_USOL);
+	if (rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0541 Failed setup of Receive Queue: "
+				"rc = 0x%x\n", rc);
+		goto out_destroy_fcp_wq;
+	}
+	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+			"2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d "
+			"parent cq-id=%d\n",
+			phba->sli4_hba.hdr_rq->queue_id,
+			phba->sli4_hba.dat_rq->queue_id,
+			phba->sli4_hba.rxq_cq->queue_id);
+	return 0;
+
+out_destroy_fcp_wq:
+	for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--)
+		lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]);
+	lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
+out_destroy_mbx_wq:
+	lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
+out_destroy_fcp_cq:
+	for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--)
+		lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]);
+	lpfc_cq_destroy(phba, phba->sli4_hba.rxq_cq);
+out_destroy_els_cq:
+	lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
+out_destroy_mbx_cq:
+	lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
+out_destroy_fp_eq:
+	for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--)
+		lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_eqidx]);
+	lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
+out_error:
+	return rc;
+}
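
Because there can be more FCP WQs than FCP EQs (and hence CQs), the loop above assigns completion queues round-robin, wrapping fcp_cq_index modulo the EQ count. A sketch of the resulting mapping with assumed counts:

	#include <stdio.h>

	int main(void)
	{
		int fcp_wq_count = 8, fcp_eq_count = 3;	/* assumed counts */
		int wq, cq = 0;

		for (wq = 0; wq < fcp_wq_count; wq++) {
			printf("FCP WQ[%d] -> FCP CQ[%d]\n", wq, cq);
			cq = (cq + 1) % fcp_eq_count;	/* wrap like above */
		}
		return 0;
	}
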
+
+/**
+ * lpfc_sli4_queue_unset - Unset all the SLI4 queues
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to unset all the SLI4 queues used by the
+ * FCoE HBA operation.
+ *
+ * Return codes
+ *      0 - successful
+ *      ENOMEM - No available memory
+ *      EIO - The mailbox failed to complete successfully.
+ **/
+void
+lpfc_sli4_queue_unset(struct lpfc_hba *phba)
+{
+	int fcp_qidx;
+
+	/* Unset mailbox command work queue */
+	lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
+	/* Unset ELS work queue */
+	lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
+	/* Unset unsolicited receive queue */
+	lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq);
+	/* Unset FCP work queue */
+	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++)
+		lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_qidx]);
+	/* Unset mailbox command complete queue */
+	lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
+	/* Unset ELS complete queue */
+	lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
+	/* Unset unsolicited receive complete queue */
+	lpfc_cq_destroy(phba, phba->sli4_hba.rxq_cq);
+	/* Unset FCP response complete queue */
+	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
+		lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]);
+	/* Unset fast-path event queue */
+	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
+		lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_qidx]);
+	/* Unset slow-path event queue */
+	lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
+}
+
+/**
+ * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to allocate and set up a pool of completion queue
+ * events. The body of the completion queue event is a completion queue entry
+ * (CQE). For now, this pool is used for the interrupt service routine to queue
+ * the following HBA completion queue events for the worker thread to process:
+ *   - Mailbox asynchronous events
+ *   - Receive queue completion unsolicited events
+ * Later, this can be used for all the slow-path events.
+ *
+ * Return codes
+ *      0 - successful
+ *      -ENOMEM - No available memory
+ **/
+static int
+lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba)
+{
+	struct lpfc_cq_event *cq_event;
+	int i;
+
+	for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) {
+		cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL);
+		if (!cq_event)
+			goto out_pool_create_fail;
+		list_add_tail(&cq_event->list,
+			      &phba->sli4_hba.sp_cqe_event_pool);
+	}
+	return 0;
+
+out_pool_create_fail:
+	lpfc_sli4_cq_event_pool_destroy(phba);
+	return -ENOMEM;
+}
+
+/**
+ * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to free the pool of completion queue events at
+ * driver unload time. Note that it is the responsibility of the driver
+ * cleanup routine to free all the outstanding completion-queue events
+ * allocated from this pool back into the pool before invoking this routine
+ * to destroy the pool.
+ **/
+static void
+lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba)
+{
+	struct lpfc_cq_event *cq_event, *next_cq_event;
+
+	list_for_each_entry_safe(cq_event, next_cq_event,
+				 &phba->sli4_hba.sp_cqe_event_pool, list) {
+		list_del(&cq_event->list);
+		kfree(cq_event);
+	}
+}
+
+/**
+ * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is the lock-free version of the API invoked to allocate a
+ * completion-queue event from the free pool.
+ *
+ * Return: Pointer to the newly allocated completion-queue event if successful
+ *         NULL otherwise.
+ **/
+struct lpfc_cq_event *
+__lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
+{
+	struct lpfc_cq_event *cq_event = NULL;
+
+	list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event,
+			 struct lpfc_cq_event, list);
+	return cq_event;
+}
+
+/**
+ * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is the locked version of the API invoked to allocate a
+ * completion-queue event from the free pool.
+ *
+ * Return: Pointer to the newly allocated completion-queue event if successful
+ *         NULL otherwise.
+ **/
+struct lpfc_cq_event *
+lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
+{
+	struct lpfc_cq_event *cq_event;
+	unsigned long iflags;
+
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	cq_event = __lpfc_sli4_cq_event_alloc(phba);
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+	return cq_event;
+}
+
+/**
+ * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
+ * @phba: pointer to lpfc hba data structure.
+ * @cq_event: pointer to the completion queue event to be freed.
+ *
+ * This routine is the lock-free version of the API invoked to release a
+ * completion-queue event back into the free pool.
+ **/
+void
+__lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
+			     struct lpfc_cq_event *cq_event)
+{
+	list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
+}
+
+/**
+ * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
+ * @phba: pointer to lpfc hba data structure.
+ * @cq_event: pointer to the completion queue event to be freed.
+ *
+ * This routine is the locked version of the API invoked to release a
+ * completion-queue event back into the free pool.
+ **/
+void
+lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
+			   struct lpfc_cq_event *cq_event)
+{
+	unsigned long iflags;
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	__lpfc_sli4_cq_event_release(phba, cq_event);
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+}
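
The alloc/release pairs above follow the common kernel convention of a lock-free double-underscore core plus a thin locking wrapper, so a caller that already holds hbalock can use the __ variant directly. A user-space sketch of the pattern, with a pthread mutex standing in for the spinlock:

	#include <pthread.h>

	static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
	static int pool_count = 1;

	static int __pool_get(void)	/* caller must hold pool_lock */
	{
		if (pool_count == 0)
			return -1;
		pool_count--;
		return 0;
	}

	static int pool_get(void)	/* locking wrapper around the core */
	{
		int rc;

		pthread_mutex_lock(&pool_lock);
		rc = __pool_get();
		pthread_mutex_unlock(&pool_lock);
		return rc;
	}

	int main(void)
	{
		return pool_get();	/* 0: one pooled event was available */
	}
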
+
+/**
+ * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is to free all the pending completion-queue events
+ * back into the free pool for device reset.
+ **/
+static void
+lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
+{
+	LIST_HEAD(cqelist);
+	struct lpfc_cq_event *cqe;
+	unsigned long iflags;
+
+	/* Retrieve all the pending WCQEs from pending WCQE lists */
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	/* Pending FCP XRI abort events */
+	list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
+			 &cqelist);
+	/* Pending ELS XRI abort events */
+	list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
+			 &cqelist);
+	/* Pending async events */
+	list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
+			 &cqelist);
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+
+	while (!list_empty(&cqelist)) {
+		list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list);
+		lpfc_sli4_cq_event_release(phba, cqe);
+	}
+}
+
+/**
+ * lpfc_pci_function_reset - Reset pci function.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to request a PCI function reset. It destroys
+ * all resources assigned to the PCI function that originates this request.
+ *
+ * Return codes
+ *      0 - successful
+ *      ENOMEM - No available memory
+ *      EIO - The mailbox failed to complete successfully.
+ **/
+int
+lpfc_pci_function_reset(struct lpfc_hba *phba)
+{
+	LPFC_MBOXQ_t *mboxq;
+	uint32_t rc = 0;
+	uint32_t shdr_status, shdr_add_status;
+	union lpfc_sli4_cfg_shdr *shdr;
+
+	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mboxq) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0494 Unable to allocate memory for issuing "
+				"SLI_FUNCTION_RESET mailbox command\n");
+		return -ENOMEM;
+	}
+
+	/* Set up PCI function reset SLI4_CONFIG mailbox-ioctl command */
+	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
+			 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0,
+			 LPFC_SLI4_MBX_EMBED);
+	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+	shdr = (union lpfc_sli4_cfg_shdr *)
+		&mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
+	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+	if (rc != MBX_TIMEOUT)
+		mempool_free(mboxq, phba->mbox_mem_pool);
+	if (shdr_status || shdr_add_status || rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0495 SLI_FUNCTION_RESET mailbox failed with "
+				"status x%x add_status x%x, mbx status x%x\n",
+				shdr_status, shdr_add_status, rc);
+		rc = -ENXIO;
+	}
+	return rc;
+}
+
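+/*
+ * Caller sketch (illustrative; the surrounding init path is assumed): the
+ * reset is requested before any resources are posted, so a failure can be
+ * handled by simply aborting bring-up:
+ *
+ *	rc = lpfc_pci_function_reset(phba);
+ *	if (rc)		(-ENOMEM or -ENXIO)
+ *		return rc;
+ */
+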
+/**
+ * lpfc_sli4_send_nop_mbox_cmds - Send sli-4 nop mailbox commands
+ * @phba: pointer to lpfc hba data structure.
+ * @cnt: number of nop mailbox commands to send.
+ *
+ * This routine is invoked to send a number @cnt of NOP mailbox commands and
+ * wait for each command to complete.
+ *
+ * Return: the number of NOP mailbox commands completed.
+ **/
+static int
+lpfc_sli4_send_nop_mbox_cmds(struct lpfc_hba *phba, uint32_t cnt)
+{
+	LPFC_MBOXQ_t *mboxq;
+	int length, cmdsent;
+	uint32_t mbox_tmo;
+	uint32_t rc = 0;
+	uint32_t shdr_status, shdr_add_status;
+	union lpfc_sli4_cfg_shdr *shdr;
+
+	if (cnt == 0) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+				"2518 Requested to send 0 NOP mailbox cmd\n");
+		return cnt;
+	}
+
+	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mboxq) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2519 Unable to allocate memory for issuing "
+				"NOP mailbox command\n");
+		return 0;
+	}
+
+	/* Set up NOP SLI4_CONFIG mailbox-ioctl command */
+	length = (sizeof(struct lpfc_mbx_nop) -
+		  sizeof(struct lpfc_sli4_cfg_mhdr));
+	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
+			 LPFC_MBOX_OPCODE_NOP, length, LPFC_SLI4_MBX_EMBED);
+
+	mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
+	for (cmdsent = 0; cmdsent < cnt; cmdsent++) {
+		if (!phba->sli4_hba.intr_enable)
+			rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+		else
+			rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
+		if (rc == MBX_TIMEOUT)
+			break;
+		/* Check return status */
+		shdr = (union lpfc_sli4_cfg_shdr *)
+			&mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
+		shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+		shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
+					 &shdr->response);
+		if (shdr_status || shdr_add_status || rc) {
+			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+					"2520 NOP mailbox command failed "
+					"status x%x add_status x%x mbx "
+					"status x%x\n", shdr_status,
+					shdr_add_status, rc);
+			break;
+		}
+	}
+
+	if (rc != MBX_TIMEOUT)
+		mempool_free(mboxq, phba->mbox_mem_pool);
+
+	return cmdsent;
+}
+
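+/*
+ * Usage sketch: the probe-time active-interrupt test (see
+ * lpfc_pci_probe_one_s4() below) sends LPFC_ACT_INTR_CNT NOPs and then
+ * checks the interrupt counter:
+ *
+ *	mcnt = lpfc_sli4_send_nop_mbox_cmds(phba, LPFC_ACT_INTR_CNT);
+ *	if (phba->sli.slistat.sli_intr >= LPFC_ACT_INTR_CNT)
+ *		the configured interrupt mode is working
+ */
+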
+/**
+ * lpfc_sli4_fcfi_unreg - Unregister fcfi from device
+ * @phba: pointer to lpfc hba data structure.
+ * @fcfi: fcf index.
+ *
+ * This routine is invoked to unregister a FCFI from device.
+ **/
+void
+lpfc_sli4_fcfi_unreg(struct lpfc_hba *phba, uint16_t fcfi)
+{
+	LPFC_MBOXQ_t *mbox;
+	uint32_t mbox_tmo;
+	int rc;
+	unsigned long flags;
+
+	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+
+	if (!mbox)
+		return;
+
+	lpfc_unreg_fcfi(mbox, fcfi);
+
+	if (!phba->sli4_hba.intr_enable)
+		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+	else {
+		mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
+		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
+	}
+	if (rc != MBX_TIMEOUT)
+		mempool_free(mbox, phba->mbox_mem_pool);
+	if (rc != MBX_SUCCESS)
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"2517 Unregister FCFI command failed "
+				"status %d, mbxStatus x%x\n", rc,
+				bf_get(lpfc_mqe_status, &mbox->u.mqe));
+	else {
+		spin_lock_irqsave(&phba->hbalock, flags);
+		/* Mark the FCFI as no longer registered */
+		phba->fcf.fcf_flag &=
+			~(FCF_AVAILABLE | FCF_REGISTERED | FCF_DISCOVERED);
+		spin_unlock_irqrestore(&phba->hbalock, flags);
+	}
+}
+
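+/*
+ * Pattern note: like the NOP sender above, this routine issues the mailbox
+ * with MBX_POLL while interrupts are not yet enabled and with
+ * lpfc_sli_issue_mbox_wait() otherwise, and it only frees the mailbox when
+ * the command did not time out -- on MBX_TIMEOUT the completion handler
+ * still owns the mailbox memory.
+ */
+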
+/**
+ * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to set up the PCI device memory space for device
+ * with SLI-4 interface spec.
+ *
+ * Return codes
+ * 	0 - successful
+ * 	other values - error
+ **/
+static int
+lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
+{
+	struct pci_dev *pdev;
+	unsigned long bar0map_len, bar1map_len, bar2map_len;
+	int error = -ENODEV;
+
+	/* Obtain PCI device reference */
+	if (!phba->pcidev)
+		return error;
+	else
+		pdev = phba->pcidev;
+
+	/* Set the device DMA mask size */
+	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0)
+		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)
+			return error;
+
+	/* Get the bus address of SLI4 device Bar0, Bar1, and Bar2 and the
+	 * number of bytes required by each mapping. They actually map to
+	 * PCI BAR regions 1, 2, and 4 of the SLI4 device.
+	 */
+	phba->pci_bar0_map = pci_resource_start(pdev, LPFC_SLI4_BAR0);
+	bar0map_len = pci_resource_len(pdev, LPFC_SLI4_BAR0);
+
+	phba->pci_bar1_map = pci_resource_start(pdev, LPFC_SLI4_BAR1);
+	bar1map_len = pci_resource_len(pdev, LPFC_SLI4_BAR1);
+
+	phba->pci_bar2_map = pci_resource_start(pdev, LPFC_SLI4_BAR2);
+	bar2map_len = pci_resource_len(pdev, LPFC_SLI4_BAR2);
+
+	/* Map SLI4 PCI Config Space Register base to a kernel virtual addr */
+	phba->sli4_hba.conf_regs_memmap_p =
+				ioremap(phba->pci_bar0_map, bar0map_len);
+	if (!phba->sli4_hba.conf_regs_memmap_p) {
+		dev_printk(KERN_ERR, &pdev->dev,
+			   "ioremap failed for SLI4 PCI config registers.\n");
+		goto out;
+	}
+
+	/* Map SLI4 HBA Control Register base to a kernel virtual address. */
+	phba->sli4_hba.ctrl_regs_memmap_p =
+				ioremap(phba->pci_bar1_map, bar1map_len);
+	if (!phba->sli4_hba.ctrl_regs_memmap_p) {
+		dev_printk(KERN_ERR, &pdev->dev,
+			   "ioremap failed for SLI4 HBA control registers.\n");
+		goto out_iounmap_conf;
+	}
+
+	/* Map SLI4 HBA Doorbell Register base to a kernel virtual address. */
+	phba->sli4_hba.drbl_regs_memmap_p =
+				ioremap(phba->pci_bar2_map, bar2map_len);
+	if (!phba->sli4_hba.drbl_regs_memmap_p) {
+		dev_printk(KERN_ERR, &pdev->dev,
+			   "ioremap failed for SLI4 HBA doorbell registers.\n");
+		goto out_iounmap_ctrl;
+	}
+
+	/* Set up BAR0 PCI config space register memory map */
+	lpfc_sli4_bar0_register_memmap(phba);
+
+	/* Set up BAR1 register memory map */
+	lpfc_sli4_bar1_register_memmap(phba);
+
+	/* Set up BAR2 register memory map */
+	error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
+	if (error)
+		goto out_iounmap_all;
+
+	return 0;
+
+out_iounmap_all:
+	iounmap(phba->sli4_hba.drbl_regs_memmap_p);
+out_iounmap_ctrl:
+	iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
+out_iounmap_conf:
+	iounmap(phba->sli4_hba.conf_regs_memmap_p);
+out:
+	return error;
+}
+
+/**
+ * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to unset the PCI device memory space for device
+ * with SLI-4 interface spec.
+ **/
+static void
+lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
+{
+	struct pci_dev *pdev;
+
+	/* Obtain PCI device reference */
+	if (!phba->pcidev)
+		return;
+	else
+		pdev = phba->pcidev;
+
+	/* Unmap I/O memory space */
+	iounmap(phba->sli4_hba.drbl_regs_memmap_p);
+	iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
+	iounmap(phba->sli4_hba.conf_regs_memmap_p);
+
+	return;
+}
+
+/**
+ * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to enable the MSI-X interrupt vectors to device
+ * with SLI-3 interface specs. The kernel function pci_enable_msix() is
+ * called to enable the MSI-X vectors. Note that pci_enable_msix(), once
+ * invoked, enables either all or nothing, depending on the current
+ * availability of PCI vector resources. The device driver is responsible
+ * for calling the individual request_irq() to register each MSI-X vector
+ * with an interrupt handler, which is done in this function. Note that
+ * later, when the device is unloading, the driver should always call
+ * free_irq() on all MSI-X vectors it has done request_irq() on before
+ * calling pci_disable_msix(). Failure to do so results in a BUG_ON() and
+ * the device will be left with MSI-X enabled, leaking its vectors.
+ *
+ * Return codes
+ *   0 - successful
+ *   other values - error
+ **/
+static int
+lpfc_sli_enable_msix(struct lpfc_hba *phba)
+{
+	int rc, i;
+	LPFC_MBOXQ_t *pmb;
+
+	/* Set up MSI-X multi-message vectors */
+	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
+		phba->msix_entries[i].entry = i;
+
+	/* Configure MSI-X capability structure */
+	rc = pci_enable_msix(phba->pcidev, phba->msix_entries,
+				ARRAY_SIZE(phba->msix_entries));
+	if (rc) {
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+				"0420 PCI enable MSI-X failed (%d)\n", rc);
+		goto msi_fail_out;
+	}
+	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+				"0477 MSI-X entry[%d]: vector=x%x "
+				"message=%d\n", i,
+				phba->msix_entries[i].vector,
+				phba->msix_entries[i].entry);
+	/*
+	 * Assign MSI-X vectors to interrupt handlers
+	 */
+
+	/* vector-0 is associated with the slow-path handler */
+	rc = request_irq(phba->msix_entries[0].vector,
+			 &lpfc_sli_sp_intr_handler, IRQF_SHARED,
+			 LPFC_SP_DRIVER_HANDLER_NAME, phba);
+	if (rc) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+				"0421 MSI-X slow-path request_irq failed "
+				"(%d)\n", rc);
+		goto msi_fail_out;
+	}
+
+	/* vector-1 is associated with the fast-path handler */
+	rc = request_irq(phba->msix_entries[1].vector,
+			 &lpfc_sli_fp_intr_handler, IRQF_SHARED,
+			 LPFC_FP_DRIVER_HANDLER_NAME, phba);
+
+	if (rc) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+				"0429 MSI-X fast-path request_irq failed "
+				"(%d)\n", rc);
+		goto irq_fail_out;
+	}
+
+	/*
+	 * Configure HBA MSI-X attention conditions to messages
+	 */
+	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+
+	if (!pmb) {
+		rc = -ENOMEM;
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0474 Unable to allocate memory for issuing "
+				"MBOX_CONFIG_MSI command\n");
+		goto mem_fail_out;
+	}
+	rc = lpfc_config_msi(phba, pmb);
+	if (rc)
+		goto mbx_fail_out;
+	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
+	if (rc != MBX_SUCCESS) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
+				"0351 Config MSI mailbox command failed, "
+				"mbxCmd x%x, mbxStatus x%x\n",
+				pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus);
+		goto mbx_fail_out;
+	}
+
+	/* Free memory allocated for mailbox command */
+	mempool_free(pmb, phba->mbox_mem_pool);
+	return rc;
+
+mbx_fail_out:
+	/* Free memory allocated for mailbox command */
+	mempool_free(pmb, phba->mbox_mem_pool);
+
+mem_fail_out:
+	/* free the irq already requested */
+	free_irq(phba->msix_entries[1].vector, phba);
+
+irq_fail_out:
+	/* free the irq already requested */
+	free_irq(phba->msix_entries[0].vector, phba);
+
+msi_fail_out:
+	/* Unconfigure MSI-X capability structure */
+	pci_disable_msix(phba->pcidev);
+	return rc;
+}
+
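+/*
+ * Caller sketch (illustrative): pci_enable_msix() grants either all
+ * LPFC_MSIX_VECTORS entries or none, so callers only need the return code;
+ * lpfc_sli_enable_intr() below reduces to:
+ *
+ *	if (!lpfc_sli_enable_msix(phba)) {
+ *		phba->intr_type = MSIX;
+ *		intr_mode = 2;
+ *	}
+ */
+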
+/**
+ * lpfc_sli_disable_msix - Disable MSI-X interrupt mode on SLI-3 device.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to release the MSI-X vectors and then disable the
+ * MSI-X interrupt mode to device with SLI-3 interface spec.
+ **/
+static void
+lpfc_sli_disable_msix(struct lpfc_hba *phba)
+{
+	int i;
+
+	/* Free up MSI-X multi-message vectors */
+	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
+		free_irq(phba->msix_entries[i].vector, phba);
+	/* Disable MSI-X */
+	pci_disable_msix(phba->pcidev);
+
+	return;
+}
+
+/**
+ * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to enable the MSI interrupt mode to device with
+ * SLI-3 interface spec. The kernel function pci_enable_msi() is called to
+ * enable the MSI vector. The device driver is responsible for calling the
+ * request_irq() to register the MSI vector with an interrupt handler, which
+ * is done in this function.
+ *
+ * Return codes
+ * 	0 - successful
+ * 	other values - error
+ **/
+static int
+lpfc_sli_enable_msi(struct lpfc_hba *phba)
+{
+	int rc;
+
+	rc = pci_enable_msi(phba->pcidev);
+	if (!rc)
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+				"0462 PCI enable MSI mode success.\n");
+	else {
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+				"0471 PCI enable MSI mode failed (%d)\n", rc);
+		return rc;
+	}
+
+	rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
+			 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
+	if (rc) {
+		pci_disable_msi(phba->pcidev);
+		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+				"0478 MSI request_irq failed (%d)\n", rc);
+	}
+	return rc;
+}
+
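+/*
+ * Pairing note: pci_enable_msi() and request_irq() must be unwound in
+ * reverse order; the error path above disables MSI again so that
+ * lpfc_sli_enable_intr() can fall back to INTx from a clean state.
+ */
+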
+/**
+ * lpfc_sli_disable_msi - Disable MSI interrupt mode to SLI-3 device.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to disable the MSI interrupt mode to device with
+ * SLI-3 interface spec. The driver calls free_irq() on the MSI vector it
+ * has done request_irq() on before calling pci_disable_msi(). Failure to do
+ * so results in a BUG_ON() and the device will be left with MSI enabled,
+ * leaking its vector.
+ **/
+static void
+lpfc_sli_disable_msi(struct lpfc_hba *phba)
+{
+	free_irq(phba->pcidev->irq, phba);
+	pci_disable_msi(phba->pcidev);
+	return;
+}
+
+/**
+ * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
+ * @phba: pointer to lpfc hba data structure.
+ * @cfg_mode: interrupt mode requested by the driver configuration.
+ *
+ * This routine is invoked to enable device interrupts and associate the
+ * driver's interrupt handler(s) with the interrupt vector(s) of a device
+ * with SLI-3 interface spec. Depending on the interrupt mode configured,
+ * the driver will try to fall back from the configured interrupt mode to
+ * an interrupt mode supported by the platform, kernel, and device, in the
+ * order of:
+ * MSI-X -> MSI -> IRQ.
+ *
+ * Return codes
+ *   0 (INTx), 1 (MSI), 2 (MSI-X) - interrupt mode successfully enabled
+ *   LPFC_INTR_ERROR - no interrupt mode could be enabled
+ **/
+static uint32_t
+lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
+{
+	uint32_t intr_mode = LPFC_INTR_ERROR;
+	int retval;
+
+	if (cfg_mode == 2) {
+		/* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
+		retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
+		if (!retval) {
+			/* Now, try to enable MSI-X interrupt mode */
+			retval = lpfc_sli_enable_msix(phba);
+			if (!retval) {
+				/* Indicate initialization to MSI-X mode */
+				phba->intr_type = MSIX;
+				intr_mode = 2;
+			}
+		}
+	}
+
+	/* Fallback to MSI if MSI-X initialization failed */
+	if (cfg_mode >= 1 && phba->intr_type == NONE) {
+		retval = lpfc_sli_enable_msi(phba);
+		if (!retval) {
+			/* Indicate initialization to MSI mode */
+			phba->intr_type = MSI;
+			intr_mode = 1;
+		}
+	}
+
+	/* Fallback to INTx if both MSI-X/MSI initialization failed */
+	if (phba->intr_type == NONE) {
+		retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
+				     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
+		if (!retval) {
+			/* Indicate initialization to INTx mode */
+			phba->intr_type = INTx;
+			intr_mode = 0;
+		}
+	}
+	return intr_mode;
+}
+
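+/*
+ * Mode ladder summary (from the logic above): @cfg_mode selects the highest
+ * interrupt mode attempted and the return value reports what came up:
+ *
+ *	cfg_mode 2 -> try MSI-X, then MSI, then INTx
+ *	cfg_mode 1 -> try MSI, then INTx
+ *	cfg_mode 0 -> INTx only
+ *	return 2/1/0 for MSI-X/MSI/INTx, LPFC_INTR_ERROR on total failure
+ */
+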
+/**
+ * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to disable device interrupt and disassociate the
+ * driver's interrupt handler(s) from interrupt vector(s) to device with
+ * SLI-3 interface spec. Depending on the interrupt mode, the driver will
+ * release the interrupt vector(s) for the message signaled interrupt.
+ **/
+static void
+lpfc_sli_disable_intr(struct lpfc_hba *phba)
+{
+	/* Disable the currently initialized interrupt mode */
+	if (phba->intr_type == MSIX)
+		lpfc_sli_disable_msix(phba);
+	else if (phba->intr_type == MSI)
+		lpfc_sli_disable_msi(phba);
+	else if (phba->intr_type == INTx)
+		free_irq(phba->pcidev->irq, phba);
+
+	/* Reset interrupt management states */
+	phba->intr_type = NONE;
+	phba->sli.slistat.sli_intr = 0;
+
+	return;
+}
+
+/**
+ * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to enable the MSI-X interrupt vectors to device
+ * with SLI-4 interface spec. The kernel function pci_enable_msix() is called
+ * to enable the MSI-X vectors. Note that pci_enable_msix(), once invoked,
+ * enables either all or nothing, depending on the current availability of
+ * PCI vector resources. The device driver is responsible for calling the
+ * individual request_irq() to register each MSI-X vector with an interrupt
+ * handler, which is done in this function. Note that later, when the device
+ * is unloading, the driver should always call free_irq() on all MSI-X
+ * vectors it has done request_irq() on before calling pci_disable_msix().
+ * Failure to do so results in a BUG_ON() and the device will be left with
+ * MSI-X enabled, leaking its vectors.
+ *
+ * Return codes
+ * 0 - successful
+ * other values - error
+ **/
+static int
+lpfc_sli4_enable_msix(struct lpfc_hba *phba)
+{
+	int rc, index;
+
+	/* Set up MSI-X multi-message vectors */
+	for (index = 0; index < phba->sli4_hba.cfg_eqn; index++)
+		phba->sli4_hba.msix_entries[index].entry = index;
+
+	/* Configure MSI-X capability structure */
+	rc = pci_enable_msix(phba->pcidev, phba->sli4_hba.msix_entries,
+			     phba->sli4_hba.cfg_eqn);
+	if (rc) {
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+				"0484 PCI enable MSI-X failed (%d)\n", rc);
+		goto msi_fail_out;
+	}
+	/* Log MSI-X vector assignment */
+	for (index = 0; index < phba->sli4_hba.cfg_eqn; index++)
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+				"0489 MSI-X entry[%d]: vector=x%x "
+				"message=%d\n", index,
+				phba->sli4_hba.msix_entries[index].vector,
+				phba->sli4_hba.msix_entries[index].entry);
+	/*
+	 * Assign MSI-X vectors to interrupt handlers
+	 */
+
+	/* The first vector must be associated with the slow-path handler for MQ */
+	rc = request_irq(phba->sli4_hba.msix_entries[0].vector,
+			 &lpfc_sli4_sp_intr_handler, IRQF_SHARED,
+			 LPFC_SP_DRIVER_HANDLER_NAME, phba);
+	if (rc) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+				"0485 MSI-X slow-path request_irq failed "
+				"(%d)\n", rc);
+		goto msi_fail_out;
+	}
+
+	/* The rest of the vector(s) are associated with fast-path handler(s) */
+	for (index = 1; index < phba->sli4_hba.cfg_eqn; index++) {
+		phba->sli4_hba.fcp_eq_hdl[index - 1].idx = index - 1;
+		phba->sli4_hba.fcp_eq_hdl[index - 1].phba = phba;
+		rc = request_irq(phba->sli4_hba.msix_entries[index].vector,
+				 &lpfc_sli4_fp_intr_handler, IRQF_SHARED,
+				 LPFC_FP_DRIVER_HANDLER_NAME,
+				 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
+		if (rc) {
+			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+					"0486 MSI-X fast-path (%d) "
+					"request_irq failed (%d)\n", index, rc);
+			goto cfg_fail_out;
+		}
+	}
+
+	return rc;
+
+cfg_fail_out:
+	/* free the irq already requested */
+	for (--index; index >= 1; index--)
+		free_irq(phba->sli4_hba.msix_entries[index].vector,
+			 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
+
+	/* free the irq already requested */
+	free_irq(phba->sli4_hba.msix_entries[0].vector, phba);
+
+msi_fail_out:
+	/* Unconfigure MSI-X capability structure */
+	pci_disable_msix(phba->pcidev);
+	return rc;
+}
+
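+/*
+ * Vector layout sketch (as wired up above), for cfg_eqn = N:
+ *
+ *	msix_entries[0]      -> lpfc_sli4_sp_intr_handler, dev_id = phba
+ *	msix_entries[1..N-1] -> lpfc_sli4_fp_intr_handler,
+ *	                        dev_id = &fcp_eq_hdl[entry - 1]
+ */
+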
+/**
+ * lpfc_sli4_disable_msix - Disable MSI-X interrupt mode to SLI-4 device
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to release the MSI-X vectors and then disable the
+ * MSI-X interrupt mode to device with SLI-4 interface spec.
+ **/
+static void
+lpfc_sli4_disable_msix(struct lpfc_hba *phba)
+{
+	int index;
+
+	/* Free up MSI-X multi-message vectors */
+	free_irq(phba->sli4_hba.msix_entries[0].vector, phba);
+
+	for (index = 1; index < phba->sli4_hba.cfg_eqn; index++)
+		free_irq(phba->sli4_hba.msix_entries[index].vector,
+			 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
+	/* Disable MSI-X */
+	pci_disable_msix(phba->pcidev);
+
+	return;
+}
+
+/**
+ * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to enable the MSI interrupt mode to device with
+ * SLI-4 interface spec. The kernel function pci_enable_msi() is called
+ * to enable the MSI vector. The device driver is responsible for calling
+ * the request_irq() to register MSI vector with a interrupt the handler,
+ * which is done in this function.
+ *
+ * Return codes
+ * 	0 - successful
+ * 	other values - error
+ **/
+static int
+lpfc_sli4_enable_msi(struct lpfc_hba *phba)
+{
+	int rc, index;
+
+	rc = pci_enable_msi(phba->pcidev);
+	if (!rc)
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+				"0487 PCI enable MSI mode success.\n");
+	else {
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+				"0488 PCI enable MSI mode failed (%d)\n", rc);
+		return rc;
+	}
+
+	rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
+			 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
+	if (rc) {
+		pci_disable_msi(phba->pcidev);
+		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+				"0490 MSI request_irq failed (%d)\n", rc);
+	}
+
+	for (index = 0; index < phba->cfg_fcp_eq_count; index++) {
+		phba->sli4_hba.fcp_eq_hdl[index].idx = index;
+		phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
+	}
+
+	return rc;
+}
+
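+/*
+ * Note (assumption based on the code above): even in single-vector MSI
+ * mode the fcp_eq_hdl[] entries are initialized, presumably so the common
+ * fast-path event-queue code can index them regardless of how many
+ * interrupt vectors were actually granted.
+ */
+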
+/**
+ * lpfc_sli4_disable_msi - Disable MSI interrupt mode to SLI-4 device
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to disable the MSI interrupt mode to device with
+ * SLI-4 interface spec. The driver calls free_irq() on MSI vector it has
+ * done request_irq() on before calling pci_disable_msi(). Failure to do so
+ * results in a BUG_ON() and a device will be left with MSI enabled and leaks
+ * its vector.
+ **/
+static void
+lpfc_sli4_disable_msi(struct lpfc_hba *phba)
+{
+	free_irq(phba->pcidev->irq, phba);
+	pci_disable_msi(phba->pcidev);
+	return;
+}
+
+/**
+ * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
+ * @phba: pointer to lpfc hba data structure.
+ * @cfg_mode: interrupt mode requested by the driver configuration.
+ *
+ * This routine is invoked to enable device interrupts and associate the
+ * driver's interrupt handler(s) with the interrupt vector(s) of a device
+ * with SLI-4 interface spec. Depending on the interrupt mode configured,
+ * the driver will try to fall back from the configured interrupt mode to
+ * an interrupt mode supported by the platform, kernel, and device, in the
+ * order of:
+ * MSI-X -> MSI -> IRQ.
+ *
+ * Return codes
+ * 	0 (INTx), 1 (MSI), 2 (MSI-X) - interrupt mode successfully enabled
+ * 	LPFC_INTR_ERROR - no interrupt mode could be enabled
+ **/
+static uint32_t
+lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
+{
+	uint32_t intr_mode = LPFC_INTR_ERROR;
+	int retval, index;
+
+	if (cfg_mode == 2) {
+		/* Now, try to enable MSI-X interrupt mode */
+		retval = lpfc_sli4_enable_msix(phba);
+		if (!retval) {
+			/* Indicate initialization to MSI-X mode */
+			phba->intr_type = MSIX;
+			intr_mode = 2;
+		}
+	}
+
+	/* Fallback to MSI if MSI-X initialization failed */
+	if (cfg_mode >= 1 && phba->intr_type == NONE) {
+		retval = lpfc_sli4_enable_msi(phba);
+		if (!retval) {
+			/* Indicate initialization to MSI mode */
+			phba->intr_type = MSI;
+			intr_mode = 1;
+		}
+	}
+
+	/* Fallback to INTx if both MSI-X/MSI initialization failed */
+	if (phba->intr_type == NONE) {
+		retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
+				     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
+		if (!retval) {
+			/* Indicate initialization to INTx mode */
+			phba->intr_type = INTx;
+			intr_mode = 0;
+			for (index = 0; index < phba->cfg_fcp_eq_count;
+			     index++) {
+				phba->sli4_hba.fcp_eq_hdl[index].idx = index;
+				phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
+			}
+		}
+	}
+	return intr_mode;
+}
+
+/**
+ * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to disable device interrupt and disassociate
+ * the driver's interrupt handler(s) from interrupt vector(s) to device
+ * with SLI-4 interface spec. Depending on the interrupt mode, the driver
+ * will release the interrupt vector(s) for the message signaled interrupt.
+ **/
+static void
+lpfc_sli4_disable_intr(struct lpfc_hba *phba)
+{
+	/* Disable the currently initialized interrupt mode */
+	if (phba->intr_type == MSIX)
+		lpfc_sli4_disable_msix(phba);
+	else if (phba->intr_type == MSI)
+		lpfc_sli4_disable_msi(phba);
+	else if (phba->intr_type == INTx)
+		free_irq(phba->pcidev->irq, phba);
+
+	/* Reset interrupt management states */
+	phba->intr_type = NONE;
+	phba->sli.slistat.sli_intr = 0;
+
+	return;
+}
+
+/**
+ * lpfc_unset_hba - Unset SLI3 hba device initialization
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to unset the HBA device initialization steps to
+ * a device with SLI-3 interface spec.
+ **/
+static void
+lpfc_unset_hba(struct lpfc_hba *phba)
+{
+	struct lpfc_vport *vport = phba->pport;
+	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
+
+	spin_lock_irq(shost->host_lock);
+	vport->load_flag |= FC_UNLOADING;
+	spin_unlock_irq(shost->host_lock);
+
+	lpfc_stop_hba_timers(phba);
+
+	phba->pport->work_port_events = 0;
+
+	lpfc_sli_hba_down(phba);
+
+	lpfc_sli_brdrestart(phba);
+
+	lpfc_sli_disable_intr(phba);
+
+	return;
+}
+
+/**
+ * lpfc_sli4_unset_hba - Unset SLI4 hba device initialization.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to unset the HBA device initialization steps to
+ * a device with SLI-4 interface spec.
+ **/
+static void
+lpfc_sli4_unset_hba(struct lpfc_hba *phba)
+{
+	struct lpfc_vport *vport = phba->pport;
+	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
+
+	spin_lock_irq(shost->host_lock);
+	vport->load_flag |= FC_UNLOADING;
+	spin_unlock_irq(shost->host_lock);
+
+	phba->pport->work_port_events = 0;
+
+	lpfc_sli4_hba_down(phba);
+
+	lpfc_sli4_disable_intr(phba);
+
+	return;
+}
+
+/**
+ * lpfc_sli4_hba_unset - Unset the fcoe hba
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called in the SLI4 code path to unset the HBA's FCoE
+ * function. The caller is not required to hold any lock. This routine
+ * stops the HBA timers, waits out (or forcefully completes) any outstanding
+ * asynchronous mailbox command, tears down the HBA queues, disables the
+ * device interrupt, and stops the worker thread.
+ **/
+static void
+lpfc_sli4_hba_unset(struct lpfc_hba *phba)
+{
+	int wait_cnt = 0;
+	LPFC_MBOXQ_t *mboxq;
+
+	lpfc_stop_hba_timers(phba);
+	phba->sli4_hba.intr_enable = 0;
+
+	/*
+	 * Gracefully wait out the potential current outstanding asynchronous
+	 * mailbox command.
+	 */
+
+	/* First, block any pending async mailbox command from being posted */
+	spin_lock_irq(&phba->hbalock);
+	phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
+	spin_unlock_irq(&phba->hbalock);
+	/* Now, try to wait it out if we can */
+	while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
+		msleep(10);
+		if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
+			break;
+	}
+	/* Forcefully release the outstanding mailbox command if timed out */
+	if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
+		spin_lock_irq(&phba->hbalock);
+		mboxq = phba->sli.mbox_active;
+		mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
+		__lpfc_mbox_cmpl_put(phba, mboxq);
+		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+		phba->sli.mbox_active = NULL;
+		spin_unlock_irq(&phba->hbalock);
+	}
+
+	/* Tear down the queues in the HBA */
+	lpfc_sli4_queue_unset(phba);
+
+	/* Disable PCI subsystem interrupt */
+	lpfc_sli4_disable_intr(phba);
+
+	/* Stopping the kthread will trigger work_done one more time */
+	kthread_stop(phba->worker_thread);
+
+	/* Stop the SLI4 device port */
+	phba->pport->work_port_events = 0;
+}
+
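+/*
+ * Timing note (derived from the loop above): the graceful wait polls every
+ * 10 ms up to LPFC_ACTIVE_MBOX_WAIT_CNT times, bounding the drain at about
+ * LPFC_ACTIVE_MBOX_WAIT_CNT * 10 ms before the active mailbox command is
+ * forcefully completed with MBX_NOT_FINISHED.
+ */
+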
+/**
+ * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
+ * @pdev: pointer to PCI device
+ * @pid: pointer to PCI device identifier
+ *
+ * This routine is to be called to attach a device with SLI-3 interface spec
+ * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
+ * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
+ * information of the device and driver to see if the driver state that it can
+ * support this kind of device. If the match is successful, the driver core
+ * invokes this routine. If this routine determines it can claim the HBA, it
+ * does all the initialization that it needs to do to handle the HBA properly.
+ *
+ * Return code
+ * 	0 - driver can claim the device
+ * 	negative value - driver can not claim the device
+ **/
+static int __devinit
+lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
+{
+	struct lpfc_hba   *phba;
+	struct lpfc_vport *vport = NULL;
+	int error;
+	uint32_t cfg_mode, intr_mode;
+
+	/* Allocate memory for HBA structure */
+	phba = lpfc_hba_alloc(pdev);
+	if (!phba)
+		return -ENOMEM;
+
+	/* Perform generic PCI device enabling operation */
+	error = lpfc_enable_pci_dev(phba);
+	if (error) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"1401 Failed to enable pci device.\n");
+		goto out_free_phba;
+	}
+
+	/* Set up SLI API function jump table for PCI-device group-0 HBAs */
+	error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
+	if (error)
+		goto out_disable_pci_dev;
+
+	/* Set up SLI-3 specific device PCI memory space */
+	error = lpfc_sli_pci_mem_setup(phba);
+	if (error) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"1402 Failed to set up pci memory space.\n");
+		goto out_disable_pci_dev;
+	}
+
+	/* Set up phase-1 common device driver resources */
+	error = lpfc_setup_driver_resource_phase1(phba);
+	if (error) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"1403 Failed to set up driver resource.\n");
+		goto out_unset_pci_mem_s3;
+	}
+
+	/* Set up SLI-3 specific device driver resources */
+	error = lpfc_sli_driver_resource_setup(phba);
+	if (error) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"1404 Failed to set up driver resource.\n");
+		goto out_unset_pci_mem_s3;
+	}
+
+	/* Initialize and populate the iocb list per host */
+	error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
+	if (error) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"1405 Failed to initialize iocb list.\n");
+		goto out_unset_driver_resource_s3;
+	}
+
+	/* Set up common device driver resources */
+	error = lpfc_setup_driver_resource_phase2(phba);
+	if (error) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"1406 Failed to set up driver resource.\n");
+		goto out_free_iocb_list;
+	}
+
+	/* Create SCSI host to the physical port */
+	error = lpfc_create_shost(phba);
+	if (error) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"1407 Failed to create scsi host.\n");
+		goto out_unset_driver_resource;
+	}
+
+	/* Configure sysfs attributes */
+	vport = phba->pport;
+	error = lpfc_alloc_sysfs_attr(vport);
+	if (error) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"1476 Failed to allocate sysfs attr\n");
+		goto out_destroy_shost;
+	}
+
+	/* Now, trying to enable interrupt and bring up the device */
+	cfg_mode = phba->cfg_use_msi;
+	while (true) {
+		/* Put device to a known state before enabling interrupt */
+		lpfc_stop_port(phba);
+		/* Configure and enable interrupt */
+		intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
+		if (intr_mode == LPFC_INTR_ERROR) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"0431 Failed to enable interrupt.\n");
+			error = -ENODEV;
+			goto out_free_sysfs_attr;
+		}
+		/* SLI-3 HBA setup */
+		if (lpfc_sli_hba_setup(phba)) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"1477 Failed to set up hba\n");
+			error = -ENODEV;
+			goto out_remove_device;
+		}
+
+		/* Wait 50ms for the interrupts of previous mailbox commands */
+		msleep(50);
+		/* Check active interrupts on message signaled interrupts */
+		if (intr_mode == 0 ||
+		    phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
+			/* Log the current active interrupt mode */
+			phba->intr_mode = intr_mode;
+			lpfc_log_intr_mode(phba, intr_mode);
+			break;
+		} else {
+			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+					"0447 Configure interrupt mode (%d) "
+					"failed active interrupt test.\n",
+					intr_mode);
+			/* Disable the current interrupt mode */
+			lpfc_sli_disable_intr(phba);
+			/* Try next level of interrupt mode */
+			cfg_mode = --intr_mode;
+		}
+	}
+
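+	/*
+	 * Note: each failed active-interrupt test retries with the next
+	 * lower mode (MSI-X -> MSI -> INTx); INTx (intr_mode == 0) always
+	 * passes the test above, so this loop terminates.
+	 */
+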
+	/* Perform post initialization setup */
+	lpfc_post_init_setup(phba);
+
+	/* Check if there are static vports to be created. */
+	lpfc_create_static_vport(phba);
+
+	return 0;
+
+out_remove_device:
+	lpfc_unset_hba(phba);
+out_free_sysfs_attr:
+	lpfc_free_sysfs_attr(vport);
+out_destroy_shost:
+	lpfc_destroy_shost(phba);
+out_unset_driver_resource:
+	lpfc_unset_driver_resource_phase2(phba);
+out_free_iocb_list:
+	lpfc_free_iocb_list(phba);
+out_unset_driver_resource_s3:
+	lpfc_sli_driver_resource_unset(phba);
+out_unset_pci_mem_s3:
+	lpfc_sli_pci_mem_unset(phba);
+out_disable_pci_dev:
+	lpfc_disable_pci_dev(phba);
+out_free_phba:
+	lpfc_hba_free(phba);
+	return error;
+}
+
+/**
+ * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem.
+ * @pdev: pointer to PCI device
+ *
+ * This routine is to be called to detach a device with SLI-3 interface
+ * spec from PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
+ * removed from PCI bus, it performs all the necessary cleanup for the HBA
+ * device to be removed from the PCI subsystem properly.
+ **/
+static void __devexit
+lpfc_pci_remove_one_s3(struct pci_dev *pdev)
+{
+	struct Scsi_Host  *shost = pci_get_drvdata(pdev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_vport **vports;
+	struct lpfc_hba   *phba = vport->phba;
+	int i;
+	int bars = pci_select_bars(pdev, IORESOURCE_MEM);
+
+	spin_lock_irq(&phba->hbalock);
+	vport->load_flag |= FC_UNLOADING;
+	spin_unlock_irq(&phba->hbalock);
+
+	lpfc_free_sysfs_attr(vport);
+
+	/* Release all the vports against this physical port */
+	vports = lpfc_create_vport_work_array(phba);
+	if (vports != NULL)
+		for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++)
+			fc_vport_terminate(vports[i]->fc_vport);
+	lpfc_destroy_vport_work_array(phba, vports);
+
+	/* Remove FC host and then SCSI host with the physical port */
+	fc_remove_host(shost);
+	scsi_remove_host(shost);
+	lpfc_cleanup(vport);
+
+	/*
+	 * Bring down the SLI Layer. This step disables all interrupts,
+	 * clears the rings, discards all mailbox commands, and resets
+	 * the HBA.
+	 */
+
+	/* HBA interrupt will be disabled after this call */
+	lpfc_sli_hba_down(phba);
+	/* Stopping the kthread will trigger work_done one more time */
+	kthread_stop(phba->worker_thread);
+	/* Final cleanup of txcmplq and reset the HBA */
+	lpfc_sli_brdrestart(phba);
+
+	lpfc_stop_hba_timers(phba);
+	spin_lock_irq(&phba->hbalock);
+	list_del_init(&vport->listentry);
+	spin_unlock_irq(&phba->hbalock);
+
+	lpfc_debugfs_terminate(vport);
+
+	/* Disable interrupt */
+	lpfc_sli_disable_intr(phba);
+
+	pci_set_drvdata(pdev, NULL);
+	scsi_host_put(shost);
+
+	/*
+	 * Call scsi_free before mem_free since scsi bufs are released to their
+	 * corresponding pools here.
+	 */
+	lpfc_scsi_free(phba);
+	lpfc_mem_free_all(phba);
+
+	dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
+			  phba->hbqslimp.virt, phba->hbqslimp.phys);
+
+	/* Free resources associated with SLI2 interface */
+	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
+			  phba->slim2p.virt, phba->slim2p.phys);
+
+	/* unmap adapter SLIM and Control Registers */
+	iounmap(phba->ctrl_regs_memmap_p);
+	iounmap(phba->slim_memmap_p);
+
+	lpfc_hba_free(phba);
+
+	pci_release_selected_regions(pdev, bars);
+	pci_disable_device(pdev);
+}
+
+/**
+ * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt
+ * @pdev: pointer to PCI device
+ * @msg: power management message
+ *
+ * This routine is to be called from the kernel's PCI subsystem to support
+ * system Power Management (PM) to device with SLI-3 interface spec. When
+ * PM invokes this method, it quiesces the device by stopping the driver's
+ * worker thread for the device, turning off the device's interrupt and DMA,
+ * and bringing the device offline. Note that because the driver implements
+ * only the minimum PM requirements for a power-aware driver's suspend/resume
+ * support -- all possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the
+ * suspend() method call are treated as SUSPEND, and the driver fully
+ * reinitializes its device during the resume() method call -- the driver
+ * sets the device to the PCI_D3hot state in PCI config space instead of
+ * setting it according to the @msg provided by the PM.
+ *
+ * Return code
+ * 	0 - driver suspended the device
+ * 	Error otherwise
+ **/
+static int
+lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg)
+{
+	struct Scsi_Host *shost = pci_get_drvdata(pdev);
+	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+
+	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+			"0473 PCI device Power Management suspend.\n");
+
+	/* Bring down the device */
+	lpfc_offline_prep(phba);
+	lpfc_offline(phba);
+	kthread_stop(phba->worker_thread);
+
+	/* Disable interrupt from device */
+	lpfc_sli_disable_intr(phba);
+
+	/* Save device state to PCI config space */
+	pci_save_state(pdev);
+	pci_set_power_state(pdev, PCI_D3hot);
+
+	return 0;
+}
+
+/**
+ * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
+ * @pdev: pointer to PCI device
+ *
+ * This routine is to be called from the kernel's PCI subsystem to support
+ * system Power Management (PM) to device with SLI-3 interface spec. When PM
+ * invokes this method, it restores the device's PCI config space state and
+ * fully reinitializes the device and brings it online. Note that because
+ * the driver implements only the minimum PM requirements for a power-aware
+ * driver's suspend/resume support -- all possible PM messages (SUSPEND,
+ * HIBERNATE, FREEZE) to the suspend() method call are treated as SUSPEND,
+ * and the driver fully reinitializes its device during the resume() method
+ * call -- the device is set to PCI_D0 directly in PCI config space before
+ * restoring the state.
+ *
+ * Return code
+ * 	0 - driver resumed the device
+ * 	Error otherwise
+ **/
+static int
+lpfc_pci_resume_one_s3(struct pci_dev *pdev)
+{
+	struct Scsi_Host *shost = pci_get_drvdata(pdev);
+	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+	uint32_t intr_mode;
+	int error;
+
+	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+			"0452 PCI device Power Management resume.\n");
+
+	/* Restore device state from PCI config space */
+	pci_set_power_state(pdev, PCI_D0);
+	pci_restore_state(pdev);
+	if (pdev->is_busmaster)
+		pci_set_master(pdev);
+
+	/* Startup the kernel thread for this host adapter. */
+	phba->worker_thread = kthread_run(lpfc_do_work, phba,
+					"lpfc_worker_%d", phba->brd_no);
+	if (IS_ERR(phba->worker_thread)) {
+		error = PTR_ERR(phba->worker_thread);
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0434 PM resume failed to start worker "
+				"thread: error=x%x.\n", error);
+		return error;
+	}
+
+	/* Configure and enable interrupt */
+	intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
+	if (intr_mode == LPFC_INTR_ERROR) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0430 PM resume Failed to enable interrupt\n");
+		return -EIO;
+	} else
+		phba->intr_mode = intr_mode;
+
+	/* Restart HBA and bring it online */
+	lpfc_sli_brdrestart(phba);
+	lpfc_online(phba);
+
+	/* Log the current active interrupt mode */
+	lpfc_log_intr_mode(phba, phba->intr_mode);
+
+	return 0;
+}
+
+/**
+ * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
+ * @pdev: pointer to PCI device.
+ * @state: the current PCI connection state.
+ *
+ * This routine is called from the PCI subsystem for I/O error handling to
+ * device with SLI-3 interface spec. This function is called by the PCI
+ * subsystem after a PCI bus error affecting this device has been detected.
+ * When this function is invoked, it will need to stop all the I/Os and
+ * interrupt(s) to the device. Once that is done, it will return
+ * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery
+ * as desired.
+ *
+ * Return codes
+ * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
+ * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
+ **/
+static pci_ers_result_t
+lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
+{
+	struct Scsi_Host *shost = pci_get_drvdata(pdev);
+	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+	struct lpfc_sli *psli = &phba->sli;
+	struct lpfc_sli_ring  *pring;
+
+	if (state == pci_channel_io_perm_failure) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0472 PCI channel I/O permanent failure\n");
+		/* Block all SCSI devices' I/Os on the host */
+		lpfc_scsi_dev_block(phba);
+		/* Clean up all driver's outstanding SCSI I/Os */
+		lpfc_sli_flush_fcp_rings(phba);
+		return PCI_ERS_RESULT_DISCONNECT;
+	}
+
+	pci_disable_device(pdev);
+	/*
+	 * There may be I/Os dropped by the firmware.
+	 * Error out the iocbs (I/Os) on the txcmplq and let the SCSI layer
+	 * retry them after re-establishing the link.
+	 */
+	pring = &psli->ring[psli->fcp_ring];
+	lpfc_sli_abort_iocb_ring(phba, pring);
+
+	/* Disable interrupt */
+	lpfc_sli_disable_intr(phba);
+
+	/* Request a slot reset. */
+	return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch.
+ * @pdev: pointer to PCI device.
+ *
+ * This routine is called from the PCI subsystem for error handling to
+ * device with SLI-3 interface spec. This is called after PCI bus has been
+ * reset to restart the PCI card from scratch, as if from a cold-boot.
+ * During the PCI subsystem error recovery, after the driver returns
+ * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
+ * recovery and then call this routine before calling the .resume method
+ * to recover the device. This function initializes the HBA device and
+ * enables its interrupt, but it just puts the HBA in the offline state
+ * without passing any I/O traffic.
+ *
+ * Return codes
+ * 	PCI_ERS_RESULT_RECOVERED - the device has been recovered
+ * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
+ */
+static pci_ers_result_t
+lpfc_io_slot_reset_s3(struct pci_dev *pdev)
+{
+	struct Scsi_Host *shost = pci_get_drvdata(pdev);
+	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+	struct lpfc_sli *psli = &phba->sli;
+	uint32_t intr_mode;
+
+	dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
+	if (pci_enable_device_mem(pdev)) {
+		printk(KERN_ERR "lpfc: Cannot re-enable "
+			"PCI device after reset.\n");
+		return PCI_ERS_RESULT_DISCONNECT;
+	}
+
+	pci_restore_state(pdev);
+	if (pdev->is_busmaster)
+		pci_set_master(pdev);
+
+	spin_lock_irq(&phba->hbalock);
+	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
+	spin_unlock_irq(&phba->hbalock);
+
+	/* Configure and enable interrupt */
+	intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
+	if (intr_mode == LPFC_INTR_ERROR) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0427 Cannot re-enable interrupt after "
+				"slot reset.\n");
+		return PCI_ERS_RESULT_DISCONNECT;
+	} else
+		phba->intr_mode = intr_mode;
+
+	/* Take device offline; this will perform cleanup */
+	lpfc_offline(phba);
+	lpfc_sli_brdrestart(phba);
+
+	/* Log the current active interrupt mode */
+	lpfc_log_intr_mode(phba, phba->intr_mode);
+
+	return PCI_ERS_RESULT_RECOVERED;
+}
+
+/**
+ * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device.
+ * @pdev: pointer to PCI device
+ *
+ * This routine is called from the PCI subsystem for error handling to device
+ * with SLI-3 interface spec. It is called when kernel error recovery tells
+ * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
+ * error recovery. After this call, traffic can start to flow from this device
+ * again.
+ */
+static void
+lpfc_io_resume_s3(struct pci_dev *pdev)
+{
+	struct Scsi_Host *shost = pci_get_drvdata(pdev);
+	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
 
-	lpfc_sli_disable_intr(phba);
+	lpfc_online(phba);
+}
 
-	return;
+/**
+ * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * Returns the number of ELS/CT IOCBs to reserve, based on max_xri.
+ **/
+int
+lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
+{
+	int max_xri = phba->sli4_hba.max_cfg_param.max_xri;
+
+	if (max_xri <= 100)
+		return 4;
+	else if (max_xri <= 256)
+		return 8;
+	else if (max_xri <= 512)
+		return 16;
+	else if (max_xri <= 1024)
+		return 32;
+	else
+		return 48;
 }
 
 /**
- * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
+ * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
  * @pdev: pointer to PCI device
  * @pid: pointer to PCI device identifier
  *
- * This routine is to be called to attach a device with SLI-3 interface spec
- * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
+ * This routine is called from the kernel's PCI subsystem to attach a device
+ * with SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
  * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
- * information of the device and driver to see if the driver state that it can
- * support this kind of device. If the match is successful, the driver core
- * invokes this routine. If this routine determines it can claim the HBA, it
- * does all the initialization that it needs to do to handle the HBA properly.
+ * information of the device and driver to see if the driver states that it
+ * can support this kind of device. If the match is successful, the driver
+ * core invokes this routine. If this routine determines it can claim the HBA,
+ * it does all the initialization that it needs to do to handle the HBA
+ * properly.
  *
  * Return code
  * 	0 - driver can claim the device
  * 	negative value - driver can not claim the device
  **/
 static int __devinit
-lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
+lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
 {
 	struct lpfc_hba   *phba;
 	struct lpfc_vport *vport = NULL;
 	int error;
 	uint32_t cfg_mode, intr_mode;
+	int mcnt;
 
 	/* Allocate memory for HBA structure */
 	phba = lpfc_hba_alloc(pdev);
@@ -3660,20 +7213,20 @@ lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
 	error = lpfc_enable_pci_dev(phba);
 	if (error) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-				"1401 Failed to enable pci device.\n");
+				"1409 Failed to enable pci device.\n");
 		goto out_free_phba;
 	}
 
-	/* Set up SLI API function jump table for PCI-device group-0 HBAs */
-	error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
+	/* Set up SLI API function jump table for PCI-device group-1 HBAs */
+	error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
 	if (error)
 		goto out_disable_pci_dev;
 
-	/* Set up SLI-3 specific device PCI memory space */
-	error = lpfc_sli_pci_mem_setup(phba);
+	/* Set up SLI-4 specific device PCI memory space */
+	error = lpfc_sli4_pci_mem_setup(phba);
 	if (error) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-				"1402 Failed to set up pci memory space.\n");
+				"1410 Failed to set up pci memory space.\n");
 		goto out_disable_pci_dev;
 	}
 
@@ -3681,31 +7234,32 @@ lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
 	error = lpfc_setup_driver_resource_phase1(phba);
 	if (error) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-				"1403 Failed to set up driver resource.\n");
-		goto out_unset_pci_mem_s3;
+				"1411 Failed to set up driver resource.\n");
+		goto out_unset_pci_mem_s4;
 	}
 
-	/* Set up SLI-3 specific device driver resources */
-	error = lpfc_sli_driver_resource_setup(phba);
+	/* Set up SLI-4 Specific device driver resources */
+	error = lpfc_sli4_driver_resource_setup(phba);
 	if (error) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-				"1404 Failed to set up driver resource.\n");
-		goto out_unset_pci_mem_s3;
+				"1412 Failed to set up driver resource.\n");
+		goto out_unset_pci_mem_s4;
 	}
 
 	/* Initialize and populate the iocb list per host */
-	error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
+	error = lpfc_init_iocb_list(phba,
+			phba->sli4_hba.max_cfg_param.max_xri);
 	if (error) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-				"1405 Failed to initialize iocb list.\n");
-		goto out_unset_driver_resource_s3;
+				"1413 Failed to initialize iocb list.\n");
+		goto out_unset_driver_resource_s4;
 	}
 
 	/* Set up common device driver resources */
 	error = lpfc_setup_driver_resource_phase2(phba);
 	if (error) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-				"1406 Failed to set up driver resource.\n");
+				"1414 Failed to set up driver resource.\n");
 		goto out_free_iocb_list;
 	}
 
@@ -3713,7 +7267,7 @@ lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
 	error = lpfc_create_shost(phba);
 	if (error) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-				"1407 Failed to create scsi host.\n");
+				"1415 Failed to create scsi host.\n");
 		goto out_unset_driver_resource;
 	}
 
@@ -3722,7 +7276,7 @@ lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
 	error = lpfc_alloc_sysfs_attr(vport);
 	if (error) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-				"1476 Failed to allocate sysfs attr\n");
+				"1416 Failed to allocate sysfs attr\n");
 		goto out_destroy_shost;
 	}
 
@@ -3732,52 +7286,51 @@ lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
 		/* Put device to a known state before enabling interrupt */
 		lpfc_stop_port(phba);
 		/* Configure and enable interrupt */
-		intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
+		intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
 		if (intr_mode == LPFC_INTR_ERROR) {
 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-					"0431 Failed to enable interrupt.\n");
+					"0426 Failed to enable interrupt.\n");
 			error = -ENODEV;
 			goto out_free_sysfs_attr;
 		}
-		/* SLI-3 HBA setup */
-		if (lpfc_sli_hba_setup(phba)) {
+		/* Set up SLI-4 HBA */
+		if (lpfc_sli4_hba_setup(phba)) {
 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-					"1477 Failed to set up hba\n");
+					"1421 Failed to set up hba\n");
 			error = -ENODEV;
-			goto out_remove_device;
+			goto out_disable_intr;
 		}
 
-		/* Wait 50ms for the interrupts of previous mailbox commands */
-		msleep(50);
-		/* Check active interrupts on message signaled interrupts */
+		/* Send NOP mbx cmds for non-INTx mode active interrupt test */
+		if (intr_mode != 0)
+			mcnt = lpfc_sli4_send_nop_mbox_cmds(phba,
+							    LPFC_ACT_INTR_CNT);
+
+		/* Check active interrupts received only for MSI/MSI-X */
 		if (intr_mode == 0 ||
-		    phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
+		    phba->sli.slistat.sli_intr >= LPFC_ACT_INTR_CNT) {
 			/* Log the current active interrupt mode */
 			phba->intr_mode = intr_mode;
 			lpfc_log_intr_mode(phba, intr_mode);
 			break;
-		} else {
-			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-					"0447 Configure interrupt mode (%d) "
-					"failed active interrupt test.\n",
-					intr_mode);
-			/* Disable the current interrupt mode */
-			lpfc_sli_disable_intr(phba);
-			/* Try next level of interrupt mode */
-			cfg_mode = --intr_mode;
 		}
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+				"0451 Configure interrupt mode (%d) "
+				"failed active interrupt test.\n",
+				intr_mode);
+		/* Unset the previous SLI-4 HBA setup */
+		lpfc_sli4_unset_hba(phba);
+		/* Try next level of interrupt mode */
+		cfg_mode = --intr_mode;
 	}
 
 	/* Perform post initialization setup */
 	lpfc_post_init_setup(phba);
 
-	/* Check if there are static vports to be created. */
-	lpfc_create_static_vport(phba);
-
 	return 0;
 
-out_remove_device:
-	lpfc_unset_hba(phba);
+out_disable_intr:
+	lpfc_sli4_disable_intr(phba);
 out_free_sysfs_attr:
 	lpfc_free_sysfs_attr(vport);
 out_destroy_shost:
@@ -3786,10 +7339,10 @@ out_unset_driver_resource:
 	lpfc_unset_driver_resource_phase2(phba);
 out_free_iocb_list:
 	lpfc_free_iocb_list(phba);
-out_unset_driver_resource_s3:
-	lpfc_sli_driver_resource_unset(phba);
-out_unset_pci_mem_s3:
-	lpfc_sli_pci_mem_unset(phba);
+out_unset_driver_resource_s4:
+	lpfc_sli4_driver_resource_unset(phba);
+out_unset_pci_mem_s4:
+	lpfc_sli4_pci_mem_unset(phba);
 out_disable_pci_dev:
 	lpfc_disable_pci_dev(phba);
 out_free_phba:
@@ -3798,28 +7351,29 @@ out_free_phba:
 }
 
 /**
- * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem.
+ * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem
  * @pdev: pointer to PCI device
  *
- * This routine is to be called to disattach a device with SLI-3 interface
- * spec from PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
+ * This routine is called from the kernel's PCI subsystem to detach a device
+ * with SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
  * removed from PCI bus, it performs all the necessary cleanup for the HBA
  * device to be removed from the PCI subsystem properly.
  **/
 static void __devexit
-lpfc_pci_remove_one_s3(struct pci_dev *pdev)
+lpfc_pci_remove_one_s4(struct pci_dev *pdev)
 {
-	struct Scsi_Host  *shost = pci_get_drvdata(pdev);
+	struct Scsi_Host *shost = pci_get_drvdata(pdev);
 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
 	struct lpfc_vport **vports;
-	struct lpfc_hba   *phba = vport->phba;
+	struct lpfc_hba *phba = vport->phba;
 	int i;
-	int bars = pci_select_bars(pdev, IORESOURCE_MEM);
 
+	/* Mark the device unloading flag */
 	spin_lock_irq(&phba->hbalock);
 	vport->load_flag |= FC_UNLOADING;
 	spin_unlock_irq(&phba->hbalock);
 
+	/* Free the HBA sysfs attributes */
 	lpfc_free_sysfs_attr(vport);
 
 	/* Release all the vports against this physical port */
@@ -3832,73 +7386,56 @@ lpfc_pci_remove_one_s3(struct pci_dev *pdev)
 	/* Remove FC host and then SCSI host with the physical port */
 	fc_remove_host(shost);
 	scsi_remove_host(shost);
+
+	/* Perform cleanup on the physical port */
 	lpfc_cleanup(vport);
 
 	/*
-	 * Bring down the SLI Layer. This step disable all interrupts,
+	 * Bring down the SLI Layer. This step disables all interrupts,
 	 * clears the rings, discards all mailbox commands, and resets
-	 * the HBA.
+	 * the HBA FCoE function.
 	 */
+	lpfc_debugfs_terminate(vport);
+	lpfc_sli4_hba_unset(phba);
 
-	/* HBA interrupt will be diabled after this call */
-	lpfc_sli_hba_down(phba);
-	/* Stop kthread signal shall trigger work_done one more time */
-	kthread_stop(phba->worker_thread);
-	/* Final cleanup of txcmplq and reset the HBA */
-	lpfc_sli_brdrestart(phba);
-
-	lpfc_stop_hba_timers(phba);
 	spin_lock_irq(&phba->hbalock);
 	list_del_init(&vport->listentry);
 	spin_unlock_irq(&phba->hbalock);
 
-	lpfc_debugfs_terminate(vport);
-
-	/* Disable interrupt */
-	lpfc_sli_disable_intr(phba);
-
-	pci_set_drvdata(pdev, NULL);
-	scsi_host_put(shost);
-
-	/*
-	 * Call scsi_free before mem_free since scsi bufs are released to their
-	 * corresponding pools here.
+	/* Call scsi_free before lpfc_sli4_driver_resource_unset since scsi
+	 * buffers are released to their corresponding pools here.
 	 */
 	lpfc_scsi_free(phba);
-	lpfc_mem_free_all(phba);
-
-	dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
-			  phba->hbqslimp.virt, phba->hbqslimp.phys);
+	lpfc_sli4_driver_resource_unset(phba);
 
-	/* Free resources associated with SLI2 interface */
-	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
-			  phba->slim2p.virt, phba->slim2p.phys);
+	/* Unmap adapter Control and Doorbell registers */
+	lpfc_sli4_pci_mem_unset(phba);
 
-	/* unmap adapter SLIM and Control Registers */
-	iounmap(phba->ctrl_regs_memmap_p);
-	iounmap(phba->slim_memmap_p);
+	/* Release PCI resources and disable device's PCI function */
+	scsi_host_put(shost);
+	lpfc_disable_pci_dev(phba);
 
+	/* Finally, free the driver's device data structure */
 	lpfc_hba_free(phba);
 
-	pci_release_selected_regions(pdev, bars);
-	pci_disable_device(pdev);
+	return;
 }
 
 /**
- * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt
+ * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt
  * @pdev: pointer to PCI device
  * @msg: power management message
  *
- * This routine is to be called from the kernel's PCI subsystem to support
- * system Power Management (PM) to device with SLI-3 interface spec. When
- * PM invokes this method, it quiesces the device by stopping the driver's
- * worker thread for the device, turning off device's interrupt and DMA,
- * and bring the device offline. Note that as the driver implements the
- * minimum PM requirements to a power-aware driver's PM support for the
- * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
- * to the suspend() method call will be treated as SUSPEND and the driver will
- * fully reinitialize its device during resume() method call, the driver will
- * set device to PCI_D3hot state in PCI config space instead of setting it
+ * This routine is called from the kernel's PCI subsystem to support system
+ * Power Management (PM) to device with SLI-4 interface spec. When PM invokes
+ * this method, it quiesces the device by stopping the driver's worker
+ * thread for the device, turning off the device's interrupt and DMA, and
+ * bringing the device offline. Note that as the driver implements the
+ * minimum PM requirements to a power-aware driver's PM support for
+ * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE,
+ * FREEZE) to the suspend() method call will be treated as SUSPEND and the
+ * driver will fully reinitialize its device during resume() method call,
+ * the driver will set the device to PCI_D3hot state in PCI config space
+ * instead of setting it
  * according to the @msg provided by the PM.
  *
  * Return code
@@ -3906,13 +7443,13 @@ lpfc_pci_remove_one_s3(struct pci_dev *pdev)
  * 	Error otherwise
  **/
 static int
-lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg)
+lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg)
 {
 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
 
 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-			"0473 PCI device Power Management suspend.\n");
+			"0298 PCI device Power Management suspend.\n");
 
 	/* Bring down the device */
 	lpfc_offline_prep(phba);
@@ -3920,7 +7457,7 @@ lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg)
 	kthread_stop(phba->worker_thread);
 
 	/* Disable interrupt from device */
-	lpfc_sli_disable_intr(phba);
+	lpfc_sli4_disable_intr(phba);
 
 	/* Save device state to PCI config space */
 	pci_save_state(pdev);
@@ -3930,26 +7467,26 @@ lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg)
 }
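
A note on the convention used here: the suspend path ends with the standard
two-step PCI PM hand-off of saving config space and then dropping the
function to D3hot. A minimal sketch of that convention (illustrative
callback name, assuming <linux/pci.h>):

    static int example_pci_suspend(struct pci_dev *pdev, pm_message_t msg)
    {
            /* Quiesce device-specific activity first (elided). */

            /* Preserve the PCI config space across the power cut. */
            pci_save_state(pdev);

            /* All PM messages are treated as SUSPEND, so D3hot is used
             * unconditionally rather than deriving a state from msg.
             */
            pci_set_power_state(pdev, PCI_D3hot);
            return 0;
    }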
 
 /**
- * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
+ * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
  * @pdev: pointer to PCI device
  *
- * This routine is to be called from the kernel's PCI subsystem to support
- * system Power Management (PM) to device with SLI-3 interface spec. When PM
- * invokes this method, it restores the device's PCI config space state and
- * fully reinitializes the device and brings it online. Note that as the
- * driver implements the minimum PM requirements to a power-aware driver's
- * PM for suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE,
- * FREEZE) to the suspend() method call will be treated as SUSPEND and the
- * driver will fully reinitialize its device during resume() method call,
- * the device will be set to PCI_D0 directly in PCI config space before
- * restoring the state.
+ * This routine is called from the kernel's PCI subsystem to support system
+ * Power Management (PM) to device with SLI-4 interface spec. When PM invokes
+ * this method, it restores the device's PCI config space state and fully
+ * reinitializes the device and brings it online. Note that as the driver
+ * implements the minimum PM requirements to a power-aware driver's PM for
+ * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
+ * to the suspend() method call will be treated as SUSPEND and the driver
+ * will fully reinitialize its device during resume() method call, the device
+ * will be set to PCI_D0 directly in PCI config space before restoring the
+ * state.
  *
  * Return code
  * 	0 - driver suspended the device
  * 	Error otherwise
  **/
 static int
-lpfc_pci_resume_one_s3(struct pci_dev *pdev)
+lpfc_pci_resume_one_s4(struct pci_dev *pdev)
 {
 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
@@ -3957,7 +7494,7 @@ lpfc_pci_resume_one_s3(struct pci_dev *pdev)
 	int error;
 
 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-			"0452 PCI device Power Management resume.\n");
+			"0292 PCI device Power Management resume.\n");
 
 	/* Restore device state from PCI config space */
 	pci_set_power_state(pdev, PCI_D0);
@@ -3965,22 +7502,22 @@ lpfc_pci_resume_one_s3(struct pci_dev *pdev)
 	if (pdev->is_busmaster)
 		pci_set_master(pdev);
 
-	/* Startup the kernel thread for this host adapter. */
+	/* Startup the kernel thread for this host adapter. */
 	phba->worker_thread = kthread_run(lpfc_do_work, phba,
 					"lpfc_worker_%d", phba->brd_no);
 	if (IS_ERR(phba->worker_thread)) {
 		error = PTR_ERR(phba->worker_thread);
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-				"0434 PM resume failed to start worker "
+				"0293 PM resume failed to start worker "
 				"thread: error=x%x.\n", error);
 		return error;
 	}
 
 	/* Configure and enable interrupt */
-	intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
+	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
 	if (intr_mode == LPFC_INTR_ERROR) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-				"0430 PM resume Failed to enable interrupt\n");
+				"0294 PM resume Failed to enable interrupt\n");
 		return -EIO;
 	} else
 		phba->intr_mode = intr_mode;
@@ -3996,134 +7533,65 @@ lpfc_pci_resume_one_s3(struct pci_dev *pdev)
 }
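
The resume side reverses that hand-off: power the function back to D0 and
restore the saved config space before re-enabling any driver machinery. A
matching sketch (illustrative callback name, assuming <linux/pci.h>):

    static int example_pci_resume(struct pci_dev *pdev)
    {
            /* Power the function up before touching config space. */
            pci_set_power_state(pdev, PCI_D0);
            pci_restore_state(pdev);

            /* Re-enable bus mastering only if it was in use before. */
            if (pdev->is_busmaster)
                    pci_set_master(pdev);

            /* Driver-specific re-init (worker thread, interrupts,
             * bringing the port online) follows, as in the routine above.
             */
            return 0;
    }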
 
 /**
- * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
+ * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device
  * @pdev: pointer to PCI device.
  * @state: the current PCI connection state.
  *
- * This routine is called from the PCI subsystem for I/O error handling to
- * device with SLI-3 interface spec. This function is called by the PCI
- * subsystem after a PCI bus error affecting this device has been detected.
- * When this function is invoked, it will need to stop all the I/Os and
- * interrupt(s) to the device. Once that is done, it will return
- * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery
- * as desired.
+ * This routine is called from the PCI subsystem for error handling to device
+ * with SLI-4 interface spec. This function is called by the PCI subsystem
+ * after a PCI bus error affecting this device has been detected. When this
+ * function is invoked, it will need to stop all the I/Os and interrupt(s)
+ * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET
+ * for the PCI subsystem to perform proper recovery as desired.
  *
  * Return codes
  * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
  * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
  **/
 static pci_ers_result_t
-lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
+lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
 {
-	struct Scsi_Host *shost = pci_get_drvdata(pdev);
-	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
-	struct lpfc_sli *psli = &phba->sli;
-	struct lpfc_sli_ring  *pring;
-
-	if (state == pci_channel_io_perm_failure) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-				"0472 PCI channel I/O permanent failure\n");
-		/* Block all SCSI devices' I/Os on the host */
-		lpfc_scsi_dev_block(phba);
-		/* Clean up all driver's outstanding SCSI I/Os */
-		lpfc_sli_flush_fcp_rings(phba);
-		return PCI_ERS_RESULT_DISCONNECT;
-	}
-
-	pci_disable_device(pdev);
-	/*
-	 * There may be I/Os dropped by the firmware.
-	 * Error iocb (I/O) on txcmplq and let the SCSI layer
-	 * retry it after re-establishing link.
-	 */
-	pring = &psli->ring[psli->fcp_ring];
-	lpfc_sli_abort_iocb_ring(phba, pring);
-
-	/* Disable interrupt */
-	lpfc_sli_disable_intr(phba);
-
-	/* Request a slot reset. */
 	return PCI_ERS_RESULT_NEED_RESET;
 }
 
 /**
- * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch.
+ * lpfc_io_slot_reset_s4 - Method for restarting PCI SLI-4 device from scratch
  * @pdev: pointer to PCI device.
  *
- * This routine is called from the PCI subsystem for error handling to
- * device with SLI-3 interface spec. This is called after PCI bus has been
- * reset to restart the PCI card from scratch, as if from a cold-boot.
- * During the PCI subsystem error recovery, after driver returns
+ * This routine is called from the PCI subsystem for error handling to device
+ * with SLI-4 interface spec. It is called after PCI bus has been reset to
+ * restart the PCI card from scratch, as if from a cold-boot. During the
+ * PCI subsystem error recovery, after the driver returns
  * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
- * recovery and then call this routine before calling the .resume method
- * to recover the device. This function will initialize the HBA device,
- * enable the interrupt, but it will just put the HBA to offline state
- * without passing any I/O traffic.
+ * recovery and then call this routine before calling the .resume method to
+ * recover the device. This function will initialize the HBA device, enable
+ * the interrupt, but it will just put the HBA to offline state without
+ * passing any I/O traffic.
  *
  * Return codes
  * 	PCI_ERS_RESULT_RECOVERED - the device has been recovered
  * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
  */
 static pci_ers_result_t
-lpfc_io_slot_reset_s3(struct pci_dev *pdev)
+lpfc_io_slot_reset_s4(struct pci_dev *pdev)
 {
-	struct Scsi_Host *shost = pci_get_drvdata(pdev);
-	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
-	struct lpfc_sli *psli = &phba->sli;
-	uint32_t intr_mode;
-
-	dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
-	if (pci_enable_device_mem(pdev)) {
-		printk(KERN_ERR "lpfc: Cannot re-enable "
-			"PCI device after reset.\n");
-		return PCI_ERS_RESULT_DISCONNECT;
-	}
-
-	pci_restore_state(pdev);
-	if (pdev->is_busmaster)
-		pci_set_master(pdev);
-
-	spin_lock_irq(&phba->hbalock);
-	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
-	spin_unlock_irq(&phba->hbalock);
-
-	/* Configure and enable interrupt */
-	intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
-	if (intr_mode == LPFC_INTR_ERROR) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-				"0427 Cannot re-enable interrupt after "
-				"slot reset.\n");
-		return PCI_ERS_RESULT_DISCONNECT;
-	} else
-		phba->intr_mode = intr_mode;
-
-	/* Take device offline; this will perform cleanup */
-	lpfc_offline(phba);
-	lpfc_sli_brdrestart(phba);
-
-	/* Log the current active interrupt mode */
-	lpfc_log_intr_mode(phba, phba->intr_mode);
-
 	return PCI_ERS_RESULT_RECOVERED;
 }
 
 /**
- * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device.
+ * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device
  * @pdev: pointer to PCI device
  *
  * This routine is called from the PCI subsystem for error handling to device
- * with SLI-3 interface spec. It is called when kernel error recovery tells
+ * with SLI-4 interface spec. It is called when kernel error recovery tells
  * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
  * error recovery. After this call, traffic can start to flow from this device
  * again.
- */
+ **/
 static void
-lpfc_io_resume_s3(struct pci_dev *pdev)
+lpfc_io_resume_s4(struct pci_dev *pdev)
 {
-	struct Scsi_Host *shost = pci_get_drvdata(pdev);
-	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
-
-	lpfc_online(phba);
+	return;
 }
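
The three _s4 routines above implement the kernel's standard PCI error
recovery callbacks. They reach the PCI core through a struct
pci_error_handlers table referenced from the driver's struct pci_driver;
a sketch of that wiring, using the generic wrappers defined later in this
file (the table name here is illustrative):

    static struct pci_error_handlers example_err_handler = {
            .error_detected = lpfc_io_error_detected, /* stop I/O and IRQs */
            .slot_reset     = lpfc_io_slot_reset,     /* re-init after reset */
            .resume         = lpfc_io_resume,         /* traffic may resume */
    };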
 
 /**
@@ -4154,6 +7622,10 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
 		return -ENODEV;
 
 	switch (dev_id) {
+	case PCI_DEVICE_ID_TIGERSHARK:
+	case PCI_DEVICE_ID_TIGERSHARK_S:
+		rc = lpfc_pci_probe_one_s4(pdev, pid);
+		break;
 	default:
 		rc = lpfc_pci_probe_one_s3(pdev, pid);
 		break;
@@ -4181,6 +7653,9 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
 	case LPFC_PCI_DEV_LP:
 		lpfc_pci_remove_one_s3(pdev);
 		break;
+	case LPFC_PCI_DEV_OC:
+		lpfc_pci_remove_one_s4(pdev);
+		break;
 	default:
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"1424 Invalid PCI device group: 0x%x\n",
@@ -4215,6 +7690,9 @@ lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
 	case LPFC_PCI_DEV_LP:
 		rc = lpfc_pci_suspend_one_s3(pdev, msg);
 		break;
+	case LPFC_PCI_DEV_OC:
+		rc = lpfc_pci_suspend_one_s4(pdev, msg);
+		break;
 	default:
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"1425 Invalid PCI device group: 0x%x\n",
@@ -4248,6 +7726,9 @@ lpfc_pci_resume_one(struct pci_dev *pdev)
 	case LPFC_PCI_DEV_LP:
 		rc = lpfc_pci_resume_one_s3(pdev);
 		break;
+	case LPFC_PCI_DEV_OC:
+		rc = lpfc_pci_resume_one_s4(pdev);
+		break;
 	default:
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"1426 Invalid PCI device group: 0x%x\n",
@@ -4283,6 +7764,9 @@ lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
 	case LPFC_PCI_DEV_LP:
 		rc = lpfc_io_error_detected_s3(pdev, state);
 		break;
+	case LPFC_PCI_DEV_OC:
+		rc = lpfc_io_error_detected_s4(pdev, state);
+		break;
 	default:
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"1427 Invalid PCI device group: 0x%x\n",
@@ -4317,6 +7801,9 @@ lpfc_io_slot_reset(struct pci_dev *pdev)
 	case LPFC_PCI_DEV_LP:
 		rc = lpfc_io_slot_reset_s3(pdev);
 		break;
+	case LPFC_PCI_DEV_OC:
+		rc = lpfc_io_slot_reset_s4(pdev);
+		break;
 	default:
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"1428 Invalid PCI device group: 0x%x\n",
@@ -4346,6 +7833,9 @@ lpfc_io_resume(struct pci_dev *pdev)
 	case LPFC_PCI_DEV_LP:
 		lpfc_io_resume_s3(pdev);
 		break;
+	case LPFC_PCI_DEV_OC:
+		lpfc_io_resume_s4(pdev);
+		break;
 	default:
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"1429 Invalid PCI device group: 0x%x\n",
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index 134fc7fc212..7f5899b70bd 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -28,8 +28,10 @@
 
 #include <scsi/scsi.h>
 
+#include "lpfc_hw4.h"
 #include "lpfc_hw.h"
 #include "lpfc_sli.h"
+#include "lpfc_sli4.h"
 #include "lpfc_nl.h"
 #include "lpfc_disc.h"
 #include "lpfc_scsi.h"
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index 35a97673339..516f4802f84 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -28,8 +28,10 @@
 
 #include <scsi/scsi.h>
 
+#include "lpfc_hw4.h"
 #include "lpfc_hw.h"
 #include "lpfc_sli.h"
+#include "lpfc_sli4.h"
 #include "lpfc_nl.h"
 #include "lpfc_disc.h"
 #include "lpfc_scsi.h"
@@ -45,7 +47,7 @@
  * @phba: HBA to allocate pools for
  *
  * Description: Creates and allocates PCI pools lpfc_scsi_dma_buf_pool,
- * lpfc_mbuf_pool, lpfc_hbq_pool.  Creates and allocates kmalloc-backed mempools
+ * lpfc_mbuf_pool, lpfc_hrb_pool.  Creates and allocates kmalloc-backed mempools
  * for LPFC_MBOXQ_t and lpfc_nodelist.  Also allocates the VPI bitmask.
  *
  * Notes: Not interrupt-safe.  Must be called with no locks held.  If any
@@ -56,19 +58,30 @@
  *   -ENOMEM on failure (if any memory allocations fail)
  **/
 int
-lpfc_mem_alloc(struct lpfc_hba * phba)
+lpfc_mem_alloc(struct lpfc_hba *phba, int align)
 {
 	struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
 	int longs;
 	int i;
 
-	phba->lpfc_scsi_dma_buf_pool = pci_pool_create("lpfc_scsi_dma_buf_pool",
-				phba->pcidev, phba->cfg_sg_dma_buf_size, 8, 0);
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		phba->lpfc_scsi_dma_buf_pool =
+			pci_pool_create("lpfc_scsi_dma_buf_pool",
+				phba->pcidev,
+				phba->cfg_sg_dma_buf_size,
+				phba->cfg_sg_dma_buf_size,
+				0);
+	else
+		phba->lpfc_scsi_dma_buf_pool =
+			pci_pool_create("lpfc_scsi_dma_buf_pool",
+				phba->pcidev, phba->cfg_sg_dma_buf_size,
+				align, 0);
 	if (!phba->lpfc_scsi_dma_buf_pool)
 		goto fail;
 
 	phba->lpfc_mbuf_pool = pci_pool_create("lpfc_mbuf_pool", phba->pcidev,
-							LPFC_BPL_SIZE, 8,0);
+							LPFC_BPL_SIZE,
+							align, 0);
 	if (!phba->lpfc_mbuf_pool)
 		goto fail_free_dma_buf_pool;
 
@@ -97,23 +110,31 @@ lpfc_mem_alloc(struct lpfc_hba * phba)
 						sizeof(struct lpfc_nodelist));
 	if (!phba->nlp_mem_pool)
 		goto fail_free_mbox_pool;
-
-	phba->lpfc_hbq_pool = pci_pool_create("lpfc_hbq_pool",phba->pcidev,
-					      LPFC_BPL_SIZE, 8, 0);
-	if (!phba->lpfc_hbq_pool)
+	phba->lpfc_hrb_pool = pci_pool_create("lpfc_hrb_pool",
+					      phba->pcidev,
+					      LPFC_HDR_BUF_SIZE, align, 0);
+	if (!phba->lpfc_hrb_pool)
 		goto fail_free_nlp_mem_pool;
+	phba->lpfc_drb_pool = pci_pool_create("lpfc_drb_pool",
+					      phba->pcidev,
+					      LPFC_DATA_BUF_SIZE, align, 0);
+	if (!phba->lpfc_drb_pool)
+		goto fail_free_hbq_pool;
 
 	/* vpi zero is reserved for the physical port so add 1 to max */
 	longs = ((phba->max_vpi + 1) + BITS_PER_LONG - 1) / BITS_PER_LONG;
 	phba->vpi_bmask = kzalloc(longs * sizeof(unsigned long), GFP_KERNEL);
 	if (!phba->vpi_bmask)
-		goto fail_free_hbq_pool;
+		goto fail_free_dbq_pool;
 
 	return 0;
 
+ fail_free_dbq_pool:
+	pci_pool_destroy(phba->lpfc_drb_pool);
+	phba->lpfc_drb_pool = NULL;
  fail_free_hbq_pool:
-	lpfc_sli_hbqbuf_free_all(phba);
-	pci_pool_destroy(phba->lpfc_hbq_pool);
+	pci_pool_destroy(phba->lpfc_hrb_pool);
+	phba->lpfc_hrb_pool = NULL;
  fail_free_nlp_mem_pool:
 	mempool_destroy(phba->nlp_mem_pool);
 	phba->nlp_mem_pool = NULL;
@@ -136,27 +157,73 @@ lpfc_mem_alloc(struct lpfc_hba * phba)
 }
 
 /**
- * lpfc_mem_free - Frees all PCI and memory allocated by lpfc_mem_alloc
+ * lpfc_mem_free - Frees memory allocated by lpfc_mem_alloc
  * @phba: HBA to free memory for
  *
- * Description: Frees PCI pools lpfc_scsi_dma_buf_pool, lpfc_mbuf_pool,
- * lpfc_hbq_pool.  Frees kmalloc-backed mempools for LPFC_MBOXQ_t and
- * lpfc_nodelist.  Also frees the VPI bitmask
+ * Description: Frees the memory allocated by the lpfc_mem_alloc routine.
+ * This routine is the counterpart of lpfc_mem_alloc.
  *
  * Returns: None
  **/
 void
-lpfc_mem_free(struct lpfc_hba * phba)
+lpfc_mem_free(struct lpfc_hba *phba)
 {
-	struct lpfc_sli *psli = &phba->sli;
-	struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
-	LPFC_MBOXQ_t *mbox, *next_mbox;
-	struct lpfc_dmabuf   *mp;
 	int i;
+	struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
 
+	/* Free VPI bitmask memory */
 	kfree(phba->vpi_bmask);
+
+	/* Free HBQ pools */
 	lpfc_sli_hbqbuf_free_all(phba);
+	pci_pool_destroy(phba->lpfc_drb_pool);
+	phba->lpfc_drb_pool = NULL;
+	pci_pool_destroy(phba->lpfc_hrb_pool);
+	phba->lpfc_hrb_pool = NULL;
+
+	/* Free NLP memory pool */
+	mempool_destroy(phba->nlp_mem_pool);
+	phba->nlp_mem_pool = NULL;
+
+	/* Free mbox memory pool */
+	mempool_destroy(phba->mbox_mem_pool);
+	phba->mbox_mem_pool = NULL;
+
+	/* Free MBUF memory pool */
+	for (i = 0; i < pool->current_count; i++)
+		pci_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt,
+			      pool->elements[i].phys);
+	kfree(pool->elements);
+
+	pci_pool_destroy(phba->lpfc_mbuf_pool);
+	phba->lpfc_mbuf_pool = NULL;
 
+	/* Free DMA buffer memory pool */
+	pci_pool_destroy(phba->lpfc_scsi_dma_buf_pool);
+	phba->lpfc_scsi_dma_buf_pool = NULL;
+
+	return;
+}
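
The pools that lpfc_mem_alloc creates and lpfc_mem_free tears down follow
the usual pci_pool lifecycle. A self-contained sketch of that API with an
illustrative pool name and element size:

    static int example_pool_lifecycle(struct pci_dev *pdev)
    {
            struct pci_pool *pool;
            dma_addr_t phys;
            void *virt;

            /* Create: name, device, element size, alignment, boundary. */
            pool = pci_pool_create("example_pool", pdev, 1024, 8, 0);
            if (!pool)
                    return -ENOMEM;

            /* Allocate one DMA-able element; phys gets the bus address. */
            virt = pci_pool_alloc(pool, GFP_KERNEL, &phys);
            if (!virt) {
                    pci_pool_destroy(pool);
                    return -ENOMEM;
            }

            /* ... hand virt/phys to the hardware ... */

            pci_pool_free(pool, virt, phys);
            pci_pool_destroy(pool);
            return 0;
    }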
+
+/**
+ * lpfc_mem_free_all - Frees all PCI and driver memory
+ * @phba: HBA to free memory for
+ *
+ * Description: Frees all PCI and driver memory pools used by the driver:
+ * lpfc_scsi_dma_buf_pool, lpfc_mbuf_pool, lpfc_hrb_pool. Frees
+ * kmalloc-backed mempools for LPFC_MBOXQ_t and lpfc_nodelist. Also frees
+ * the VPI bitmask.
+ *
+ * Returns: None
+ **/
+void
+lpfc_mem_free_all(struct lpfc_hba *phba)
+{
+	struct lpfc_sli *psli = &phba->sli;
+	LPFC_MBOXQ_t *mbox, *next_mbox;
+	struct lpfc_dmabuf   *mp;
+
+	/* Free memory used in mailbox queue back to mailbox memory pool */
 	list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq, list) {
 		mp = (struct lpfc_dmabuf *) (mbox->context1);
 		if (mp) {
@@ -166,6 +233,7 @@ lpfc_mem_free(struct lpfc_hba * phba)
 		list_del(&mbox->list);
 		mempool_free(mbox, phba->mbox_mem_pool);
 	}
+	/* Free memory used in mailbox cmpl list back to mailbox memory pool */
 	list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq_cmpl, list) {
 		mp = (struct lpfc_dmabuf *) (mbox->context1);
 		if (mp) {
@@ -175,8 +243,10 @@ lpfc_mem_free(struct lpfc_hba * phba)
 		list_del(&mbox->list);
 		mempool_free(mbox, phba->mbox_mem_pool);
 	}
-
+	/* Free the active mailbox command back to the mailbox memory pool */
+	spin_lock_irq(&phba->hbalock);
 	psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+	spin_unlock_irq(&phba->hbalock);
 	if (psli->mbox_active) {
 		mbox = psli->mbox_active;
 		mp = (struct lpfc_dmabuf *) (mbox->context1);
@@ -188,27 +258,14 @@ lpfc_mem_free(struct lpfc_hba * phba)
 		psli->mbox_active = NULL;
 	}
 
-	for (i = 0; i < pool->current_count; i++)
-		pci_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt,
-						 pool->elements[i].phys);
-	kfree(pool->elements);
-
-	pci_pool_destroy(phba->lpfc_hbq_pool);
-	mempool_destroy(phba->nlp_mem_pool);
-	mempool_destroy(phba->mbox_mem_pool);
-
-	pci_pool_destroy(phba->lpfc_scsi_dma_buf_pool);
-	pci_pool_destroy(phba->lpfc_mbuf_pool);
-
-	phba->lpfc_hbq_pool = NULL;
-	phba->nlp_mem_pool = NULL;
-	phba->mbox_mem_pool = NULL;
-	phba->lpfc_scsi_dma_buf_pool = NULL;
-	phba->lpfc_mbuf_pool = NULL;
+	/* Free and destroy all the allocated memory pools */
+	lpfc_mem_free(phba);
 
 	/* Free the iocb lookup array */
 	kfree(psli->iocbq_lookup);
 	psli->iocbq_lookup = NULL;
+
+	return;
 }
 
 /**
@@ -305,7 +362,7 @@ lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma)
  * lpfc_els_hbq_alloc - Allocate an HBQ buffer
  * @phba: HBA to allocate HBQ buffer for
  *
- * Description: Allocates a DMA-mapped HBQ buffer from the lpfc_hbq_pool PCI
+ * Description: Allocates a DMA-mapped HBQ buffer from the lpfc_hrb_pool PCI
 * pool along with a non-DMA-mapped container for it.
  *
  * Notes: Not interrupt-safe.  Must be called with no locks held.
@@ -323,7 +380,7 @@ lpfc_els_hbq_alloc(struct lpfc_hba *phba)
 	if (!hbqbp)
 		return NULL;
 
-	hbqbp->dbuf.virt = pci_pool_alloc(phba->lpfc_hbq_pool, GFP_KERNEL,
+	hbqbp->dbuf.virt = pci_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL,
 					  &hbqbp->dbuf.phys);
 	if (!hbqbp->dbuf.virt) {
 		kfree(hbqbp);
@@ -334,7 +391,7 @@ lpfc_els_hbq_alloc(struct lpfc_hba *phba)
 }
 
 /**
- * lpfc_mem_hbq_free - Frees an HBQ buffer allocated with lpfc_els_hbq_alloc
+ * lpfc_els_hbq_free - Frees an HBQ buffer allocated with lpfc_els_hbq_alloc
  * @phba: HBA buffer was allocated for
  * @hbqbp: HBQ container returned by lpfc_els_hbq_alloc
  *
@@ -348,11 +405,72 @@ lpfc_els_hbq_alloc(struct lpfc_hba *phba)
 void
 lpfc_els_hbq_free(struct lpfc_hba *phba, struct hbq_dmabuf *hbqbp)
 {
-	pci_pool_free(phba->lpfc_hbq_pool, hbqbp->dbuf.virt, hbqbp->dbuf.phys);
+	pci_pool_free(phba->lpfc_hrb_pool, hbqbp->dbuf.virt, hbqbp->dbuf.phys);
 	kfree(hbqbp);
 	return;
 }
 
+/**
+ * lpfc_sli4_rb_alloc - Allocate an SLI4 Receive buffer
+ * @phba: HBA to allocate a receive buffer for
+ *
+ * Description: Allocates a DMA-mapped receive buffer from the lpfc_hrb_pool PCI
+ * pool along with a non-DMA-mapped container for it.
+ *
+ * Notes: Not interrupt-safe.  Must be called with no locks held.
+ *
+ * Returns:
+ *   pointer to the hbq_dmabuf container on success
+ *   NULL on failure
+ **/
+struct hbq_dmabuf *
+lpfc_sli4_rb_alloc(struct lpfc_hba *phba)
+{
+	struct hbq_dmabuf *dma_buf;
+
+	dma_buf = kmalloc(sizeof(struct hbq_dmabuf), GFP_KERNEL);
+	if (!dma_buf)
+		return NULL;
+
+	dma_buf->hbuf.virt = pci_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL,
+					    &dma_buf->hbuf.phys);
+	if (!dma_buf->hbuf.virt) {
+		kfree(dma_buf);
+		return NULL;
+	}
+	dma_buf->dbuf.virt = pci_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
+					    &dma_buf->dbuf.phys);
+	if (!dma_buf->dbuf.virt) {
+		pci_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
+			      dma_buf->hbuf.phys);
+		kfree(dma_buf);
+		return NULL;
+	}
+	dma_buf->size = LPFC_BPL_SIZE;
+	return dma_buf;
+}
+
+/**
+ * lpfc_sli4_rb_free - Frees a receive buffer
+ * @phba: HBA buffer was allocated for
+ * @dmab: DMA Buffer container returned by lpfc_sli4_rb_alloc
+ *
+ * Description: Frees both the container and the DMA-mapped buffers returned by
+ * lpfc_sli4_rb_alloc.
+ *
+ * Notes: Can be called with or without locks held.
+ *
+ * Returns: None
+ **/
+void
+lpfc_sli4_rb_free(struct lpfc_hba *phba, struct hbq_dmabuf *dmab)
+{
+	pci_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys);
+	pci_pool_free(phba->lpfc_drb_pool, dmab->dbuf.virt, dmab->dbuf.phys);
+	kfree(dmab);
+	return;
+}
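
A short usage sketch for the pair above (hypothetical caller): whichever
path consumes the buffer must release it through lpfc_sli4_rb_free() so the
header and data halves return to their respective pools together:

    static int example_post_one_rb(struct lpfc_hba *phba)
    {
            struct hbq_dmabuf *rb;

            rb = lpfc_sli4_rb_alloc(phba);
            if (!rb)
                    return -ENOMEM;

            /* ... post rb->hbuf (header) and rb->dbuf (data) to the RQ ... */

            lpfc_sli4_rb_free(phba, rb);
            return 0;
    }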
+
 /**
  * lpfc_in_buf_free - Free a DMA buffer
  * @phba: HBA buffer is associated with
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 08cdc77af41..6ba5a72f604 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -28,8 +28,10 @@
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_transport_fc.h>
 
+#include "lpfc_hw4.h"
 #include "lpfc_hw.h"
 #include "lpfc_sli.h"
+#include "lpfc_sli4.h"
 #include "lpfc_nl.h"
 #include "lpfc_disc.h"
 #include "lpfc_scsi.h"
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index a226c053c0f..9af2db355bc 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -31,8 +31,10 @@
 #include <scsi/scsi_transport_fc.h>
 
 #include "lpfc_version.h"
+#include "lpfc_hw4.h"
 #include "lpfc_hw.h"
 #include "lpfc_sli.h"
+#include "lpfc_sli4.h"
 #include "lpfc_nl.h"
 #include "lpfc_disc.h"
 #include "lpfc_scsi.h"
@@ -57,6 +59,8 @@ static char *dif_op_str[] = {
 	"SCSI_PROT_READ_CONVERT",
 	"SCSI_PROT_WRITE_CONVERT"
 };
+static void
+lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb);
 
 static void
 lpfc_debug_save_data(struct scsi_cmnd *cmnd)
@@ -565,12 +569,279 @@ lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
 		}
 		iocb->ulpClass = CLASS3;
 		psb->status = IOSTAT_SUCCESS;
+		/* Put it back into the SCSI buffer list */
+		lpfc_release_scsi_buf_s4(phba, psb);
 
 	}
 
 	return bcnt;
 }
 
+/**
+ * lpfc_sli4_fcp_xri_aborted - Fast-path process of fcp xri abort
+ * @phba: pointer to lpfc hba data structure.
+ * @axri: pointer to the fcp xri abort wcqe structure.
+ *
+ * This routine is invoked by the worker thread to process a SLI4 fast-path
+ * FCP aborted xri.
+ **/
+void
+lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
+			  struct sli4_wcqe_xri_aborted *axri)
+{
+	uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
+	struct lpfc_scsi_buf *psb, *next_psb;
+	unsigned long iflag = 0;
+
+	spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock, iflag);
+	list_for_each_entry_safe(psb, next_psb,
+		&phba->sli4_hba.lpfc_abts_scsi_buf_list, list) {
+		if (psb->cur_iocbq.sli4_xritag == xri) {
+			list_del(&psb->list);
+			psb->status = IOSTAT_SUCCESS;
+			spin_unlock_irqrestore(
+				&phba->sli4_hba.abts_scsi_buf_list_lock,
+				iflag);
+			lpfc_release_scsi_buf_s4(phba, psb);
+			return;
+		}
+	}
+	spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock,
+				iflag);
+}
+
+/**
+ * lpfc_sli4_repost_scsi_sgl_list - Repost the SCSI buffer sgl pages as a block
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine walks the list of scsi buffers that have been allocated and
+ * reposts them to the HBA by using SGL block post. This is needed after a
+ * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine
+ * is responsible for moving all scsi buffers on the lpfc_abts_scsi_buf_list
+ * to the lpfc_scsi_buf_list. If the repost fails, the scsi buffers are
+ * rejected.
+ *
+ * Returns: 0 = success, non-zero failure.
+ **/
+int
+lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba)
+{
+	struct lpfc_scsi_buf *psb;
+	int index, status, bcnt = 0, rcnt = 0, rc = 0;
+	LIST_HEAD(sblist);
+
+	for (index = 0; index < phba->sli4_hba.scsi_xri_cnt; index++) {
+		psb = phba->sli4_hba.lpfc_scsi_psb_array[index];
+		if (psb) {
+			/* Remove from SCSI buffer list */
+			list_del(&psb->list);
+			/* Add it to a local SCSI buffer list */
+			list_add_tail(&psb->list, &sblist);
+			if (++rcnt == LPFC_NEMBED_MBOX_SGL_CNT) {
+				bcnt = rcnt;
+				rcnt = 0;
+			}
+		} else
+			/* A hole present in the XRI array, need to skip */
+			bcnt = rcnt;
+
+		if (index == phba->sli4_hba.scsi_xri_cnt - 1)
+			/* End of XRI array for SCSI buffer, complete */
+			bcnt = rcnt;
+
+		/* Continue until we collect up to a nembed page worth of sgls */
+		if (bcnt == 0)
+			continue;
+		/* Now, post the SCSI buffer list sgls as a block */
+		status = lpfc_sli4_post_scsi_sgl_block(phba, &sblist, bcnt);
+		/* Reset SCSI buffer count for next round of posting */
+		bcnt = 0;
+		while (!list_empty(&sblist)) {
+			list_remove_head(&sblist, psb, struct lpfc_scsi_buf,
+					 list);
+			if (status) {
+				/* Put this back on the abort scsi list */
+				psb->status = IOSTAT_LOCAL_REJECT;
+				psb->result = IOERR_ABORT_REQUESTED;
+				rc++;
+			} else
+				psb->status = IOSTAT_SUCCESS;
+			/* Put it back into the SCSI buffer list */
+			lpfc_release_scsi_buf_s4(phba, psb);
+		}
+	}
+	return rc;
+}
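
The loop above is an accumulate-then-flush batcher: SGLs are gathered until
a non-embedded mailbox page is full (LPFC_NEMBED_MBOX_SGL_CNT), or a hole
in the XRI array or the end of the array forces an early flush. The same
shape in a generic, self-contained form (BATCH_MAX and flush_block() are
illustrative stand-ins):

    #define BATCH_MAX 8	/* stands in for LPFC_NEMBED_MBOX_SGL_CNT */

    static void flush_block(const int *batch, int cnt)
    {
            /* post 'cnt' accumulated entries as one block (elided) */
    }

    static void post_in_blocks(const int *items, int total)
    {
            int batch[BATCH_MAX];
            int cnt = 0, i;

            for (i = 0; i < total; i++) {
                    batch[cnt++] = items[i];
                    if (cnt == BATCH_MAX || i == total - 1) {
                            flush_block(batch, cnt);
                            cnt = 0;
                    }
            }
    }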
+
+/**
+ * lpfc_new_scsi_buf_s4 - Scsi buffer allocator for HBA with SLI4 IF spec
+ * @vport: The virtual port for which this call is being executed.
+ * @num_to_alloc: The requested number of buffers to allocate.
+ *
+ * This routine allocates a scsi buffer for a device with the SLI-4 interface
+ * spec. The scsi buffer contains all the necessary information needed to
+ * initiate a SCSI I/O.
+ *
+ * Return codes:
+ *   int - number of scsi buffers that were allocated.
+ *   0 = failure, less than num_to_alloc is a partial failure.
+ **/
+static int
+lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
+{
+	struct lpfc_hba *phba = vport->phba;
+	struct lpfc_scsi_buf *psb;
+	struct sli4_sge *sgl;
+	IOCB_t *iocb;
+	dma_addr_t pdma_phys_fcp_cmd;
+	dma_addr_t pdma_phys_fcp_rsp;
+	dma_addr_t pdma_phys_bpl, pdma_phys_bpl1;
+	uint16_t iotag, last_xritag = NO_XRI;
+	int status = 0, index;
+	int bcnt;
+	int non_sequential_xri = 0;
+	int rc = 0;
+	LIST_HEAD(sblist);
+
+	for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
+		psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
+		if (!psb)
+			break;
+
+		/*
+		 * Get memory from the pci pool to map the virt space to pci bus
+		 * space for an I/O.  The DMA buffer includes space for the
+		 * struct fcp_cmnd, struct fcp_rsp and the number of bde's
+		 * necessary to support the sg_tablesize.
+		 */
+		psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool,
+						GFP_KERNEL, &psb->dma_handle);
+		if (!psb->data) {
+			kfree(psb);
+			break;
+		}
+
+		/* Initialize virtual ptrs to dma_buf region. */
+		memset(psb->data, 0, phba->cfg_sg_dma_buf_size);
+
+		/* Allocate iotag for psb->cur_iocbq. */
+		iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
+		if (iotag == 0) {
+			kfree(psb);
+			break;
+		}
+
+		psb->cur_iocbq.sli4_xritag = lpfc_sli4_next_xritag(phba);
+		if (psb->cur_iocbq.sli4_xritag == NO_XRI) {
+			pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
+			      psb->data, psb->dma_handle);
+			kfree(psb);
+			break;
+		}
+		if (last_xritag != NO_XRI
+			&& psb->cur_iocbq.sli4_xritag != (last_xritag+1)) {
+			non_sequential_xri = 1;
+		} else
+			list_add_tail(&psb->list, &sblist);
+		last_xritag = psb->cur_iocbq.sli4_xritag;
+
+		index = phba->sli4_hba.scsi_xri_cnt++;
+		psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;
+
+		psb->fcp_bpl = psb->data;
+		psb->fcp_cmnd = (psb->data + phba->cfg_sg_dma_buf_size)
+			- (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
+		psb->fcp_rsp = (struct fcp_rsp *)((uint8_t *)psb->fcp_cmnd +
+					sizeof(struct fcp_cmnd));
+
+		/* Initialize local short-hand pointers. */
+		sgl = (struct sli4_sge *)psb->fcp_bpl;
+		pdma_phys_bpl = psb->dma_handle;
+		pdma_phys_fcp_cmd =
+			(psb->dma_handle + phba->cfg_sg_dma_buf_size)
+			 - (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
+		pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd);
+
+		/*
+		 * The first two bdes are the FCP_CMD and FCP_RSP.  The balance
+		 * are sg list bdes.  Initialize the first two and leave the
+		 * rest for queuecommand.
+		 */
+		sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd));
+		sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd));
+		bf_set(lpfc_sli4_sge_len, sgl, sizeof(struct fcp_cmnd));
+		bf_set(lpfc_sli4_sge_last, sgl, 0);
+		sgl->word2 = cpu_to_le32(sgl->word2);
+		sgl->word3 = cpu_to_le32(sgl->word3);
+		sgl++;
+
+		/* Setup the physical region for the FCP RSP */
+		sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp));
+		sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp));
+		bf_set(lpfc_sli4_sge_len, sgl, sizeof(struct fcp_rsp));
+		bf_set(lpfc_sli4_sge_last, sgl, 1);
+		sgl->word2 = cpu_to_le32(sgl->word2);
+		sgl->word3 = cpu_to_le32(sgl->word3);
+
+		/*
+		 * Since the IOCB for the FCP I/O is built into this
+		 * lpfc_scsi_buf, initialize it with all known data now.
+		 */
+		iocb = &psb->cur_iocbq.iocb;
+		iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
+		iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_64;
+		/* setting the BLP size to 2 * sizeof BDE may not be correct.
+		 * We are setting the bpl to point to our sgl. An sgl's
+		 * entries are 16 bytes, a bpl's entries are 12 bytes.
+		 */
+		iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
+		iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_fcp_cmd);
+		iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_fcp_cmd);
+		iocb->ulpBdeCount = 1;
+		iocb->ulpLe = 1;
+		iocb->ulpClass = CLASS3;
+		if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
+			pdma_phys_bpl1 = pdma_phys_bpl + SGL_PAGE_SIZE;
+		else
+			pdma_phys_bpl1 = 0;
+		psb->dma_phys_bpl = pdma_phys_bpl;
+		phba->sli4_hba.lpfc_scsi_psb_array[index] = psb;
+		if (non_sequential_xri) {
+			status = lpfc_sli4_post_sgl(phba, pdma_phys_bpl,
+						pdma_phys_bpl1,
+						psb->cur_iocbq.sli4_xritag);
+			if (status) {
+				/* Put this back on the abort scsi list */
+				psb->status = IOSTAT_LOCAL_REJECT;
+				psb->result = IOERR_ABORT_REQUESTED;
+				rc++;
+			} else
+				psb->status = IOSTAT_SUCCESS;
+			/* Put it back into the SCSI buffer list */
+			lpfc_release_scsi_buf_s4(phba, psb);
+			break;
+		}
+	}
+	if (bcnt) {
+		status = lpfc_sli4_post_scsi_sgl_block(phba, &sblist, bcnt);
+		/* Reset SCSI buffer count for next round of posting */
+		while (!list_empty(&sblist)) {
+			list_remove_head(&sblist, psb, struct lpfc_scsi_buf,
+				 list);
+			if (status) {
+				/* Put this back on the abort scsi list */
+				psb->status = IOSTAT_LOCAL_REJECT;
+				psb->result = IOERR_ABORT_REQUESTED;
+				rc++;
+			} else
+				psb->status = IOSTAT_SUCCESS;
+			/* Put it back into the SCSI buffer list */
+			lpfc_release_scsi_buf_s4(phba, psb);
+		}
+	}
+
+	return bcnt + non_sequential_xri - rc;
+}
+
 /**
 * lpfc_new_scsi_buf - Wrapper function for scsi buffer allocator
 * @vport: The virtual port for which this call is being executed.
@@ -637,6 +908,39 @@ lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
 	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
 }
 
+/**
+ * lpfc_release_scsi_buf_s4 - Return a scsi buffer back to hba scsi buf list.
+ * @phba: The Hba for which this call is being executed.
+ * @psb: The scsi buffer which is being released.
+ *
+ * This routine releases the @psb scsi buffer by adding it to the tail of the
+ * @phba lpfc_scsi_buf_list. For SLI4, XRIs are tied to the scsi buffer and
+ * cannot be reused for at least RA_TOV amount of time if the I/O was
+ * aborted.
+ **/
+static void
+lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
+{
+	unsigned long iflag = 0;
+
+	if (psb->status == IOSTAT_LOCAL_REJECT
+		&& psb->result == IOERR_ABORT_REQUESTED) {
+		spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock,
+					iflag);
+		psb->pCmd = NULL;
+		list_add_tail(&psb->list,
+			&phba->sli4_hba.lpfc_abts_scsi_buf_list);
+		spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock,
+					iflag);
+	} else {
+
+		spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
+		psb->pCmd = NULL;
+		list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list);
+		spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
+	}
+}
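
A hypothetical completion-path snippet showing how the status/result pair
drives the routing above: marking a buffer as locally rejected due to an
abort parks it on the abts list instead of recycling its XRI immediately
(the values mirror those used by the error paths earlier in this file):

    static void example_park_aborted_buf(struct lpfc_hba *phba,
                                         struct lpfc_scsi_buf *psb)
    {
            psb->status = IOSTAT_LOCAL_REJECT;
            psb->result = IOERR_ABORT_REQUESTED;
            lpfc_release_scsi_buf_s4(phba, psb);	/* -> abts list */
    }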
+
 /**
 * lpfc_release_scsi_buf - Return a scsi buffer back to hba scsi buf list.
  * @phba: The Hba for which this call is being executed.
@@ -1454,6 +1758,115 @@ out:
 	return ret;
 }
 
+/**
+ * lpfc_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec
+ * @phba: The Hba for which this call is being executed.
+ * @lpfc_cmd: The scsi buffer which is going to be mapped.
+ *
+ * This routine does the pci dma mapping for the scatter-gather list of the
+ * scsi cmnd field of @lpfc_cmd for a device with the SLI-4 interface spec.
+ *
+ * Return codes:
+ * 	1 - Error
+ * 	0 - Success
+ **/
+static int
+lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
+{
+	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
+	struct scatterlist *sgel = NULL;
+	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
+	struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
+	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
+	dma_addr_t physaddr;
+	uint32_t num_bde = 0;
+	uint32_t dma_len;
+	uint32_t dma_offset = 0;
+	int nseg;
+
+	/*
+	 * There are three possibilities here - use scatter-gather segment, use
+	 * the single mapping, or neither.  Start the lpfc command prep by
+	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
+	 * data bde entry.
+	 */
+	if (scsi_sg_count(scsi_cmnd)) {
+		/*
+		 * The driver stores the segment count returned from pci_map_sg
+		 * because this a count of dma-mappings used to map the use_sg
+		 * pages.  They are not guaranteed to be the same for those
+		 * architectures that implement an IOMMU.
+		 */
+
+		nseg = scsi_dma_map(scsi_cmnd);
+		if (unlikely(!nseg))
+			return 1;
+		sgl += 1;
+		/* clear the last flag in the fcp_rsp map entry */
+		sgl->word2 = le32_to_cpu(sgl->word2);
+		bf_set(lpfc_sli4_sge_last, sgl, 0);
+		sgl->word2 = cpu_to_le32(sgl->word2);
+		sgl += 1;
+
+		lpfc_cmd->seg_cnt = nseg;
+		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
+			printk(KERN_ERR "%s: Too many sg segments from "
+			       "dma_map_sg.  Config %d, seg_cnt %d\n",
+			       __func__, phba->cfg_sg_seg_cnt,
+			       lpfc_cmd->seg_cnt);
+			scsi_dma_unmap(scsi_cmnd);
+			return 1;
+		}
+
+		/*
+		 * The driver established a maximum scatter-gather segment count
+		 * during probe that limits the number of sg elements in any
+		 * single scsi command.  Just run through the seg_cnt and format
+		 * the sge's.
+		 * When using SLI-3 the driver will try to fit all the BDEs into
+		 * the IOCB. If it can't then the BDEs get added to a BPL as it
+		 * does for SLI-2 mode.
+		 */
+		scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
+			physaddr = sg_dma_address(sgel);
+			dma_len = sg_dma_len(sgel);
+			bf_set(lpfc_sli4_sge_len, sgl, sg_dma_len(sgel));
+			sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
+			sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
+			if ((num_bde + 1) == nseg)
+				bf_set(lpfc_sli4_sge_last, sgl, 1);
+			else
+				bf_set(lpfc_sli4_sge_last, sgl, 0);
+			bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
+			sgl->word2 = cpu_to_le32(sgl->word2);
+			sgl->word3 = cpu_to_le32(sgl->word3);
+			dma_offset += dma_len;
+			sgl++;
+		}
+	} else {
+		sgl += 1;
+		/* clear the last flag in the fcp_rsp map entry */
+		sgl->word2 = le32_to_cpu(sgl->word2);
+		bf_set(lpfc_sli4_sge_last, sgl, 1);
+		sgl->word2 = cpu_to_le32(sgl->word2);
+	}
+
+	/*
+	 * Finish initializing those IOCB fields that are dependent on the
+	 * scsi_cmnd request_buffer.  Note that for SLI-2 the bdeSize is
+	 * explicitly reinitialized and all iocb memory resources are reused.
+	 */
+	fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
+
+	/*
+	 * Due to difference in data length between DIF/non-DIF paths,
+	 * we need to set word 4 of IOCB here
+	 */
+	iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
+	return 0;
+}
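
The per-segment body of the loop above reduces to a fixed recipe: addresses
are stored pre-swapped, the flag and length fields are set in CPU order via
bf_set(), and each affected word is byte-swapped exactly once at the end.
That recipe in isolation (assuming struct sli4_sge and the bf_set()/putPaddr
macros from the driver's headers):

    static void example_fill_data_sge(struct sli4_sge *sgl, dma_addr_t paddr,
                                      uint32_t len, uint32_t off, int last)
    {
            sgl->addr_hi = cpu_to_le32(putPaddrHigh(paddr));
            sgl->addr_lo = cpu_to_le32(putPaddrLow(paddr));
            bf_set(lpfc_sli4_sge_len, sgl, len);	/* CPU order */
            bf_set(lpfc_sli4_sge_last, sgl, last);	/* CPU order */
            bf_set(lpfc_sli4_sge_offset, sgl, off);
            sgl->word2 = cpu_to_le32(sgl->word2);	/* swap once per word */
            sgl->word3 = cpu_to_le32(sgl->word3);
    }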
+
 /**
  * lpfc_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer
  * @phba: The Hba for which this call is being executed.
@@ -1589,6 +2002,22 @@ lpfc_scsi_unprep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
 				psb->pCmd->sc_data_direction);
 }
 
+/**
+ * lpfc_scsi_unprep_dma_buf_s4 - Un-map DMA mapping of SG-list for SLI4 dev
+ * @phba: The Hba for which this call is being executed.
+ * @psb: The scsi buffer which is going to be un-mapped.
+ *
+ * This routine does DMA un-mapping of the scatter gather list of the scsi
+ * command field of @lpfc_cmd for a device with the SLI-4 interface spec. If
+ * we have to remove the sgl for this scsi buffer then we will do it here.
+ * For now we should be able to just call the sli3 unprep routine.
+ **/
+static void
+lpfc_scsi_unprep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
+{
+	lpfc_scsi_unprep_dma_buf_s3(phba, psb);
+}
+
 /**
  * lpfc_scsi_unprep_dma_buf - Wrapper function for unmap DMA mapping of SG-list
  * @phba: The Hba for which this call is being executed.
@@ -2128,6 +2557,29 @@ lpfc_scsi_prep_cmnd_s3(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
 	piocbq->vport = vport;
 }
 
+/**
+ * lpfc_scsi_prep_cmnd_s4 - Convert scsi cmnd to FCP info unit for SLI4 dev
+ * @vport: The virtual port for which this call is being executed.
+ * @lpfc_cmd: The scsi command which needs to send.
+ * @pnode: Pointer to lpfc_nodelist.
+ *
+ * This routine initializes fcp_cmnd and iocb data structure from scsi command
+ * to transfer for device with SLI4 interface spec.
+ **/
+static void
+lpfc_scsi_prep_cmnd_s4(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
+		       struct lpfc_nodelist *pnode)
+{
+	/*
+	 * The prep cmnd routines do not touch the sgl or its
+	 * entries. We may not have to do anything different.
+	 * I will leave this function in place until we can
+	 * run some IO through the driver and determine if changes
+	 * are needed.
+	 */
+	return lpfc_scsi_prep_cmnd_s3(vport, lpfc_cmd, pnode);
+}
+
 /**
  * lpfc_scsi_prep_cmnd - Wrapper func for convert scsi cmnd to FCP info unit
  * @vport: The virtual port for which this call is being executed.
@@ -2208,6 +2660,37 @@ lpfc_scsi_prep_task_mgmt_cmd_s3(struct lpfc_vport *vport,
 	return 1;
 }
 
+/**
+ * lpfc_scsi_prep_task_mgmt_cmd_s4 - Convert SLI4 scsi TM cmd to FCP info unit
+ * @vport: The virtual port for which this call is being executed.
+ * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
+ * @lun: Logical unit number.
+ * @task_mgmt_cmd: SCSI task management command.
+ *
+ * This routine creates FCP information unit corresponding to @task_mgmt_cmd
+ * for device with SLI-4 interface spec.
+ *
+ * Return codes:
+ * 	0 - Error
+ * 	1 - Success
+ **/
+static int
+lpfc_scsi_prep_task_mgmt_cmd_s4(struct lpfc_vport *vport,
+				struct lpfc_scsi_buf *lpfc_cmd,
+				unsigned int lun,
+				uint8_t task_mgmt_cmd)
+{
+	/*
+	 * The prep cmnd routines do not touch the sgl or its
+	 * entries. We may not have to do anything different.
+	 * I will leave this function in place until we can
+	 * run some IO through the driver and determine if changes
+	 * are needed.
+	 */
+	return lpfc_scsi_prep_task_mgmt_cmd_s3(vport, lpfc_cmd, lun,
+						task_mgmt_cmd);
+}
+
 /**
 * lpfc_scsi_prep_task_mgmt_cmd - Wrapper func to convert scsi TM cmd to FCP info
  * @vport: The virtual port for which this call is being executed.
@@ -2257,6 +2740,15 @@ lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
 					lpfc_scsi_prep_task_mgmt_cmd_s3;
 		phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3;
 		break;
+	case LPFC_PCI_DEV_OC:
+		phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s4;
+		phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4;
+		phba->lpfc_scsi_prep_cmnd = lpfc_scsi_prep_cmnd_s4;
+		phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf_s4;
+		phba->lpfc_scsi_prep_task_mgmt_cmd =
+					lpfc_scsi_prep_task_mgmt_cmd_s4;
+		phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4;
+		break;
 	default:
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"1418 Invalid HBA PCI-device group: 0x%x\n",
diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h
index c7c440d5fa2..65dfc8bd5b4 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.h
+++ b/drivers/scsi/lpfc/lpfc_scsi.h
@@ -140,6 +140,8 @@ struct lpfc_scsi_buf {
 	struct fcp_rsp *fcp_rsp;
 	struct ulp_bde64 *fcp_bpl;
 
+	dma_addr_t dma_phys_bpl;
+
 	/* cur_iocbq has phys of the dma-able buffer.
 	 * Iotag is in here
 	 */
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index e2d07d97fa8..706bb22a6e8 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -29,9 +29,12 @@
 #include <scsi/scsi_device.h>
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_transport_fc.h>
+#include <scsi/fc/fc_fs.h>
 
+#include "lpfc_hw4.h"
 #include "lpfc_hw.h"
 #include "lpfc_sli.h"
+#include "lpfc_sli4.h"
 #include "lpfc_nl.h"
 #include "lpfc_disc.h"
 #include "lpfc_scsi.h"
@@ -120,6 +123,76 @@ __lpfc_sli_get_iocbq(struct lpfc_hba *phba)
 	return iocbq;
 }
 
+/**
+ * __lpfc_clear_active_sglq - Remove the active sglq for this XRI.
+ * @phba: Pointer to HBA context object.
+ * @xritag: XRI value.
+ *
+ * This function clears the sglq pointer from the array of active
+ * sglq's. The xritag that is passed in is used to index into the
+ * array. Before the xritag can be used it needs to be adjusted
+ * by subtracting the xribase.
+ *
+ * Returns sglq pointer = success, NULL = Failure.
+ **/
+static struct lpfc_sglq *
+__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
+{
+	uint16_t adj_xri;
+	struct lpfc_sglq *sglq;
+	adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
+	if (adj_xri > phba->sli4_hba.max_cfg_param.max_xri)
+		return NULL;
+	sglq = phba->sli4_hba.lpfc_sglq_active_list[adj_xri];
+	phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = NULL;
+	return sglq;
+}
+
+/**
+ * __lpfc_get_active_sglq - Get the active sglq for this XRI.
+ * @phba: Pointer to HBA context object.
+ * @xritag: XRI value.
+ *
+ * This function returns the sglq pointer from the array of active
+ * sglq's. The xritag that is passed in is used to index into the
+ * array. Before the xritag can be used it needs to be adjusted
+ * by subtracting the xribase.
+ *
+ * Returns sglq pointer = success, NULL = Failure.
+ **/
+static struct lpfc_sglq *
+__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
+{
+	uint16_t adj_xri;
+	struct lpfc_sglq *sglq;
+	adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
+	if (adj_xri > phba->sli4_hba.max_cfg_param.max_xri)
+		return NULL;
+	sglq =  phba->sli4_hba.lpfc_sglq_active_list[adj_xri];
+	return sglq;
+}
+
+/**
+ * __lpfc_sli_get_sglq - Allocates a sglq object from the sgl pool
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called with the hbalock held. It gets a new driver
+ * sglq object from the sglq list. If the list is not empty, it returns a
+ * pointer to the newly allocated sglq object; otherwise it returns NULL.
+ **/
+static struct lpfc_sglq *
+__lpfc_sli_get_sglq(struct lpfc_hba *phba)
+{
+	struct list_head *lpfc_sgl_list = &phba->sli4_hba.lpfc_sgl_list;
+	struct lpfc_sglq *sglq = NULL;
+	uint16_t adj_xri;
+	list_remove_head(lpfc_sgl_list, sglq, struct lpfc_sglq, list);
+	adj_xri = sglq->sli4_xritag - phba->sli4_hba.max_cfg_param.xri_base;
+	phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = sglq;
+	return sglq;
+}
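
A worked example of the XRI adjustment shared by the three helpers above:
with xri_base == 100 and max_xri == 512, xritag 103 maps to slot 3 of the
active array, while anything past the configured range is rejected. In
isolation (illustrative values):

    /* Mirrors the xritag -> array-slot adjustment; returns -1 when the
     * adjusted index falls outside the configured XRI range.
     */
    static int example_adj_xri(uint16_t xritag, uint16_t xri_base,
                               uint16_t max_xri)
    {
            uint16_t adj_xri = xritag - xri_base;

            if (adj_xri > max_xri)
                    return -1;
            return adj_xri;		/* e.g. 103 - 100 == 3 */
    }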
+
 /**
  * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
  * @phba: Pointer to HBA context object.
@@ -298,6 +371,14 @@ lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
 	case CMD_GEN_REQUEST64_CR:
 	case CMD_GEN_REQUEST64_CX:
 	case CMD_XMIT_ELS_RSP64_CX:
+	case DSSCMD_IWRITE64_CR:
+	case DSSCMD_IWRITE64_CX:
+	case DSSCMD_IREAD64_CR:
+	case DSSCMD_IREAD64_CX:
+	case DSSCMD_INVALIDATE_DEK:
+	case DSSCMD_SET_KEK:
+	case DSSCMD_GET_KEK_ID:
+	case DSSCMD_GEN_XFER:
 		type = LPFC_SOL_IOCB;
 		break;
 	case CMD_ABORT_XRI_CN:
@@ -2629,6 +2710,56 @@ lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
 	return retval;
 }
 
+/**
+ * lpfc_sli_brdready_s4 - Check for sli4 host ready status
+ * @phba: Pointer to HBA context object.
+ * @mask: Bit mask to be checked.
+ *
+ * This function checks the host status register to check if the HBA is
+ * ready. This function will wait in a loop for the HBA to be ready. If
+ * the HBA is not ready, the function will reset the HBA PCI function
+ * again. The function returns 1 when the HBA fails to become ready;
+ * otherwise it returns zero.
+ **/
+static int
+lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
+{
+	uint32_t status;
+	int retval = 0;
+
+	/* Read the HBA Host Status Register */
+	status = lpfc_sli4_post_status_check(phba);
+
+	if (status) {
+		phba->pport->port_state = LPFC_VPORT_UNKNOWN;
+		lpfc_sli_brdrestart(phba);
+		status = lpfc_sli4_post_status_check(phba);
+	}
+
+	/* Check to see if any errors occurred during init */
+	if (status) {
+		phba->link_state = LPFC_HBA_ERROR;
+		retval = 1;
+	} else
+		phba->sli4_hba.intr_enable = 0;
+
+	return retval;
+}
+
+/**
+ * lpfc_sli_brdready - Wrapper func for checking the hba readiness
+ * @phba: Pointer to HBA context object.
+ * @mask: Bit mask to be checked.
+ *
+ * This routine wraps the actual SLI3 or SLI4 hba readiness check routine
+ * through the API jump table function pointer in the lpfc_hba struct.
+ **/
+int
+lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
+{
+	return phba->lpfc_sli_brdready(phba, mask);
+}
+
 #define BARRIER_TEST_PATTERN (0xdeadbeef)
 
 /**
@@ -2863,7 +2994,66 @@ lpfc_sli_brdreset(struct lpfc_hba *phba)
 }
 
 /**
- * lpfc_sli_brdrestart - Restart the HBA
+ * lpfc_sli4_brdreset - Reset a sli-4 HBA
+ * @phba: Pointer to HBA context object.
+ *
+ * This function resets a SLI4 HBA. This function disables PCI layer parity
+ * checking while it resets the device. The caller is not required to hold
+ * any locks.
+ *
+ * This function returns 0 always.
+ **/
+int
+lpfc_sli4_brdreset(struct lpfc_hba *phba)
+{
+	struct lpfc_sli *psli = &phba->sli;
+	uint16_t cfg_value;
+	uint8_t qindx;
+
+	/* Reset HBA */
+	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+			"0295 Reset HBA Data: x%x x%x\n",
+			phba->pport->port_state, psli->sli_flag);
+
+	/* perform board reset */
+	phba->fc_eventTag = 0;
+	phba->pport->fc_myDID = 0;
+	phba->pport->fc_prevDID = 0;
+
+	/* Turn off parity checking and serr during the physical reset */
+	pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
+	pci_write_config_word(phba->pcidev, PCI_COMMAND,
+			      (cfg_value &
+			      ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
+
+	spin_lock_irq(&phba->hbalock);
+	psli->sli_flag &= ~(LPFC_PROCESS_LA);
+	phba->fcf.fcf_flag = 0;
+	/* Clean up the child queue list for the CQs */
+	list_del_init(&phba->sli4_hba.mbx_wq->list);
+	list_del_init(&phba->sli4_hba.els_wq->list);
+	list_del_init(&phba->sli4_hba.hdr_rq->list);
+	list_del_init(&phba->sli4_hba.dat_rq->list);
+	list_del_init(&phba->sli4_hba.mbx_cq->list);
+	list_del_init(&phba->sli4_hba.els_cq->list);
+	list_del_init(&phba->sli4_hba.rxq_cq->list);
+	for (qindx = 0; qindx < phba->cfg_fcp_wq_count; qindx++)
+		list_del_init(&phba->sli4_hba.fcp_wq[qindx]->list);
+	for (qindx = 0; qindx < phba->cfg_fcp_eq_count; qindx++)
+		list_del_init(&phba->sli4_hba.fcp_cq[qindx]->list);
+	spin_unlock_irq(&phba->hbalock);
+
+	/* Now physically reset the device */
+	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+			"0389 Performing PCI function reset!\n");
+	/* Perform FCoE PCI function reset */
+	lpfc_pci_function_reset(phba);
+
+	return 0;
+}
+
+/**
+ * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba
  * @phba: Pointer to HBA context object.
  *
  * This function is called in the SLI initialization code path to
@@ -2875,8 +3065,8 @@ lpfc_sli_brdreset(struct lpfc_hba *phba)
  * The function does not guarantee completion of MBX_RESTART mailbox
  * command before the return of this function.
  **/
-int
-lpfc_sli_brdrestart(struct lpfc_hba *phba)
+static int
+lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
 {
 	MAILBOX_t *mb;
 	struct lpfc_sli *psli;
@@ -2915,7 +3105,7 @@ lpfc_sli_brdrestart(struct lpfc_hba *phba)
 	lpfc_sli_brdreset(phba);
 	phba->pport->stopped = 0;
 	phba->link_state = LPFC_INIT_START;
-
+	phba->hba_flag = 0;
 	spin_unlock_irq(&phba->hbalock);
 
 	memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
@@ -2929,6 +3119,55 @@ lpfc_sli_brdrestart(struct lpfc_hba *phba)
 	return 0;
 }
 
+/**
+ * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called in the SLI initialization code path to restart
+ * a SLI4 HBA. The caller is not required to hold any lock.
+ * At the end of the function, it calls lpfc_hba_down_post function to
+ * free any pending commands.
+ **/
+static int
+lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
+{
+	struct lpfc_sli *psli = &phba->sli;
+
+	/* Restart HBA */
+	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+			"0296 Restart HBA Data: x%x x%x\n",
+			phba->pport->port_state, psli->sli_flag);
+
+	lpfc_sli4_brdreset(phba);
+
+	spin_lock_irq(&phba->hbalock);
+	phba->pport->stopped = 0;
+	phba->link_state = LPFC_INIT_START;
+	phba->hba_flag = 0;
+	spin_unlock_irq(&phba->hbalock);
+
+	memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
+	psli->stats_start = get_seconds();
+
+	lpfc_hba_down_post(phba);
+
+	return 0;
+}
+
+/**
+ * lpfc_sli_brdrestart - Wrapper func for restarting hba
+ * @phba: Pointer to HBA context object.
+ *
+ * This routine wraps the actual SLI3 or SLI4 hba restart routine through
+ * the API jump table function pointer in the lpfc_hba struct.
+ **/
+int
+lpfc_sli_brdrestart(struct lpfc_hba *phba)
+{
+	return phba->lpfc_sli_brdrestart(phba);
+}
+
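
These one-line wrappers are the dispatch half of the jump-table scheme; the
other half is an init-time routine that stores the _s3 or _s4 implementation
in the lpfc_hba struct. A sketch of that setup side, under the assumption
that it mirrors lpfc_scsi_api_table_setup() in lpfc_scsi.c (the function
name below is hypothetical):

    static int example_sli_api_table_setup(struct lpfc_hba *phba,
                                           uint8_t dev_grp)
    {
            switch (dev_grp) {
            case LPFC_PCI_DEV_LP:
                    phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
                    phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
                    break;
            case LPFC_PCI_DEV_OC:
                    phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
                    phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
                    break;
            default:
                    return -ENODEV;
            }
            return 0;
    }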
 /**
  * lpfc_sli_chipset_init - Wait for the restart of the HBA after a restart
  * @phba: Pointer to HBA context object.
@@ -3353,125 +3592,607 @@ lpfc_sli_hba_setup_error:
 	return rc;
 }
 
-
 /**
- * lpfc_mbox_timeout - Timeout call back function for mbox timer
- * @ptr: context object - pointer to hba structure.
- *
- * This is the callback function for mailbox timer. The mailbox
- * timer is armed when a new mailbox command is issued and the timer
- * is deleted when the mailbox complete. The function is called by
- * the kernel timer code when a mailbox does not complete within
- * expected time. This function wakes up the worker thread to
- * process the mailbox timeout and returns. All the processing is
- * done by the worker thread function lpfc_mbox_timeout_handler.
+ * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region
+ * @phba: Pointer to HBA context object.
+ * @mboxq: mailbox pointer.
+ *
+ * This function issues a dump mailbox command to read config region
+ * 23, parses the records in the region, and populates the driver
+ * data structure.
  **/
-void
-lpfc_mbox_timeout(unsigned long ptr)
+static int
+lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba,
+		LPFC_MBOXQ_t *mboxq)
 {
-	struct lpfc_hba  *phba = (struct lpfc_hba *) ptr;
-	unsigned long iflag;
-	uint32_t tmo_posted;
+	struct lpfc_dmabuf *mp;
+	struct lpfc_mqe *mqe;
+	uint32_t data_length;
+	int rc;
 
-	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
-	tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO;
-	if (!tmo_posted)
-		phba->pport->work_port_events |= WORKER_MBOX_TMO;
-	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
+	/* Program the default value of vlan_id and fc_map */
+	phba->valid_vlan = 0;
+	phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
+	phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
+	phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
 
-	if (!tmo_posted)
-		lpfc_worker_wake_up(phba);
-	return;
-}
+	mqe = &mboxq->u.mqe;
+	if (lpfc_dump_fcoe_param(phba, mboxq))
+		return -ENOMEM;
+
+	mp = (struct lpfc_dmabuf *) mboxq->context1;
+	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+
+	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
+			"(%d):2571 Mailbox cmd x%x Status x%x "
+			"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
+			"x%x x%x x%x x%x x%x x%x x%x x%x x%x "
+			"CQ: x%x x%x x%x x%x\n",
+			mboxq->vport ? mboxq->vport->vpi : 0,
+			bf_get(lpfc_mqe_command, mqe),
+			bf_get(lpfc_mqe_status, mqe),
+			mqe->un.mb_words[0], mqe->un.mb_words[1],
+			mqe->un.mb_words[2], mqe->un.mb_words[3],
+			mqe->un.mb_words[4], mqe->un.mb_words[5],
+			mqe->un.mb_words[6], mqe->un.mb_words[7],
+			mqe->un.mb_words[8], mqe->un.mb_words[9],
+			mqe->un.mb_words[10], mqe->un.mb_words[11],
+			mqe->un.mb_words[12], mqe->un.mb_words[13],
+			mqe->un.mb_words[14], mqe->un.mb_words[15],
+			mqe->un.mb_words[16], mqe->un.mb_words[50],
+			mboxq->mcqe.word0,
+			mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
+			mboxq->mcqe.trailer);
+
+	if (rc) {
+		lpfc_mbuf_free(phba, mp->virt, mp->phys);
+		kfree(mp);
+		return -EIO;
+	}
+	data_length = mqe->un.mb_words[5];
+	if (data_length > DMP_FCOEPARAM_RGN_SIZE)
+		return -EIO;
 
+	lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
+	lpfc_mbuf_free(phba, mp->virt, mp->phys);
+	kfree(mp);
+	return 0;
+}
 
 /**
- * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout
- * @phba: Pointer to HBA context object.
+ * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data
+ * @phba: pointer to lpfc hba data structure.
+ * @mboxq: pointer to the LPFC_MBOXQ_t structure.
+ * @vpd: pointer to the memory to hold resulting port vpd data.
+ * @vpd_size: On input, the number of bytes allocated to @vpd.
+ *	      On output, the number of data bytes in @vpd.
  *
- * This function is called from worker thread when a mailbox command times out.
- * The caller is not required to hold any locks. This function will reset the
- * HBA and recover all the pending commands.
+ * This routine executes a READ_REV SLI4 mailbox command.  In
+ * addition, this routine gets the port vpd data.
+ *
+ * Return codes
+ * 	0 - successful
+ * 	ENOMEM - could not allocate memory.
  **/
-void
-lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
+static int
+lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
+		    uint8_t *vpd, uint32_t *vpd_size)
 {
-	LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
-	MAILBOX_t *mb = &pmbox->mb;
-	struct lpfc_sli *psli = &phba->sli;
-	struct lpfc_sli_ring *pring;
+	int rc = 0;
+	uint32_t dma_size;
+	struct lpfc_dmabuf *dmabuf;
+	struct lpfc_mqe *mqe;
 
-	/* Check the pmbox pointer first.  There is a race condition
-	 * between the mbox timeout handler getting executed in the
-	 * worklist and the mailbox actually completing. When this
-	 * race condition occurs, the mbox_active will be NULL.
+	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+	if (!dmabuf)
+		return -ENOMEM;
+
+	/*
+	 * Get a DMA buffer for the vpd data resulting from the READ_REV
+	 * mailbox command.
 	 */
-	spin_lock_irq(&phba->hbalock);
-	if (pmbox == NULL) {
-		lpfc_printf_log(phba, KERN_WARNING,
-				LOG_MBOX | LOG_SLI,
-				"0353 Active Mailbox cleared - mailbox timeout "
-				"exiting\n");
-		spin_unlock_irq(&phba->hbalock);
-		return;
+	dma_size = *vpd_size;
+	dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
+					  dma_size,
+					  &dmabuf->phys,
+					  GFP_KERNEL);
+	if (!dmabuf->virt) {
+		kfree(dmabuf);
+		return -ENOMEM;
 	}
+	memset(dmabuf->virt, 0, dma_size);
 
-	/* Mbox cmd <mbxCommand> timeout */
-	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
-			"0310 Mailbox command x%x timeout Data: x%x x%x x%p\n",
-			mb->mbxCommand,
-			phba->pport->port_state,
-			phba->sli.sli_flag,
-			phba->sli.mbox_active);
-	spin_unlock_irq(&phba->hbalock);
-
-	/* Setting state unknown so lpfc_sli_abort_iocb_ring
-	 * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing
-	 * it to fail all oustanding SCSI IO.
+	/*
+	 * The SLI4 implementation of READ_REV conflicts at word1,
+	 * bits 31:16 and SLI4 adds vpd functionality not present
+	 * in SLI3.  This code corrects the conflicts.
 	 */
-	spin_lock_irq(&phba->pport->work_port_lock);
-	phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
-	spin_unlock_irq(&phba->pport->work_port_lock);
-	spin_lock_irq(&phba->hbalock);
-	phba->link_state = LPFC_LINK_UNKNOWN;
-	psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
-	spin_unlock_irq(&phba->hbalock);
+	lpfc_read_rev(phba, mboxq);
+	mqe = &mboxq->u.mqe;
+	mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys);
+	mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys);
+	mqe->un.read_rev.word1 &= 0x0000FFFF;
+	bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1);
+	bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size);
+
+	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+	if (rc) {
+		dma_free_coherent(&phba->pcidev->dev, dma_size,
+				  dmabuf->virt, dmabuf->phys);
+		kfree(dmabuf);
+		return -EIO;
+	}
 
-	pring = &psli->ring[psli->fcp_ring];
-	lpfc_sli_abort_iocb_ring(phba, pring);
+	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
+			"(%d):0380 Mailbox cmd x%x Status x%x "
+			"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
+			"x%x x%x x%x x%x x%x x%x x%x x%x x%x "
+			"CQ: x%x x%x x%x x%x\n",
+			mboxq->vport ? mboxq->vport->vpi : 0,
+			bf_get(lpfc_mqe_command, mqe),
+			bf_get(lpfc_mqe_status, mqe),
+			mqe->un.mb_words[0], mqe->un.mb_words[1],
+			mqe->un.mb_words[2], mqe->un.mb_words[3],
+			mqe->un.mb_words[4], mqe->un.mb_words[5],
+			mqe->un.mb_words[6], mqe->un.mb_words[7],
+			mqe->un.mb_words[8], mqe->un.mb_words[9],
+			mqe->un.mb_words[10], mqe->un.mb_words[11],
+			mqe->un.mb_words[12], mqe->un.mb_words[13],
+			mqe->un.mb_words[14], mqe->un.mb_words[15],
+			mqe->un.mb_words[16], mqe->un.mb_words[50],
+			mboxq->mcqe.word0,
+			mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
+			mboxq->mcqe.trailer);
 
-	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
-			"0345 Resetting board due to mailbox timeout\n");
+	/*
+	 * The available vpd length cannot be bigger than the
+	 * DMA buffer passed to the port.  Catch the less than
+	 * case and update the caller's size.
+	 */
+	if (mqe->un.read_rev.avail_vpd_len < *vpd_size)
+		*vpd_size = mqe->un.read_rev.avail_vpd_len;
 
-	/* Reset the HBA device */
-	lpfc_reset_hba(phba);
+	lpfc_sli_pcimem_bcopy(dmabuf->virt, vpd, *vpd_size);
+	dma_free_coherent(&phba->pcidev->dev, dma_size,
+			  dmabuf->virt, dmabuf->phys);
+	kfree(dmabuf);
+	return 0;
 }
 
 /**
- * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware
- * @phba: Pointer to HBA context object.
- * @pmbox: Pointer to mailbox object.
- * @flag: Flag indicating how the mailbox need to be processed.
+ * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues
+ * @phba: pointer to lpfc hba data structure.
  *
- * This function is called by discovery code and HBA management code
- * to submit a mailbox command to firmware with SLI-3 interface spec. This
- * function gets the hbalock to protect the data structures.
- * The mailbox command can be submitted in polling mode, in which case
- * this function will wait in a polling loop for the completion of the
- * mailbox.
- * If the mailbox is submitted in no_wait mode (not polling) the
- * function will submit the command and returns immediately without waiting
- * for the mailbox completion. The no_wait is supported only when HBA
- * is in SLI2/SLI3 mode - interrupts are enabled.
- * The SLI interface allows only one mailbox pending at a time. If the
- * mailbox is issued in polling mode and there is already a mailbox
- * pending, then the function will return an error. If the mailbox is issued
- * in NO_WAIT mode and there is a mailbox pending already, the function
- * will return MBX_BUSY after queuing the mailbox into mailbox queue.
- * The sli layer owns the mailbox object until the completion of mailbox
- * command if this function return MBX_BUSY or MBX_SUCCESS. For all other
- * return codes the caller owns the mailbox command after the return of
- * the function.
+ * This routine is called to explicitly arm the SLI4 device's completion and
+ * event queues
+ **/
+static void
+lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
+{
+	uint8_t fcp_eqidx;
+
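+	/* Rearm each CQ and then each EQ; the release doorbell write
+	 * re-enables interrupt generation on the queue.
+	 */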
+	lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM);
+	lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM);
+	lpfc_sli4_cq_release(phba->sli4_hba.rxq_cq, LPFC_QUEUE_REARM);
+	for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++)
+		lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx],
+				     LPFC_QUEUE_REARM);
+	lpfc_sli4_eq_release(phba->sli4_hba.sp_eq, LPFC_QUEUE_REARM);
+	for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++)
+		lpfc_sli4_eq_release(phba->sli4_hba.fp_eq[fcp_eqidx],
+				     LPFC_QUEUE_REARM);
+}
+
+/**
+ * lpfc_sli4_hba_setup - SLI4 device initialization PCI function
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is the main SLI4 device initialization PCI function. This
+ * function is called by the HBA initialization code, HBA reset code and
+ * HBA error attention handler code. Caller is not required to hold any
+ * locks.
+ **/
+int
+lpfc_sli4_hba_setup(struct lpfc_hba *phba)
+{
+	int rc;
+	LPFC_MBOXQ_t *mboxq;
+	struct lpfc_mqe *mqe;
+	uint8_t *vpd;
+	uint32_t vpd_size;
+	uint32_t ftr_rsp = 0;
+	struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
+	struct lpfc_vport *vport = phba->pport;
+	struct lpfc_dmabuf *mp;
+
+	/* Perform a PCI function reset to start from clean */
+	rc = lpfc_pci_function_reset(phba);
+	if (unlikely(rc))
+		return -ENODEV;
+
+	/* Check the HBA Host Status Register for readiness */
+	rc = lpfc_sli4_post_status_check(phba);
+	if (unlikely(rc))
+		return -ENODEV;
+	else {
+		spin_lock_irq(&phba->hbalock);
+		phba->sli.sli_flag |= LPFC_SLI_ACTIVE;
+		spin_unlock_irq(&phba->hbalock);
+	}
+
+	/*
+	 * Allocate a single mailbox container for initializing the
+	 * port.
+	 */
+	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mboxq)
+		return -ENOMEM;
+
+	/*
+	 * Continue initialization with default values even if driver failed
+	 * to read FCoE param config regions
+	 */
+	if (lpfc_sli4_read_fcoe_params(phba, mboxq))
+		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
+			"2570 Failed to read FCoE parameters \n");
+
+	/* Issue READ_REV to collect vpd and FW information. */
+	vpd_size = PAGE_SIZE;
+	vpd = kzalloc(vpd_size, GFP_KERNEL);
+	if (!vpd) {
+		rc = -ENOMEM;
+		goto out_free_mbox;
+	}
+
+	rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
+	if (unlikely(rc))
+		goto out_free_vpd;
+
+	mqe = &mboxq->u.mqe;
+	if ((bf_get(lpfc_mbx_rd_rev_sli_lvl,
+		    &mqe->un.read_rev) != LPFC_SLI_REV4) ||
+	    (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev) == 0)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+			"0376 READ_REV Error. SLI Level %d "
+			"FCoE enabled %d\n",
+			bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev),
+			bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev));
+		rc = -EIO;
+		goto out_free_vpd;
+	}
+	/* Single threaded at this point; the lock is not strictly needed */
+	spin_lock_irq(&phba->hbalock);
+	phba->hba_flag |= HBA_FCOE_SUPPORT;
+	spin_unlock_irq(&phba->hbalock);
+	/*
+	 * Evaluate the read rev and vpd data. Populate the driver
+	 * state with the results. If this routine fails, the failure
+	 * is not fatal as the driver will use generic values.
+	 */
+	rc = lpfc_parse_vpd(phba, vpd, vpd_size);
+	if (unlikely(!rc)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+				"0377 Error %d parsing vpd. "
+				"Using defaults.\n", rc);
+		rc = 0;
+	}
+
+	/* By now, the SLI revision should have been determined; hard-code it */
+	phba->sli_rev = LPFC_SLI_REV4;
+
+	/*
+	 * Discover the port's supported feature set and match it against the
+	 * hosts requests.
+	 */
+	lpfc_request_features(phba, mboxq);
+	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+	if (unlikely(rc)) {
+		rc = -EIO;
+		goto out_free_vpd;
+	}
+
+	/*
+	 * The port must support FCP initiator mode as this is the
+	 * only mode running in the host.
+	 */
+	if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
+				"0378 No support for fcpi mode.\n");
+		ftr_rsp++;
+	}
+
+	/*
+	 * If the port cannot support the host's requested features
+	 * then turn off the global config parameters to disable the
+	 * feature in the driver.  This is not a fatal error.
+	 */
+	if ((phba->cfg_enable_bg) &&
+	    !(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
+		ftr_rsp++;
+
+	if (phba->max_vpi && phba->cfg_enable_npiv &&
+	    !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
+		ftr_rsp++;
+
+	if (ftr_rsp) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
+				"0379 Feature Mismatch Data: x%08x %08x "
+				"x%x x%x x%x\n", mqe->un.req_ftrs.word2,
+				mqe->un.req_ftrs.word3, phba->cfg_enable_bg,
+				phba->cfg_enable_npiv, phba->max_vpi);
+		if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
+			phba->cfg_enable_bg = 0;
+		if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
+			phba->cfg_enable_npiv = 0;
+	}
+
+	/* These SLI3 features are assumed in SLI4 */
+	spin_lock_irq(&phba->hbalock);
+	phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
+	spin_unlock_irq(&phba->hbalock);
+
+	/* Read the port's service parameters. */
+	lpfc_read_sparam(phba, mboxq, vport->vpi);
+	mboxq->vport = vport;
+	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+	mp = (struct lpfc_dmabuf *) mboxq->context1;
+	if (rc == MBX_SUCCESS) {
+		memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
+		rc = 0;
+	}
+
+	/*
+	 * This memory was allocated by the lpfc_read_sparam routine. Release
+	 * it to the mbuf pool.
+	 */
+	lpfc_mbuf_free(phba, mp->virt, mp->phys);
+	kfree(mp);
+	mboxq->context1 = NULL;
+	if (unlikely(rc)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+				"0382 READ_SPARAM command failed "
+				"status %d, mbxStatus x%x\n",
+				rc, bf_get(lpfc_mqe_status, mqe));
+		phba->link_state = LPFC_HBA_ERROR;
+		rc = -EIO;
+		goto out_free_vpd;
+	}
+
+	if (phba->cfg_soft_wwnn)
+		u64_to_wwn(phba->cfg_soft_wwnn,
+			   vport->fc_sparam.nodeName.u.wwn);
+	if (phba->cfg_soft_wwpn)
+		u64_to_wwn(phba->cfg_soft_wwpn,
+			   vport->fc_sparam.portName.u.wwn);
+	memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
+	       sizeof(struct lpfc_name));
+	memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
+	       sizeof(struct lpfc_name));
+
+	/* Update the fc_host data structures with new wwn. */
+	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
+	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
+
+	/* Register SGL pool to the device using non-embedded mailbox command */
+	rc = lpfc_sli4_post_sgl_list(phba);
+	if (unlikely(rc)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+				"0582 Error %d during sgl post operation", rc);
+		rc = -ENODEV;
+		goto out_free_vpd;
+	}
+
+	/* Register SCSI SGL pool to the device */
+	rc = lpfc_sli4_repost_scsi_sgl_list(phba);
+	if (unlikely(rc)) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
+				"0383 Error %d during scsi sgl post opeation",
+				rc);
+		/* Some Scsi buffers were moved to the abort scsi list */
+		/* A pci function reset will repost them */
+		rc = -ENODEV;
+		goto out_free_vpd;
+	}
+
+	/* Post the rpi header region to the device. */
+	rc = lpfc_sli4_post_all_rpi_hdrs(phba);
+	if (unlikely(rc)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+				"0393 Error %d during rpi post operation\n",
+				rc);
+		rc = -ENODEV;
+		goto out_free_vpd;
+	}
+	/* Temporary initialization of lpfc_fip_flag to non-fip */
+	bf_set(lpfc_fip_flag, &phba->sli4_hba.sli4_flags, 0);
+
+	/* Set up all the queues to the device */
+	rc = lpfc_sli4_queue_setup(phba);
+	if (unlikely(rc)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+				"0381 Error %d during queue setup.\n ", rc);
+		goto out_stop_timers;
+	}
+
+	/* Arm the CQs and then EQs on device */
+	lpfc_sli4_arm_cqeq_intr(phba);
+
+	/* Indicate device interrupt mode */
+	phba->sli4_hba.intr_enable = 1;
+
+	/* Allow asynchronous mailbox command to go through */
+	spin_lock_irq(&phba->hbalock);
+	phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
+	spin_unlock_irq(&phba->hbalock);
+
+	/* Post receive buffers to the device */
+	lpfc_sli4_rb_setup(phba);
+
+	/* Start the ELS watchdog timer */
+	/*
+	 * The driver for SLI4 is not yet ready to process timeouts
+	 * or interrupts.  Once it is, the comment bars can be removed.
+	 */
+	/* mod_timer(&vport->els_tmofunc,
+	 *           jiffies + HZ * (phba->fc_ratov*2)); */
+
+	/* Start heart beat timer */
+	mod_timer(&phba->hb_tmofunc,
+		  jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
+	phba->hb_outstanding = 0;
+	phba->last_completion_time = jiffies;
+
+	/* Start error attention (ERATT) polling timer */
+	mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);
+
+	/*
+	 * The port is ready, set the host's link state to LINK_DOWN
+	 * in preparation for link interrupts.
+	 */
+	lpfc_init_link(phba, mboxq, phba->cfg_topology, phba->cfg_link_speed);
+	mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+	lpfc_set_loopback_flag(phba);
+	/* Change driver state to LPFC_LINK_DOWN right before init link */
+	spin_lock_irq(&phba->hbalock);
+	phba->link_state = LPFC_LINK_DOWN;
+	spin_unlock_irq(&phba->hbalock);
+	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
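+	/* MBX_BUSY or MBX_SUCCESS means the INIT_LINK request was accepted;
+	 * only MBX_NOT_FINISHED is treated as a failure here.
+	 */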
+	if (unlikely(rc != MBX_NOT_FINISHED)) {
+		kfree(vpd);
+		return 0;
+	} else
+		rc = -EIO;
+
+	/* Unset all the queues set up in this routine when error out */
+	if (rc)
+		lpfc_sli4_queue_unset(phba);
+
+out_stop_timers:
+	if (rc)
+		lpfc_stop_hba_timers(phba);
+out_free_vpd:
+	kfree(vpd);
+out_free_mbox:
+	mempool_free(mboxq, phba->mbox_mem_pool);
+	return rc;
+}
+
+/**
+ * lpfc_mbox_timeout - Timeout call back function for mbox timer
+ * @ptr: context object - pointer to hba structure.
+ *
+ * This is the callback function for the mailbox timer. The mailbox
+ * timer is armed when a new mailbox command is issued and the timer
+ * is deleted when the mailbox completes. The function is called by
+ * the kernel timer code when a mailbox does not complete within
+ * expected time. This function wakes up the worker thread to
+ * process the mailbox timeout and returns. All the processing is
+ * done by the worker thread function lpfc_mbox_timeout_handler.
+ **/
+void
+lpfc_mbox_timeout(unsigned long ptr)
+{
+	struct lpfc_hba  *phba = (struct lpfc_hba *) ptr;
+	unsigned long iflag;
+	uint32_t tmo_posted;
+
+	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
+	tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO;
+	if (!tmo_posted)
+		phba->pport->work_port_events |= WORKER_MBOX_TMO;
+	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
+
+	if (!tmo_posted)
+		lpfc_worker_wake_up(phba);
+	return;
+}
+
+
+/**
+ * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called from worker thread when a mailbox command times out.
+ * The caller is not required to hold any locks. This function will reset the
+ * HBA and recover all the pending commands.
+ **/
+void
+lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
+{
+	LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
+	MAILBOX_t *mb = &pmbox->mb;
+	struct lpfc_sli *psli = &phba->sli;
+	struct lpfc_sli_ring *pring;
+
+	/* Check the pmbox pointer first.  There is a race condition
+	 * between the mbox timeout handler getting executed in the
+	 * worklist and the mailbox actually completing. When this
+	 * race condition occurs, the mbox_active will be NULL.
+	 */
+	spin_lock_irq(&phba->hbalock);
+	if (pmbox == NULL) {
+		lpfc_printf_log(phba, KERN_WARNING,
+				LOG_MBOX | LOG_SLI,
+				"0353 Active Mailbox cleared - mailbox timeout "
+				"exiting\n");
+		spin_unlock_irq(&phba->hbalock);
+		return;
+	}
+
+	/* Mbox cmd <mbxCommand> timeout */
+	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+			"0310 Mailbox command x%x timeout Data: x%x x%x x%p\n",
+			mb->mbxCommand,
+			phba->pport->port_state,
+			phba->sli.sli_flag,
+			phba->sli.mbox_active);
+	spin_unlock_irq(&phba->hbalock);
+
+	/* Setting state unknown so lpfc_sli_abort_iocb_ring
+	 * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing
+	 * it to fail all outstanding SCSI IO.
+	 */
+	spin_lock_irq(&phba->pport->work_port_lock);
+	phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
+	spin_unlock_irq(&phba->pport->work_port_lock);
+	spin_lock_irq(&phba->hbalock);
+	phba->link_state = LPFC_LINK_UNKNOWN;
+	psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
+	spin_unlock_irq(&phba->hbalock);
+
+	pring = &psli->ring[psli->fcp_ring];
+	lpfc_sli_abort_iocb_ring(phba, pring);
+
+	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+			"0345 Resetting board due to mailbox timeout\n");
+
+	/* Reset the HBA device */
+	lpfc_reset_hba(phba);
+}
+
+/**
+ * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware
+ * @phba: Pointer to HBA context object.
+ * @pmbox: Pointer to mailbox object.
+ * @flag: Flag indicating how the mailbox need to be processed.
+ *
+ * This function is called by discovery code and HBA management code
+ * to submit a mailbox command to firmware with SLI-3 interface spec. This
+ * function gets the hbalock to protect the data structures.
+ * The mailbox command can be submitted in polling mode, in which case
+ * this function will wait in a polling loop for the completion of the
+ * mailbox.
+ * If the mailbox is submitted in no_wait mode (not polling) the
+ * function will submit the command and returns immediately without waiting
+ * for the mailbox completion. The no_wait is supported only when HBA
+ * is in SLI2/SLI3 mode - interrupts are enabled.
+ * The SLI interface allows only one mailbox pending at a time. If the
+ * mailbox is issued in polling mode and there is already a mailbox
+ * pending, then the function will return an error. If the mailbox is issued
+ * in NO_WAIT mode and there is a mailbox pending already, the function
+ * will return MBX_BUSY after queuing the mailbox into mailbox queue.
+ * The sli layer owns the mailbox object until the completion of mailbox
+ * command if this function return MBX_BUSY or MBX_SUCCESS. For all other
+ * return codes the caller owns the mailbox command after the return of
+ * the function.
  **/
 static int
 lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
@@ -3812,12 +4533,419 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
 
 out_not_finished:
 	if (processing_queue) {
-		pmbox->mb.mbxStatus = MBX_NOT_FINISHED;
+		pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED;
 		lpfc_mbox_cmpl_put(phba, pmbox);
 	}
 	return MBX_NOT_FINISHED;
 }
 
+/**
+ * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox
+ * @phba: Pointer to HBA context object.
+ * @mboxq: Pointer to mailbox object.
+ *
+ * The function posts a mailbox to the port.  The mailbox is expected
+ * to be completely filled in and ready for the port to operate on it.
+ * This routine executes a synchronous completion operation on the
+ * mailbox by polling for its completion.
+ *
+ * The caller must not be holding any locks when calling this routine.
+ *
+ * Returns:
+ *	MBX_SUCCESS - mailbox posted successfully
+ *	Any of the MBX error values.
+ **/
+static int
+lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+{
+	int rc = MBX_SUCCESS;
+	unsigned long iflag;
+	uint32_t db_ready;
+	uint32_t mcqe_status;
+	uint32_t mbx_cmnd;
+	unsigned long timeout;
+	struct lpfc_sli *psli = &phba->sli;
+	struct lpfc_mqe *mb = &mboxq->u.mqe;
+	struct lpfc_bmbx_create *mbox_rgn;
+	struct dma_address *dma_address;
+	struct lpfc_register bmbx_reg;
+
+	/*
+	 * Only one mailbox can be active to the bootstrap mailbox region
+	 * at a time and there is no queueing provided.
+	 */
+	spin_lock_irqsave(&phba->hbalock, iflag);
+	if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
+		spin_unlock_irqrestore(&phba->hbalock, iflag);
+		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+				"(%d):2532 Mailbox command x%x (x%x) "
+				"cannot issue Data: x%x x%x\n",
+				mboxq->vport ? mboxq->vport->vpi : 0,
+				mboxq->u.mb.mbxCommand,
+				lpfc_sli4_mbox_opcode_get(phba, mboxq),
+				psli->sli_flag, MBX_POLL);
+		return MBXERR_ERROR;
+	}
+	/* The driver grabs the token and owns it until release */
+	psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
+	phba->sli.mbox_active = mboxq;
+	spin_unlock_irqrestore(&phba->hbalock, iflag);
+
+	/*
+	 * Initialize the bootstrap memory region to avoid stale data areas
+	 * in the mailbox post.  Then copy the caller's mailbox contents to
+	 * the bmbx mailbox region.
+	 */
+	mbx_cmnd = bf_get(lpfc_mqe_command, mb);
+	memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create));
+	lpfc_sli_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt,
+			      sizeof(struct lpfc_mqe));
+
+	/* Post the high mailbox dma address to the port and wait for ready. */
+	dma_address = &phba->sli4_hba.bmbx.dma_address;
+	writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);
+
+	timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mbx_cmnd)
+				   * 1000) + jiffies;
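+	/* Poll the bootstrap mailbox ready bit, sleeping between reads,
+	 * until the port acknowledges the address or the command times out.
+	 */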
+	do {
+		bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
+		db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
+		if (!db_ready)
+			msleep(2);
+
+		if (time_after(jiffies, timeout)) {
+			rc = MBXERR_ERROR;
+			goto exit;
+		}
+	} while (!db_ready);
+
+	/* Post the low mailbox dma address to the port. */
+	writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr);
+	timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mbx_cmnd)
+				   * 1000) + jiffies;
+	do {
+		bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
+		db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
+		if (!db_ready)
+			msleep(2);
+
+		if (time_after(jiffies, timeout)) {
+			rc = MBXERR_ERROR;
+			goto exit;
+		}
+	} while (!db_ready);
+
+	/*
+	 * Read the CQ to ensure the mailbox has completed.
+	 * If so, update the mailbox status so that the upper layers
+	 * can complete the request normally.
+	 */
+	lpfc_sli_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb,
+			      sizeof(struct lpfc_mqe));
+	mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt;
+	lpfc_sli_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
+			      sizeof(struct lpfc_mcqe));
+	mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);
+
+	/* Prefix the mailbox status with range x4000 to note SLI4 status. */
+	if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
+		bf_set(lpfc_mqe_status, mb, LPFC_MBX_ERROR_RANGE | mcqe_status);
+		rc = MBXERR_ERROR;
+	}
+
+	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
+			"(%d):0356 Mailbox cmd x%x (x%x) Status x%x "
+			"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x"
+			" x%x x%x CQ: x%x x%x x%x x%x\n",
+			mboxq->vport ? mboxq->vport->vpi : 0,
+			mbx_cmnd, lpfc_sli4_mbox_opcode_get(phba, mboxq),
+			bf_get(lpfc_mqe_status, mb),
+			mb->un.mb_words[0], mb->un.mb_words[1],
+			mb->un.mb_words[2], mb->un.mb_words[3],
+			mb->un.mb_words[4], mb->un.mb_words[5],
+			mb->un.mb_words[6], mb->un.mb_words[7],
+			mb->un.mb_words[8], mb->un.mb_words[9],
+			mb->un.mb_words[10], mb->un.mb_words[11],
+			mb->un.mb_words[12], mboxq->mcqe.word0,
+			mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
+			mboxq->mcqe.trailer);
+exit:
+	/* We hold the token; release it and the active mailbox under the lock */
+	spin_lock_irqsave(&phba->hbalock, iflag);
+	psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+	phba->sli.mbox_active = NULL;
+	spin_unlock_irqrestore(&phba->hbalock, iflag);
+	return rc;
+}
+
+/**
+ * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware
+ * @phba: Pointer to HBA context object.
+ * @pmbox: Pointer to mailbox object.
+ * @flag: Flag indicating how the mailbox need to be processed.
+ *
+ * This function is called by discovery code and HBA management code to submit
+ * a mailbox command to firmware with SLI-4 interface spec.
+ *
+ * Return codes: MBX_SUCCESS, MBX_BUSY, MBX_NOT_FINISHED, or an error code.
+ * For all return codes other than MBX_BUSY and MBX_SUCCESS, the caller owns
+ * the mailbox command after the return of the function.
+ **/
+static int
+lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
+		       uint32_t flag)
+{
+	struct lpfc_sli *psli = &phba->sli;
+	unsigned long iflags;
+	int rc;
+
+	/* Detect polling mode and jump to a handler */
+	if (!phba->sli4_hba.intr_enable) {
+		if (flag == MBX_POLL)
+			rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
+		else
+			rc = -EIO;
+		if (rc != MBX_SUCCESS)
+			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+					"(%d):2541 Mailbox command x%x "
+					"(x%x) cannot issue Data: x%x x%x\n",
+					mboxq->vport ? mboxq->vport->vpi : 0,
+					mboxq->u.mb.mbxCommand,
+					lpfc_sli4_mbox_opcode_get(phba, mboxq),
+					psli->sli_flag, flag);
+		return rc;
+	} else if (flag == MBX_POLL) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+				"(%d):2542 Mailbox command x%x (x%x) "
+				"cannot issue Data: x%x x%x\n",
+				mboxq->vport ? mboxq->vport->vpi : 0,
+				mboxq->u.mb.mbxCommand,
+				lpfc_sli4_mbox_opcode_get(phba, mboxq),
+				psli->sli_flag, flag);
+		return -EIO;
+	}
+
+	/* Now, interrupt mode asynchronous mailbox command */
+	rc = lpfc_mbox_cmd_check(phba, mboxq);
+	if (rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+				"(%d):2543 Mailbox command x%x (x%x) "
+				"cannot issue Data: x%x x%x\n",
+				mboxq->vport ? mboxq->vport->vpi : 0,
+				mboxq->u.mb.mbxCommand,
+				lpfc_sli4_mbox_opcode_get(phba, mboxq),
+				psli->sli_flag, flag);
+		goto out_not_finished;
+	}
+	rc = lpfc_mbox_dev_check(phba);
+	if (unlikely(rc)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+				"(%d):2544 Mailbox command x%x (x%x) "
+				"cannot issue Data: x%x x%x\n",
+				mboxq->vport ? mboxq->vport->vpi : 0,
+				mboxq->u.mb.mbxCommand,
+				lpfc_sli4_mbox_opcode_get(phba, mboxq),
+				psli->sli_flag, flag);
+		goto out_not_finished;
+	}
+
+	/* Put the mailbox command to the driver internal FIFO */
+	psli->slistat.mbox_busy++;
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	lpfc_mbox_put(phba, mboxq);
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
+			"(%d):0354 Mbox cmd issue - Enqueue Data: "
+			"x%x (x%x) x%x x%x x%x\n",
+			mboxq->vport ? mboxq->vport->vpi : 0xffffff,
+			bf_get(lpfc_mqe_command, &mboxq->u.mqe),
+			lpfc_sli4_mbox_opcode_get(phba, mboxq),
+			phba->pport->port_state,
+			psli->sli_flag, MBX_NOWAIT);
+	/* Wake up worker thread to transport mailbox command from head */
+	lpfc_worker_wake_up(phba);
+
+	return MBX_BUSY;
+
+out_not_finished:
+	return MBX_NOT_FINISHED;
+}
+
+/**
+ * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called by the worker thread to send a mailbox command to
+ * SLI4 HBA firmware.
+ *
+ **/
+int
+lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
+{
+	struct lpfc_sli *psli = &phba->sli;
+	LPFC_MBOXQ_t *mboxq;
+	int rc = MBX_SUCCESS;
+	unsigned long iflags;
+	struct lpfc_mqe *mqe;
+	uint32_t mbx_cmnd;
+
+	/* Check interrupt mode before posting the async mailbox command */
+	if (unlikely(!phba->sli4_hba.intr_enable))
+		return MBX_NOT_FINISHED;
+
+	/* Check for mailbox command service token */
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
+		spin_unlock_irqrestore(&phba->hbalock, iflags);
+		return MBX_NOT_FINISHED;
+	}
+	if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
+		spin_unlock_irqrestore(&phba->hbalock, iflags);
+		return MBX_NOT_FINISHED;
+	}
+	if (unlikely(phba->sli.mbox_active)) {
+		spin_unlock_irqrestore(&phba->hbalock, iflags);
+		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+				"0384 There is pending active mailbox cmd\n");
+		return MBX_NOT_FINISHED;
+	}
+	/* Take the mailbox command service token */
+	psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
+
+	/* Get the next mailbox command from head of queue */
+	mboxq = lpfc_mbox_get(phba);
+
+	/* If no more mailbox commands waiting for post, we're done */
+	if (!mboxq) {
+		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+		spin_unlock_irqrestore(&phba->hbalock, iflags);
+		return MBX_SUCCESS;
+	}
+	phba->sli.mbox_active = mboxq;
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+
+	/* Check device readiness for posting mailbox command */
+	rc = lpfc_mbox_dev_check(phba);
+	if (unlikely(rc))
+		/* Driver cleanup routine will clean up the pending mailbox */
+		goto out_not_finished;
+
+	/* Prepare the mbox command to be posted */
+	mqe = &mboxq->u.mqe;
+	mbx_cmnd = bf_get(lpfc_mqe_command, mqe);
+
+	/* Start timer for the mbox_tmo and log some mailbox post messages */
+	mod_timer(&psli->mbox_tmo, (jiffies +
+		  (HZ * lpfc_mbox_tmo_val(phba, mbx_cmnd))));
+
+	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
+			"(%d):0355 Mailbox cmd x%x (x%x) issue Data: "
+			"x%x x%x\n",
+			mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
+			lpfc_sli4_mbox_opcode_get(phba, mboxq),
+			phba->pport->port_state, psli->sli_flag);
+
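+	/* Skip discovery trace logging for heartbeat mailbox commands */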
+	if (mbx_cmnd != MBX_HEARTBEAT) {
+		if (mboxq->vport) {
+			lpfc_debugfs_disc_trc(mboxq->vport,
+				LPFC_DISC_TRC_MBOX_VPORT,
+				"MBOX Send vport: cmd:x%x mb:x%x x%x",
+				mbx_cmnd, mqe->un.mb_words[0],
+				mqe->un.mb_words[1]);
+		} else {
+			lpfc_debugfs_disc_trc(phba->pport,
+				LPFC_DISC_TRC_MBOX,
+				"MBOX Send: cmd:x%x mb:x%x x%x",
+				mbx_cmnd, mqe->un.mb_words[0],
+				mqe->un.mb_words[1]);
+		}
+	}
+	psli->slistat.mbox_cmd++;
+
+	/* Post the mailbox command to the port */
+	rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe);
+	if (rc != MBX_SUCCESS) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+				"(%d):2533 Mailbox command x%x (x%x) "
+				"cannot issue Data: x%x x%x\n",
+				mboxq->vport ? mboxq->vport->vpi : 0,
+				mboxq->u.mb.mbxCommand,
+				lpfc_sli4_mbox_opcode_get(phba, mboxq),
+				psli->sli_flag, MBX_NOWAIT);
+		goto out_not_finished;
+	}
+
+	return rc;
+
+out_not_finished:
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
+	__lpfc_mbox_cmpl_put(phba, mboxq);
+	/* Release the token */
+	psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+	phba->sli.mbox_active = NULL;
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+
+	return MBX_NOT_FINISHED;
+}
+
+/**
+ * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command
+ * @phba: Pointer to HBA context object.
+ * @pmbox: Pointer to mailbox object.
+ * @flag: Flag indicating how the mailbox need to be processed.
+ *
+ * This routine wraps the actual SLI3 or SLI4 mailbox issuing routine through
+ * the API jump table function pointer set up in the lpfc_hba struct.
+ *
+ * For all return codes other than MBX_BUSY and MBX_SUCCESS, the caller owns
+ * the mailbox command after the return of the function.
+ **/
+int
+lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
+{
+	return phba->lpfc_sli_issue_mbox(phba, pmbox, flag);
+}
+
+/**
+ * lpfc_mbox_api_table_setup - Set up mbox api function jump table
+ * @phba: The hba struct for which this call is being executed.
+ * @dev_grp: The HBA PCI-Device group number.
+ *
+ * This routine sets up the mbox interface API function jump table in @phba
+ * struct.
+ * Returns: 0 - success, -ENODEV - failure.
+ **/
+int
+lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
+{
+
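+	/* LPFC_PCI_DEV_LP covers the SLI3 LightPulse HBAs and
+	 * LPFC_PCI_DEV_OC covers the SLI4 OneConnect adapters.
+	 */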
+	switch (dev_grp) {
+	case LPFC_PCI_DEV_LP:
+		phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3;
+		phba->lpfc_sli_handle_slow_ring_event =
+				lpfc_sli_handle_slow_ring_event_s3;
+		phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3;
+		phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
+		phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
+		break;
+	case LPFC_PCI_DEV_OC:
+		phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4;
+		phba->lpfc_sli_handle_slow_ring_event =
+				lpfc_sli_handle_slow_ring_event_s4;
+		phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4;
+		phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
+		phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
+		break;
+	default:
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"1420 Invalid HBA PCI-device group: 0x%x\n",
+				dev_grp);
+		return -ENODEV;
+	}
+	return 0;
+}
+
 /**
  * __lpfc_sli_ringtx_put - Add an iocb to the txq
  * @phba: Pointer to HBA context object.
@@ -4501,28 +5629,42 @@ lpfc_sli_hba_down(struct lpfc_hba *phba)
 
 	/* Return any active mbox cmds */
 	del_timer_sync(&psli->mbox_tmo);
-	spin_lock_irqsave(&phba->hbalock, flags);
 
-	spin_lock(&phba->pport->work_port_lock);
+	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
 	phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
-	spin_unlock(&phba->pport->work_port_lock);
+	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
 
-	/* Return any pending or completed mbox cmds */
-	list_splice_init(&phba->sli.mboxq, &completions);
-	if (psli->mbox_active) {
-		list_add_tail(&psli->mbox_active->list, &completions);
-		psli->mbox_active = NULL;
-		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
-	}
-	list_splice_init(&phba->sli.mboxq_cmpl, &completions);
-	spin_unlock_irqrestore(&phba->hbalock, flags);
+	return 1;
+}
+
+/**
+ * lpfc_sli4_hba_down - PCI function resource cleanup for the SLI4 HBA
+ * @phba: Pointer to HBA context object.
+ *
+ * This function cleans up all queues, iocbs, buffers and mailbox commands
+ * while shutting down the SLI4 HBA FCoE function. This function is called
+ * with no lock held and always returns 1.
+ *
+ * This function does the following to cleanup driver FCoE function resources:
+ * - Free discovery resources for each virtual port
+ * - Cleanup any pending fabric iocbs
+ * - Iterate through the iocb txq and free each entry in the list.
+ * - Free up any buffer posted to the HBA.
+ * - Clean up all the queue entries: WQ, RQ, MQ, EQ, CQ, etc.
+ * - Free mailbox commands in the mailbox queue.
+ **/
+int
+lpfc_sli4_hba_down(struct lpfc_hba *phba)
+{
+	/* Stop the SLI4 device port */
+	lpfc_stop_port(phba);
+
+	/* Tear down the queues in the HBA */
+	lpfc_sli4_queue_unset(phba);
+
+	/* unregister default FCFI from the HBA */
+	lpfc_sli4_fcfi_unreg(phba, phba->fcf.fcfi);
 
-	while (!list_empty(&completions)) {
-		list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
-		pmb->mb.mbxStatus = MBX_NOT_FINISHED;
-		if (pmb->mbox_cmpl)
-			pmb->mbox_cmpl(phba,pmb);
-	}
 	return 1;
 }
 
@@ -4853,7 +5995,10 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 	iabt = &abtsiocbp->iocb;
 	iabt->un.acxri.abortType = ABORT_TYPE_ABTS;
 	iabt->un.acxri.abortContextTag = icmd->ulpContext;
-	iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag;
+	else
+		iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
 	iabt->ulpLe = 1;
 	iabt->ulpClass = icmd->ulpClass;
 
@@ -4869,7 +6014,7 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 			 "abort cmd iotag x%x\n",
 			 iabt->un.acxri.abortContextTag,
 			 iabt->un.acxri.abortIoTag, abtsiocbp->iotag);
-	retval = __lpfc_sli_issue_iocb(phba, pring, abtsiocbp, 0);
+	retval = __lpfc_sli_issue_iocb(phba, pring->ringno, abtsiocbp, 0);
 
 	if (retval)
 		__lpfc_sli_release_iocbq(phba, abtsiocbp);
@@ -5052,7 +6197,10 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
 		cmd = &iocbq->iocb;
 		abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
 		abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
-		abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
+		if (phba->sli_rev == LPFC_SLI_REV4)
+			abtsiocb->iocb.un.acxri.abortIoTag = iocbq->sli4_xritag;
+		else
+			abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
 		abtsiocb->iocb.ulpLe = 1;
 		abtsiocb->iocb.ulpClass = cmd->ulpClass;
 		abtsiocb->vport = phba->pport;
@@ -5064,7 +6212,8 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
 
 		/* Setup callback routine and issue the command. */
 		abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
-		ret_val = lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0);
+		ret_val = lpfc_sli_issue_iocb(phba, pring->ringno,
+					      abtsiocb, 0);
 		if (ret_val == IOCB_ERROR) {
 			lpfc_sli_release_iocbq(phba, abtsiocb);
 			errcnt++;
@@ -5145,7 +6294,7 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
  **/
 int
 lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
-			 struct lpfc_sli_ring *pring,
+			 uint32_t ring_number,
 			 struct lpfc_iocbq *piocb,
 			 struct lpfc_iocbq *prspiocbq,
 			 uint32_t timeout)
@@ -5176,7 +6325,7 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
 		readl(phba->HCregaddr); /* flush */
 	}
 
-	retval = lpfc_sli_issue_iocb(phba, pring, piocb, 0);
+	retval = lpfc_sli_issue_iocb(phba, ring_number, piocb, 0);
 	if (retval == IOCB_SUCCESS) {
 		timeout_req = timeout * HZ;
 		timeleft = wait_event_timeout(done_q,
@@ -5384,6 +6533,58 @@ lpfc_sli_eratt_read(struct lpfc_hba *phba)
 	return 0;
 }
 
+/**
+ * lpfc_sli4_eratt_read - read sli-4 error attention events
+ * @phba: Pointer to HBA context.
+ *
+ * This function is called to read the SLI4 device error attention registers
+ * for possible error attention events. The caller must hold the hostlock
+ * with spin_lock_irq().
+ *
+ * This function returns 1 when there is an Error Attention event pending
+ * in the device error attention registers and returns 0 otherwise.
+ **/
+static int
+lpfc_sli4_eratt_read(struct lpfc_hba *phba)
+{
+	uint32_t uerr_sta_hi, uerr_sta_lo;
+	uint32_t onlnreg0, onlnreg1;
+
+	/* For now, use the SLI4 device internal unrecoverable error
+	 * registers for error attention. This can be changed later.
+	 */
+	onlnreg0 = readl(phba->sli4_hba.ONLINE0regaddr);
+	onlnreg1 = readl(phba->sli4_hba.ONLINE1regaddr);
+	if ((onlnreg0 != LPFC_ONLINE_NERR) || (onlnreg1 != LPFC_ONLINE_NERR)) {
+		uerr_sta_lo = readl(phba->sli4_hba.UERRLOregaddr);
+		uerr_sta_hi = readl(phba->sli4_hba.UERRHIregaddr);
+		if (uerr_sta_lo || uerr_sta_hi) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"1423 HBA Unrecoverable error: "
+					"uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
+					"online0_reg=0x%x, online1_reg=0x%x\n",
+					uerr_sta_lo, uerr_sta_hi,
+					onlnreg0, onlnreg1);
+			/* TEMP: as the driver error recovery logic is not
+			 * fully developed, we just log the error message
+			 * and the device error attention action is now
+			 * temporarily disabled.
+			 */
+			return 0;
+			phba->work_status[0] = uerr_sta_lo;
+			phba->work_status[1] = uerr_sta_hi;
+			spin_lock_irq(&phba->hbalock);
+			/* Set the driver HA work bitmap */
+			phba->work_ha |= HA_ERATT;
+			/* Indicate polling handles this ERATT */
+			phba->hba_flag |= HBA_ERATT_HANDLED;
+			spin_unlock_irq(&phba->hbalock);
+			return 1;
+		}
+	}
+	return 0;
+}
+
 /**
  * lpfc_sli_check_eratt - check error attention events
  * @phba: Pointer to HBA context.
@@ -5434,6 +6635,10 @@ lpfc_sli_check_eratt(struct lpfc_hba *phba)
 		/* Read chip Host Attention (HA) register */
 		ha_copy = lpfc_sli_eratt_read(phba);
 		break;
+	case LPFC_SLI_REV4:
+		/* Read device Unrecoverable Error (UERR) registers */
+		ha_copy = lpfc_sli4_eratt_read(phba);
+		break;
 	default:
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"0299 Invalid SLI revision (%d)\n",
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index 883938652a6..e6c88ee8ee9 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -29,13 +29,23 @@ typedef enum _lpfc_ctx_cmd {
 	LPFC_CTX_HOST
 } lpfc_ctx_cmd;
 
+/* This structure is used to carry the needed response IOCB states */
+struct lpfc_sli4_rspiocb_info {
+	uint8_t hw_status;
+	uint8_t bfield;
+#define LPFC_XB	0x1
+#define LPFC_PV	0x2
+	uint8_t priority;
+	uint8_t reserved;
+};
+
 /* This structure is used to handle IOCB requests / responses */
 struct lpfc_iocbq {
 	/* lpfc_iocbqs are used in double linked lists */
 	struct list_head list;
 	struct list_head clist;
 	uint16_t iotag;         /* pre-assigned IO tag */
-	uint16_t rsvd1;
+	uint16_t sli4_xritag;   /* pre-assigned XRI, (OXID) tag. */
 
 	IOCB_t iocb;		/* IOCB cmd */
 	uint8_t retry;		/* retry counter for IOCB cmd - if needed */
@@ -65,7 +75,7 @@ struct lpfc_iocbq {
 			   struct lpfc_iocbq *);
 	void (*iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
 			   struct lpfc_iocbq *);
-
+	struct lpfc_sli4_rspiocb_info sli4_info;
 };
 
 #define SLI_IOCB_RET_IOCB      1	/* Return IOCB if cmd ring full */
@@ -81,14 +91,18 @@ struct lpfc_iocbq {
 typedef struct lpfcMboxq {
 	/* MBOXQs are used in single linked lists */
 	struct list_head list;	/* ptr to next mailbox command */
-	MAILBOX_t mb;		/* Mailbox cmd */
-	struct lpfc_vport *vport;/* virutal port pointer */
+	union {
+		MAILBOX_t mb;		/* Mailbox cmd */
+		struct lpfc_mqe mqe;
+	} u;
+	struct lpfc_vport *vport; /* virtual port pointer */
 	void *context1;		/* caller context information */
 	void *context2;		/* caller context information */
 
 	void (*mbox_cmpl) (struct lpfc_hba *, struct lpfcMboxq *);
 	uint8_t mbox_flag;
-
+	struct lpfc_mcqe mcqe;
+	struct lpfc_mbx_nembed_sge_virt *sge_array;
 } LPFC_MBOXQ_t;
 
 #define MBX_POLL        1	/* poll mailbox till command done, then
@@ -234,6 +248,7 @@ struct lpfc_sli {
 #define LPFC_PROCESS_LA           0x400	/* Able to process link attention */
 #define LPFC_BLOCK_MGMT_IO        0x800	/* Don't allow mgmt mbx or iocb cmds */
 #define LPFC_MENLO_MAINT          0x1000 /* need for menl fw download */
+#define LPFC_SLI_ASYNC_MBX_BLK    0x2000 /* Async mailbox is blocked */
 
 	struct lpfc_sli_ring ring[LPFC_MAX_RING];
 	int fcp_ring;		/* ring used for FCP initiator commands */
@@ -261,6 +276,8 @@ struct lpfc_sli {
 
 #define LPFC_MBOX_TMO           30	/* Sec tmo for outstanding mbox
 					   command */
+#define LPFC_MBOX_SLI4_CONFIG_TMO 60	/* Sec tmo for outstanding mbox
+					   command */
 #define LPFC_MBOX_TMO_FLASH_CMD 300     /* Sec tmo for outstanding FLASH write
 					 * or erase cmds. This is especially
 					 * long because of the potential of
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
new file mode 100644
index 00000000000..5196b46608d
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -0,0 +1,467 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for         *
+ * Fibre Channel Host Bus Adapters.                                *
+ * Copyright (C) 2009 Emulex.  All rights reserved.                *
+ * EMULEX and SLI are trademarks of Emulex.                        *
+ * www.emulex.com                                                  *
+ *                                                                 *
+ * This program is free software; you can redistribute it and/or   *
+ * modify it under the terms of version 2 of the GNU General       *
+ * Public License as published by the Free Software Foundation.    *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
+ * more details, a copy of which can be found in the file COPYING  *
+ * included with this package.                                     *
+ *******************************************************************/
+
+#define LPFC_ACTIVE_MBOX_WAIT_CNT               100
+#define LPFC_RELEASE_NOTIFICATION_INTERVAL	32
+#define LPFC_GET_QE_REL_INT			32
+#define LPFC_RPI_LOW_WATER_MARK			10
+/* Number of SGL entries that can be posted in a 4KB nonembedded mbox cmd */
+#define LPFC_NEMBED_MBOX_SGL_CNT		254
+
+/* Multi-queue arrangement for fast-path FCP work queues */
+#define LPFC_FN_EQN_MAX       8
+#define LPFC_SP_EQN_DEF       1
+#define LPFC_FP_EQN_DEF       1
+#define LPFC_FP_EQN_MIN       1
+#define LPFC_FP_EQN_MAX       (LPFC_FN_EQN_MAX - LPFC_SP_EQN_DEF)
+
+#define LPFC_FN_WQN_MAX       32
+#define LPFC_SP_WQN_DEF       1
+#define LPFC_FP_WQN_DEF       4
+#define LPFC_FP_WQN_MIN       1
+#define LPFC_FP_WQN_MAX       (LPFC_FN_WQN_MAX - LPFC_SP_WQN_DEF)
+
+/*
+ * Provide the default FCF Record attributes used by the driver
+ * when nonFIP mode is configured and there is no other default
+ * FCF Record attributes.
+ */
+#define LPFC_FCOE_FCF_DEF_INDEX	0
+#define LPFC_FCOE_FCF_GET_FIRST	0xFFFF
+#define LPFC_FCOE_FCF_NEXT_NONE	0xFFFF
+
+/* First 3 bytes of default FCF MAC is specified by FC_MAP */
+#define LPFC_FCOE_FCF_MAC3	0xFF
+#define LPFC_FCOE_FCF_MAC4	0xFF
+#define LPFC_FCOE_FCF_MAC5	0xFE
+#define LPFC_FCOE_FCF_MAP0	0x0E
+#define LPFC_FCOE_FCF_MAP1	0xFC
+#define LPFC_FCOE_FCF_MAP2	0x00
+#define LPFC_FCOE_MAX_RCV_SIZE	0x5AC
+#define LPFC_FCOE_FKA_ADV_PER	0
+#define LPFC_FCOE_FIP_PRIORITY	0x80
+
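+/* The queue type identifies the SLI4 hardware entry format for the queue */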
+enum lpfc_sli4_queue_type {
+	LPFC_EQ,
+	LPFC_GCQ,
+	LPFC_MCQ,
+	LPFC_WCQ,
+	LPFC_RCQ,
+	LPFC_MQ,
+	LPFC_WQ,
+	LPFC_HRQ,
+	LPFC_DRQ
+};
+
+/* The queue sub-type defines the functional purpose of the queue */
+enum lpfc_sli4_queue_subtype {
+	LPFC_NONE,
+	LPFC_MBOX,
+	LPFC_FCP,
+	LPFC_ELS,
+	LPFC_USOL
+};
+
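+/* A queue entry is viewed through the union member matching its queue type */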
+union sli4_qe {
+	void *address;
+	struct lpfc_eqe *eqe;
+	struct lpfc_cqe *cqe;
+	struct lpfc_mcqe *mcqe;
+	struct lpfc_wcqe_complete *wcqe_complete;
+	struct lpfc_wcqe_release *wcqe_release;
+	struct sli4_wcqe_xri_aborted *wcqe_xri_aborted;
+	struct lpfc_rcqe_complete *rcqe_complete;
+	struct lpfc_mqe *mqe;
+	union  lpfc_wqe *wqe;
+	struct lpfc_rqe *rqe;
+};
+
+struct lpfc_queue {
+	struct list_head list;
+	enum lpfc_sli4_queue_type type;
+	enum lpfc_sli4_queue_subtype subtype;
+	struct lpfc_hba *phba;
+	struct list_head child_list;
+	uint32_t entry_count;	/* Number of entries to support on the queue */
+	uint32_t entry_size;	/* Size of each queue entry. */
+	uint32_t queue_id;	/* Queue ID assigned by the hardware */
+	struct list_head page_list;
+	uint32_t page_count;	/* Number of pages allocated for this queue */
+
+	uint32_t host_index;	/* The host's index for putting or getting */
+	uint32_t hba_index;	/* The last known hba index for get or put */
+	union sli4_qe qe[1];	/* array to index entries (must be last) */
+};
+
+struct lpfc_cq_event {
+	struct list_head list;
+	union {
+		struct lpfc_mcqe		mcqe_cmpl;
+		struct lpfc_acqe_link		acqe_link;
+		struct lpfc_acqe_fcoe		acqe_fcoe;
+		struct lpfc_acqe_dcbx		acqe_dcbx;
+		struct lpfc_rcqe		rcqe_cmpl;
+		struct sli4_wcqe_xri_aborted	wcqe_axri;
+	} cqe;
+};
+
+struct lpfc_sli4_link {
+	uint8_t speed;
+	uint8_t duplex;
+	uint8_t status;
+	uint8_t physical;
+	uint8_t fault;
+};
+
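+/* Driver state for the FCF (FCoE Forwarder) used for discovery */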
+struct lpfc_fcf {
+	uint8_t	 fabric_name[8];
+	uint8_t  mac_addr[6];
+	uint16_t fcf_indx;
+	uint16_t fcfi;
+	uint32_t fcf_flag;
+#define FCF_AVAILABLE	0x01 /* FCF available for discovery */
+#define FCF_REGISTERED	0x02 /* FCF registered with FW */
+#define FCF_DISCOVERED	0x04 /* FCF discovery started  */
+#define FCF_BOOT_ENABLE 0x08 /* Boot bios use this FCF */
+#define FCF_IN_USE	0x10 /* At least one discovery completed */
+#define FCF_VALID_VLAN	0x20 /* Use the vlan id specified */
+	uint32_t priority;
+	uint32_t addr_mode;
+	uint16_t vlan_id;
+};
+
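+/* Config region 23 carries a signature/version header followed by typed
+ * records and is terminated by an LPFC_REGION23_LAST_REC record.
+ */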
+#define LPFC_REGION23_SIGNATURE "RG23"
+#define LPFC_REGION23_VERSION	1
+#define LPFC_REGION23_LAST_REC  0xff
+struct lpfc_fip_param_hdr {
+	uint8_t type;
+#define FCOE_PARAM_TYPE		0xA0
+	uint8_t length;
+#define FCOE_PARAM_LENGTH	2
+	uint8_t parm_version;
+#define FIPP_VERSION		0x01
+	uint8_t parm_flags;
+#define	lpfc_fip_param_hdr_fipp_mode_SHIFT	6
+#define	lpfc_fip_param_hdr_fipp_mode_MASK	0x3
+#define lpfc_fip_param_hdr_fipp_mode_WORD	parm_flags
+#define	FIPP_MODE_ON				0x2
+#define	FIPP_MODE_OFF				0x0
+#define FIPP_VLAN_VALID				0x1
+};
+
+struct lpfc_fcoe_params {
+	uint8_t fc_map[3];
+	uint8_t reserved1;
+	uint16_t vlan_tag;
+	uint8_t reserved[2];
+};
+
+struct lpfc_fcf_conn_hdr {
+	uint8_t type;
+#define FCOE_CONN_TBL_TYPE		0xA1
+	uint8_t length;   /* words */
+	uint8_t reserved[2];
+};
+
+struct lpfc_fcf_conn_rec {
+	uint16_t flags;
+#define	FCFCNCT_VALID		0x0001
+#define	FCFCNCT_BOOT		0x0002
+#define	FCFCNCT_PRIMARY		0x0004   /* if not set, Secondary */
+#define	FCFCNCT_FBNM_VALID	0x0008
+#define	FCFCNCT_SWNM_VALID	0x0010
+#define	FCFCNCT_VLAN_VALID	0x0020
+#define	FCFCNCT_AM_VALID	0x0040
+#define	FCFCNCT_AM_PREFERRED	0x0080   /* if not set, AM Required */
+#define	FCFCNCT_AM_SPMA		0x0100	 /* if not set, FPMA */
+
+	uint16_t vlan_tag;
+	uint8_t fabric_name[8];
+	uint8_t switch_name[8];
+};
+
+struct lpfc_fcf_conn_entry {
+	struct list_head list;
+	struct lpfc_fcf_conn_rec conn_rec;
+};
+
+/*
+ * Define the host's bootstrap mailbox.  This structure contains
+ * the member attributes needed to create, use, and destroy the
+ * bootstrap mailbox region.
+ *
+ * The macro definitions for the bmbx data structure are defined
+ * in lpfc_hw4.h with the register definition.
+ */
+struct lpfc_bmbx {
+	struct lpfc_dmabuf *dmabuf;
+	struct dma_address dma_address;
+	void *avirt;
+	dma_addr_t aphys;
+	uint32_t bmbx_size;
+};
+
+#define LPFC_EQE_SIZE LPFC_EQE_SIZE_4B
+
+#define LPFC_EQE_SIZE_4B	4
+#define LPFC_EQE_SIZE_16B	16
+#define LPFC_CQE_SIZE		16
+#define LPFC_WQE_SIZE		64
+#define LPFC_MQE_SIZE		256
+#define LPFC_RQE_SIZE		8
+
+#define LPFC_EQE_DEF_COUNT	1024
+#define LPFC_CQE_DEF_COUNT      256
+#define LPFC_WQE_DEF_COUNT      64
+#define LPFC_MQE_DEF_COUNT      16
+#define LPFC_RQE_DEF_COUNT	512
+
+#define LPFC_QUEUE_NOARM	false
+#define LPFC_QUEUE_REARM	true
+
+
+/*
+ * SLI4 CT field defines
+ */
+#define SLI4_CT_RPI 0
+#define SLI4_CT_VPI 1
+#define SLI4_CT_VFI 2
+#define SLI4_CT_FCFI 3
+
+#define LPFC_SLI4_MAX_SEGMENT_SIZE 0x10000
+
+/*
+ * SLI4 specific data structures
+ */
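+/* Port resource limits reported by the firmware: maximum count, base value,
+ * and in-use count for each resource type (XRI, RPI, VPI, VFI, FCFI) along
+ * with the queue maximums.
+ */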
+struct lpfc_max_cfg_param {
+	uint16_t max_xri;
+	uint16_t xri_base;
+	uint16_t xri_used;
+	uint16_t max_rpi;
+	uint16_t rpi_base;
+	uint16_t rpi_used;
+	uint16_t max_vpi;
+	uint16_t vpi_base;
+	uint16_t vpi_used;
+	uint16_t max_vfi;
+	uint16_t vfi_base;
+	uint16_t vfi_used;
+	uint16_t max_fcfi;
+	uint16_t fcfi_base;
+	uint16_t fcfi_used;
+	uint16_t max_eq;
+	uint16_t max_rq;
+	uint16_t max_cq;
+	uint16_t max_wq;
+};
+
+struct lpfc_hba;
+/* SLI4 HBA multi-fcp queue handler struct */
+struct lpfc_fcp_eq_hdl {
+	uint32_t idx;
+	struct lpfc_hba *phba;
+};
+
+/* SLI4 HBA data structure entries */
+struct lpfc_sli4_hba {
+	void __iomem *conf_regs_memmap_p; /* Kernel memory mapped address for
+					     PCI BAR0, config space registers */
+	void __iomem *ctrl_regs_memmap_p; /* Kernel memory mapped address for
+					     PCI BAR1, control registers */
+	void __iomem *drbl_regs_memmap_p; /* Kernel memory mapped address for
+					     PCI BAR2, doorbell registers */
+	/* BAR0 PCI config space register memory map */
+	void __iomem *UERRLOregaddr; /* Address to UERR_STATUS_LO register */
+	void __iomem *UERRHIregaddr; /* Address to UERR_STATUS_HI register */
+	void __iomem *ONLINE0regaddr; /* Address to components of internal UE */
+	void __iomem *ONLINE1regaddr; /* Address to components of internal UE */
+#define LPFC_ONLINE_NERR	0xFFFFFFFF
+	void __iomem *SCRATCHPADregaddr; /* Address to scratchpad register */
+	/* BAR1 FCoE function CSR register memory map */
+	void __iomem *STAregaddr;    /* Address to HST_STATE register */
+	void __iomem *ISRregaddr;    /* Address to HST_ISR register */
+	void __iomem *IMRregaddr;    /* Address to HST_IMR register */
+	void __iomem *ISCRregaddr;   /* Address to HST_ISCR register */
+	/* BAR2 VF-0 doorbell register memory map */
+	void __iomem *RQDBregaddr;   /* Address to RQ_DOORBELL register */
+	void __iomem *WQDBregaddr;   /* Address to WQ_DOORBELL register */
+	void __iomem *EQCQDBregaddr; /* Address to EQCQ_DOORBELL register */
+	void __iomem *MQDBregaddr;   /* Address to MQ_DOORBELL register */
+	void __iomem *BMBXregaddr;   /* Address to BootStrap MBX register */
+
+	struct msix_entry *msix_entries;
+	uint32_t cfg_eqn;
+	struct lpfc_fcp_eq_hdl *fcp_eq_hdl; /* FCP per-WQ handle */
+	/* Pointers to the constructed SLI4 queues */
+	struct lpfc_queue **fp_eq; /* Fast-path event queue */
+	struct lpfc_queue *sp_eq;  /* Slow-path event queue */
+	struct lpfc_queue **fcp_wq;/* Fast-path FCP work queue */
+	struct lpfc_queue *mbx_wq; /* Slow-path MBOX work queue */
+	struct lpfc_queue *els_wq; /* Slow-path ELS work queue */
+	struct lpfc_queue *hdr_rq; /* Slow-path Header Receive queue */
+	struct lpfc_queue *dat_rq; /* Slow-path Data Receive queue */
+	struct lpfc_queue **fcp_cq;/* Fast-path FCP compl queue */
+	struct lpfc_queue *mbx_cq; /* Slow-path mailbox complete queue */
+	struct lpfc_queue *els_cq; /* Slow-path ELS response complete queue */
+	struct lpfc_queue *rxq_cq; /* Slow-path unsolicited complete queue */
+
+	/* Setup information for various queue parameters */
+	int eq_esize;
+	int eq_ecount;
+	int cq_esize;
+	int cq_ecount;
+	int wq_esize;
+	int wq_ecount;
+	int mq_esize;
+	int mq_ecount;
+	int rq_esize;
+	int rq_ecount;
+#define LPFC_SP_EQ_MAX_INTR_SEC         10000
+#define LPFC_FP_EQ_MAX_INTR_SEC         10000
+
+	uint32_t intr_enable;
+	struct lpfc_bmbx bmbx;
+	struct lpfc_max_cfg_param max_cfg_param;
+	uint16_t next_xri; /* last_xri - max_cfg_param.xri_base = used */
+	uint16_t next_rpi;
+	uint16_t scsi_xri_max;
+	uint16_t scsi_xri_cnt;
+	struct list_head lpfc_free_sgl_list;
+	struct list_head lpfc_sgl_list;
+	struct lpfc_sglq **lpfc_els_sgl_array;
+	struct list_head lpfc_abts_els_sgl_list;
+	struct lpfc_scsi_buf **lpfc_scsi_psb_array;
+	struct list_head lpfc_abts_scsi_buf_list;
+	uint32_t total_sglq_bufs;
+	struct lpfc_sglq **lpfc_sglq_active_list;
+	struct list_head lpfc_rpi_hdr_list;
+	unsigned long *rpi_bmask;
+	uint16_t rpi_count;
+	struct lpfc_sli4_flags sli4_flags;
+	struct list_head sp_rspiocb_work_queue;
+	struct list_head sp_cqe_event_pool;
+	struct list_head sp_asynce_work_queue;
+	struct list_head sp_fcp_xri_aborted_work_queue;
+	struct list_head sp_els_xri_aborted_work_queue;
+	struct list_head sp_unsol_work_queue;
+	struct lpfc_sli4_link link_state;
+	spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */
+	spinlock_t abts_sgl_list_lock; /* list of aborted els IOs */
+};
+
+enum lpfc_sge_type {
+	GEN_BUFF_TYPE,
+	SCSI_BUFF_TYPE
+};
+
+struct lpfc_sglq {
+	/* lpfc_sglqs are used in double linked lists */
+	struct list_head list;
+	struct list_head clist;
+	enum lpfc_sge_type buff_type; /* is this a scsi sgl */
+	uint16_t iotag;         /* pre-assigned IO tag */
+	uint16_t sli4_xritag;   /* pre-assigned XRI, (OXID) tag. */
+	struct sli4_sge *sgl;	/* pre-assigned SGL */
+	void *virt;		/* virtual address. */
+	dma_addr_t phys;	/* physical address */
+};
+
+struct lpfc_rpi_hdr {
+	struct list_head list;
+	uint32_t len;
+	struct lpfc_dmabuf *dmabuf;
+	uint32_t page_count;
+	uint32_t start_rpi;
+};
+
+/*
+ * SLI4 specific function prototypes
+ */
+int lpfc_pci_function_reset(struct lpfc_hba *);
+int lpfc_sli4_hba_setup(struct lpfc_hba *);
+int lpfc_sli4_hba_down(struct lpfc_hba *);
+int lpfc_sli4_config(struct lpfc_hba *, struct lpfcMboxq *, uint8_t,
+		     uint8_t, uint32_t, bool);
+void lpfc_sli4_mbox_cmd_free(struct lpfc_hba *, struct lpfcMboxq *);
+void lpfc_sli4_mbx_sge_set(struct lpfcMboxq *, uint32_t, dma_addr_t, uint32_t);
+void lpfc_sli4_mbx_sge_get(struct lpfcMboxq *, uint32_t,
+			   struct lpfc_mbx_sge *);
+
+void lpfc_sli4_hba_reset(struct lpfc_hba *);
+struct lpfc_queue *lpfc_sli4_queue_alloc(struct lpfc_hba *, uint32_t,
+			uint32_t);
+void lpfc_sli4_queue_free(struct lpfc_queue *);
+uint32_t lpfc_eq_create(struct lpfc_hba *, struct lpfc_queue *, uint16_t);
+uint32_t lpfc_cq_create(struct lpfc_hba *, struct lpfc_queue *,
+			struct lpfc_queue *, uint32_t, uint32_t);
+uint32_t lpfc_mq_create(struct lpfc_hba *, struct lpfc_queue *,
+			struct lpfc_queue *, uint32_t);
+uint32_t lpfc_wq_create(struct lpfc_hba *, struct lpfc_queue *,
+			struct lpfc_queue *, uint32_t);
+uint32_t lpfc_rq_create(struct lpfc_hba *, struct lpfc_queue *,
+			struct lpfc_queue *, struct lpfc_queue *, uint32_t);
+uint32_t lpfc_eq_destroy(struct lpfc_hba *, struct lpfc_queue *);
+uint32_t lpfc_cq_destroy(struct lpfc_hba *, struct lpfc_queue *);
+uint32_t lpfc_mq_destroy(struct lpfc_hba *, struct lpfc_queue *);
+uint32_t lpfc_wq_destroy(struct lpfc_hba *, struct lpfc_queue *);
+uint32_t lpfc_rq_destroy(struct lpfc_hba *, struct lpfc_queue *,
+			 struct lpfc_queue *);
+int lpfc_sli4_queue_setup(struct lpfc_hba *);
+void lpfc_sli4_queue_unset(struct lpfc_hba *);
+int lpfc_sli4_post_sgl(struct lpfc_hba *, dma_addr_t, dma_addr_t, uint16_t);
+int lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *);
+int lpfc_sli4_remove_all_sgl_pages(struct lpfc_hba *);
+uint16_t lpfc_sli4_next_xritag(struct lpfc_hba *);
+int lpfc_sli4_post_async_mbox(struct lpfc_hba *);
+int lpfc_sli4_post_sgl_list(struct lpfc_hba *phba);
+int lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *, struct list_head *, int);
+struct lpfc_cq_event *__lpfc_sli4_cq_event_alloc(struct lpfc_hba *);
+struct lpfc_cq_event *lpfc_sli4_cq_event_alloc(struct lpfc_hba *);
+void __lpfc_sli4_cq_event_release(struct lpfc_hba *, struct lpfc_cq_event *);
+void lpfc_sli4_cq_event_release(struct lpfc_hba *, struct lpfc_cq_event *);
+int lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *);
+int lpfc_sli4_post_rpi_hdr(struct lpfc_hba *, struct lpfc_rpi_hdr *);
+int lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *);
+struct lpfc_rpi_hdr *lpfc_sli4_create_rpi_hdr(struct lpfc_hba *);
+void lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *);
+int lpfc_sli4_alloc_rpi(struct lpfc_hba *);
+void lpfc_sli4_free_rpi(struct lpfc_hba *, int);
+void lpfc_sli4_remove_rpis(struct lpfc_hba *);
+void lpfc_sli4_async_event_proc(struct lpfc_hba *);
+int lpfc_sli4_resume_rpi(struct lpfc_nodelist *);
+void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *);
+void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *);
+void lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *,
+			       struct sli4_wcqe_xri_aborted *);
+void lpfc_sli4_els_xri_aborted(struct lpfc_hba *,
+			       struct sli4_wcqe_xri_aborted *);
+int lpfc_sli4_brdreset(struct lpfc_hba *);
+int lpfc_sli4_add_fcf_record(struct lpfc_hba *, struct fcf_record *);
+void lpfc_sli_remove_dflt_fcf(struct lpfc_hba *);
+int lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *);
+int lpfc_sli4_init_vpi(struct lpfc_hba *, uint16_t);
+uint32_t lpfc_sli4_cq_release(struct lpfc_queue *, bool);
+uint32_t lpfc_sli4_eq_release(struct lpfc_queue *, bool);
+void lpfc_sli4_fcfi_unreg(struct lpfc_hba *, uint16_t);
+int lpfc_sli4_read_fcf_record(struct lpfc_hba *, uint16_t);
+void lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *, LPFC_MBOXQ_t *);
+int lpfc_sli4_post_status_check(struct lpfc_hba *);
+uint8_t lpfc_sli4_mbox_opcode_get(struct lpfc_hba *, struct lpfcMboxq *);
+
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index 917ad56b0af..59e67f7ee53 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -32,8 +32,10 @@
 #include <scsi/scsi_device.h>
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_transport_fc.h>
+#include "lpfc_hw4.h"
 #include "lpfc_hw.h"
 #include "lpfc_sli.h"
+#include "lpfc_sli4.h"
 #include "lpfc_nl.h"
 #include "lpfc_disc.h"
 #include "lpfc_scsi.h"
@@ -89,6 +91,8 @@ lpfc_alloc_vpi(struct lpfc_hba *phba)
 		vpi = 0;
 	else
 		set_bit(vpi, phba->vpi_bmask);
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		phba->sli4_hba.max_cfg_param.vpi_used++;
 	spin_unlock_irq(&phba->hbalock);
 	return vpi;
 }
@@ -96,8 +100,12 @@ lpfc_alloc_vpi(struct lpfc_hba *phba)
 static void
 lpfc_free_vpi(struct lpfc_hba *phba, int vpi)
 {
+	if (vpi == 0)
+		return;
 	spin_lock_irq(&phba->hbalock);
 	clear_bit(vpi, phba->vpi_bmask);
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		phba->sli4_hba.max_cfg_param.vpi_used--;
 	spin_unlock_irq(&phba->hbalock);
 }
 
@@ -308,6 +316,21 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
 		goto error_out;
 	}
 
+	/*
+	 * In SLI4, the vpi must be activated before it can be used
+	 * by the port.
+	 */
+	if (phba->sli_rev == LPFC_SLI_REV4) {
+		rc = lpfc_sli4_init_vpi(phba, vpi);
+		if (rc) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
+					"1838 Failed to INIT_VPI on vpi %d "
+					"status %d\n", vpi, rc);
+			rc = VPORT_NORESOURCES;
+			lpfc_free_vpi(phba, vpi);
+			goto error_out;
+		}
+	}
 
 	/* Assign an unused board number */
 	if ((instance = lpfc_get_instance()) < 0) {
-- 
cgit v1.2.3-70-g09d2


From 4f774513f7b3fe96648b8936f60f835e6ceaa88e Mon Sep 17 00:00:00 2001
From: James Smart <James.Smart@Emulex.Com>
Date: Fri, 22 May 2009 14:52:35 -0400
Subject: [SCSI] lpfc 8.3.2 : Addition of SLI4 Interface - Queues

Adds support for the new queues in the SLI-4 interface.  There are:
- Work Queues - host-to-adapter for fast-path traffic
- Mailbox Queues - host-to-adapter for control (slow-path)
- Buffer Queues - host-to-adapter for posting buffers for async receive
- Completion Queues - adapter-to-host for posting async events,
       completions for fast or slow path work, receipt of async
       receive traffic
- Event Queues - tied to MSI-X vectors, bind completion queues with
       interrupts

These patches add all the support code to tie into command submission
and response paths, update the interrupt handling, etc.
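
All of these queues share one circular-index convention: a producer index
advances as entries are posted, a consumer index advances as they are
processed, and both wrap modulo entry_count, with one slot kept empty to
distinguish a full queue from an empty one. A minimal sketch of that
invariant (editorial illustration only; lpfc_q_is_full is a hypothetical
helper, but host_index, hba_index and entry_count are the struct
lpfc_queue fields used by the put/get/release routines in this patch):

	static bool lpfc_q_is_full(struct lpfc_queue *q)
	{
		/* the put routines fail with -ENOMEM/-EBUSY in this case */
		return ((q->host_index + 1) % q->entry_count) == q->hba_index;
	}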

Signed-off-by: James Smart <james.smart@emulex.com>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
---
 drivers/scsi/lpfc/lpfc_sli.c | 3532 +++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 3457 insertions(+), 75 deletions(-)

(limited to 'drivers/scsi/lpfc')

diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 706bb22a6e8..cf42ada3ffc 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -70,6 +70,350 @@ typedef enum _lpfc_iocb_type {
 	LPFC_ABORT_IOCB
 } lpfc_iocb_type;
 
+
+/* Provide function prototypes local to this module. */
+static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
+				  uint32_t);
+static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
+			    uint8_t *, uint32_t *);
+
+static IOCB_t *
+lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
+{
+	return &iocbq->iocb;
+}
+
+/**
+ * lpfc_sli4_wq_put - Put a Work Queue Entry on a Work Queue
+ * @q: The Work Queue to operate on.
+ * @wqe: The Work Queue Entry to put on the Work Queue.
+ *
+ * This routine will copy the contents of @wqe to the next available entry on
+ * the @q. This function will then ring the Work Queue Doorbell to signal the
+ * HBA to start processing the Work Queue Entry. This function returns 0 if
+ * successful. If no entries are available on @q then this function will return
+ * -ENOMEM.
+ * The caller is expected to hold the hbalock when calling this routine.
+ **/
+static uint32_t
+lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
+{
+	union lpfc_wqe *temp_wqe = q->qe[q->host_index].wqe;
+	struct lpfc_register doorbell;
+	uint32_t host_index;
+
+	/* If the host has not yet processed the next entry then we are done */
+	if (((q->host_index + 1) % q->entry_count) == q->hba_index)
+		return -ENOMEM;
+	/* set consumption flag every once in a while */
+	if (!((q->host_index + 1) % LPFC_RELEASE_NOTIFICATION_INTERVAL))
+		bf_set(lpfc_wqe_gen_wqec, &wqe->generic, 1);
+
+	lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
+
+	/* Update the host index before invoking device */
+	host_index = q->host_index;
+	q->host_index = ((q->host_index + 1) % q->entry_count);
+
+	/* Ring Doorbell */
+	doorbell.word0 = 0;
+	bf_set(lpfc_wq_doorbell_num_posted, &doorbell, 1);
+	bf_set(lpfc_wq_doorbell_index, &doorbell, host_index);
+	bf_set(lpfc_wq_doorbell_id, &doorbell, q->queue_id);
+	writel(doorbell.word0, q->phba->sli4_hba.WQDBregaddr);
+	readl(q->phba->sli4_hba.WQDBregaddr); /* Flush */
+
+	return 0;
+}
+
+/**
+ * lpfc_sli4_wq_release - Updates internal hba index for WQ
+ * @q: The Work Queue to operate on.
+ * @index: The index to advance the hba index to.
+ *
+ * This routine will update the HBA index of a queue to reflect consumption of
+ * Work Queue Entries by the HBA. When the HBA indicates that it has consumed
+ * an entry the host calls this function to update the queue's internal
+ * pointers. This routine returns the number of entries that were consumed by
+ * the HBA.
+ **/
+static uint32_t
+lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
+{
+	uint32_t released = 0;
+
+	if (q->hba_index == index)
+		return 0;
+	do {
+		q->hba_index = ((q->hba_index + 1) % q->entry_count);
+		released++;
+	} while (q->hba_index != index);
+	return released;
+}
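+
+/*
+ * Usage sketch (editorial illustration, not part of this patch): callers
+ * post entries under hbalock and treat a non-zero return as queue-full,
+ * while the release side runs when a WQ release CQE reports how far the
+ * HBA has consumed (wqe_index below stands for the index carried in that
+ * CQE):
+ *
+ *	spin_lock_irqsave(&phba->hbalock, iflags);
+ *	rc = lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe);
+ *	spin_unlock_irqrestore(&phba->hbalock, iflags);
+ *	...
+ *	lpfc_sli4_wq_release(phba->sli4_hba.els_wq, wqe_index);
+ */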
+
+/**
+ * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on a Mailbox Queue
+ * @q: The Mailbox Queue to operate on.
+ * @mqe: The Mailbox Queue Entry to put on the Mailbox Queue.
+ *
+ * This routine will copy the contents of @mqe to the next available entry on
+ * the @q. This function will then ring the Mailbox Queue Doorbell to signal
+ * the HBA to start processing the Mailbox Queue Entry. This function returns
+ * 0 if successful. If no entries are available on @q then this function will
+ * return -ENOMEM.
+ * The caller is expected to hold the hbalock when calling this routine.
+ **/
+static uint32_t
+lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
+{
+	struct lpfc_mqe *temp_mqe = q->qe[q->host_index].mqe;
+	struct lpfc_register doorbell;
+	uint32_t host_index;
+
+	/* If the host has not yet processed the next entry then we are done */
+	if (((q->host_index + 1) % q->entry_count) == q->hba_index)
+		return -ENOMEM;
+	lpfc_sli_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
+	/* Save off the mailbox pointer for completion */
+	q->phba->mbox = (MAILBOX_t *)temp_mqe;
+
+	/* Update the host index before invoking device */
+	host_index = q->host_index;
+	q->host_index = ((q->host_index + 1) % q->entry_count);
+
+	/* Ring Doorbell */
+	doorbell.word0 = 0;
+	bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
+	bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
+	writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
+	readl(q->phba->sli4_hba.MQDBregaddr); /* Flush */
+	return 0;
+}
+
+/**
+ * lpfc_sli4_mq_release - Updates internal hba index for MQ
+ * @q: The Mailbox Queue to operate on.
+ *
+ * This routine will update the HBA index of a queue to reflect consumption of
+ * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has consumed
+ * an entry the host calls this function to update the queue's internal
+ * pointers. This routine returns the number of entries that were consumed by
+ * the HBA.
+ **/
+static uint32_t
+lpfc_sli4_mq_release(struct lpfc_queue *q)
+{
+	/* Clear the mailbox pointer for completion */
+	q->phba->mbox = NULL;
+	q->hba_index = ((q->hba_index + 1) % q->entry_count);
+	return 1;
+}
+
+/**
+ * lpfc_sli4_eq_get - Gets the next valid EQE from an EQ
+ * @q: The Event Queue to get the first valid EQE from
+ *
+ * This routine will get the first valid Event Queue Entry from @q, update
+ * the queue's internal hba index, and return the EQE. If no valid EQEs are in
+ * the Queue (no more work to do), or the Queue is full of EQEs that have been
+ * processed, but not popped back to the HBA then this routine will return NULL.
+ **/
+static struct lpfc_eqe *
+lpfc_sli4_eq_get(struct lpfc_queue *q)
+{
+	struct lpfc_eqe *eqe = q->qe[q->hba_index].eqe;
+
+	/* If the next EQE is not valid then we are done */
+	if (!bf_get(lpfc_eqe_valid, eqe))
+		return NULL;
+	/* If the host has not yet processed the next entry then we are done */
+	if (((q->hba_index + 1) % q->entry_count) == q->host_index)
+		return NULL;
+
+	q->hba_index = ((q->hba_index + 1) % q->entry_count);
+	return eqe;
+}
+
+/**
+ * lpfc_sli4_eq_release - Indicates the host has finished processing an EQ
+ * @q: The Event Queue that the host has completed processing for.
+ * @arm: Indicates whether the host wants to arm this EQ.
+ *
+ * This routine will mark all Event Queue Entries on @q, from the last
+ * known completed entry to the last entry that was processed, as completed
+ * by clearing the valid bit for each completion queue entry. Then it will
+ * notify the HBA, by ringing the doorbell, that the EQEs have been processed.
+ * The internal host index in the @q will be updated by this routine to indicate
+ * that the host has finished processing the entries. The @arm parameter
+ * indicates that the queue should be rearmed when ringing the doorbell.
+ *
+ * This function will return the number of EQEs that were popped.
+ **/
+uint32_t
+lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm)
+{
+	uint32_t released = 0;
+	struct lpfc_eqe *temp_eqe;
+	struct lpfc_register doorbell;
+
+	/* while there are valid entries */
+	while (q->hba_index != q->host_index) {
+		temp_eqe = q->qe[q->host_index].eqe;
+		bf_set(lpfc_eqe_valid, temp_eqe, 0);
+		released++;
+		q->host_index = ((q->host_index + 1) % q->entry_count);
+	}
+	if (unlikely(released == 0 && !arm))
+		return 0;
+
+	/* ring doorbell for number popped */
+	doorbell.word0 = 0;
+	if (arm) {
+		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
+		bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
+	}
+	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
+	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
+	bf_set(lpfc_eqcq_doorbell_eqid, &doorbell, q->queue_id);
+	writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
+	return released;
+}
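+
+/*
+ * Consumption sketch (editorial illustration, not part of this patch): an
+ * interrupt handler drains valid EQEs until lpfc_sli4_eq_get() returns
+ * NULL, dispatches each one, and then pops them all back and rearms the
+ * queue with a single doorbell write (dispatch_eqe is hypothetical):
+ *
+ *	while ((eqe = lpfc_sli4_eq_get(phba->sli4_hba.sp_eq)))
+ *		dispatch_eqe(phba, eqe);
+ *	lpfc_sli4_eq_release(phba->sli4_hba.sp_eq, true);
+ */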
+
+/**
+ * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ
+ * @q: The Completion Queue to get the first valid CQE from
+ *
+ * This routine will get the first valid Completion Queue Entry from @q, update
+ * the queue's internal hba index, and return the CQE. If no valid CQEs are in
+ * the Queue (no more work to do), or the Queue is full of CQEs that have been
+ * processed, but not popped back to the HBA then this routine will return NULL.
+ **/
+static struct lpfc_cqe *
+lpfc_sli4_cq_get(struct lpfc_queue *q)
+{
+	struct lpfc_cqe *cqe;
+
+	/* If the next CQE is not valid then we are done */
+	if (!bf_get(lpfc_cqe_valid, q->qe[q->hba_index].cqe))
+		return NULL;
+	/* If the host has not yet processed the next entry then we are done */
+	if (((q->hba_index + 1) % q->entry_count) == q->host_index)
+		return NULL;
+
+	cqe = q->qe[q->hba_index].cqe;
+	q->hba_index = ((q->hba_index + 1) % q->entry_count);
+	return cqe;
+}
+
+/**
+ * lpfc_sli4_cq_release - Indicates the host has finished processing a CQ
+ * @q: The Completion Queue that the host has completed processing for.
+ * @arm: Indicates whether the host wants to arm this CQ.
+ *
+ * This routine will mark all Completion queue entries on @q, from the last
+ * known completed entry to the last entry that was processed, as completed
+ * by clearing the valid bit for each completion queue entry. Then it will
+ * notify the HBA, by ringing the doorbell, that the CQEs have been processed.
+ * The internal host index in the @q will be updated by this routine to indicate
+ * that the host has finished processing the entries. The @arm parameter
+ * indicates that the queue should be rearmed when ringing the doorbell.
+ *
+ * This function will return the number of CQEs that were released.
+ **/
+uint32_t
+lpfc_sli4_cq_release(struct lpfc_queue *q, bool arm)
+{
+	uint32_t released = 0;
+	struct lpfc_cqe *temp_qe;
+	struct lpfc_register doorbell;
+
+	/* while there are valid entries */
+	while (q->hba_index != q->host_index) {
+		temp_qe = q->qe[q->host_index].cqe;
+		bf_set(lpfc_cqe_valid, temp_qe, 0);
+		released++;
+		q->host_index = ((q->host_index + 1) % q->entry_count);
+	}
+	if (unlikely(released == 0 && !arm))
+		return 0;
+
+	/* ring doorbell for number popped */
+	doorbell.word0 = 0;
+	if (arm)
+		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
+	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
+	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
+	bf_set(lpfc_eqcq_doorbell_cqid, &doorbell, q->queue_id);
+	writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
+	return released;
+}
+
+/**
+ * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
+ * @hq: The Header Receive Queue to operate on.
+ * @dq: The Data Receive Queue to operate on.
+ * @hrqe: The header Receive Queue Entry to put on the header Receive Queue.
+ * @drqe: The data Receive Queue Entry to put on the data Receive Queue.
+ *
+ * This routine will copy the contents of @hrqe and @drqe to the next
+ * available entries on @hq and @dq. This function will then ring the Receive
+ * Queue Doorbell to signal the HBA to start processing the Receive Queue
+ * Entries. This function returns the index that the hrqe was copied to if
+ * successful. If no entries are available on @hq then this function will
+ * return -EBUSY; a queue type or index mismatch returns -EINVAL.
+ * The caller is expected to hold the hbalock when calling this routine.
+ **/
+static int
+lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
+		 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
+{
+	struct lpfc_rqe *temp_hrqe = hq->qe[hq->host_index].rqe;
+	struct lpfc_rqe *temp_drqe = dq->qe[dq->host_index].rqe;
+	struct lpfc_register doorbell;
+	int put_index = hq->host_index;
+
+	if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
+		return -EINVAL;
+	if (hq->host_index != dq->host_index)
+		return -EINVAL;
+	/* If the host has not yet processed the next entry then we are done */
+	if (((hq->host_index + 1) % hq->entry_count) == hq->hba_index)
+		return -EBUSY;
+	lpfc_sli_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
+	lpfc_sli_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);
+
+	/* Update the host index to point to the next slot */
+	hq->host_index = ((hq->host_index + 1) % hq->entry_count);
+	dq->host_index = ((dq->host_index + 1) % dq->entry_count);
+
+	/* Ring The Header Receive Queue Doorbell */
+	if (!(hq->host_index % LPFC_RQ_POST_BATCH)) {
+		doorbell.word0 = 0;
+		bf_set(lpfc_rq_doorbell_num_posted, &doorbell,
+		       LPFC_RQ_POST_BATCH);
+		bf_set(lpfc_rq_doorbell_id, &doorbell, hq->queue_id);
+		writel(doorbell.word0, hq->phba->sli4_hba.RQDBregaddr);
+	}
+	return put_index;
+}
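+
+/*
+ * Note (editorial): the header and data RQs advance in lockstep, one
+ * hrqe/drqe pair per receive buffer, which is why mismatched host indices
+ * are rejected above and why the doorbell is rung once per
+ * LPFC_RQ_POST_BATCH pairs rather than once per posted entry.
+ */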
+
+/**
+ * lpfc_sli4_rq_release - Updates internal hba index for RQ
+ * @hq: The Header Receive Queue to operate on.
+ * @dq: The Data Receive Queue to operate on.
+ *
+ * This routine will update the HBA index of a queue to reflect consumption of
+ * one Receive Queue Entry by the HBA. When the HBA indicates that it has
+ * consumed an entry the host calls this function to update the queue's
+ * internal pointers. This routine returns the number of entries that were
+ * consumed by the HBA.
+ **/
+static uint32_t
+lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
+{
+	if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
+		return 0;
+	hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
+	dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
+	return 1;
+}
+
 /**
  * lpfc_cmd_iocb - Get next command iocb entry in the ring
  * @phba: Pointer to HBA context object.
@@ -214,6 +558,59 @@ lpfc_sli_get_iocbq(struct lpfc_hba *phba)
 	return iocbq;
 }
 
+/**
+ * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool
+ * @phba: Pointer to HBA context object.
+ * @iocbq: Pointer to driver iocb object.
+ *
+ * This function is called with hbalock held to release driver
+ * iocb object to the iocb pool. The iotag in the iocb object
+ * does not change for each use of the iocb object. This function
+ * clears all other fields of the iocb object when it is freed.
+ * The sglq structure that holds the xritag and phys and virtual
+ * mappings for the scatter gather list is retrieved from the
+ * active array of sglq. The get of the sglq pointer also clears
+ * the entry in the array. If the status of the IO indicates that
+ * this IO was aborted then the sglq entry is put on the
+ * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
+ * IO has good status or fails for any other reason then the sglq
+ * entry is added to the free list (lpfc_sgl_list).
+ **/
+static void
+__lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
+{
+	struct lpfc_sglq *sglq;
+	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
+	unsigned long iflag;
+
+	if (iocbq->sli4_xritag == NO_XRI)
+		sglq = NULL;
+	else
+		sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_xritag);
+	if (sglq)  {
+		if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED
+			|| ((iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
+			&& (iocbq->iocb.un.ulpWord[4]
+				== IOERR_SLI_ABORTED))) {
+			spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock,
+					iflag);
+			list_add(&sglq->list,
+				&phba->sli4_hba.lpfc_abts_els_sgl_list);
+			spin_unlock_irqrestore(
+				&phba->sli4_hba.abts_sgl_list_lock, iflag);
+		} else
+			list_add(&sglq->list, &phba->sli4_hba.lpfc_sgl_list);
+	}
+
+
+	/*
+	 * Clean all volatile data fields, preserve iotag and node struct.
+	 */
+	memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
+	iocbq->sli4_xritag = NO_XRI;
+	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
+}
+
 /**
  * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
  * @phba: Pointer to HBA context object.
@@ -959,6 +1356,37 @@ lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
 		return -ENOMEM;
 }
 
+/**
+ * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware
+ * @phba: Pointer to HBA context object.
+ * @hbqno: HBQ number.
+ * @hbq_buf: Pointer to HBQ buffer.
+ *
+ * This function is called with the hbalock held to post an RQE to the SLI4
+ * firmware. If able to post the RQE to the RQ it will queue the hbq entry to
+ * the hbq_buffer_list and return zero, otherwise it will return an error.
+ **/
+static int
+lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
+			    struct hbq_dmabuf *hbq_buf)
+{
+	int rc;
+	struct lpfc_rqe hrqe;
+	struct lpfc_rqe drqe;
+
+	hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
+	hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
+	drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
+	drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
+	rc = lpfc_sli4_rq_put(phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
+			      &hrqe, &drqe);
+	if (rc < 0)
+		return rc;
+	hbq_buf->tag = rc;
+	list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list);
+	return 0;
+}
+
 /* HBQ for ELS and CT traffic. */
 static struct lpfc_hbq_init lpfc_els_hbq = {
 	.rn = 1,
@@ -2574,6 +3002,36 @@ lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
 	return;
 }
 
+/**
+ * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @mask: Host attention register mask for this ring.
+ *
+ * This function is called from the worker thread when there is a pending
+ * ELS response iocb on the driver internal slow-path response iocb worker
+ * queue. The caller does not hold any lock. The function will remove each
+ * response iocb from the response worker queue and calls the handle
+ * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
+ **/
+static void
+lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
+				   struct lpfc_sli_ring *pring, uint32_t mask)
+{
+	struct lpfc_iocbq *irspiocbq;
+	unsigned long iflag;
+
+	while (!list_empty(&phba->sli4_hba.sp_rspiocb_work_queue)) {
+		/* Get the response iocb from the head of work queue */
+		spin_lock_irqsave(&phba->hbalock, iflag);
+		list_remove_head(&phba->sli4_hba.sp_rspiocb_work_queue,
+				 irspiocbq, struct lpfc_iocbq, list);
+		spin_unlock_irqrestore(&phba->hbalock, iflag);
+		/* Process the response iocb */
+		lpfc_sli_sp_handle_rspiocb(phba, pring, irspiocbq);
+	}
+}
+
 /**
  * lpfc_sli_abort_iocb_ring - Abort all iocbs in the ring
  * @phba: Pointer to HBA context object.
@@ -3375,6 +3833,26 @@ lpfc_sli_hbq_setup(struct lpfc_hba *phba)
 	return 0;
 }
 
+/**
+ * lpfc_sli4_rb_setup - Initialize and post RBs to HBA
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called during SLI4 initialization to configure the
+ * single receive-buffer HBQ and post buffers to it. The caller is not
+ * required to hold any locks. This function will return zero if successful
+ * else it will return negative error code.
+ **/
+static int
+lpfc_sli4_rb_setup(struct lpfc_hba *phba)
+{
+	phba->hbq_in_use = 1;
+	phba->hbqs[0].entry_count = lpfc_hbq_defs[0]->entry_count;
+	phba->hbq_count = 1;
+	/* Initially populate or replenish the HBQs */
+	lpfc_sli_hbqbuf_init_hbqs(phba, 0);
+	return 0;
+}
+
 /**
  * lpfc_sli_config_port - Issue config port mailbox command
  * @phba: Pointer to HBA context object.
@@ -5130,104 +5608,550 @@ __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
 }
 
 /**
- * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb
- *
- * This routine wraps the actual lockless version for issusing IOCB function
- * pointer from the lpfc_hba struct.
- *
- * Return codes:
- * 	IOCB_ERROR - Error
- * 	IOCB_SUCCESS - Success
- * 	IOCB_BUSY - Busy
- **/
-static inline int
-__lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
-		struct lpfc_iocbq *piocb, uint32_t flag)
-{
-	return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
-}
-
-/**
- * lpfc_sli_api_table_setup - Set up sli api fucntion jump table
- * @phba: The hba struct for which this call is being executed.
- * @dev_grp: The HBA PCI-Device group number.
- *
- * This routine sets up the SLI interface API function jump table in @phba
- * struct.
- * Returns: 0 - success, -ENODEV - failure.
- **/
-int
-lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
+ * lpfc_sli4_bpl2sgl - Convert the bpl/bde to a sgl.
+ * @phba: Pointer to HBA context object.
+ * @piocbq: Pointer to command iocb.
+ * @sglq: Pointer to the scatter gather queue object.
+ *
+ * This routine converts the bpl or bde that is in the IOCB
+ * to a sgl list for the sli4 hardware. The physical address
+ * of the bpl/bde is converted back to a virtual address.
+ * If the IOCB contains a BPL then the list of BDE's is
+ * converted to sli4_sge's. If the IOCB contains a single
+ * BDE then it is converted to a single sli4_sge.
+ * The IOCB is still in cpu endianness so the contents of
+ * the bpl can be used without byte swapping.
+ *
+ * Returns valid XRI = Success, NO_XRI = Failure.
+ **/
+static uint16_t
+lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
+		struct lpfc_sglq *sglq)
 {
+	uint16_t xritag = NO_XRI;
+	struct ulp_bde64 *bpl = NULL;
+	struct ulp_bde64 bde;
+	struct sli4_sge *sgl  = NULL;
+	IOCB_t *icmd;
+	int numBdes = 0;
+	int i = 0;
 
-	switch (dev_grp) {
-	case LPFC_PCI_DEV_LP:
-		phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
-		phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
-		break;
-	default:
-		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-				"1419 Invalid HBA PCI-device group: 0x%x\n",
-				dev_grp);
-		return -ENODEV;
-		break;
-	}
-	phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq;
-	return 0;
+	if (!piocbq || !sglq)
+		return xritag;
+
+	sgl  = (struct sli4_sge *)sglq->sgl;
+	icmd = &piocbq->iocb;
+	if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
+		numBdes = icmd->un.genreq64.bdl.bdeSize /
+				sizeof(struct ulp_bde64);
+		/* The addrHigh and addrLow fields within the IOCB
+		 * have not been byteswapped yet so there is no
+		 * need to swap them back.
+		 */
+		bpl  = (struct ulp_bde64 *)
+			((struct lpfc_dmabuf *)piocbq->context3)->virt;
+
+		if (!bpl)
+			return xritag;
+
+		for (i = 0; i < numBdes; i++) {
+			/* Should already be byte swapped. */
+			sgl->addr_hi =  bpl->addrHigh;
+			sgl->addr_lo =  bpl->addrLow;
+			/* swap the size field back to the cpu so we
+			 * can assign it to the sgl.
+			 */
+			bde.tus.w  = le32_to_cpu(bpl->tus.w);
+			bf_set(lpfc_sli4_sge_len, sgl, bde.tus.f.bdeSize);
+			if ((i+1) == numBdes)
+				bf_set(lpfc_sli4_sge_last, sgl, 1);
+			else
+				bf_set(lpfc_sli4_sge_last, sgl, 0);
+			sgl->word2 = cpu_to_le32(sgl->word2);
+			sgl->word3 = cpu_to_le32(sgl->word3);
+			bpl++;
+			sgl++;
+		}
+	} else if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BDE_64) {
+			/* The addrHigh and addrLow fields of the BDE have not
+			 * been byteswapped yet so they need to be swapped
+			 * before putting them in the sgl.
+			 */
+			sgl->addr_hi =
+				cpu_to_le32(icmd->un.genreq64.bdl.addrHigh);
+			sgl->addr_lo =
+				cpu_to_le32(icmd->un.genreq64.bdl.addrLow);
+			bf_set(lpfc_sli4_sge_len, sgl,
+				icmd->un.genreq64.bdl.bdeSize);
+			bf_set(lpfc_sli4_sge_last, sgl, 1);
+			sgl->word2 = cpu_to_le32(sgl->word2);
+			sgl->word3 = cpu_to_le32(sgl->word3);
+	}
+	return sglq->sli4_xritag;
 }
 
 /**
- * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb
+ * lpfc_sli4_scmd_to_wqidx_distr - scsi command to SLI4 WQ index distribution
  * @phba: Pointer to HBA context object.
- * @pring: Pointer to driver SLI ring object.
  * @piocb: Pointer to command iocb.
- * @flag: Flag indicating if this command can be put into txq.
  *
- * lpfc_sli_issue_iocb is a wrapper around __lpfc_sli_issue_iocb
- * function. This function gets the hbalock and calls
- * __lpfc_sli_issue_iocb function and will return the error returned
- * by __lpfc_sli_issue_iocb function. This wrapper is used by
- * functions which do not hold hbalock.
+ * This routine performs a round-robin distribution of SCSI commands across
+ * the SLI4 fast-path FCP work queues.
+ *
+ * Return: index into the SLI4 fast-path FCP queue array.
  **/
-int
-lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
-		    struct lpfc_iocbq *piocb, uint32_t flag)
+static uint32_t
+lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
 {
-	unsigned long iflags;
-	int rc;
-
-	spin_lock_irqsave(&phba->hbalock, iflags);
-	rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
-	spin_unlock_irqrestore(&phba->hbalock, iflags);
+	static uint32_t fcp_qidx;
 
-	return rc;
+	return fcp_qidx++ % phba->cfg_fcp_wq_count;
 }
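+
+/*
+ * Note (editorial): the static counter above is only advanced from
+ * __lpfc_sli_issue_iocb_s4(), which runs with hbalock held, so the
+ * round-robin increment is serialized across callers.
+ */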
 
 /**
- * lpfc_extra_ring_setup - Extra ring setup function
+ * lpfc_sli4_iocb2wqe - Convert the IOCB to a work queue entry.
+ * @phba: Pointer to HBA context object.
+ * @iocbq: Pointer to command iocb.
+ * @wqe: Pointer to the work queue entry.
  *
- * This function is called while driver attaches with the
- * HBA to setup the extra ring. The extra ring is used
- * only when driver needs to support target mode functionality
- * or IP over FC functionalities.
+ * This routine converts the iocb command to its Work Queue Entry
+ * equivalent. The wqe pointer should not have any fields set when
+ * this routine is called because it will memcpy over them.
+ * This routine does not set the CQ_ID or the WQEC bits in the
+ * wqe.
  *
- * This function is called with no lock held.
+ * Returns: 0 = Success, IOCB_ERROR = Failure.
  **/
 static int
-lpfc_extra_ring_setup( struct lpfc_hba *phba)
+lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
+		union lpfc_wqe *wqe)
 {
-	struct lpfc_sli *psli;
-	struct lpfc_sli_ring *pring;
+	uint32_t payload_len = 0;
+	uint8_t ct = 0;
+	uint32_t fip;
+	uint32_t abort_tag;
+	uint8_t command_type = ELS_COMMAND_NON_FIP;
+	uint8_t cmnd;
+	uint16_t xritag;
+	struct ulp_bde64 *bpl = NULL;
+
+	fip = bf_get(lpfc_fip_flag, &phba->sli4_hba.sli4_flags);
+	/* The fcp commands will set command type */
+	if ((!(iocbq->iocb_flag &  LPFC_IO_FCP)) && (!fip))
+		command_type = ELS_COMMAND_NON_FIP;
+	else if (!(iocbq->iocb_flag &  LPFC_IO_FCP))
+		command_type = ELS_COMMAND_FIP;
+	else if (iocbq->iocb_flag &  LPFC_IO_FCP)
+		command_type = FCP_COMMAND;
+	else {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+			"2019 Invalid cmd 0x%x\n",
+			iocbq->iocb.ulpCommand);
+		return IOCB_ERROR;
+	}
+	/* Some of the fields are in the right position already */
+	memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));
+	abort_tag = (uint32_t) iocbq->iotag;
+	xritag = iocbq->sli4_xritag;
+	wqe->words[7] = 0; /* The ct field has moved so reset */
+	/* words0-2 bpl convert bde */
+	if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
+		bpl  = (struct ulp_bde64 *)
+			((struct lpfc_dmabuf *)iocbq->context3)->virt;
+		if (!bpl)
+			return IOCB_ERROR;
 
-	psli = &phba->sli;
+		/* Should already be byte swapped. */
+		wqe->generic.bde.addrHigh =  le32_to_cpu(bpl->addrHigh);
+		wqe->generic.bde.addrLow =  le32_to_cpu(bpl->addrLow);
+		/* swap the size field back to the cpu so we
+		 * can assign it to the sgl.
+		 */
+		wqe->generic.bde.tus.w  = le32_to_cpu(bpl->tus.w);
+		payload_len = wqe->generic.bde.tus.f.bdeSize;
+	} else
+		payload_len = iocbq->iocb.un.fcpi64.bdl.bdeSize;
 
-	/* Adjust cmd/rsp ring iocb entries more evenly */
+	iocbq->iocb.ulpIoTag = iocbq->iotag;
+	cmnd = iocbq->iocb.ulpCommand;
 
-	/* Take some away from the FCP ring */
-	pring = &psli->ring[psli->fcp_ring];
-	pring->numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
-	pring->numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
+	switch (iocbq->iocb.ulpCommand) {
+	case CMD_ELS_REQUEST64_CR:
+		if (!iocbq->iocb.ulpLe) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"2007 Only Limited Edition cmd Format"
+				" supported 0x%x\n",
+				iocbq->iocb.ulpCommand);
+			return IOCB_ERROR;
+		}
+		wqe->els_req.payload_len = payload_len;
+		/* Els_request64 has a TMO */
+		bf_set(wqe_tmo, &wqe->els_req.wqe_com,
+			iocbq->iocb.ulpTimeout);
+		/* Need a VF for word 4 set the vf bit*/
+		bf_set(els_req64_vf, &wqe->els_req, 0);
+		/* And a VFID for word 12 */
+		bf_set(els_req64_vfid, &wqe->els_req, 0);
+		/*
+		 * Set ct field to 3, indicates that the context_tag field
+		 * contains the FCFI and remote N_Port_ID is
+		 * in word 5.
+		 */
+
+		ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
+		bf_set(lpfc_wqe_gen_context, &wqe->generic,
+				iocbq->iocb.ulpContext);
+
+		if (iocbq->vport->fc_myDID != 0) {
+			bf_set(els_req64_sid, &wqe->els_req,
+				 iocbq->vport->fc_myDID);
+			bf_set(els_req64_sp, &wqe->els_req, 1);
+		}
+		bf_set(lpfc_wqe_gen_ct, &wqe->generic, ct);
+		bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0);
+		/* CCP CCPE PV PRI in word10 were set in the memcpy */
+	break;
+	case CMD_XMIT_SEQUENCE64_CR:
+		/* word3 iocb=io_tag32 wqe=payload_offset */
+		/* payload offset used for multiple outstanding
+		 * sequences on the same exchange
+		 */
+		wqe->words[3] = 0;
+		/* word4 relative_offset memcpy */
+		/* word5 r_ctl/df_ctl memcpy */
+		bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0);
+		wqe->xmit_sequence.xmit_len = payload_len;
+	break;
+	case CMD_XMIT_BCAST64_CN:
+		/* word3 iocb=iotag32 wqe=payload_len */
+		wqe->words[3] = 0; /* no definition for this in wqe */
+		/* word4 iocb=rsvd wqe=rsvd */
+		/* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */
+		/* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */
+		bf_set(lpfc_wqe_gen_ct, &wqe->generic,
+			((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
+	break;
+	case CMD_FCP_IWRITE64_CR:
+		command_type = FCP_COMMAND_DATA_OUT;
+		/* The struct for wqe fcp_iwrite has 3 fields that are somewhat
+		 * confusing.
+		 * word3 is payload_len: byte offset to the sgl entry for the
+		 * fcp_command.
+		 * word4 is total xfer len, same as the IOCB->ulpParameter.
+		 * word5 is initial xfer len 0 = wait for xfer-ready
+		 */
+
+		/* Always wait for xfer-ready before sending data */
+		wqe->fcp_iwrite.initial_xfer_len = 0;
+		/* word 4 (xfer length) should have been set on the memcpy */
+
+	/* allow write to fall through to read */
+	case CMD_FCP_IREAD64_CR:
+		/* FCP_CMD is always the 1st sgl entry */
+		wqe->fcp_iread.payload_len =
+			payload_len + sizeof(struct fcp_rsp);
+
+		/* word 4 (xfer length) should have been set on the memcpy */
+
+		bf_set(lpfc_wqe_gen_erp, &wqe->generic,
+			iocbq->iocb.ulpFCP2Rcvy);
+		bf_set(lpfc_wqe_gen_lnk, &wqe->generic, iocbq->iocb.ulpXS);
+		/* The XC bit and the XS bit are similar. The driver never
+		 * tracked whether or not the exchange was previously open.
+		 * XC = Exchange create, 0 is create. 1 is already open.
+		 * XS = link cmd: 1 do not close the exchange after command.
+		 * XS = 0 close exchange when command completes.
+		 * The only time we would not set the XC bit is when the XS bit
+		 * is set and we are sending our 2nd or greater command on
+		 * this exchange.
+		 */
+
+	/* ALLOW read & write to fall through to ICMD64 */
+	case CMD_FCP_ICMND64_CR:
+		/* Always open the exchange */
+		bf_set(wqe_xc, &wqe->fcp_iread.wqe_com, 0);
+
+		wqe->words[10] &= 0xffff0000; /* zero out ebde count */
+		bf_set(lpfc_wqe_gen_pu, &wqe->generic, iocbq->iocb.ulpPU);
+	break;
+	case CMD_GEN_REQUEST64_CR:
+		/* word3 command length is described as byte offset to the
+		 * rsp_data. Would always be 16, sizeof(struct sli4_sge)
+		 * sgl[0] = cmnd
+		 * sgl[1] = rsp.
+		 *
+		 */
+		wqe->gen_req.command_len = payload_len;
+		/* Word4 parameter  copied in the memcpy */
+		/* Word5 [rctl, type, df_ctl, la] copied in memcpy */
+		/* word6 context tag copied in memcpy */
+		if (iocbq->iocb.ulpCt_h  || iocbq->iocb.ulpCt_l) {
+			ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
+			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"2015 Invalid CT %x command 0x%x\n",
+				ct, iocbq->iocb.ulpCommand);
+			return IOCB_ERROR;
+		}
+		bf_set(lpfc_wqe_gen_ct, &wqe->generic, 0);
+		bf_set(wqe_tmo, &wqe->gen_req.wqe_com,
+			iocbq->iocb.ulpTimeout);
+
+		bf_set(lpfc_wqe_gen_pu, &wqe->generic, iocbq->iocb.ulpPU);
+		command_type = OTHER_COMMAND;
+	break;
+	case CMD_XMIT_ELS_RSP64_CX:
+		/* words0-2 BDE memcpy */
+		/* word3 iocb=iotag32 wqe=rsvd */
+		wqe->words[3] = 0;
+		/* word4 iocb=did wqe=rsvd. */
+		wqe->words[4] = 0;
+		/* word5 iocb=rsvd wqe=did */
+		bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
+			 iocbq->iocb.un.elsreq64.remoteID);
+
+		bf_set(lpfc_wqe_gen_ct, &wqe->generic,
+			((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
+
+		bf_set(lpfc_wqe_gen_pu, &wqe->generic, iocbq->iocb.ulpPU);
+		bf_set(wqe_rcvoxid, &wqe->generic, iocbq->iocb.ulpContext);
+		if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l)
+			bf_set(lpfc_wqe_gen_context, &wqe->generic,
+			       iocbq->vport->vpi + phba->vpi_base);
+		command_type = OTHER_COMMAND;
+	break;
+	case CMD_CLOSE_XRI_CN:
+	case CMD_ABORT_XRI_CN:
+	case CMD_ABORT_XRI_CX:
+		/* words 0-2 memcpy should be 0 (reserved) */
+		/* port will send abts */
+		if (iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
+			/*
+			 * The link is down so the fw does not need to send abts
+			 * on the wire.
+			 */
+			bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
+		else
+			bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
+		bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
+		abort_tag = iocbq->iocb.un.acxri.abortIoTag;
+		wqe->words[5] = 0;
+		bf_set(lpfc_wqe_gen_ct, &wqe->generic,
+			((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
+		abort_tag = iocbq->iocb.un.acxri.abortIoTag;
+		wqe->generic.abort_tag = abort_tag;
+		/*
+		 * The abort handler will send us CMD_ABORT_XRI_CN or
+		 * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX
+		 */
+		bf_set(lpfc_wqe_gen_command, &wqe->generic, CMD_ABORT_XRI_CX);
+		cmnd = CMD_ABORT_XRI_CX;
+		command_type = OTHER_COMMAND;
+		xritag = 0;
+	break;
+	case CMD_XRI_ABORTED_CX:
+	case CMD_CREATE_XRI_CR: /* Do we expect to use this? */
+		/* words0-2 are all 0's no bde */
+		/* word3 and word4 are reserved */
+		wqe->words[3] = 0;
+		wqe->words[4] = 0;
+		/* word5 iocb=rsvd wqe=did */
+		/* There is no remote port id in the IOCB? */
+		/* Let this fall through and fail */
+	case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */
+	case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */
+	case CMD_FCP_TRSP64_CX: /* Target mode rcv */
+	case CMD_FCP_AUTO_TRSP_CX: /* Auto target rsp */
+	default:
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"2014 Invalid command 0x%x\n",
+				iocbq->iocb.ulpCommand);
+		return IOCB_ERROR;
+	break;
+
+	}
+	bf_set(lpfc_wqe_gen_xri, &wqe->generic, xritag);
+	bf_set(lpfc_wqe_gen_request_tag, &wqe->generic, iocbq->iotag);
+	wqe->generic.abort_tag = abort_tag;
+	bf_set(lpfc_wqe_gen_cmd_type, &wqe->generic, command_type);
+	bf_set(lpfc_wqe_gen_command, &wqe->generic, cmnd);
+	bf_set(lpfc_wqe_gen_class, &wqe->generic, iocbq->iocb.ulpClass);
+	bf_set(lpfc_wqe_gen_cq_id, &wqe->generic, LPFC_WQE_CQ_ID_DEFAULT);
+
+	return 0;
+}
+
+/**
+ * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb
+ * @phba: Pointer to HBA context object.
+ * @ring_number: SLI ring number to issue iocb on.
+ * @piocb: Pointer to command iocb.
+ * @flag: Flag indicating if this command can be put into txq.
+ *
+ * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue
+ * an iocb command to an HBA with SLI-4 interface spec.
+ *
+ * This function is called with hbalock held. The function will return success
+ * after it successfully submits the iocb to firmware or after adding it to
+ * the txq.
+ **/
+static int
+__lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
+			 struct lpfc_iocbq *piocb, uint32_t flag)
+{
+	struct lpfc_sglq *sglq;
+	uint16_t xritag;
+	union lpfc_wqe wqe;
+	struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number];
+	uint32_t fcp_wqidx;
+
+	if (piocb->sli4_xritag == NO_XRI) {
+		if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
+			piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
+			sglq = NULL;
+		else {
+			sglq = __lpfc_sli_get_sglq(phba);
+			if (!sglq)
+				return IOCB_ERROR;
+			piocb->sli4_xritag = sglq->sli4_xritag;
+		}
+	} else if (piocb->iocb_flag &  LPFC_IO_FCP) {
+		sglq = NULL; /* These IO's already have an XRI and
+			      * a mapped sgl.
+			      */
+	} else {
+		/* This is a continuation of a command (CX), so this
+		 * sglq is on the active list
+		 */
+		sglq = __lpfc_get_active_sglq(phba, piocb->sli4_xritag);
+		if (!sglq)
+			return IOCB_ERROR;
+	}
+
+	if (sglq) {
+		xritag = lpfc_sli4_bpl2sgl(phba, piocb, sglq);
+		if (xritag != sglq->sli4_xritag)
+			return IOCB_ERROR;
+	}
+
+	if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe))
+		return IOCB_ERROR;
+
+	if (piocb->iocb_flag &  LPFC_IO_FCP) {
+		fcp_wqidx = lpfc_sli4_scmd_to_wqidx_distr(phba, piocb);
+		if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[fcp_wqidx], &wqe))
+			return IOCB_ERROR;
+	} else {
+		if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe))
+			return IOCB_ERROR;
+	}
+	lpfc_sli_ringtxcmpl_put(phba, pring, piocb);
+
+	return 0;
+}
+
+/**
+ * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb
+ *
+ * This routine wraps the actual lockless version of the issue-IOCB function,
+ * invoked through the function pointer in the lpfc_hba struct.
+ *
+ * Return codes:
+ * 	IOCB_ERROR - Error
+ * 	IOCB_SUCCESS - Success
+ * 	IOCB_BUSY - Busy
+ **/
+static inline int
+__lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
+		struct lpfc_iocbq *piocb, uint32_t flag)
+{
+	return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
+}
+
+/**
+ * lpfc_sli_api_table_setup - Set up sli api function jump table
+ * @phba: The hba struct for which this call is being executed.
+ * @dev_grp: The HBA PCI-Device group number.
+ *
+ * This routine sets up the SLI interface API function jump table in @phba
+ * struct.
+ * Returns: 0 - success, -ENODEV - failure.
+ **/
+int
+lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
+{
+
+	switch (dev_grp) {
+	case LPFC_PCI_DEV_LP:
+		phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
+		phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
+		break;
+	case LPFC_PCI_DEV_OC:
+		phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
+		phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
+		break;
+	default:
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"1419 Invalid HBA PCI-device group: 0x%x\n",
+				dev_grp);
+		return -ENODEV;
+		break;
+	}
+	phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq;
+	return 0;
+}
+
+/**
+ * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb
+ * @phba: Pointer to HBA context object.
+ * @ring_number: SLI ring number to issue iocb on.
+ * @piocb: Pointer to command iocb.
+ * @flag: Flag indicating if this command can be put into txq.
+ *
+ * lpfc_sli_issue_iocb is a wrapper around __lpfc_sli_issue_iocb
+ * function. This function gets the hbalock and calls
+ * __lpfc_sli_issue_iocb function and will return the error returned
+ * by __lpfc_sli_issue_iocb function. This wrapper is used by
+ * functions which do not hold hbalock.
+ **/
+int
+lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
+		    struct lpfc_iocbq *piocb, uint32_t flag)
+{
+	unsigned long iflags;
+	int rc;
+
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+
+	return rc;
+}
+
+/**
+ * lpfc_extra_ring_setup - Extra ring setup function
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called while driver attaches with the
+ * HBA to setup the extra ring. The extra ring is used
+ * only when driver needs to support target mode functionality
+ * or IP over FC functionalities.
+ *
+ * This function is called with no lock held.
+ **/
+static int
+lpfc_extra_ring_setup( struct lpfc_hba *phba)
+{
+	struct lpfc_sli *psli;
+	struct lpfc_sli_ring *pring;
+
+	psli = &phba->sli;
+
+	/* Adjust cmd/rsp ring iocb entries more evenly */
+
+	/* Take some away from the FCP ring */
+	pring = &psli->ring[psli->fcp_ring];
+	pring->numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
+	pring->numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
 	pring->numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
 	pring->numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;
 
@@ -7152,3 +8076,2461 @@ lpfc_sli_intr_handler(int irq, void *dev_id)
 	/* Return device-level interrupt handling status */
 	return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc;
 }  /* lpfc_sli_intr_handler */
+
+/**
+ * lpfc_sli4_fcp_xri_abort_event_proc - Process fcp xri abort event
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked by the worker thread to process all the pending
+ * SLI4 FCP abort XRI events.
+ **/
+void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *phba)
+{
+	struct lpfc_cq_event *cq_event;
+
+	/* First, declare the fcp xri abort event has been handled */
+	spin_lock_irq(&phba->hbalock);
+	phba->hba_flag &= ~FCP_XRI_ABORT_EVENT;
+	spin_unlock_irq(&phba->hbalock);
+	/* Now, handle all the fcp xri abort events */
+	while (!list_empty(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue)) {
+		/* Get the first event from the head of the event queue */
+		spin_lock_irq(&phba->hbalock);
+		list_remove_head(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
+				 cq_event, struct lpfc_cq_event, list);
+		spin_unlock_irq(&phba->hbalock);
+		/* Notify aborted XRI for FCP work queue */
+		lpfc_sli4_fcp_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
+		/* Free the event processed back to the free pool */
+		lpfc_sli4_cq_event_release(phba, cq_event);
+	}
+}
+
+/**
+ * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked by the worker thread to process all the pending
+ * SLI4 els abort xri events.
+ **/
+void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
+{
+	struct lpfc_cq_event *cq_event;
+
+	/* First, declare the els xri abort event has been handled */
+	spin_lock_irq(&phba->hbalock);
+	phba->hba_flag &= ~ELS_XRI_ABORT_EVENT;
+	spin_unlock_irq(&phba->hbalock);
+	/* Now, handle all the els xri abort events */
+	while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) {
+		/* Get the first event from the head of the event queue */
+		spin_lock_irq(&phba->hbalock);
+		list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
+				 cq_event, struct lpfc_cq_event, list);
+		spin_unlock_irq(&phba->hbalock);
+		/* Notify aborted XRI for ELS work queue */
+		lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
+		/* Free the event processed back to the free pool */
+		lpfc_sli4_cq_event_release(phba, cq_event);
+	}
+}
+
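+/**
+ * lpfc_sli4_iocb_param_transfer - Transfer WCQE completion to response iocb
+ * @pIocbIn: Pointer to the driver response iocb to populate.
+ * @pIocbOut: Pointer to the completed command iocb.
+ * @wcqe: Pointer to the work-queue completion queue entry.
+ *
+ * (Editorial kernel-doc summarizing the code below.) This routine copies the
+ * command iocb fields into the response iocb, maps the WCQE status and
+ * parameter (or, for FCP response errors, the residual count) into the
+ * corresponding IOCB words, and saves the additional SLI4 information
+ * (hardware status, XB/PV flags, priority) in the response iocb's sli4_info.
+ **/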
+static void
+lpfc_sli4_iocb_param_transfer(struct lpfc_iocbq *pIocbIn,
+			      struct lpfc_iocbq *pIocbOut,
+			      struct lpfc_wcqe_complete *wcqe)
+{
+	size_t offset = offsetof(struct lpfc_iocbq, iocb);
+
+	memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
+	       sizeof(struct lpfc_iocbq) - offset);
+	memset(&pIocbIn->sli4_info, 0,
+	       sizeof(struct lpfc_sli4_rspiocb_info));
+	/* Map WCQE parameters into irspiocb parameters */
+	pIocbIn->iocb.ulpStatus = bf_get(lpfc_wcqe_c_status, wcqe);
+	if (pIocbOut->iocb_flag & LPFC_IO_FCP) {
+		if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
+			pIocbIn->iocb.un.fcpi.fcpi_parm =
+					pIocbOut->iocb.un.fcpi.fcpi_parm -
+					wcqe->total_data_placed;
+		else
+			pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
+	} else
+		pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
+	/* Load in additional WCQE parameters */
+	pIocbIn->sli4_info.hw_status = bf_get(lpfc_wcqe_c_hw_status, wcqe);
+	pIocbIn->sli4_info.bfield = 0;
+	if (bf_get(lpfc_wcqe_c_xb, wcqe))
+		pIocbIn->sli4_info.bfield |= LPFC_XB;
+	if (bf_get(lpfc_wcqe_c_pv, wcqe)) {
+		pIocbIn->sli4_info.bfield |= LPFC_PV;
+		pIocbIn->sli4_info.priority =
+					bf_get(lpfc_wcqe_c_priority, wcqe);
+	}
+}
+
+/**
+ * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event
+ * @phba: Pointer to HBA context object.
+ * @wcqe: Pointer to work-queue completion queue entry.
+ *
+ * This routine handles an ELS work-queue completion event.
+ *
+ * Return: true if work posted to worker thread, otherwise false.
+ **/
+static bool
+lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba,
+			     struct lpfc_wcqe_complete *wcqe)
+{
+	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
+	struct lpfc_iocbq *cmdiocbq;
+	struct lpfc_iocbq *irspiocbq;
+	unsigned long iflags;
+	bool workposted = false;
+
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	pring->stats.iocb_event++;
+	/* Look up the ELS command IOCB and create pseudo response IOCB */
+	cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
+				bf_get(lpfc_wcqe_c_request_tag, wcqe));
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+
+	if (unlikely(!cmdiocbq)) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+				"0386 ELS complete with no corresponding "
+				"cmdiocb: iotag (%d)\n",
+				bf_get(lpfc_wcqe_c_request_tag, wcqe));
+		return workposted;
+	}
+
+	/* Fake the irspiocbq and copy necessary response information */
+	irspiocbq = lpfc_sli_get_iocbq(phba);
+	if (!irspiocbq) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"0387 Failed to allocate an iocbq\n");
+		return workposted;
+	}
+	lpfc_sli4_iocb_param_transfer(irspiocbq, cmdiocbq, wcqe);
+
+	/* Add the irspiocb to the response IOCB work list */
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	list_add_tail(&irspiocbq->list, &phba->sli4_hba.sp_rspiocb_work_queue);
+	/* Indicate ELS ring attention */
+	phba->work_ha |= (HA_R0ATT << (4*LPFC_ELS_RING));
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+	workposted = true;
+
+	return workposted;
+}
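+
+/*
+ * Note (editorial): the pseudo response iocb queued above is later pulled
+ * off sp_rspiocb_work_queue by lpfc_sli_handle_slow_ring_event_s4() when
+ * the worker thread sees the ELS ring attention bit.
+ */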
+
+/**
+ * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event
+ * @phba: Pointer to HBA context object.
+ * @wcqe: Pointer to work-queue completion queue entry.
+ *
+ * This routine handles a slow-path WQ entry consumed event by invoking the
+ * proper WQ release routine on the slow-path WQ.
+ **/
+static void
+lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba,
+			     struct lpfc_wcqe_release *wcqe)
+{
+	/* Check for the slow-path ELS work queue */
+	if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id)
+		lpfc_sli4_wq_release(phba->sli4_hba.els_wq,
+				     bf_get(lpfc_wcqe_r_wqe_index, wcqe));
+	else
+		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+				"2579 Slow-path wqe consume event carries "
+				"miss-matched qid: wcqe-qid=x%x, sp-qid=x%x\n",
+				bf_get(lpfc_wcqe_r_wqe_index, wcqe),
+				phba->sli4_hba.els_wq->queue_id);
+}
+
+/**
+ * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle an xri abort event
+ * @phba: Pointer to HBA context object.
+ * @cq: Pointer to a WQ completion queue.
+ * @wcqe: Pointer to work-queue completion queue entry.
+ *
+ * This routine handles an XRI abort event.
+ *
+ * Return: true if work posted to worker thread, otherwise false.
+ **/
+static bool
+lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
+				   struct lpfc_queue *cq,
+				   struct sli4_wcqe_xri_aborted *wcqe)
+{
+	bool workposted = false;
+	struct lpfc_cq_event *cq_event;
+	unsigned long iflags;
+
+	/* Allocate a new internal CQ_EVENT entry */
+	cq_event = lpfc_sli4_cq_event_alloc(phba);
+	if (!cq_event) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"0602 Failed to allocate CQ_EVENT entry\n");
+		return false;
+	}
+
+	/* Move the CQE into the proper xri abort event list */
+	memcpy(&cq_event->cqe, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
+	switch (cq->subtype) {
+	case LPFC_FCP:
+		spin_lock_irqsave(&phba->hbalock, iflags);
+		list_add_tail(&cq_event->list,
+			      &phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
+		/* Set the fcp xri abort event flag */
+		phba->hba_flag |= FCP_XRI_ABORT_EVENT;
+		spin_unlock_irqrestore(&phba->hbalock, iflags);
+		workposted = true;
+		break;
+	case LPFC_ELS:
+		spin_lock_irqsave(&phba->hbalock, iflags);
+		list_add_tail(&cq_event->list,
+			      &phba->sli4_hba.sp_els_xri_aborted_work_queue);
+		/* Set the els xri abort event flag */
+		phba->hba_flag |= ELS_XRI_ABORT_EVENT;
+		spin_unlock_irqrestore(&phba->hbalock, iflags);
+		workposted = true;
+		break;
+	default:
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"0603 Invalid work queue CQE subtype (x%x)\n",
+				cq->subtype);
+		workposted = false;
+		break;
+	}
+	return workposted;
+}
+
+/**
+ * lpfc_sli4_sp_handle_wcqe - Process a work-queue completion queue entry
+ * @phba: Pointer to HBA context object.
+ * @cq: Pointer to the completion queue.
+ * @cqe: Pointer to a completion queue entry.
+ *
+ * This routine processes a slow-path work-queue completion queue entry.
+ *
+ * Return: true if work posted to worker thread, otherwise false.
+ **/
+static bool
+lpfc_sli4_sp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
+			 struct lpfc_cqe *cqe)
+{
+	struct lpfc_wcqe_complete wcqe;
+	bool workposted = false;
+
+	/* Copy the work queue CQE and convert endian order if needed */
+	lpfc_sli_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
+
+	/* Check and process for different type of WCQE and dispatch */
+	switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
+	case CQE_CODE_COMPL_WQE:
+		/* Process the WQ complete event */
+		workposted = lpfc_sli4_sp_handle_els_wcqe(phba,
+					(struct lpfc_wcqe_complete *)&wcqe);
+		break;
+	case CQE_CODE_RELEASE_WQE:
+		/* Process the WQ release event */
+		lpfc_sli4_sp_handle_rel_wcqe(phba,
+					(struct lpfc_wcqe_release *)&wcqe);
+		break;
+	case CQE_CODE_XRI_ABORTED:
+		/* Process the WQ XRI abort event */
+		workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
+					(struct sli4_wcqe_xri_aborted *)&wcqe);
+		break;
+	default:
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"0388 Not a valid WCQE code: x%x\n",
+				bf_get(lpfc_wcqe_c_code, &wcqe));
+		break;
+	}
+	return workposted;
+}
+
+/**
+ * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry
+ * @phba: Pointer to HBA context object.
+ * @cqe: Pointer to receive-queue completion queue entry.
+ *
+ * This routine processes a receive-queue completion queue entry.
+ *
+ * Return: true if work posted to worker thread, otherwise false.
+ **/
+static bool
+lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe)
+{
+	struct lpfc_rcqe rcqe;
+	bool workposted = false;
+	struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
+	struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
+	struct hbq_dmabuf *dma_buf;
+	uint32_t status;
+	unsigned long iflags;
+
+	/* Copy the receive queue CQE and convert endian order if needed */
+	lpfc_sli_pcimem_bcopy(cqe, &rcqe, sizeof(struct lpfc_rcqe));
+	lpfc_sli4_rq_release(hrq, drq);
+	if (bf_get(lpfc_rcqe_code, &rcqe) != CQE_CODE_RECEIVE)
+		goto out;
+	if (bf_get(lpfc_rcqe_rq_id, &rcqe) != hrq->queue_id)
+		goto out;
+
+	status = bf_get(lpfc_rcqe_status, &rcqe);
+	switch (status) {
+	case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"2537 Receive Frame Truncated!!\n");
+	case FC_STATUS_RQ_SUCCESS:
+		spin_lock_irqsave(&phba->hbalock, iflags);
+		dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
+		if (!dma_buf) {
+			spin_unlock_irqrestore(&phba->hbalock, iflags);
+			goto out;
+		}
+		memcpy(&dma_buf->rcqe, &rcqe, sizeof(rcqe));
+		/* save off the frame for the worker thread to process */
+		list_add_tail(&dma_buf->dbuf.list, &phba->rb_pend_list);
+		/* Frame received */
+		phba->hba_flag |= HBA_RECEIVE_BUFFER;
+		spin_unlock_irqrestore(&phba->hbalock, iflags);
+		workposted = true;
+		break;
+	case FC_STATUS_INSUFF_BUF_NEED_BUF:
+	case FC_STATUS_INSUFF_BUF_FRM_DISC:
+		/* Post more buffers if possible */
+		spin_lock_irqsave(&phba->hbalock, iflags);
+		phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
+		spin_unlock_irqrestore(&phba->hbalock, iflags);
+		workposted = true;
+		break;
+	}
+out:
+	return workposted;
+
+}
+
+/**
+ * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry
+ * @phba: Pointer to HBA context object.
+ * @eqe: Pointer to a slow-path event queue entry.
+ *
+ * This routine processes an event queue entry from the slow-path event
+ * queue. It checks the MajorCode and MinorCode to determine whether this
+ * is a completion event on a completion queue; if not, an error is logged
+ * and the routine returns. Otherwise, it locates the corresponding
+ * completion queue, processes all the entries on that completion queue,
+ * rearms the completion queue, and then returns.
+ *
+ **/
+static void
+lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
+{
+	struct lpfc_queue *cq = NULL, *childq, *speq;
+	struct lpfc_cqe *cqe;
+	bool workposted = false;
+	int ecount = 0;
+	uint16_t cqid;
+
+	if (bf_get(lpfc_eqe_major_code, eqe) != 0 ||
+	    bf_get(lpfc_eqe_minor_code, eqe) != 0) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"0359 Not a valid slow-path completion "
+				"event: majorcode=x%x, minorcode=x%x\n",
+				bf_get(lpfc_eqe_major_code, eqe),
+				bf_get(lpfc_eqe_minor_code, eqe));
+		return;
+	}
+
+	/* Get the reference to the corresponding CQ */
+	cqid = bf_get(lpfc_eqe_resource_id, eqe);
+
+	/* Search for completion queue pointer matching this cqid */
+	speq = phba->sli4_hba.sp_eq;
+	list_for_each_entry(childq, &speq->child_list, list) {
+		if (childq->queue_id == cqid) {
+			cq = childq;
+			break;
+		}
+	}
+	if (unlikely(!cq)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"0365 Slow-path CQ identifier (%d) does "
+				"not exist\n", cqid);
+		return;
+	}
+
+	/* Process all the entries to the CQ */
+	switch (cq->type) {
+	case LPFC_MCQ:
+		while ((cqe = lpfc_sli4_cq_get(cq))) {
+			workposted |= lpfc_sli4_sp_handle_mcqe(phba, cqe);
+			if (!(++ecount % LPFC_GET_QE_REL_INT))
+				lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
+		}
+		break;
+	case LPFC_WCQ:
+		while ((cqe = lpfc_sli4_cq_get(cq))) {
+			workposted |= lpfc_sli4_sp_handle_wcqe(phba, cq, cqe);
+			if (!(++ecount % LPFC_GET_QE_REL_INT))
+				lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
+		}
+		break;
+	case LPFC_RCQ:
+		while ((cqe = lpfc_sli4_cq_get(cq))) {
+			workposted |= lpfc_sli4_sp_handle_rcqe(phba, cqe);
+			if (!(++ecount % LPFC_GET_QE_REL_INT))
+				lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
+		}
+		break;
+	default:
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"0370 Invalid completion queue type (%d)\n",
+				cq->type);
+		return;
+	}
+
+	/* Catch the no cq entry condition, log an error */
+	if (unlikely(ecount == 0))
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"0371 No entry from the CQ: identifier "
+				"(x%x), type (%d)\n", cq->queue_id, cq->type);
+
+	/* In any case, flush and re-arm the CQ */
+	lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
+
+	/* wake up worker thread if there are works to be done */
+	if (workposted)
+		lpfc_worker_wake_up(phba);
+}
+
+/**
+ * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry
+ * @phba: Pointer to HBA context object.
+ * @wcqe: Pointer to a work-queue completion queue entry.
+ *
+ * This routine processes a fast-path work queue completion entry from the
+ * fast-path event queue for FCP command response completion.
+ **/
+static void
+lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba,
+			     struct lpfc_wcqe_complete *wcqe)
+{
+	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_FCP_RING];
+	struct lpfc_iocbq *cmdiocbq;
+	struct lpfc_iocbq irspiocbq;
+	unsigned long iflags;
+
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	pring->stats.iocb_event++;
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+
+	/* Check for response status */
+	if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
+		/* If resource errors reported from HBA, reduce queue
+		 * depth of the SCSI device.
+		 */
+		if ((bf_get(lpfc_wcqe_c_status, wcqe) ==
+		     IOSTAT_LOCAL_REJECT) &&
+		    (wcqe->parameter == IOERR_NO_RESOURCES)) {
+			phba->lpfc_rampdown_queue_depth(phba);
+		}
+		/* Log the error status */
+		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+				"0373 FCP complete error: status=x%x, "
+				"hw_status=x%x, total_data_specified=%d, "
+				"parameter=x%x, word3=x%x\n",
+				bf_get(lpfc_wcqe_c_status, wcqe),
+				bf_get(lpfc_wcqe_c_hw_status, wcqe),
+				wcqe->total_data_placed, wcqe->parameter,
+				wcqe->word3);
+	}
+
+	/* Look up the FCP command IOCB and create pseudo response IOCB */
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
+				bf_get(lpfc_wcqe_c_request_tag, wcqe));
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+	if (unlikely(!cmdiocbq)) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+				"0374 FCP complete with no corresponding "
+				"cmdiocb: iotag (%d)\n",
+				bf_get(lpfc_wcqe_c_request_tag, wcqe));
+		return;
+	}
+	if (unlikely(!cmdiocbq->iocb_cmpl)) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+				"0375 FCP cmdiocb not callback function "
+				"iotag: (%d)\n",
+				bf_get(lpfc_wcqe_c_request_tag, wcqe));
+		return;
+	}
+
+	/* Fake the irspiocb and copy necessary response information */
+	lpfc_sli4_iocb_param_transfer(&irspiocbq, cmdiocbq, wcqe);
+
+	/* Pass the cmd_iocb and the rsp state to the upper layer */
+	(cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq);
+}
+
+/**
+ * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event
+ * @phba: Pointer to HBA context object.
+ * @cq: Pointer to completion queue.
+ * @wcqe: Pointer to work-queue completion queue entry.
+ *
+ * This routine handles a fast-path WQ entry consumed event by invoking the
+ * proper WQ release routine on the matching fast-path work queue.
+ **/
+static void
+lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
+			     struct lpfc_wcqe_release *wcqe)
+{
+	struct lpfc_queue *childwq;
+	bool wqid_matched = false;
+	uint16_t fcp_wqid;
+
+	/* Check for fast-path FCP work queue release */
+	fcp_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe);
+	list_for_each_entry(childwq, &cq->child_list, list) {
+		if (childwq->queue_id == fcp_wqid) {
+			lpfc_sli4_wq_release(childwq,
+					bf_get(lpfc_wcqe_r_wqe_index, wcqe));
+			wqid_matched = true;
+			break;
+		}
+	}
+	/* Report warning log message if no match found */
+	if (!wqid_matched)
+		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+				"2580 Fast-path wqe consume event carries "
+				"mismatched qid: wcqe-qid=x%x\n", fcp_wqid);
+}
+
+/**
+ * lpfc_sli4_fp_handle_wcqe - Process fast-path work queue completion entry
+ * @phba: Pointer to HBA context object.
+ * @cq: Pointer to the completion queue.
+ * @cqe: Pointer to a fast-path completion queue entry.
+ *
+ * This routine processes a fast-path work queue completion entry from the
+ * fast-path event queue for FCP command response completion.
+ **/
+static bool
+lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
+			 struct lpfc_cqe *cqe)
+{
+	struct lpfc_wcqe_release wcqe;
+	bool workposted = false;
+
+	/* Copy the work queue CQE and convert endian order if needed */
+	lpfc_sli_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
+
+	/* Check and process for different type of WCQE and dispatch */
+	switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
+	case CQE_CODE_COMPL_WQE:
+		/* Process the WQ complete event */
+		lpfc_sli4_fp_handle_fcp_wcqe(phba,
+				(struct lpfc_wcqe_complete *)&wcqe);
+		break;
+	case CQE_CODE_RELEASE_WQE:
+		/* Process the WQ release event */
+		lpfc_sli4_fp_handle_rel_wcqe(phba, cq,
+				(struct lpfc_wcqe_release *)&wcqe);
+		break;
+	case CQE_CODE_XRI_ABORTED:
+		/* Process the WQ XRI abort event */
+		workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
+				(struct sli4_wcqe_xri_aborted *)&wcqe);
+		break;
+	default:
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"0144 Not a valid WCQE code: x%x\n",
+				bf_get(lpfc_wcqe_c_code, &wcqe));
+		break;
+	}
+	return workposted;
+}
+
+/**
+ * lpfc_sli4_fp_handle_eqe - Process a fast-path event queue entry
+ * @phba: Pointer to HBA context object.
+ * @eqe: Pointer to fast-path event queue entry.
+ *
+ * This routine processes an event queue entry from the fast-path event
+ * queue. It checks the MajorCode and MinorCode to determine whether this
+ * is a completion event on a completion queue; if not, an error is logged
+ * and the routine returns. Otherwise, it locates the corresponding
+ * completion queue, processes all the entries on that completion queue,
+ * rearms the completion queue, and then returns.
+ **/
+static void
+lpfc_sli4_fp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
+			uint32_t fcp_cqidx)
+{
+	struct lpfc_queue *cq;
+	struct lpfc_cqe *cqe;
+	bool workposted = false;
+	uint16_t cqid;
+	int ecount = 0;
+
+	if (unlikely(bf_get(lpfc_eqe_major_code, eqe) != 0) ||
+	    unlikely(bf_get(lpfc_eqe_minor_code, eqe) != 0)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"0366 Not a valid fast-path completion "
+				"event: majorcode=x%x, minorcode=x%x\n",
+				bf_get(lpfc_eqe_major_code, eqe),
+				bf_get(lpfc_eqe_minor_code, eqe));
+		return;
+	}
+
+	cq = phba->sli4_hba.fcp_cq[fcp_cqidx];
+	if (unlikely(!cq)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"0367 Fast-path completion queue does not "
+				"exist\n");
+		return;
+	}
+
+	/* Get the reference to the corresponding CQ */
+	cqid = bf_get(lpfc_eqe_resource_id, eqe);
+	if (unlikely(cqid != cq->queue_id)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"0368 Miss-matched fast-path completion "
+				"queue identifier: eqcqid=%d, fcpcqid=%d\n",
+				cqid, cq->queue_id);
+		return;
+	}
+
+	/* Process all the entries to the CQ */
+	while ((cqe = lpfc_sli4_cq_get(cq))) {
+		workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq, cqe);
+		if (!(++ecount % LPFC_GET_QE_REL_INT))
+			lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
+	}
+
+	/* Catch the no cq entry condition */
+	if (unlikely(ecount == 0))
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"0369 No entry from fast-path completion "
+				"queue fcpcqid=%d\n", cq->queue_id);
+
+	/* In any case, flush and re-arm the CQ */
+	lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
+
+	/* wake up worker thread if there are works to be done */
+	if (workposted)
+		lpfc_worker_wake_up(phba);
+}
+
+static void
+lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
+{
+	struct lpfc_eqe *eqe;
+
+	/* walk all the EQ entries and drop on the floor */
+	while ((eqe = lpfc_sli4_eq_get(eq)))
+		;
+
+	/* Clear and re-arm the EQ */
+	lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);
+}
+
+/**
+ * lpfc_sli4_sp_intr_handler - Slow-path interrupt handler to SLI-4 device
+ * @irq: Interrupt number.
+ * @dev_id: The device context pointer.
+ *
+ * This function is directly called from the PCI layer as an interrupt
+ * service routine when device with SLI-4 interface spec is enabled with
+ * MSI-X multi-message interrupt mode and there are slow-path events in
+ * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
+ * interrupt mode, this function is called as part of the device-level
+ * interrupt handler. When the PCI slot is in error recovery or the HBA is
+ * undergoing initialization, the interrupt handler will not process the
+ * interrupt. The link attention and ELS ring attention events are handled
+ * by the worker thread. The interrupt handler signals the worker thread
+ * and returns for these events. This function is called without any lock
+ * held. It gets the hbalock to access and update SLI data structures.
+ *
+ * This function returns IRQ_HANDLED when interrupt is handled else it
+ * returns IRQ_NONE.
+ **/
+irqreturn_t
+lpfc_sli4_sp_intr_handler(int irq, void *dev_id)
+{
+	struct lpfc_hba *phba;
+	struct lpfc_queue *speq;
+	struct lpfc_eqe *eqe;
+	unsigned long iflag;
+	int ecount = 0;
+
+	/*
+	 * Get the driver's phba structure from the dev_id
+	 */
+	phba = (struct lpfc_hba *)dev_id;
+
+	if (unlikely(!phba))
+		return IRQ_NONE;
+
+	/* Get to the EQ struct associated with this vector */
+	speq = phba->sli4_hba.sp_eq;
+
+	/* Check device state for handling interrupt */
+	if (unlikely(lpfc_intr_state_check(phba))) {
+		/* Check again for link_state with lock held */
+		spin_lock_irqsave(&phba->hbalock, iflag);
+		if (phba->link_state < LPFC_LINK_DOWN)
+			/* Flush, clear interrupt, and rearm the EQ */
+			lpfc_sli4_eq_flush(phba, speq);
+		spin_unlock_irqrestore(&phba->hbalock, iflag);
+		return IRQ_NONE;
+	}
+
+	/*
+	 * Process all the events on the slow-path EQ
+	 */
+	while ((eqe = lpfc_sli4_eq_get(speq))) {
+		lpfc_sli4_sp_handle_eqe(phba, eqe);
+		if (!(++ecount % LPFC_GET_QE_REL_INT))
+			lpfc_sli4_eq_release(speq, LPFC_QUEUE_NOARM);
+	}
+
+	/* Always clear and re-arm the slow-path EQ */
+	lpfc_sli4_eq_release(speq, LPFC_QUEUE_REARM);
+
+	/* Catch the no EQ entry condition */
+	if (unlikely(ecount == 0)) {
+		if (phba->intr_type == MSIX)
+			/* MSI-X vector is dedicated; no EQE is unexpected */
+			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+					"0357 MSI-X interrupt with no EQE\n");
+		else
+			/* Shared (non MSI-X) interrupt not meant for us */
+			return IRQ_NONE;
+	}
+
+	return IRQ_HANDLED;
+} /* lpfc_sli4_sp_intr_handler */
+
+/**
+ * lpfc_sli4_fp_intr_handler - Fast-path interrupt handler to SLI-4 device
+ * @irq: Interrupt number.
+ * @dev_id: The device context pointer.
+ *
+ * This function is directly called from the PCI layer as an interrupt
+ * service routine when device with SLI-4 interface spec is enabled with
+ * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
+ * ring event in the HBA. However, when the device is enabled with either
+ * MSI or Pin-IRQ interrupt mode, this function is called as part of the
+ * device-level interrupt handler. When the PCI slot is in error recovery
+ * or the HBA is undergoing initialization, the interrupt handler will not
+ * process the interrupt. The SCSI FCP fast-path ring events are handled
+ * in interrupt context. This function is called without any lock held.
+ * It gets the hbalock to access and update SLI data structures. Note that
+ * the FCP EQs and FCP CQs are mapped one-to-one, so the FCP EQ index is
+ * equal to the FCP CQ index.
+ *
+ * This function returns IRQ_HANDLED when interrupt is handled else it
+ * returns IRQ_NONE.
+ **/
+irqreturn_t
+lpfc_sli4_fp_intr_handler(int irq, void *dev_id)
+{
+	struct lpfc_hba *phba;
+	struct lpfc_fcp_eq_hdl *fcp_eq_hdl;
+	struct lpfc_queue *fpeq;
+	struct lpfc_eqe *eqe;
+	unsigned long iflag;
+	int ecount = 0;
+	uint32_t fcp_eqidx;
+
+	/* Get the driver's phba structure from the dev_id */
+	fcp_eq_hdl = (struct lpfc_fcp_eq_hdl *)dev_id;
+	phba = fcp_eq_hdl->phba;
+	fcp_eqidx = fcp_eq_hdl->idx;
+
+	if (unlikely(!phba))
+		return IRQ_NONE;
+
+	/* Get to the EQ struct associated with this vector */
+	fpeq = phba->sli4_hba.fp_eq[fcp_eqidx];
+
+	/* Check device state for handling interrupt */
+	if (unlikely(lpfc_intr_state_check(phba))) {
+		/* Check again for link_state with lock held */
+		spin_lock_irqsave(&phba->hbalock, iflag);
+		if (phba->link_state < LPFC_LINK_DOWN)
+			/* Flush, clear interrupt, and rearm the EQ */
+			lpfc_sli4_eq_flush(phba, fpeq);
+		spin_unlock_irqrestore(&phba->hbalock, iflag);
+		return IRQ_NONE;
+	}
+
+	/*
+	 * Process all the events on the FCP fast-path EQ
+	 */
+	while ((eqe = lpfc_sli4_eq_get(fpeq))) {
+		lpfc_sli4_fp_handle_eqe(phba, eqe, fcp_eqidx);
+		if (!(++ecount % LPFC_GET_QE_REL_INT))
+			lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_NOARM);
+	}
+
+	/* Always clear and re-arm the fast-path EQ */
+	lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_REARM);
+
+	if (unlikely(ecount == 0)) {
+		if (phba->intr_type == MSIX)
+			/* MSI-X vector is dedicated; no EQE is unexpected */
+			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+					"0358 MSI-X interrupt with no EQE\n");
+		else
+			/* Shared (non MSI-X) interrupt not meant for us */
+			return IRQ_NONE;
+	}
+
+	return IRQ_HANDLED;
+} /* lpfc_sli4_fp_intr_handler */
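
A sketch of how the two handlers above might be bound to MSI-X vectors
during setup; the msix_entries array and the handler-name strings are
illustrative, not taken from this patch:

	/* Vector 0: slow path; vectors 1..n: one per fast-path FCP EQ */
	rc = request_irq(msix_entries[0].vector, lpfc_sli4_sp_intr_handler,
			 0, "lpfc:sp", phba);
	for (index = 0; index < phba->cfg_fcp_eq_count; index++) {
		phba->sli4_hba.fcp_eq_hdl[index].idx = index;
		phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
		rc = request_irq(msix_entries[index + 1].vector,
				 lpfc_sli4_fp_intr_handler, 0, "lpfc:fp",
				 &phba->sli4_hba.fcp_eq_hdl[index]);
	}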
+
+/**
+ * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device
+ * @irq: Interrupt number.
+ * @dev_id: The device context pointer.
+ *
+ * This function is the device-level interrupt handler to device with SLI-4
+ * interface spec, called from the PCI layer when either MSI or Pin-IRQ
+ * interrupt mode is enabled and there is an event in the HBA which requires
+ * driver attention. This function invokes the slow-path interrupt attention
+ * handling function and fast-path interrupt attention handling function in
+ * turn to process the relevant HBA attention events. This function is called
+ * without any lock held. It gets the hbalock to access and update SLI data
+ * structures.
+ *
+ * This function returns IRQ_HANDLED when interrupt is handled, else it
+ * returns IRQ_NONE.
+ **/
+irqreturn_t
+lpfc_sli4_intr_handler(int irq, void *dev_id)
+{
+	struct lpfc_hba  *phba;
+	irqreturn_t sp_irq_rc, fp_irq_rc;
+	bool fp_handled = false;
+	uint32_t fcp_eqidx;
+
+	/* Get the driver's phba structure from the dev_id */
+	phba = (struct lpfc_hba *)dev_id;
+
+	if (unlikely(!phba))
+		return IRQ_NONE;
+
+	/*
+	 * Invoke slow-path host attention interrupt handling as appropriate.
+	 */
+	sp_irq_rc = lpfc_sli4_sp_intr_handler(irq, dev_id);
+
+	/*
+	 * Invoke fast-path host attention interrupt handling as appropriate.
+	 */
+	for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
+		fp_irq_rc = lpfc_sli4_fp_intr_handler(irq,
+					&phba->sli4_hba.fcp_eq_hdl[fcp_eqidx]);
+		if (fp_irq_rc == IRQ_HANDLED)
+			fp_handled = true;
+	}
+
+	return fp_handled ? IRQ_HANDLED : sp_irq_rc;
+} /* lpfc_sli4_intr_handler */
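
In MSI or INTx mode a single vector is registered with this combined
handler, which then fans out to the slow- and fast-path handlers as shown
above. A sketch, with the IRQF_SHARED flag and name string as assumptions:

	rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
			 IRQF_SHARED, "lpfc", phba);
	if (rc)
		return rc;	/* fall back or fail device bring-up */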
+
+/**
+ * lpfc_sli4_queue_free - free a queue structure and associated memory
+ * @queue: The queue structure to free.
+ *
+ * This function frees a queue structure and the DMAable memory used for
+ * the host resident queue. This function must be called after destroying the
+ * queue on the HBA.
+ **/
+void
+lpfc_sli4_queue_free(struct lpfc_queue *queue)
+{
+	struct lpfc_dmabuf *dmabuf;
+
+	if (!queue)
+		return;
+
+	while (!list_empty(&queue->page_list)) {
+		list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
+				 list);
+		dma_free_coherent(&queue->phba->pcidev->dev, PAGE_SIZE,
+				  dmabuf->virt, dmabuf->phys);
+		kfree(dmabuf);
+	}
+	kfree(queue);
+	return;
+}
+
+/**
+ * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure
+ * @phba: The HBA that this queue is being created on.
+ * @entry_size: The size of each queue entry for this queue.
+ * @entry_count: The number of entries that this queue will handle.
+ *
+ * This function allocates a queue structure and the DMAable memory used for
+ * the host resident queue. This function must be called before creating the
+ * queue on the HBA.
+ **/
+struct lpfc_queue *
+lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
+		      uint32_t entry_count)
+{
+	struct lpfc_queue *queue;
+	struct lpfc_dmabuf *dmabuf;
+	int x, total_qe_count;
+	void *dma_pointer;
+
+	queue = kzalloc(sizeof(struct lpfc_queue) +
+			(sizeof(union sli4_qe) * entry_count), GFP_KERNEL);
+	if (!queue)
+		return NULL;
+	queue->page_count = (PAGE_ALIGN(entry_size * entry_count))/PAGE_SIZE;
+	INIT_LIST_HEAD(&queue->list);
+	INIT_LIST_HEAD(&queue->page_list);
+	INIT_LIST_HEAD(&queue->child_list);
+	for (x = 0, total_qe_count = 0; x < queue->page_count; x++) {
+		dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+		if (!dmabuf)
+			goto out_fail;
+		dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
+						  PAGE_SIZE, &dmabuf->phys,
+						  GFP_KERNEL);
+		if (!dmabuf->virt) {
+			kfree(dmabuf);
+			goto out_fail;
+		}
+		dmabuf->buffer_tag = x;
+		list_add_tail(&dmabuf->list, &queue->page_list);
+		/* initialize queue's entry array */
+		dma_pointer = dmabuf->virt;
+		for (; total_qe_count < entry_count &&
+		     dma_pointer < (PAGE_SIZE + dmabuf->virt);
+		     total_qe_count++, dma_pointer += entry_size) {
+			queue->qe[total_qe_count].address = dma_pointer;
+		}
+	}
+	queue->entry_size = entry_size;
+	queue->entry_count = entry_count;
+	queue->phba = phba;
+
+	return queue;
+out_fail:
+	lpfc_sli4_queue_free(queue);
+	return NULL;
+}
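
A usage sketch pairing the allocator with its create and free
counterparts; the entry size, entry count, and interrupt ceiling are
illustrative values, not the driver's configured ones:

	struct lpfc_queue *eq;

	eq = lpfc_sli4_queue_alloc(phba, 4 /* bytes per EQE, assumed */,
				   1024);
	if (!eq)
		return -ENOMEM;
	if (lpfc_eq_create(phba, eq, 10000 /* max intrs/sec, assumed */)) {
		/* Create failed; host memory is still owned by the driver */
		lpfc_sli4_queue_free(eq);
		return -ENXIO;
	}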
+
+/**
+ * lpfc_eq_create - Create an Event Queue on the HBA
+ * @phba: HBA structure that indicates port to create a queue on.
+ * @eq: The queue structure to use to create the event queue.
+ * @imax: The maximum interrupts per second limit.
+ *
+ * This function creates an event queue, as detailed in @eq, on a port,
+ * described by @phba by sending an EQ_CREATE mailbox command to the HBA.
+ *
+ * The @phba struct is used to send mailbox command to HBA. The @eq struct
+ * is used to get the entry count and entry size that are necessary to
+ * determine the number of pages to allocate and use for this queue. This
+ * function will send the EQ_CREATE mailbox command to the HBA to setup the
+ * event queue. This function is synchronous and waits for the mailbox
+ * command to finish before returning.
+ *
+ * On success this function will return zero. If unable to allocate enough
+ * memory this function will return -ENOMEM. If the queue create mailbox
+ * command fails this function will return -ENXIO.
+ **/
+uint32_t
+lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint16_t imax)
+{
+	struct lpfc_mbx_eq_create *eq_create;
+	LPFC_MBOXQ_t *mbox;
+	int rc, length, status = 0;
+	struct lpfc_dmabuf *dmabuf;
+	uint32_t shdr_status, shdr_add_status;
+	union lpfc_sli4_cfg_shdr *shdr;
+	uint16_t dmult;
+
+	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mbox)
+		return -ENOMEM;
+	length = (sizeof(struct lpfc_mbx_eq_create) -
+		  sizeof(struct lpfc_sli4_cfg_mhdr));
+	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+			 LPFC_MBOX_OPCODE_EQ_CREATE,
+			 length, LPFC_SLI4_MBX_EMBED);
+	eq_create = &mbox->u.mqe.un.eq_create;
+	bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request,
+	       eq->page_count);
+	bf_set(lpfc_eq_context_size, &eq_create->u.request.context,
+	       LPFC_EQE_SIZE);
+	bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1);
+	/* Calculate delay multiplier from maximum interrupts per second */
+	dmult = LPFC_DMULT_CONST/imax - 1;
+	bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context,
+	       dmult);
+	switch (eq->entry_count) {
+	default:
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"0360 Unsupported EQ count. (%d)\n",
+				eq->entry_count);
+		if (eq->entry_count < 256) {
+			mempool_free(mbox, phba->mbox_mem_pool);
+			return -EINVAL;
+		}
+		/* otherwise default to smallest count (fall through) */
+	case 256:
+		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
+		       LPFC_EQ_CNT_256);
+		break;
+	case 512:
+		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
+		       LPFC_EQ_CNT_512);
+		break;
+	case 1024:
+		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
+		       LPFC_EQ_CNT_1024);
+		break;
+	case 2048:
+		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
+		       LPFC_EQ_CNT_2048);
+		break;
+	case 4096:
+		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
+		       LPFC_EQ_CNT_4096);
+		break;
+	}
+	list_for_each_entry(dmabuf, &eq->page_list, list) {
+		eq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
+					putPaddrLow(dmabuf->phys);
+		eq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
+					putPaddrHigh(dmabuf->phys);
+	}
+	mbox->vport = phba->pport;
+	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+	mbox->context1 = NULL;
+	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+	shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr;
+	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+	if (shdr_status || shdr_add_status || rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2500 EQ_CREATE mailbox failed with "
+				"status x%x add_status x%x, mbx status x%x\n",
+				shdr_status, shdr_add_status, rc);
+		status = -ENXIO;
+	}
+	eq->type = LPFC_EQ;
+	eq->subtype = LPFC_NONE;
+	eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response);
+	if (eq->queue_id == 0xFFFF)
+		status = -ENXIO;
+	eq->host_index = 0;
+	eq->hba_index = 0;
+
+	if (rc != MBX_TIMEOUT)
+		mempool_free(mbox, phba->mbox_mem_pool);
+	return status;
+}
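
The delay multiplier computed above divides LPFC_DMULT_CONST by @imax; a
defensive sketch of that calculation, with a guard for imax == 0 added
here as an assumption (the code above assumes a non-zero @imax):

	static inline uint16_t lpfc_eq_delay_mult(uint16_t imax)
	{
		/* LPFC_DMULT_CONST is the hardware scaling constant */
		if (!imax)
			return 0;	/* no interrupt moderation */
		return LPFC_DMULT_CONST / imax - 1;
	}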
+
+/**
+ * lpfc_cq_create - Create a Completion Queue on the HBA
+ * @phba: HBA structure that indicates port to create a queue on.
+ * @cq: The queue structure to use to create the completion queue.
+ * @eq: The event queue to bind this completion queue to.
+ * @type: The completion queue type (e.g. LPFC_WCQ).
+ * @subtype: The completion queue subtype (e.g. LPFC_FCP).
+ *
+ * This function creates a completion queue, as detailed in @cq, on a port,
+ * described by @phba by sending a CQ_CREATE mailbox command to the HBA.
+ *
+ * The @phba struct is used to send mailbox command to HBA. The @cq struct
+ * is used to get the entry count and entry size that are necessary to
+ * determine the number of pages to allocate and use for this queue. The @eq
+ * is used to indicate which event queue to bind this completion queue to.
+ * This function will send the CQ_CREATE mailbox command to the HBA to setup
+ * the completion queue. This function is synchronous and waits for the
+ * mailbox command to finish before returning.
+ *
+ * On success this function will return zero. If unable to allocate enough
+ * memory this function will return -ENOMEM. If the queue create mailbox
+ * command fails this function will return -ENXIO.
+ **/
+uint32_t
+lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
+	       struct lpfc_queue *eq, uint32_t type, uint32_t subtype)
+{
+	struct lpfc_mbx_cq_create *cq_create;
+	struct lpfc_dmabuf *dmabuf;
+	LPFC_MBOXQ_t *mbox;
+	int rc, length, status = 0;
+	uint32_t shdr_status, shdr_add_status;
+	union lpfc_sli4_cfg_shdr *shdr;
+
+	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mbox)
+		return -ENOMEM;
+	length = (sizeof(struct lpfc_mbx_cq_create) -
+		  sizeof(struct lpfc_sli4_cfg_mhdr));
+	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+			 LPFC_MBOX_OPCODE_CQ_CREATE,
+			 length, LPFC_SLI4_MBX_EMBED);
+	cq_create = &mbox->u.mqe.un.cq_create;
+	bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request,
+		    cq->page_count);
+	bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1);
+	bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1);
+	bf_set(lpfc_cq_eq_id, &cq_create->u.request.context, eq->queue_id);
+	switch (cq->entry_count) {
+	default:
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"0361 Unsupported CQ count. (%d)\n",
+				cq->entry_count);
+		if (cq->entry_count < 256) {
+			mempool_free(mbox, phba->mbox_mem_pool);
+			return -EINVAL;
+		}
+		/* otherwise default to smallest count (fall through) */
+	case 256:
+		bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
+		       LPFC_CQ_CNT_256);
+		break;
+	case 512:
+		bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
+		       LPFC_CQ_CNT_512);
+		break;
+	case 1024:
+		bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
+		       LPFC_CQ_CNT_1024);
+		break;
+	}
+	list_for_each_entry(dmabuf, &cq->page_list, list) {
+		cq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
+					putPaddrLow(dmabuf->phys);
+		cq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
+					putPaddrHigh(dmabuf->phys);
+	}
+	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+
+	/* The IOCTL status is embedded in the mailbox subheader. */
+	shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr;
+	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+	if (shdr_status || shdr_add_status || rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2501 CQ_CREATE mailbox failed with "
+				"status x%x add_status x%x, mbx status x%x\n",
+				shdr_status, shdr_add_status, rc);
+		status = -ENXIO;
+		goto out;
+	}
+	cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
+	if (cq->queue_id == 0xFFFF) {
+		status = -ENXIO;
+		goto out;
+	}
+	/* link the cq onto the parent eq child list */
+	list_add_tail(&cq->list, &eq->child_list);
+	/* Set up completion queue's type and subtype */
+	cq->type = type;
+	cq->subtype = subtype;
+	cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
+	cq->host_index = 0;
+	cq->hba_index = 0;
+out:
+
+	if (rc != MBX_TIMEOUT)
+		mempool_free(mbox, phba->mbox_mem_pool);
+	return status;
+}
+
+/**
+ * lpfc_wq_create - Create a Work Queue on the HBA
+ * @phba: HBA structure that indicates port to create a queue on.
+ * @wq: The queue structure to use to create the work queue.
+ * @cq: The completion queue to bind this work queue to.
+ * @subtype: The subtype of the work queue indicating its functionality.
+ *
+ * This function creates a work queue, as detailed in @wq, on a port, described
+ * by @phba by sending a WQ_CREATE mailbox command to the HBA.
+ *
+ * The @phba struct is used to send mailbox command to HBA. The @wq struct
+ * is used to get the entry count and entry size that are necessary to
+ * determine the number of pages to allocate and use for this queue. The @cq
+ * is used to indicate which completion queue to bind this work queue to. This
+ * function will send the WQ_CREATE mailbox command to the HBA to setup the
+ * work queue. This function is synchronous and waits for the mailbox
+ * command to finish before returning.
+ *
+ * On success this function will return zero. If unable to allocate enough
+ * memory this function will return -ENOMEM. If the queue create mailbox
+ * command fails this function will return -ENXIO.
+ **/
+uint32_t
+lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
+	       struct lpfc_queue *cq, uint32_t subtype)
+{
+	struct lpfc_mbx_wq_create *wq_create;
+	struct lpfc_dmabuf *dmabuf;
+	LPFC_MBOXQ_t *mbox;
+	int rc, length, status = 0;
+	uint32_t shdr_status, shdr_add_status;
+	union lpfc_sli4_cfg_shdr *shdr;
+
+	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mbox)
+		return -ENOMEM;
+	length = (sizeof(struct lpfc_mbx_wq_create) -
+		  sizeof(struct lpfc_sli4_cfg_mhdr));
+	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
+			 LPFC_MBOX_OPCODE_FCOE_WQ_CREATE,
+			 length, LPFC_SLI4_MBX_EMBED);
+	wq_create = &mbox->u.mqe.un.wq_create;
+	bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request,
+		    wq->page_count);
+	bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
+		    cq->queue_id);
+	list_for_each_entry(dmabuf, &wq->page_list, list) {
+		wq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
+					putPaddrLow(dmabuf->phys);
+		wq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
+					putPaddrHigh(dmabuf->phys);
+	}
+	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+	/* The IOCTL status is embedded in the mailbox subheader. */
+	shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr;
+	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+	if (shdr_status || shdr_add_status || rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2503 WQ_CREATE mailbox failed with "
+				"status x%x add_status x%x, mbx status x%x\n",
+				shdr_status, shdr_add_status, rc);
+		status = -ENXIO;
+		goto out;
+	}
+	wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id, &wq_create->u.response);
+	if (wq->queue_id == 0xFFFF) {
+		status = -ENXIO;
+		goto out;
+	}
+	wq->type = LPFC_WQ;
+	wq->subtype = subtype;
+	wq->host_index = 0;
+	wq->hba_index = 0;
+
+	/* link the wq onto the parent cq child list */
+	list_add_tail(&wq->list, &cq->child_list);
+out:
+	if (rc != MBX_TIMEOUT)
+		mempool_free(mbox, phba->mbox_mem_pool);
+	return status;
+}
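
These create routines imply a strict ordering: the EQ must exist before
the CQ that binds to it, and the CQ before its WQ. A sketch of that
sequence for one fast-path FCP channel (queue pointers assumed already
allocated with lpfc_sli4_queue_alloc()):

	rc = lpfc_eq_create(phba, eq, imax);
	if (!rc)
		rc = lpfc_cq_create(phba, cq, eq, LPFC_WCQ, LPFC_FCP);
	if (!rc)
		rc = lpfc_wq_create(phba, wq, cq, LPFC_FCP);
	/* on failure, destroy in reverse order: WQ, then CQ, then EQ */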
+
+/**
+ * lpfc_rq_create - Create a Receive Queue on the HBA
+ * @phba: HBA structure that indicates port to create a queue on.
+ * @hrq: The queue structure to use to create the header receive queue.
+ * @drq: The queue structure to use to create the data receive queue.
+ * @cq: The completion queue to bind these receive queues to.
+ * @subtype: The subtype of the receive queues.
+ *
+ * This function creates a receive buffer queue pair, as detailed in @hrq and
+ * @drq, on a port, described by @phba by sending a RQ_CREATE mailbox command
+ * to the HBA.
+ *
+ * The @phba struct is used to send mailbox command to HBA. The @drq and @hrq
+ * struct is used to get the entry count that is necessary to determine the
+ * number of pages to use for this queue. The @cq is used to indicate which
+ * completion queue to bind received buffers that are posted to these queues to.
+ * This function will send the RQ_CREATE mailbox command to the HBA to setup
+ * the receive queue pair. This function is synchronous and waits for the
+ * mailbox command to finish before returning.
+ *
+ * On success this function will return zero. If unable to allocate enough
+ * memory this function will return -ENOMEM. If the queue create mailbox
+ * command fails this function will return -ENXIO.
+ **/
+uint32_t
+lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
+	       struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype)
+{
+	struct lpfc_mbx_rq_create *rq_create;
+	struct lpfc_dmabuf *dmabuf;
+	LPFC_MBOXQ_t *mbox;
+	int rc, length, status = 0;
+	uint32_t shdr_status, shdr_add_status;
+	union lpfc_sli4_cfg_shdr *shdr;
+
+	if (hrq->entry_count != drq->entry_count)
+		return -EINVAL;
+	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mbox)
+		return -ENOMEM;
+	length = (sizeof(struct lpfc_mbx_rq_create) -
+		  sizeof(struct lpfc_sli4_cfg_mhdr));
+	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
+			 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
+			 length, LPFC_SLI4_MBX_EMBED);
+	rq_create = &mbox->u.mqe.un.rq_create;
+	switch (hrq->entry_count) {
+	default:
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"2535 Unsupported RQ count. (%d)\n",
+				hrq->entry_count);
+		if (hrq->entry_count < 512) {
+			mempool_free(mbox, phba->mbox_mem_pool);
+			return -EINVAL;
+		}
+		/* otherwise default to smallest count (fall through) */
+	case 512:
+		bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
+		       LPFC_RQ_RING_SIZE_512);
+		break;
+	case 1024:
+		bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
+		       LPFC_RQ_RING_SIZE_1024);
+		break;
+	case 2048:
+		bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
+		       LPFC_RQ_RING_SIZE_2048);
+		break;
+	case 4096:
+		bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
+		       LPFC_RQ_RING_SIZE_4096);
+		break;
+	}
+	bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
+	       cq->queue_id);
+	bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
+	       hrq->page_count);
+	bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
+	       LPFC_HDR_BUF_SIZE);
+	list_for_each_entry(dmabuf, &hrq->page_list, list) {
+		rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
+					putPaddrLow(dmabuf->phys);
+		rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
+					putPaddrHigh(dmabuf->phys);
+	}
+	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+	/* The IOCTL status is embedded in the mailbox subheader. */
+	shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
+	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+	if (shdr_status || shdr_add_status || rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2504 RQ_CREATE mailbox failed with "
+				"status x%x add_status x%x, mbx status x%x\n",
+				shdr_status, shdr_add_status, rc);
+		status = -ENXIO;
+		goto out;
+	}
+	hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
+	if (hrq->queue_id == 0xFFFF) {
+		status = -ENXIO;
+		goto out;
+	}
+	hrq->type = LPFC_HRQ;
+	hrq->subtype = subtype;
+	hrq->host_index = 0;
+	hrq->hba_index = 0;
+
+	/* now create the data queue */
+	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
+			 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
+			 length, LPFC_SLI4_MBX_EMBED);
+	switch (drq->entry_count) {
+	default:
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"2536 Unsupported RQ count. (%d)\n",
+				drq->entry_count);
+		if (drq->entry_count < 512) {
+			mempool_free(mbox, phba->mbox_mem_pool);
+			return -EINVAL;
+		}
+		/* otherwise default to smallest count (fall through) */
+	case 512:
+		bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
+		       LPFC_RQ_RING_SIZE_512);
+		break;
+	case 1024:
+		bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
+		       LPFC_RQ_RING_SIZE_1024);
+		break;
+	case 2048:
+		bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
+		       LPFC_RQ_RING_SIZE_2048);
+		break;
+	case 4096:
+		bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
+		       LPFC_RQ_RING_SIZE_4096);
+		break;
+	}
+	bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
+	       cq->queue_id);
+	bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
+	       drq->page_count);
+	bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
+	       LPFC_DATA_BUF_SIZE);
+	list_for_each_entry(dmabuf, &drq->page_list, list) {
+		rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
+					putPaddrLow(dmabuf->phys);
+		rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
+					putPaddrHigh(dmabuf->phys);
+	}
+	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+	/* The IOCTL status is embedded in the mailbox subheader. */
+	shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
+	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+	if (shdr_status || shdr_add_status || rc) {
+		status = -ENXIO;
+		goto out;
+	}
+	drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
+	if (drq->queue_id == 0xFFFF) {
+		status = -ENXIO;
+		goto out;
+	}
+	drq->type = LPFC_DRQ;
+	drq->subtype = subtype;
+	drq->host_index = 0;
+	drq->hba_index = 0;
+
+	/* link the header and data RQs onto the parent cq child list */
+	list_add_tail(&hrq->list, &cq->child_list);
+	list_add_tail(&drq->list, &cq->child_list);
+
+out:
+	if (rc != MBX_TIMEOUT)
+		mempool_free(mbox, phba->mbox_mem_pool);
+	return status;
+}
+
+/**
+ * lpfc_eq_destroy - Destroy an Event Queue on the HBA
+ * @phba: HBA structure that indicates port to destroy the queue on.
+ * @eq: The queue structure associated with the queue to destroy.
+ *
+ * This function destroys a queue, as detailed in @eq, by sending a mailbox
+ * command, specific to the type of queue, to the HBA.
+ *
+ * The @eq struct is used to get the queue ID of the queue to destroy.
+ *
+ * On success this function will return zero. If the queue destroy mailbox
+ * command fails this function will return -ENXIO.
+ **/
+uint32_t
+lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
+{
+	LPFC_MBOXQ_t *mbox;
+	int rc, length, status = 0;
+	uint32_t shdr_status, shdr_add_status;
+	union lpfc_sli4_cfg_shdr *shdr;
+
+	if (!eq)
+		return -ENODEV;
+	mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mbox)
+		return -ENOMEM;
+	length = (sizeof(struct lpfc_mbx_eq_destroy) -
+		  sizeof(struct lpfc_sli4_cfg_mhdr));
+	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+			 LPFC_MBOX_OPCODE_EQ_DESTROY,
+			 length, LPFC_SLI4_MBX_EMBED);
+	bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request,
+	       eq->queue_id);
+	mbox->vport = eq->phba->pport;
+	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+
+	rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL);
+	/* The IOCTL status is embedded in the mailbox subheader. */
+	shdr = (union lpfc_sli4_cfg_shdr *)
+		&mbox->u.mqe.un.eq_destroy.header.cfg_shdr;
+	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+	if (shdr_status || shdr_add_status || rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2505 EQ_DESTROY mailbox failed with "
+				"status x%x add_status x%x, mbx status x%x\n",
+				shdr_status, shdr_add_status, rc);
+		status = -ENXIO;
+	}
+
+	/* Remove eq from any list */
+	list_del_init(&eq->list);
+	if (rc != MBX_TIMEOUT)
+		mempool_free(mbox, eq->phba->mbox_mem_pool);
+	return status;
+}
+
+/**
+ * lpfc_cq_destroy - Destroy a Completion Queue on the HBA
+ * @phba: HBA structure that indicates port to destroy the queue on.
+ * @cq: The queue structure associated with the queue to destroy.
+ *
+ * This function destroys a queue, as detailed in @cq, by sending a mailbox
+ * command, specific to the type of queue, to the HBA.
+ *
+ * The @cq struct is used to get the queue ID of the queue to destroy.
+ *
+ * On success this function will return zero. If the queue destroy mailbox
+ * command fails this function will return -ENXIO.
+ **/
+uint32_t
+lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
+{
+	LPFC_MBOXQ_t *mbox;
+	int rc, length, status = 0;
+	uint32_t shdr_status, shdr_add_status;
+	union lpfc_sli4_cfg_shdr *shdr;
+
+	if (!cq)
+		return -ENODEV;
+	mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mbox)
+		return -ENOMEM;
+	length = (sizeof(struct lpfc_mbx_cq_destroy) -
+		  sizeof(struct lpfc_sli4_cfg_mhdr));
+	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+			 LPFC_MBOX_OPCODE_CQ_DESTROY,
+			 length, LPFC_SLI4_MBX_EMBED);
+	bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request,
+	       cq->queue_id);
+	mbox->vport = cq->phba->pport;
+	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+	rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL);
+	/* The IOCTL status is embedded in the mailbox subheader. */
+	shdr = (union lpfc_sli4_cfg_shdr *)
+		&mbox->u.mqe.un.cq_destroy.header.cfg_shdr;
+	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+	if (shdr_status || shdr_add_status || rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2506 CQ_DESTROY mailbox failed with "
+				"status x%x add_status x%x, mbx status x%x\n",
+				shdr_status, shdr_add_status, rc);
+		status = -ENXIO;
+	}
+	/* Remove cq from any list */
+	list_del_init(&cq->list);
+	if (rc != MBX_TIMEOUT)
+		mempool_free(mbox, cq->phba->mbox_mem_pool);
+	return status;
+}
+
+/**
+ * lpfc_wq_destroy - Destroy a Work Queue on the HBA
+ * @phba: HBA structure that indicates port to destroy the queue on.
+ * @wq: The queue structure associated with the queue to destroy.
+ *
+ * This function destroys a queue, as detailed in @wq, by sending a mailbox
+ * command, specific to the type of queue, to the HBA.
+ *
+ * The @wq struct is used to get the queue ID of the queue to destroy.
+ *
+ * On success this function will return zero. If the queue destroy mailbox
+ * command fails this function will return -ENXIO.
+ **/
+uint32_t
+lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
+{
+	LPFC_MBOXQ_t *mbox;
+	int rc, length, status = 0;
+	uint32_t shdr_status, shdr_add_status;
+	union lpfc_sli4_cfg_shdr *shdr;
+
+	if (!wq)
+		return -ENODEV;
+	mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mbox)
+		return -ENOMEM;
+	length = (sizeof(struct lpfc_mbx_wq_destroy) -
+		  sizeof(struct lpfc_sli4_cfg_mhdr));
+	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
+			 LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY,
+			 length, LPFC_SLI4_MBX_EMBED);
+	bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request,
+	       wq->queue_id);
+	mbox->vport = wq->phba->pport;
+	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+	rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL);
+	shdr = (union lpfc_sli4_cfg_shdr *)
+		&mbox->u.mqe.un.wq_destroy.header.cfg_shdr;
+	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+	if (shdr_status || shdr_add_status || rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2508 WQ_DESTROY mailbox failed with "
+				"status x%x add_status x%x, mbx status x%x\n",
+				shdr_status, shdr_add_status, rc);
+		status = -ENXIO;
+	}
+	/* Remove wq from any list */
+	list_del_init(&wq->list);
+	if (rc != MBX_TIMEOUT)
+		mempool_free(mbox, wq->phba->mbox_mem_pool);
+	return status;
+}
+
+/**
+ * lpfc_rq_destroy - Destroy a Receive Queue on the HBA
+ * @phba: HBA structure that indicates port to destroy the queues on.
+ * @hrq: The queue structure associated with the header receive queue.
+ * @drq: The queue structure associated with the data receive queue.
+ *
+ * This function destroys the receive queue pair, as detailed in @hrq and
+ * @drq, by sending a mailbox command, specific to the type of queue, to
+ * the HBA.
+ *
+ * The @hrq and @drq structs are used to get the queue IDs of the queues
+ * to destroy.
+ *
+ * On success this function will return zero. If the queue destroy mailbox
+ * command fails this function will return -ENXIO.
+ **/
+uint32_t
+lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
+		struct lpfc_queue *drq)
+{
+	LPFC_MBOXQ_t *mbox;
+	int rc, length, status = 0;
+	uint32_t shdr_status, shdr_add_status;
+	union lpfc_sli4_cfg_shdr *shdr;
+
+	if (!hrq || !drq)
+		return -ENODEV;
+	mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mbox)
+		return -ENOMEM;
+	length = (sizeof(struct lpfc_mbx_rq_destroy) -
+		  sizeof(struct mbox_header));
+	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
+			 LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY,
+			 length, LPFC_SLI4_MBX_EMBED);
+	bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
+	       hrq->queue_id);
+	mbox->vport = hrq->phba->pport;
+	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+	rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL);
+	/* The IOCTL status is embedded in the mailbox subheader. */
+	shdr = (union lpfc_sli4_cfg_shdr *)
+		&mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
+	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+	if (shdr_status || shdr_add_status || rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2509 RQ_DESTROY mailbox failed with "
+				"status x%x add_status x%x, mbx status x%x\n",
+				shdr_status, shdr_add_status, rc);
+		if (rc != MBX_TIMEOUT)
+			mempool_free(mbox, hrq->phba->mbox_mem_pool);
+		return -ENXIO;
+	}
+	bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
+	       drq->queue_id);
+	rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL);
+	shdr = (union lpfc_sli4_cfg_shdr *)
+		&mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
+	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+	if (shdr_status || shdr_add_status || rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2510 RQ_DESTROY mailbox failed with "
+				"status x%x add_status x%x, mbx status x%x\n",
+				shdr_status, shdr_add_status, rc);
+		status = -ENXIO;
+	}
+	list_del_init(&hrq->list);
+	list_del_init(&drq->list);
+	if (rc != MBX_TIMEOUT)
+		mempool_free(mbox, hrq->phba->mbox_mem_pool);
+	return status;
+}
+
+/**
+ * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA
+ * @phba: The HBA for which this call is being executed.
+ * @pdma_phys_addr0: Physical address of the 1st SGL page.
+ * @pdma_phys_addr1: Physical address of the 2nd SGL page.
+ * @xritag: the xritag that ties this io to the SGL pages.
+ *
+ * This routine posts the SGL pages for the IO that has the xritag
+ * that is in the iocbq structure. The xritag is assigned during iocbq
+ * creation and persists for as long as the driver is loaded.
+ * If the caller has fewer than 256 scatter gather segments to map then
+ * pdma_phys_addr1 should be 0.
+ * If the caller needs to map more than 256 scatter gather segments then
+ * pdma_phys_addr1 should be a valid physical address.
+ * Physical addresses for SGLs must be 64 byte aligned.
+ * If two SGL pages are mapped, the first must have 256 entries and the
+ * second can have between 1 and 256 entries.
+ *
+ * Return codes:
+ * 	0 - Success
+ * 	-ENXIO, -ENOMEM - Failure
+ **/
+int
+lpfc_sli4_post_sgl(struct lpfc_hba *phba,
+		dma_addr_t pdma_phys_addr0,
+		dma_addr_t pdma_phys_addr1,
+		uint16_t xritag)
+{
+	struct lpfc_mbx_post_sgl_pages *post_sgl_pages;
+	LPFC_MBOXQ_t *mbox;
+	int rc;
+	uint32_t shdr_status, shdr_add_status;
+	union lpfc_sli4_cfg_shdr *shdr;
+
+	if (xritag == NO_XRI) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"0364 Invalid param:\n");
+		return -EINVAL;
+	}
+
+	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mbox)
+		return -ENOMEM;
+
+	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
+			LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
+			sizeof(struct lpfc_mbx_post_sgl_pages) -
+			sizeof(struct mbox_header), LPFC_SLI4_MBX_EMBED);
+
+	post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *)
+				&mbox->u.mqe.un.post_sgl_pages;
+	bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag);
+	bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1);
+
+	post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo	=
+				cpu_to_le32(putPaddrLow(pdma_phys_addr0));
+	post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi =
+				cpu_to_le32(putPaddrHigh(pdma_phys_addr0));
+
+	post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo	=
+				cpu_to_le32(putPaddrLow(pdma_phys_addr1));
+	post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi =
+				cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
+	if (!phba->sli4_hba.intr_enable)
+		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+	else
+		rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
+	/* The IOCTL status is embedded in the mailbox subheader. */
+	shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
+	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+	if (rc != MBX_TIMEOUT)
+		mempool_free(mbox, phba->mbox_mem_pool);
+	if (shdr_status || shdr_add_status || rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2511 POST_SGL mailbox failed with "
+				"status x%x add_status x%x, mbx status x%x\n",
+				shdr_status, shdr_add_status, rc);
+		rc = -ENXIO;
+	}
+	return rc;
+}
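
A usage sketch for a single-page SGL, passing 0 for the second physical
address per the header comment above (the sglq fields match those used by
lpfc_sli4_post_sgl_list() below):

	rc = lpfc_sli4_post_sgl(phba, sglq_entry->phys, 0,
				sglq_entry->sli4_xritag);
	if (rc)
		return rc;	/* the XRI is not backed by an SGL */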
+/**
+ * lpfc_sli4_remove_all_sgl_pages - Remove all sgl pages registered with the HBA
+ * @phba: The HBA for which this call is being executed.
+ *
+ * This routine will remove all of the sgl pages registered with the hba.
+ *
+ * Return codes:
+ * 	0 - Success
+ * 	-ENXIO, -ENOMEM - Failure
+ **/
+int
+lpfc_sli4_remove_all_sgl_pages(struct lpfc_hba *phba)
+{
+	LPFC_MBOXQ_t *mbox;
+	int rc;
+	uint32_t shdr_status, shdr_add_status;
+	union lpfc_sli4_cfg_shdr *shdr;
+
+	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mbox)
+		return -ENOMEM;
+
+	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
+			LPFC_MBOX_OPCODE_FCOE_REMOVE_SGL_PAGES, 0,
+			LPFC_SLI4_MBX_EMBED);
+	if (!phba->sli4_hba.intr_enable)
+		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+	else
+		rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
+	/* The IOCTL status is embedded in the mailbox subheader. */
+	shdr = (union lpfc_sli4_cfg_shdr *)
+		&mbox->u.mqe.un.sli4_config.header.cfg_shdr;
+	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+	if (rc != MBX_TIMEOUT)
+		mempool_free(mbox, phba->mbox_mem_pool);
+	if (shdr_status || shdr_add_status || rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2512 REMOVE_ALL_SGL_PAGES mailbox failed with "
+				"status x%x add_status x%x, mbx status x%x\n",
+				shdr_status, shdr_add_status, rc);
+		rc = -ENXIO;
+	}
+	return rc;
+}
+
+/**
+ * lpfc_sli4_next_xritag - Get an xritag for the io
+ * @phba: Pointer to HBA context object.
+ *
+ * This function allocates an xritag for the iocb. If no unused xritag is
+ * available it returns NO_XRI (0xffff), which is not a valid xritag.
+ * The caller is not required to hold any lock.
+ **/
+uint16_t
+lpfc_sli4_next_xritag(struct lpfc_hba *phba)
+{
+	uint16_t xritag;
+
+	spin_lock_irq(&phba->hbalock);
+	xritag = phba->sli4_hba.next_xri;
+	if ((xritag != (uint16_t) -1) && xritag <
+		(phba->sli4_hba.max_cfg_param.max_xri
+			+ phba->sli4_hba.max_cfg_param.xri_base)) {
+		phba->sli4_hba.next_xri++;
+		phba->sli4_hba.max_cfg_param.xri_used++;
+		spin_unlock_irq(&phba->hbalock);
+		return xritag;
+	}
+	spin_unlock_irq(&phba->hbalock);
+
+	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+			"2004 Failed to allocate XRI.last XRITAG is %d"
+			" Max XRI is %d, Used XRI is %d\n",
+			phba->sli4_hba.next_xri,
+			phba->sli4_hba.max_cfg_param.max_xri,
+			phba->sli4_hba.max_cfg_param.xri_used);
+	return NO_XRI;
+}
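
Callers check the NO_XRI sentinel rather than zero; a brief sketch:

	uint16_t xritag;

	xritag = lpfc_sli4_next_xritag(phba);
	if (xritag == NO_XRI)
		return -ENOMEM;	/* exchange resources exhausted */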
+
+/**
+ * lpfc_sli4_post_sgl_list - post a block of ELS sgl pages to the firmware
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to post a block of driver's sgl pages to the
+ * HBA using non-embedded mailbox command. No Lock is held. This routine
+ * is only called when the driver is loading and after all IO has been
+ * stopped.
+ **/
+int
+lpfc_sli4_post_sgl_list(struct lpfc_hba *phba)
+{
+	struct lpfc_sglq *sglq_entry;
+	struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
+	struct sgl_page_pairs *sgl_pg_pairs;
+	void *viraddr;
+	LPFC_MBOXQ_t *mbox;
+	uint32_t reqlen, alloclen, pg_pairs;
+	uint32_t mbox_tmo;
+	uint16_t xritag_start = 0;
+	int els_xri_cnt, rc = 0;
+	uint32_t shdr_status, shdr_add_status;
+	union lpfc_sli4_cfg_shdr *shdr;
+
+	/* The number of sgls to be posted */
+	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
+
+	reqlen = els_xri_cnt * sizeof(struct sgl_page_pairs) +
+		 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
+	if (reqlen > PAGE_SIZE) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+				"2559 Block sgl registration required DMA "
+				"size (%d) great than a page\n", reqlen);
+		return -ENOMEM;
+	}
+	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mbox) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2560 Failed to allocate mbox cmd memory\n");
+		return -ENOMEM;
+	}
+
+	/* Allocate DMA memory and set up the non-embedded mailbox command */
+	alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
+			 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
+			 LPFC_SLI4_MBX_NEMBED);
+
+	if (alloclen < reqlen) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0285 Allocated DMA memory size (%d) is "
+				"less than the requested DMA memory "
+				"size (%d)\n", alloclen, reqlen);
+		lpfc_sli4_mbox_cmd_free(phba, mbox);
+		return -ENOMEM;
+	}
+
+	/* Get the first SGE entry from the non-embedded DMA memory */
+	if (unlikely(!mbox->sge_array)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+				"2525 Failed to get the non-embedded SGE "
+				"virtual address\n");
+		lpfc_sli4_mbox_cmd_free(phba, mbox);
+		return -ENOMEM;
+	}
+	viraddr = mbox->sge_array->addr[0];
+
+	/* Set up the SGL pages in the non-embedded DMA pages */
+	sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
+	sgl_pg_pairs = &sgl->sgl_pg_pairs;
+
+	for (pg_pairs = 0; pg_pairs < els_xri_cnt; pg_pairs++) {
+		sglq_entry = phba->sli4_hba.lpfc_els_sgl_array[pg_pairs];
+		/* Set up the sge entry */
+		sgl_pg_pairs->sgl_pg0_addr_lo =
+				cpu_to_le32(putPaddrLow(sglq_entry->phys));
+		sgl_pg_pairs->sgl_pg0_addr_hi =
+				cpu_to_le32(putPaddrHigh(sglq_entry->phys));
+		sgl_pg_pairs->sgl_pg1_addr_lo =
+				cpu_to_le32(putPaddrLow(0));
+		sgl_pg_pairs->sgl_pg1_addr_hi =
+				cpu_to_le32(putPaddrHigh(0));
+		/* Keep the first xritag on the list */
+		if (pg_pairs == 0)
+			xritag_start = sglq_entry->sli4_xritag;
+		sgl_pg_pairs++;
+	}
+	bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
+	pg_pairs = (pg_pairs > 0) ? (pg_pairs - 1) : pg_pairs;
+	bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
+	/* Perform endian conversion if necessary */
+	sgl->word0 = cpu_to_le32(sgl->word0);
+
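+	/* Issue by polling when interrupts are not enabled, otherwise wait
+	 * for the mailbox completion.
+	 */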
+	if (!phba->sli4_hba.intr_enable)
+		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+	else {
+		mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
+		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
+	}
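+	/* Completion status is reported in both the mailbox return code and
+	 * the config sub-header status fields.
+	 */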
+	shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
+	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+	if (rc != MBX_TIMEOUT)
+		lpfc_sli4_mbox_cmd_free(phba, mbox);
+	if (shdr_status || shdr_add_status || rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"2513 POST_SGL_BLOCK mailbox command failed "
+				"status x%x add_status x%x mbx status x%x\n",
+				shdr_status, shdr_add_status, rc);
+		rc = -ENXIO;
+	}
+	return rc;
+}
+
+/**
+ * lpfc_sli4_post_scsi_sgl_block - post a block of scsi sgls to the firmware
+ * @phba: pointer to lpfc hba data structure.
+ * @sblist: pointer to scsi buffer list.
+ * @cnt: number of scsi buffers on the list.
+ *
+ * This routine is invoked to post a block of @cnt scsi sgl pages from a
+ * SCSI buffer list @sblist to the HBA using a non-embedded mailbox command.
+ * No lock is held.
+ *
+ **/
+int
+lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba, struct list_head *sblist,
+			      int cnt)
+{
+	struct lpfc_scsi_buf *psb;
+	struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
+	struct sgl_page_pairs *sgl_pg_pairs;
+	void *viraddr;
+	LPFC_MBOXQ_t *mbox;
+	uint32_t reqlen, alloclen, pg_pairs;
+	uint32_t mbox_tmo;
+	uint16_t xritag_start = 0;
+	int rc = 0;
+	uint32_t shdr_status, shdr_add_status;
+	dma_addr_t pdma_phys_bpl1;
+	union lpfc_sli4_cfg_shdr *shdr;
+
+	/* Calculate the requested length of the dma memory */
+	reqlen = cnt * sizeof(struct sgl_page_pairs) +
+		 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
+	if (reqlen > PAGE_SIZE) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+				"0217 Block sgl registration required DMA "
+				"size (%d) greater than a page\n", reqlen);
+		return -ENOMEM;
+	}
+	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mbox) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0283 Failed to allocate mbox cmd memory\n");
+		return -ENOMEM;
+	}
+
+	/* Allocate DMA memory and set up the non-embedded mailbox command */
+	alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
+				LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
+				LPFC_SLI4_MBX_NEMBED);
+
+	if (alloclen < reqlen) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2561 Allocated DMA memory size (%d) is "
+				"less than the requested DMA memory "
+				"size (%d)\n", alloclen, reqlen);
+		lpfc_sli4_mbox_cmd_free(phba, mbox);
+		return -ENOMEM;
+	}
+
+	/* Get the first SGE entry from the non-embedded DMA memory */
+	if (unlikely(!mbox->sge_array)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+				"2565 Failed to get the non-embedded SGE "
+				"virtual address\n");
+		lpfc_sli4_mbox_cmd_free(phba, mbox);
+		return -ENOMEM;
+	}
+	viraddr = mbox->sge_array->addr[0];
+
+	/* Set up the SGL pages in the non-embedded DMA pages */
+	sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
+	sgl_pg_pairs = &sgl->sgl_pg_pairs;
+
+	pg_pairs = 0;
+	list_for_each_entry(psb, sblist, list) {
+		/* Set up the sge entry */
+		sgl_pg_pairs->sgl_pg0_addr_lo =
+			cpu_to_le32(putPaddrLow(psb->dma_phys_bpl));
+		sgl_pg_pairs->sgl_pg0_addr_hi =
+			cpu_to_le32(putPaddrHigh(psb->dma_phys_bpl));
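+		/* If the sgl spans two pages, point the second page-pair
+		 * address at the second half of the buffer.
+		 */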
+		if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
+			pdma_phys_bpl1 = psb->dma_phys_bpl + SGL_PAGE_SIZE;
+		else
+			pdma_phys_bpl1 = 0;
+		sgl_pg_pairs->sgl_pg1_addr_lo =
+			cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
+		sgl_pg_pairs->sgl_pg1_addr_hi =
+			cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
+		/* Keep the first xritag on the list */
+		if (pg_pairs == 0)
+			xritag_start = psb->cur_iocbq.sli4_xritag;
+		sgl_pg_pairs++;
+		pg_pairs++;
+	}
+	bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
+	bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
+	/* Perform endian conversion if necessary */
+	sgl->word0 = cpu_to_le32(sgl->word0);
+
+	if (!phba->sli4_hba.intr_enable)
+		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+	else {
+		mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
+		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
+	}
+	shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
+	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+	if (rc != MBX_TIMEOUT)
+		lpfc_sli4_mbox_cmd_free(phba, mbox);
+	if (shdr_status || shdr_add_status || rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"2564 POST_SGL_BLOCK mailbox command failed "
+				"status x%x add_status x%x mbx status x%x\n",
+				shdr_status, shdr_add_status, rc);
+		rc = -ENXIO;
+	}
+	return rc;
+}
+
+/**
+ * lpfc_fc_frame_check - Check that this frame is a valid frame to handle
+ * @phba: pointer to lpfc_hba struct that the frame was received on
+ * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
+ *
+ * This function checks the fields in the @fc_hdr to see if the FC frame is a
+ * valid type of frame that the LPFC driver will handle. This function
+ * returns zero if the frame is a valid frame or a non-zero value when the
+ * frame does not pass the check.
+ **/
+static int
+lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
+{
+	char *rctl_names[] = FC_RCTL_NAMES_INIT;
+	char *type_names[] = FC_TYPE_NAMES_INIT;
+	struct fc_vft_header *fc_vft_hdr;
+
+	switch (fc_hdr->fh_r_ctl) {
+	case FC_RCTL_DD_UNCAT:		/* uncategorized information */
+	case FC_RCTL_DD_SOL_DATA:	/* solicited data */
+	case FC_RCTL_DD_UNSOL_CTL:	/* unsolicited control */
+	case FC_RCTL_DD_SOL_CTL:	/* solicited control or reply */
+	case FC_RCTL_DD_UNSOL_DATA:	/* unsolicited data */
+	case FC_RCTL_DD_DATA_DESC:	/* data descriptor */
+	case FC_RCTL_DD_UNSOL_CMD:	/* unsolicited command */
+	case FC_RCTL_DD_CMD_STATUS:	/* command status */
+	case FC_RCTL_ELS_REQ:	/* extended link services request */
+	case FC_RCTL_ELS_REP:	/* extended link services reply */
+	case FC_RCTL_ELS4_REQ:	/* FC-4 ELS request */
+	case FC_RCTL_ELS4_REP:	/* FC-4 ELS reply */
+	case FC_RCTL_BA_NOP:  	/* basic link service NOP */
+	case FC_RCTL_BA_ABTS: 	/* basic link service abort */
+	case FC_RCTL_BA_RMC: 	/* remove connection */
+	case FC_RCTL_BA_ACC:	/* basic accept */
+	case FC_RCTL_BA_RJT:	/* basic reject */
+	case FC_RCTL_BA_PRMT:	/* dedicated connection preempted */
+	case FC_RCTL_ACK_1:	/* acknowledge_1 */
+	case FC_RCTL_ACK_0:	/* acknowledge_0 */
+	case FC_RCTL_P_RJT:	/* port reject */
+	case FC_RCTL_F_RJT:	/* fabric reject */
+	case FC_RCTL_P_BSY:	/* port busy */
+	case FC_RCTL_F_BSY:	/* fabric busy to data frame */
+	case FC_RCTL_F_BSYL:	/* fabric busy to link control frame */
+	case FC_RCTL_LCR:	/* link credit reset */
+	case FC_RCTL_END:	/* end */
+		break;
+	case FC_RCTL_VFTH:	/* Virtual Fabric tagging Header */
+		fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
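+		/* Step past the VFT header and re-validate the inner frame */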
+		fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1];
+		return lpfc_fc_frame_check(phba, fc_hdr);
+	default:
+		goto drop;
+	}
+	switch (fc_hdr->fh_type) {
+	case FC_TYPE_BLS:
+	case FC_TYPE_ELS:
+	case FC_TYPE_FCP:
+	case FC_TYPE_CT:
+		break;
+	case FC_TYPE_IP:
+	case FC_TYPE_ILS:
+	default:
+		goto drop;
+	}
+	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+			"2538 Received frame rctl:%s type:%s\n",
+			rctl_names[fc_hdr->fh_r_ctl],
+			type_names[fc_hdr->fh_type]);
+	return 0;
+drop:
+	lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
+			"2539 Dropped frame rctl:%s type:%s\n",
+			rctl_names[fc_hdr->fh_r_ctl],
+			type_names[fc_hdr->fh_type]);
+	return 1;
+}
+
+/**
+ * lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame
+ * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
+ *
+ * This function processes the FC header to retrieve the VFI from the VF
+ * header, if one exists. It returns the VFI if a Virtual Fabric Tagging
+ * Header exists, or 0 if it does not.
+ **/
+static uint32_t
+lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr)
+{
+	struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
+
+	if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH)
+		return 0;
+	return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr);
+}
+
+/**
+ * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to
+ * @phba: Pointer to the HBA structure to search for the vport on
+ * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
+ * @fcfi: The FC Fabric ID that the frame came from
+ *
+ * This function searches the @phba for a vport that matches the content of the
+ * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the
+ * VFI, if the Virtual Fabric Tagging Header exists, and the DID. This function
+ * returns the matching vport pointer or NULL if unable to match frame to a
+ * vport.
+ **/
+static struct lpfc_vport *
+lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr,
+		       uint16_t fcfi)
+{
+	struct lpfc_vport **vports;
+	struct lpfc_vport *vport = NULL;
+	int i;
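+	/* Assemble the 24-bit D_ID from the three header bytes */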
+	uint32_t did = (fc_hdr->fh_d_id[0] << 16 |
+			fc_hdr->fh_d_id[1] << 8 |
+			fc_hdr->fh_d_id[2]);
+
+	vports = lpfc_create_vport_work_array(phba);
+	if (vports != NULL)
+		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
+			if (phba->fcf.fcfi == fcfi &&
+			    vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) &&
+			    vports[i]->fc_myDID == did) {
+				vport = vports[i];
+				break;
+			}
+		}
+	lpfc_destroy_vport_work_array(phba, vports);
+	return vport;
+}
+
+/**
+ * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences
+ * @vport: pointer to the vport on which the frame was received
+ * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame
+ *
+ * This function searches through the existing incomplete sequences that have
+ * been sent to this @vport. If the frame matches one of the incomplete
+ * sequences then the dbuf in the @dmabuf is added to the list of frames that
+ * make up that sequence. If no sequence is found that matches this frame then
+ * the function will add the hbuf in the @dmabuf to the @vport's rcv_buffer_list.
+ * This function returns a pointer to the first dmabuf in the sequence list that
+ * the frame was linked to.
+ **/
+static struct hbq_dmabuf *
+lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
+{
+	struct fc_frame_header *new_hdr;
+	struct fc_frame_header *temp_hdr;
+	struct lpfc_dmabuf *d_buf;
+	struct lpfc_dmabuf *h_buf;
+	struct hbq_dmabuf *seq_dmabuf = NULL;
+	struct hbq_dmabuf *temp_dmabuf = NULL;
+
+	new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
+	/* Use the hdr_buf to find the sequence that this frame belongs to */
+	list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
+		temp_hdr = (struct fc_frame_header *)h_buf->virt;
+		if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
+		    (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
+		    (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
+			continue;
+		/* found a pending sequence that matches this frame */
+		seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
+		break;
+	}
+	if (!seq_dmabuf) {
+		/*
+		 * This indicates the first frame received for this sequence.
+		 * Queue the buffer on the vport's rcv_buffer_list.
+		 */
+		list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
+		return dmabuf;
+	}
+	temp_hdr = seq_dmabuf->hbuf.virt;
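+	/* If this frame sorts before the current head, it becomes the head */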
+	if (new_hdr->fh_seq_cnt < temp_hdr->fh_seq_cnt) {
+		list_add(&seq_dmabuf->dbuf.list, &dmabuf->dbuf.list);
+		return dmabuf;
+	}
+	/* find the correct place in the sequence to insert this frame */
+	list_for_each_entry_reverse(d_buf, &seq_dmabuf->dbuf.list, list) {
+		temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
+		temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt;
+		/*
+		 * If the frame's sequence count is greater than the frame on
+		 * the list then insert the frame right after this frame
+		 */
+		if (new_hdr->fh_seq_cnt > temp_hdr->fh_seq_cnt) {
+			list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
+			return seq_dmabuf;
+		}
+	}
+	return NULL;
+}
+
+/**
+ * lpfc_seq_complete - Indicates if a sequence is complete
+ * @dmabuf: pointer to a dmabuf that describes the FC sequence
+ *
+ * This function checks the sequence, starting with the frame described by
+ * @dmabuf, to see if all the frames associated with this sequence are present.
+ * The frames associated with this sequence are linked to the @dmabuf using the
+ * dbuf list. This function looks for three things: 1) that the first frame
+ * has a sequence count of zero; 2) that a frame has the last-frame-of-sequence
+ * bit set; and 3) that there are no holes in the sequence count. The function
+ * will return 1 when the sequence is complete, otherwise it will return 0.
+ **/
+static int
+lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
+{
+	struct fc_frame_header *hdr;
+	struct lpfc_dmabuf *d_buf;
+	struct hbq_dmabuf *seq_dmabuf;
+	uint32_t fctl;
+	int seq_count = 0;
+
+	hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
+	/* make sure first frame of sequence has a sequence count of zero */
+	if (hdr->fh_seq_cnt != seq_count)
+		return 0;
+	fctl = (hdr->fh_f_ctl[0] << 16 |
+		hdr->fh_f_ctl[1] << 8 |
+		hdr->fh_f_ctl[2]);
+	/* If last frame of sequence we can return success. */
+	if (fctl & FC_FC_END_SEQ)
+		return 1;
+	list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) {
+		seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
+		hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
+		/* If there is a hole in the sequence count then fail. */
+		if (++seq_count != hdr->fh_seq_cnt)
+			return 0;
+		fctl = (hdr->fh_f_ctl[0] << 16 |
+			hdr->fh_f_ctl[1] << 8 |
+			hdr->fh_f_ctl[2]);
+		/* If last frame of sequence we can return success. */
+		if (fctl & FC_FC_END_SEQ)
+			return 1;
+	}
+	return 0;
+}
+
+/**
+ * lpfc_prep_seq - Prep sequence for ULP processing
+ * @vport: Pointer to the vport on which this sequence was received
+ * @seq_dmabuf: pointer to a dmabuf that describes the FC sequence
+ *
+ * This function takes a sequence, described by a list of frames, and creates
+ * a list of iocbq structures to describe the sequence. This iocbq list will be
+ * passed to the generic unsolicited sequence handler. This routine
+ * returns a pointer to the first iocbq in the list. If the function is unable
+ * to allocate an iocbq then it throws out the received frames that could not
+ * be described and returns a pointer to the first iocbq. If unable to
+ * allocate any iocbqs (including the first) this function will return NULL.
+ **/
+static struct lpfc_iocbq *
+lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
+{
+	struct lpfc_dmabuf *d_buf, *n_buf;
+	struct lpfc_iocbq *first_iocbq, *iocbq;
+	struct fc_frame_header *fc_hdr;
+	uint32_t sid;
+
+	fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
+	/* remove from receive buffer list */
+	list_del_init(&seq_dmabuf->hbuf.list);
+	/* get the Remote Port's SID */
+	sid = (fc_hdr->fh_s_id[0] << 16 |
+	       fc_hdr->fh_s_id[1] << 8 |
+	       fc_hdr->fh_s_id[2]);
+	/* Get an iocbq struct to fill in. */
+	first_iocbq = lpfc_sli_get_iocbq(vport->phba);
+	if (first_iocbq) {
+		/* Initialize the first IOCB. */
+		first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
+		first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
+		first_iocbq->iocb.ulpContext = be16_to_cpu(fc_hdr->fh_ox_id);
+		first_iocbq->iocb.unsli3.rcvsli3.vpi =
+					vport->vpi + vport->phba->vpi_base;
+		/* put the first buffer into the first IOCBq */
+		first_iocbq->context2 = &seq_dmabuf->dbuf;
+		first_iocbq->context3 = NULL;
+		first_iocbq->iocb.ulpBdeCount = 1;
+		first_iocbq->iocb.un.cont64[0].tus.f.bdeSize =
+							LPFC_DATA_BUF_SIZE;
+		first_iocbq->iocb.un.rcvels.remoteID = sid;
+	}
+	iocbq = first_iocbq;
+	/*
+	 * Each IOCBq can have two Buffers assigned, so go through the list
+	 * of buffers for this sequence and save two buffers in each IOCBq
+	 */
+	list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) {
+		if (!iocbq) {
+			lpfc_in_buf_free(vport->phba, d_buf);
+			continue;
+		}
+		if (!iocbq->context3) {
+			iocbq->context3 = d_buf;
+			iocbq->iocb.ulpBdeCount++;
+			iocbq->iocb.unsli3.rcvsli3.bde2.tus.f.bdeSize =
+							LPFC_DATA_BUF_SIZE;
+		} else {
+			iocbq = lpfc_sli_get_iocbq(vport->phba);
+			if (!iocbq) {
+				if (first_iocbq) {
+					first_iocbq->iocb.ulpStatus =
+							IOSTAT_FCP_RSP_ERROR;
+					first_iocbq->iocb.un.ulpWord[4] =
+							IOERR_NO_RESOURCES;
+				}
+				lpfc_in_buf_free(vport->phba, d_buf);
+				continue;
+			}
+			iocbq->context2 = d_buf;
+			iocbq->context3 = NULL;
+			iocbq->iocb.ulpBdeCount = 1;
+			iocbq->iocb.un.cont64[0].tus.f.bdeSize =
+							LPFC_DATA_BUF_SIZE;
+			iocbq->iocb.un.rcvels.remoteID = sid;
+			list_add_tail(&iocbq->list, &first_iocbq->list);
+		}
+	}
+	return first_iocbq;
+}
+
+/**
+ * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called with no lock held. It processes all the received
+ * buffers and gives them to the upper layers when a received buffer
+ * indicates that it is the final frame in the sequence. The interrupt
+ * service routine processes received buffers in interrupt context and adds
+ * received dma buffers to the rb_pend_list queue and signals the worker thread.
+ * The worker thread calls lpfc_sli4_handle_received_buffer, which will call the
+ * appropriate receive function when the final frame in a sequence is received.
+ **/
+int
+lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba)
+{
+	LIST_HEAD(cmplq);
+	struct hbq_dmabuf *dmabuf, *seq_dmabuf;
+	struct fc_frame_header *fc_hdr;
+	struct lpfc_vport *vport;
+	uint32_t fcfi;
+	struct lpfc_iocbq *iocbq;
+
+	/* Clear hba flag and get all received buffers into the cmplq */
+	spin_lock_irq(&phba->hbalock);
+	phba->hba_flag &= ~HBA_RECEIVE_BUFFER;
+	list_splice_init(&phba->rb_pend_list, &cmplq);
+	spin_unlock_irq(&phba->hbalock);
+
+	/* Process each received buffer */
+	while ((dmabuf = lpfc_sli_hbqbuf_get(&cmplq)) != NULL) {
+		fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
+		/* check to see if this is a valid type of frame */
+		if (lpfc_fc_frame_check(phba, fc_hdr)) {
+			lpfc_in_buf_free(phba, &dmabuf->dbuf);
+			continue;
+		}
+		fcfi = bf_get(lpfc_rcqe_fcf_id, &dmabuf->rcqe);
+		vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi);
+		if (!vport) {
+			/* throw out the frame */
+			lpfc_in_buf_free(phba, &dmabuf->dbuf);
+			continue;
+		}
+		/* Link this frame */
+		seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
+		if (!seq_dmabuf) {
+			/* unable to add frame to vport - throw it out */
+			lpfc_in_buf_free(phba, &dmabuf->dbuf);
+			continue;
+		}
+		/* If not last frame in sequence continue processing frames. */
+		if (!lpfc_seq_complete(seq_dmabuf)) {
+			/*
+			 * When saving off frames post a new one and mark this
+			 * frame to be freed when it is finished.
+			 */
+			lpfc_sli_hbqbuf_fill_hbqs(phba, LPFC_ELS_HBQ, 1);
+			dmabuf->tag = -1;
+			continue;
+		}
+		fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
+		iocbq = lpfc_prep_seq(vport, seq_dmabuf);
+		if (!lpfc_complete_unsol_iocb(phba,
+					      &phba->sli.ring[LPFC_ELS_RING],
+					      iocbq, fc_hdr->fh_r_ctl,
+					      fc_hdr->fh_type))
+			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+					"2540 Ring %d handler: unexpected Rctl "
+					"x%x Type x%x received\n",
+					LPFC_ELS_RING,
+					fc_hdr->fh_r_ctl, fc_hdr->fh_type);
+	}
+	return 0;
+}
-- 
cgit v1.2.3-70-g09d2


From 04c684968487eb4f98728363a97b8da48f3bb958 Mon Sep 17 00:00:00 2001
From: James Smart <James.Smart@Emulex.Com>
Date: Fri, 22 May 2009 14:52:52 -0400
Subject: [SCSI] lpfc 8.3.2 : Addition of SLI4 Interface - Mailbox handling

The mailbox commands themselves are the same as, or very similar to,
their SLI3 counterparts. This patch genericizes mailbox command
handling and adds support for the new SLI4 mailbox queue.

Signed-off-by: James Smart <james.smart@emulex.com>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
---
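
As a sketch of the access-pattern change introduced here (illustrative only;
every identifier below appears elsewhere in this patch), SLI3 code paths now
select the legacy MAILBOX_t member of the mailbox union while SLI4 paths
build against the MQE member:

	LPFC_MBOXQ_t *pmboxq;

	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmboxq)
		return -ENOMEM;
	memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
	/* SLI3-style setup goes through the u.mb union member ... */
	pmboxq->u.mb.mbxCommand = MBX_HEARTBEAT;
	pmboxq->u.mb.mbxOwner = OWN_HOST;
	/* ... while SLI4 config commands are built in the u.mqe member */
	bf_set(lpfc_mqe_command, &pmboxq->u.mqe, MBX_SLI4_CONFIG);
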
 drivers/scsi/lpfc/lpfc_attr.c      |  47 +--
 drivers/scsi/lpfc/lpfc_crtn.h      |   1 +
 drivers/scsi/lpfc/lpfc_els.c       |   6 +-
 drivers/scsi/lpfc/lpfc_hbadisc.c   |  22 +-
 drivers/scsi/lpfc/lpfc_init.c      |  22 +-
 drivers/scsi/lpfc/lpfc_mbox.c      | 632 ++++++++++++++++++++++++++++++++++---
 drivers/scsi/lpfc/lpfc_nportdisc.c |  13 +-
 drivers/scsi/lpfc/lpfc_sli.c       | 505 +++++++++++++++++++++++++----
 8 files changed, 1101 insertions(+), 147 deletions(-)

diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 82016fc672b..463104d9686 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -507,12 +507,14 @@ lpfc_issue_lip(struct Scsi_Host *shost)
 		return -ENOMEM;
 
 	memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t));
-	pmboxq->mb.mbxCommand = MBX_DOWN_LINK;
-	pmboxq->mb.mbxOwner = OWN_HOST;
+	pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK;
+	pmboxq->u.mb.mbxOwner = OWN_HOST;
 
 	mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO * 2);
 
-	if ((mbxstatus == MBX_SUCCESS) && (pmboxq->mb.mbxStatus == 0)) {
+	if ((mbxstatus == MBX_SUCCESS) &&
+	    (pmboxq->u.mb.mbxStatus == 0 ||
+	     pmboxq->u.mb.mbxStatus == MBXERR_LINK_DOWN)) {
 		memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t));
 		lpfc_init_link(phba, pmboxq, phba->cfg_topology,
 			       phba->cfg_link_speed);
@@ -791,7 +793,8 @@ lpfc_get_hba_info(struct lpfc_hba *phba,
 		  uint32_t *mrpi, uint32_t *arpi,
 		  uint32_t *mvpi, uint32_t *avpi)
 {
-	struct lpfc_sli   *psli = &phba->sli;
+	struct lpfc_sli *psli = &phba->sli;
+	struct lpfc_mbx_read_config *rd_config;
 	LPFC_MBOXQ_t *pmboxq;
 	MAILBOX_t *pmb;
 	int rc = 0;
@@ -813,7 +816,7 @@ lpfc_get_hba_info(struct lpfc_hba *phba,
 		return 0;
 	memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t));
 
-	pmb = &pmboxq->mb;
+	pmb = &pmboxq->u.mb;
 	pmb->mbxCommand = MBX_READ_CONFIG;
 	pmb->mbxOwner = OWN_HOST;
 	pmboxq->context1 = NULL;
@@ -3247,7 +3250,7 @@ sysfs_mbox_write(struct kobject *kobj, struct bin_attribute *bin_attr,
 		}
 	}
 
-	memcpy((uint8_t *) & phba->sysfs_mbox.mbox->mb + off,
+	memcpy((uint8_t *) &phba->sysfs_mbox.mbox->u.mb + off,
 	       buf, count);
 
 	phba->sysfs_mbox.offset = off + count;
@@ -3289,6 +3292,7 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
 	struct lpfc_hba   *phba = vport->phba;
 	int rc;
+	MAILBOX_t *pmb;
 
 	if (off > MAILBOX_CMD_SIZE)
 		return -ERANGE;
@@ -3313,8 +3317,8 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
 	if (off == 0 &&
 	    phba->sysfs_mbox.state  == SMBOX_WRITING &&
 	    phba->sysfs_mbox.offset >= 2 * sizeof(uint32_t)) {
-
-		switch (phba->sysfs_mbox.mbox->mb.mbxCommand) {
+		pmb = &phba->sysfs_mbox.mbox->u.mb;
+		switch (pmb->mbxCommand) {
 			/* Offline only */
 		case MBX_INIT_LINK:
 		case MBX_DOWN_LINK:
@@ -3331,7 +3335,7 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
 			if (!(vport->fc_flag & FC_OFFLINE_MODE)) {
 				printk(KERN_WARNING "mbox_read:Command 0x%x "
 				       "is illegal in on-line state\n",
-				       phba->sysfs_mbox.mbox->mb.mbxCommand);
+				       pmb->mbxCommand);
 				sysfs_mbox_idle(phba);
 				spin_unlock_irq(&phba->hbalock);
 				return -EPERM;
@@ -3367,13 +3371,13 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
 		case MBX_CONFIG_PORT:
 		case MBX_RUN_BIU_DIAG:
 			printk(KERN_WARNING "mbox_read: Illegal Command 0x%x\n",
-			       phba->sysfs_mbox.mbox->mb.mbxCommand);
+			       pmb->mbxCommand);
 			sysfs_mbox_idle(phba);
 			spin_unlock_irq(&phba->hbalock);
 			return -EPERM;
 		default:
 			printk(KERN_WARNING "mbox_read: Unknown Command 0x%x\n",
-			       phba->sysfs_mbox.mbox->mb.mbxCommand);
+			       pmb->mbxCommand);
 			sysfs_mbox_idle(phba);
 			spin_unlock_irq(&phba->hbalock);
 			return -EPERM;
@@ -3383,14 +3387,14 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
 		 * or RESTART mailbox commands until the HBA is restarted.
 		 */
 		if (phba->pport->stopped &&
-		    phba->sysfs_mbox.mbox->mb.mbxCommand != MBX_DUMP_MEMORY &&
-		    phba->sysfs_mbox.mbox->mb.mbxCommand != MBX_RESTART &&
-		    phba->sysfs_mbox.mbox->mb.mbxCommand != MBX_WRITE_VPARMS &&
-		    phba->sysfs_mbox.mbox->mb.mbxCommand != MBX_WRITE_WWN)
+		    pmb->mbxCommand != MBX_DUMP_MEMORY &&
+		    pmb->mbxCommand != MBX_RESTART &&
+		    pmb->mbxCommand != MBX_WRITE_VPARMS &&
+		    pmb->mbxCommand != MBX_WRITE_WWN)
 			lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
 					"1259 mbox: Issued mailbox cmd "
 					"0x%x while in stopped state.\n",
-					phba->sysfs_mbox.mbox->mb.mbxCommand);
+					pmb->mbxCommand);
 
 		phba->sysfs_mbox.mbox->vport = vport;
 
@@ -3416,8 +3420,7 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
 			spin_unlock_irq(&phba->hbalock);
 			rc = lpfc_sli_issue_mbox_wait (phba,
 						       phba->sysfs_mbox.mbox,
-				lpfc_mbox_tmo_val(phba,
-				    phba->sysfs_mbox.mbox->mb.mbxCommand) * HZ);
+				lpfc_mbox_tmo_val(phba, pmb->mbxCommand) * HZ);
 			spin_lock_irq(&phba->hbalock);
 		}
 
@@ -3439,7 +3442,7 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
 		return -EAGAIN;
 	}
 
-	memcpy(buf, (uint8_t *) & phba->sysfs_mbox.mbox->mb + off, count);
+	memcpy(buf, (uint8_t *) &phba->sysfs_mbox.mbox->u.mb + off, count);
 
 	phba->sysfs_mbox.offset = off + count;
 
@@ -3711,14 +3714,14 @@ lpfc_get_stats(struct Scsi_Host *shost)
 		return NULL;
 	memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t));
 
-	pmb = &pmboxq->mb;
+	pmb = &pmboxq->u.mb;
 	pmb->mbxCommand = MBX_READ_STATUS;
 	pmb->mbxOwner = OWN_HOST;
 	pmboxq->context1 = NULL;
 	pmboxq->vport = vport;
 
 	if ((vport->fc_flag & FC_OFFLINE_MODE) ||
-		(!(psli->sli_flag & LPFC_SLI2_ACTIVE)))
+		(!(psli->sli_flag & LPFC_SLI_ACTIVE)))
 		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
 	else
 		rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
@@ -3817,7 +3820,7 @@ lpfc_reset_stats(struct Scsi_Host *shost)
 		return;
 	memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
 
-	pmb = &pmboxq->mb;
+	pmb = &pmboxq->u.mb;
 	pmb->mbxCommand = MBX_READ_STATUS;
 	pmb->mbxOwner = OWN_HOST;
 	pmb->un.varWords[0] = 0x1; /* reset request */
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 3802e455734..e0f1cd4b3d3 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -209,6 +209,7 @@ void lpfc_sli_release_iocbq(struct lpfc_hba *, struct lpfc_iocbq *);
 uint16_t lpfc_sli_next_iotag(struct lpfc_hba *, struct lpfc_iocbq *);
 void lpfc_sli_cancel_iocbs(struct lpfc_hba *, struct list_head *, uint32_t,
 			   uint32_t);
+void lpfc_sli_wake_mbox_wait(struct lpfc_hba *, LPFC_MBOXQ_t *);
 
 void lpfc_reset_barrier(struct lpfc_hba * phba);
 int lpfc_sli_brdready(struct lpfc_hba *, uint32_t);
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 9fe36bf6fd1..2c034a554c8 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -4277,7 +4277,7 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
 			lpfc_init_link(phba, mbox,
 				       phba->cfg_topology,
 				       phba->cfg_link_speed);
-			mbox->mb.un.varInitLnk.lipsr_AL_PA = 0;
+			mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
 			mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
 			mbox->vport = vport;
 			rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
@@ -4426,7 +4426,7 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 	uint16_t xri, status;
 	uint32_t cmdsize;
 
-	mb = &pmb->mb;
+	mb = &pmb->u.mb;
 
 	ndlp = (struct lpfc_nodelist *) pmb->context2;
 	xri = (uint16_t) ((unsigned long)(pmb->context1));
@@ -5755,7 +5755,7 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 	struct lpfc_vport *vport = pmb->vport;
 	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
 	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
-	MAILBOX_t *mb = &pmb->mb;
+	MAILBOX_t *mb = &pmb->u.mb;
 
 	spin_lock_irq(shost->host_lock);
 	vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 0fc66005d54..2270d9a7c8e 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -879,7 +879,7 @@ lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 	struct lpfc_vport *vport = pmb->vport;
 	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
 	struct lpfc_sli   *psli = &phba->sli;
-	MAILBOX_t *mb = &pmb->mb;
+	MAILBOX_t *mb = &pmb->u.mb;
 	uint32_t control;
 
 	/* Since we don't do discovery right now, turn these off here */
@@ -942,7 +942,7 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 {
 	struct lpfc_vport *vport = pmb->vport;
 
-	if (pmb->mb.mbxStatus)
+	if (pmb->u.mb.mbxStatus)
 		goto out;
 
 	mempool_free(pmb, phba->mbox_mem_pool);
@@ -970,7 +970,7 @@ out:
 	lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
 			 "0306 CONFIG_LINK mbxStatus error x%x "
 			 "HBA state x%x\n",
-			 pmb->mb.mbxStatus, vport->port_state);
+			 pmb->u.mb.mbxStatus, vport->port_state);
 	mempool_free(pmb, phba->mbox_mem_pool);
 
 	lpfc_linkdown(phba);
@@ -1202,7 +1202,7 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 	struct lpfc_vport *vport = pmb->vport;
 	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
 	READ_LA_VAR *la;
-	MAILBOX_t *mb = &pmb->mb;
+	MAILBOX_t *mb = &pmb->u.mb;
 	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
 
 	/* Unblock ELS traffic */
@@ -1217,7 +1217,7 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 		goto lpfc_mbx_cmpl_read_la_free_mbuf;
 	}
 
-	la = (READ_LA_VAR *) & pmb->mb.un.varReadLA;
+	la = (READ_LA_VAR *) &pmb->u.mb.un.varReadLA;
 
 	memcpy(&phba->alpa_map[0], mp->virt, 128);
 
@@ -1355,7 +1355,7 @@ lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 static void
 lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 {
-	MAILBOX_t *mb = &pmb->mb;
+	MAILBOX_t *mb = &pmb->u.mb;
 	struct lpfc_vport *vport = pmb->vport;
 	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
 
@@ -1408,7 +1408,7 @@ lpfc_mbx_cmpl_reg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 {
 	struct lpfc_vport *vport = pmb->vport;
 	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
-	MAILBOX_t *mb = &pmb->mb;
+	MAILBOX_t *mb = &pmb->u.mb;
 
 	switch (mb->mbxStatus) {
 	case 0x0011:
@@ -2279,7 +2279,7 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 
 	/* cleanup any ndlp on mbox q waiting for reglogin cmpl */
 	if ((mb = phba->sli.mbox_active)) {
-		if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
+		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
 		   (ndlp == (struct lpfc_nodelist *) mb->context2)) {
 			mb->context2 = NULL;
 			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
@@ -2288,7 +2288,7 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 
 	spin_lock_irq(&phba->hbalock);
 	list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
-		if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
+		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
 		    (ndlp == (struct lpfc_nodelist *) mb->context2)) {
 			mp = (struct lpfc_dmabuf *) (mb->context1);
 			if (mp) {
@@ -2970,7 +2970,7 @@ restart_disc:
 		lpfc_linkdown(phba);
 		lpfc_init_link(phba, initlinkmbox, phba->cfg_topology,
 			       phba->cfg_link_speed);
-		initlinkmbox->mb.un.varInitLnk.lipsr_AL_PA = 0;
+		initlinkmbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
 		initlinkmbox->vport = vport;
 		initlinkmbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
 		rc = lpfc_sli_issue_mbox(phba, initlinkmbox, MBX_NOWAIT);
@@ -3069,7 +3069,7 @@ restart_disc:
 void
 lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 {
-	MAILBOX_t *mb = &pmb->mb;
+	MAILBOX_t *mb = &pmb->u.mb;
 	struct lpfc_dmabuf   *mp = (struct lpfc_dmabuf *) (pmb->context1);
 	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
 	struct lpfc_vport    *vport = pmb->vport;
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index e9e4a1df898..ff821bb7716 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -108,7 +108,7 @@ lpfc_config_port_prep(struct lpfc_hba *phba)
 		return -ENOMEM;
 	}
 
-	mb = &pmb->mb;
+	mb = &pmb->u.mb;
 	phba->link_state = LPFC_INIT_MBX_CMDS;
 
 	if (lpfc_is_LC_HBA(phba->pcidev->device)) {
@@ -221,6 +221,11 @@ lpfc_config_port_prep(struct lpfc_hba *phba)
 					mb->mbxCommand, mb->mbxStatus);
 			mb->un.varDmp.word_cnt = 0;
 		}
+		/* dump mem may return a zero when finished or when we get a
+		 * mailbox error; either way we are done.
+		 */
+		if (mb->un.varDmp.word_cnt == 0)
+			break;
 		if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
 			mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
 		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
@@ -249,7 +254,7 @@ out_free_mbox:
 static void
 lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
 {
-	if (pmboxq->mb.mbxStatus == MBX_SUCCESS)
+	if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
 		phba->temp_sensor_support = 1;
 	else
 		phba->temp_sensor_support = 0;
@@ -276,7 +281,7 @@ lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
 	/* character array used for decoding dist type. */
 	char dist_char[] = "nabx";
 
-	if (pmboxq->mb.mbxStatus != MBX_SUCCESS) {
+	if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
 		mempool_free(pmboxq, phba->mbox_mem_pool);
 		return;
 	}
@@ -284,7 +289,7 @@ lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
 	prg = (struct prog_id *) &prog_id_word;
 
 	/* word 7 contain option rom version */
-	prog_id_word = pmboxq->mb.un.varWords[7];
+	prog_id_word = pmboxq->u.mb.un.varWords[7];
 
 	/* Decode the Option rom version word to a readable string */
 	if (prg->dist < 4)
@@ -341,7 +346,7 @@ lpfc_config_port_post(struct lpfc_hba *phba)
 		phba->link_state = LPFC_HBA_ERROR;
 		return -ENOMEM;
 	}
-	mb = &pmb->mb;
+	mb = &pmb->u.mb;
 
 	/* Get login parameters for NID.  */
 	lpfc_read_sparam(phba, pmb, 0);
@@ -476,17 +481,18 @@ lpfc_config_port_post(struct lpfc_hba *phba)
 			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
 					"0352 Config MSI mailbox command "
 					"failed, mbxCmd x%x, mbxStatus x%x\n",
-					pmb->mb.mbxCommand, pmb->mb.mbxStatus);
+					pmb->u.mb.mbxCommand,
+					pmb->u.mb.mbxStatus);
 			mempool_free(pmb, phba->mbox_mem_pool);
 			return -EIO;
 		}
 	}
 
+	spin_lock_irq(&phba->hbalock);
 	/* Initialize ERATT handling flag */
 	phba->hba_flag &= ~HBA_ERATT_HANDLED;
 
 	/* Enable appropriate host interrupts */
-	spin_lock_irq(&phba->hbalock);
 	status = readl(phba->HCregaddr);
 	status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
 	if (psli->num_rings > 0)
@@ -2201,7 +2207,7 @@ lpfc_offline_prep(struct lpfc_hba * phba)
 	}
 	lpfc_destroy_vport_work_array(phba, vports);
 
-	lpfc_sli_flush_mbox_queue(phba);
+	lpfc_sli_mbox_sys_shutdown(phba);
 }
 
 /**
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index 7f5899b70bd..6aeb1c668e2 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -60,7 +60,7 @@ lpfc_dump_mem(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, uint16_t offset)
 	MAILBOX_t *mb;
 	void *ctx;
 
-	mb = &pmb->mb;
+	mb = &pmb->u.mb;
 	ctx = pmb->context2;
 
 	/* Setup to dump VPD region */
@@ -92,7 +92,7 @@ lpfc_dump_wakeup_param(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 	MAILBOX_t *mb;
 	void *ctx;
 
-	mb = &pmb->mb;
+	mb = &pmb->u.mb;
 	/* Save context so that we can restore after memset */
 	ctx = pmb->context2;
 
@@ -127,7 +127,7 @@ lpfc_read_nv(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
 {
 	MAILBOX_t *mb;
 
-	mb = &pmb->mb;
+	mb = &pmb->u.mb;
 	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
 	mb->mbxCommand = MBX_READ_NV;
 	mb->mbxOwner = OWN_HOST;
@@ -153,7 +153,7 @@ lpfc_config_async(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb,
 {
 	MAILBOX_t *mb;
 
-	mb = &pmb->mb;
+	mb = &pmb->u.mb;
 	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
 	mb->mbxCommand = MBX_ASYNCEVT_ENABLE;
 	mb->un.varCfgAsyncEvent.ring = ring;
@@ -179,7 +179,7 @@ lpfc_heart_beat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
 {
 	MAILBOX_t *mb;
 
-	mb = &pmb->mb;
+	mb = &pmb->u.mb;
 	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
 	mb->mbxCommand = MBX_HEARTBEAT;
 	mb->mbxOwner = OWN_HOST;
@@ -213,7 +213,7 @@ lpfc_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, struct lpfc_dmabuf *mp)
 	struct lpfc_sli *psli;
 
 	psli = &phba->sli;
-	mb = &pmb->mb;
+	mb = &pmb->u.mb;
 	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
 
 	INIT_LIST_HEAD(&mp->list);
@@ -250,7 +250,7 @@ lpfc_clear_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
 {
 	MAILBOX_t *mb;
 
-	mb = &pmb->mb;
+	mb = &pmb->u.mb;
 	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
 
 	mb->un.varClearLA.eventTag = phba->fc_eventTag;
@@ -277,7 +277,7 @@ void
 lpfc_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
 {
 	struct lpfc_vport  *vport = phba->pport;
-	MAILBOX_t *mb = &pmb->mb;
+	MAILBOX_t *mb = &pmb->u.mb;
 	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
 
 	/* NEW_FEATURE
@@ -323,7 +323,7 @@ lpfc_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
 int
 lpfc_config_msi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 {
-	MAILBOX_t *mb = &pmb->mb;
+	MAILBOX_t *mb = &pmb->u.mb;
 	uint32_t attentionConditions[2];
 
 	/* Sanity check */
@@ -407,7 +407,7 @@ lpfc_init_link(struct lpfc_hba * phba,
 	struct lpfc_sli *psli;
 	MAILBOX_t *mb;
 
-	mb = &pmb->mb;
+	mb = &pmb->u.mb;
 	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
 
 	psli = &phba->sli;
@@ -494,7 +494,7 @@ lpfc_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, int vpi)
 	struct lpfc_sli *psli;
 
 	psli = &phba->sli;
-	mb = &pmb->mb;
+	mb = &pmb->u.mb;
 	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
 
 	mb->mbxOwner = OWN_HOST;
@@ -517,7 +517,7 @@ lpfc_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, int vpi)
 	mb->un.varRdSparm.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm);
 	mb->un.varRdSparm.un.sp64.addrHigh = putPaddrHigh(mp->phys);
 	mb->un.varRdSparm.un.sp64.addrLow = putPaddrLow(mp->phys);
-	mb->un.varRdSparm.vpi = vpi;
+	mb->un.varRdSparm.vpi = vpi + phba->vpi_base;
 
 	/* save address for completion */
 	pmb->context1 = mp;
@@ -546,10 +546,12 @@ lpfc_unreg_did(struct lpfc_hba * phba, uint16_t vpi, uint32_t did,
 {
 	MAILBOX_t *mb;
 
-	mb = &pmb->mb;
+	mb = &pmb->u.mb;
 	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
 
 	mb->un.varUnregDID.did = did;
+	if (vpi != 0xffff)
+		vpi += phba->vpi_base;
 	mb->un.varUnregDID.vpi = vpi;
 
 	mb->mbxCommand = MBX_UNREG_D_ID;
@@ -575,7 +577,7 @@ lpfc_read_config(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
 {
 	MAILBOX_t *mb;
 
-	mb = &pmb->mb;
+	mb = &pmb->u.mb;
 	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
 
 	mb->mbxCommand = MBX_READ_CONFIG;
@@ -600,7 +602,7 @@ lpfc_read_lnk_stat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
 {
 	MAILBOX_t *mb;
 
-	mb = &pmb->mb;
+	mb = &pmb->u.mb;
 	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
 
 	mb->mbxCommand = MBX_READ_LNK_STAT;
@@ -609,7 +611,7 @@ lpfc_read_lnk_stat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
 }
 
 /**
- * lpfc_reg_login - Prepare a mailbox command for registering remote login
+ * lpfc_reg_rpi - Prepare a mailbox command for registering remote login
  * @phba: pointer to lpfc hba data structure.
  * @vpi: virtual N_Port identifier.
  * @did: remote port identifier.
@@ -633,17 +635,23 @@ lpfc_read_lnk_stat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
  *    1 - DMA memory allocation failed
  **/
 int
-lpfc_reg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
+lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
 	       uint8_t *param, LPFC_MBOXQ_t *pmb, uint32_t flag)
 {
-	MAILBOX_t *mb = &pmb->mb;
+	MAILBOX_t *mb = &pmb->u.mb;
 	uint8_t *sparam;
 	struct lpfc_dmabuf *mp;
 
 	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
 
 	mb->un.varRegLogin.rpi = 0;
-	mb->un.varRegLogin.vpi = vpi;
+	if (phba->sli_rev == LPFC_SLI_REV4) {
+		mb->un.varRegLogin.rpi = lpfc_sli4_alloc_rpi(phba);
+		if (mb->un.varRegLogin.rpi == LPFC_RPI_ALLOC_ERROR)
+			return 1;
+	}
+
+	mb->un.varRegLogin.vpi = vpi + phba->vpi_base;
 	mb->un.varRegLogin.did = did;
 	mb->un.varWords[30] = flag;	/* Set flag to issue action on cmpl */
 
@@ -699,15 +707,16 @@ lpfc_unreg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t rpi,
 {
 	MAILBOX_t *mb;
 
-	mb = &pmb->mb;
+	mb = &pmb->u.mb;
 	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
 
 	mb->un.varUnregLogin.rpi = (uint16_t) rpi;
 	mb->un.varUnregLogin.rsvd1 = 0;
-	mb->un.varUnregLogin.vpi = vpi;
+	mb->un.varUnregLogin.vpi = vpi + phba->vpi_base;
 
 	mb->mbxCommand = MBX_UNREG_LOGIN;
 	mb->mbxOwner = OWN_HOST;
+
 	return;
 }
 
@@ -727,15 +736,15 @@ lpfc_unreg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t rpi,
  * This routine prepares the mailbox command for registering a virtual N_Port.
  **/
 void
-lpfc_reg_vpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t sid,
-	     LPFC_MBOXQ_t *pmb)
+lpfc_reg_vpi(struct lpfc_vport *vport, LPFC_MBOXQ_t *pmb)
 {
-	MAILBOX_t *mb = &pmb->mb;
+	MAILBOX_t *mb = &pmb->u.mb;
 
 	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
 
-	mb->un.varRegVpi.vpi = vpi;
-	mb->un.varRegVpi.sid = sid;
+	mb->un.varRegVpi.vpi = vport->vpi + vport->phba->vpi_base;
+	mb->un.varRegVpi.sid = vport->fc_myDID;
+	mb->un.varRegVpi.vfi = vport->vfi + vport->phba->vfi_base;
 
 	mb->mbxCommand = MBX_REG_VPI;
 	mb->mbxOwner = OWN_HOST;
@@ -762,10 +771,10 @@ lpfc_reg_vpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t sid,
 void
 lpfc_unreg_vpi(struct lpfc_hba *phba, uint16_t vpi, LPFC_MBOXQ_t *pmb)
 {
-	MAILBOX_t *mb = &pmb->mb;
+	MAILBOX_t *mb = &pmb->u.mb;
 	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
 
-	mb->un.varUnregVpi.vpi = vpi;
+	mb->un.varUnregVpi.vpi = vpi + phba->vpi_base;
 
 	mb->mbxCommand = MBX_UNREG_VPI;
 	mb->mbxOwner = OWN_HOST;
@@ -854,7 +863,7 @@ lpfc_config_pcb_setup(struct lpfc_hba * phba)
 void
 lpfc_read_rev(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
 {
-	MAILBOX_t *mb = &pmb->mb;
+	MAILBOX_t *mb = &pmb->u.mb;
 	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
 	mb->un.varRdRev.cv = 1;
 	mb->un.varRdRev.v3req = 1; /* Request SLI3 info */
@@ -947,7 +956,7 @@ lpfc_config_hbq(struct lpfc_hba *phba, uint32_t id,
 		uint32_t hbq_entry_index, LPFC_MBOXQ_t *pmb)
 {
 	int i;
-	MAILBOX_t *mb = &pmb->mb;
+	MAILBOX_t *mb = &pmb->u.mb;
 	struct config_hbq_var *hbqmb = &mb->un.varCfgHbq;
 
 	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
@@ -1022,7 +1031,7 @@ void
 lpfc_config_ring(struct lpfc_hba * phba, int ring, LPFC_MBOXQ_t * pmb)
 {
 	int i;
-	MAILBOX_t *mb = &pmb->mb;
+	MAILBOX_t *mb = &pmb->u.mb;
 	struct lpfc_sli *psli;
 	struct lpfc_sli_ring *pring;
 
@@ -1077,7 +1086,7 @@ void
 lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 {
 	MAILBOX_t __iomem *mb_slim = (MAILBOX_t __iomem *) phba->MBslimaddr;
-	MAILBOX_t *mb = &pmb->mb;
+	MAILBOX_t *mb = &pmb->u.mb;
 	dma_addr_t pdma_addr;
 	uint32_t bar_low, bar_high;
 	size_t offset;
@@ -1101,21 +1110,22 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 
 	/* If HBA supports SLI=3 ask for it */
 
-	if (phba->sli_rev == 3 && phba->vpd.sli3Feat.cerbm) {
+	if (phba->sli_rev == LPFC_SLI_REV3 && phba->vpd.sli3Feat.cerbm) {
 		if (phba->cfg_enable_bg)
 			mb->un.varCfgPort.cbg = 1; /* configure BlockGuard */
+		mb->un.varCfgPort.cdss = 1; /* Configure Security */
 		mb->un.varCfgPort.cerbm = 1; /* Request HBQs */
 		mb->un.varCfgPort.ccrp = 1; /* Command Ring Polling */
 		mb->un.varCfgPort.cinb = 1; /* Interrupt Notification Block */
 		mb->un.varCfgPort.max_hbq = lpfc_sli_hbq_count();
 		if (phba->max_vpi && phba->cfg_enable_npiv &&
 		    phba->vpd.sli3Feat.cmv) {
-			mb->un.varCfgPort.max_vpi = phba->max_vpi;
+			mb->un.varCfgPort.max_vpi = LPFC_MAX_VPI;
 			mb->un.varCfgPort.cmv = 1;
 		} else
 			mb->un.varCfgPort.max_vpi = phba->max_vpi = 0;
 	} else
-		phba->sli_rev = 2;
+		phba->sli_rev = LPFC_SLI_REV2;
 	mb->un.varCfgPort.sli_mode = phba->sli_rev;
 
 	/* Now setup pcb */
@@ -1247,7 +1257,7 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 void
 lpfc_kill_board(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
 {
-	MAILBOX_t *mb = &pmb->mb;
+	MAILBOX_t *mb = &pmb->u.mb;
 
 	memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
 	mb->mbxCommand = MBX_KILL_BOARD;
@@ -1306,29 +1316,98 @@ lpfc_mbox_get(struct lpfc_hba * phba)
 	return mbq;
 }
 
+/**
+ * __lpfc_mbox_cmpl_put - Put mailbox cmd into mailbox cmd complete list
+ * @phba: pointer to lpfc hba data structure.
+ * @mbq: pointer to the driver internal queue element for mailbox command.
+ *
+ * This routine puts the completed mailbox command into the mailbox command
+ * complete list. This is the unlocked version of the routine. The mailbox
+ * complete list is used by the driver worker thread to process mailbox
+ * complete callback functions outside the driver interrupt handler.
+ **/
+void
+__lpfc_mbox_cmpl_put(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbq)
+{
+	list_add_tail(&mbq->list, &phba->sli.mboxq_cmpl);
+}
+
 /**
  * lpfc_mbox_cmpl_put - Put mailbox command into mailbox command complete list
  * @phba: pointer to lpfc hba data structure.
  * @mbq: pointer to the driver internal queue element for mailbox command.
  *
 * This routine puts the completed mailbox command into the mailbox command
- * complete list. This routine is called from driver interrupt handler
- * context.The mailbox complete list is used by the driver worker thread
- * to process mailbox complete callback functions outside the driver interrupt
- * handler.
+ * complete list. This is the locked version of the routine. The mailbox
+ * complete list is used by the driver worker thread to process mailbox
+ * complete callback functions outside the driver interrupt handler.
  **/
 void
-lpfc_mbox_cmpl_put(struct lpfc_hba * phba, LPFC_MBOXQ_t * mbq)
+lpfc_mbox_cmpl_put(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbq)
 {
 	unsigned long iflag;
 
 	/* This function expects to be called from interrupt context */
 	spin_lock_irqsave(&phba->hbalock, iflag);
-	list_add_tail(&mbq->list, &phba->sli.mboxq_cmpl);
+	__lpfc_mbox_cmpl_put(phba, mbq);
 	spin_unlock_irqrestore(&phba->hbalock, iflag);
 	return;
 }
 
+/**
+ * lpfc_mbox_cmd_check - Check the validity of a mailbox command
+ * @phba: pointer to lpfc hba data structure.
+ * @mboxq: pointer to the driver internal queue element for mailbox command.
+ *
+ * This routine checks whether a mailbox command is valid to be issued.
+ * This check is performed by the mailbox issue API when a client is about
+ * to issue a mailbox command to the mailbox transport.
+ *
+ * Return 0 - pass the check, -ENODEV - fail the check
+ **/
+int
+lpfc_mbox_cmd_check(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+{
+	/* Mailbox commands that have a completion handler must also have a
+	 * vport specified.
+	 */
+	if (mboxq->mbox_cmpl && mboxq->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
+	    mboxq->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
+		if (!mboxq->vport) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_VPORT,
+					"1814 Mbox x%x failed, no vport\n",
+					mboxq->u.mb.mbxCommand);
+			dump_stack();
+			return -ENODEV;
+		}
+	}
+	return 0;
+}
+
+/**
+ * lpfc_mbox_dev_check - Check the device state for issuing a mailbox command
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine checks whether the HBA device is ready for posting a
+ * mailbox command. It is used by the mailbox transport API at the time it
+ * is about to post a mailbox command to the device.
+ *
+ * Return 0 - pass the check, -ENODEV - fail the check
+ **/
+int
+lpfc_mbox_dev_check(struct lpfc_hba *phba)
+{
+	/* If the PCI channel is in offline state, do not issue mbox */
+	if (unlikely(pci_channel_offline(phba->pcidev)))
+		return -ENODEV;
+
+	/* If the HBA is in error state, do not issue mbox */
+	if (phba->link_state == LPFC_HBA_ERROR)
+		return -ENODEV;
+
+	return 0;
+}
+
 /**
  * lpfc_mbox_tmo_val - Retrieve mailbox command timeout value
  * @phba: pointer to lpfc hba data structure.
@@ -1352,6 +1431,475 @@ lpfc_mbox_tmo_val(struct lpfc_hba *phba, int cmd)
 	case MBX_WRITE_WWN:     /* 0x98 */
 	case MBX_LOAD_EXP_ROM:	/* 0x9C */
 		return LPFC_MBOX_TMO_FLASH_CMD;
+	case MBX_SLI4_CONFIG:	/* 0x9b */
+		return LPFC_MBOX_SLI4_CONFIG_TMO;
 	}
 	return LPFC_MBOX_TMO;
 }
+
+/**
+ * lpfc_sli4_mbx_sge_set - Set a sge entry in non-embedded mailbox command
+ * @mbox: pointer to lpfc mbox command.
+ * @sgentry: sge entry index.
+ * @phyaddr: physical address for the sge
+ * @length: Length of the sge.
+ *
+ * This routine sets up an entry in the non-embedded mailbox command at the sge
+ * index location.
+ **/
+void
+lpfc_sli4_mbx_sge_set(struct lpfcMboxq *mbox, uint32_t sgentry,
+		      dma_addr_t phyaddr, uint32_t length)
+{
+	struct lpfc_mbx_nembed_cmd *nembed_sge;
+
+	nembed_sge = (struct lpfc_mbx_nembed_cmd *)
+				&mbox->u.mqe.un.nembed_cmd;
+	nembed_sge->sge[sgentry].pa_lo = putPaddrLow(phyaddr);
+	nembed_sge->sge[sgentry].pa_hi = putPaddrHigh(phyaddr);
+	nembed_sge->sge[sgentry].length = length;
+}
+
+/**
+ * lpfc_sli4_mbx_sge_get - Get a sge entry from non-embedded mailbox command
+ * @mbox: pointer to lpfc mbox command.
+ * @sgentry: sge entry index.
+ * @sge: pointer to the sge entry to be filled in.
+ *
+ * This routine gets an entry from the non-embedded mailbox command at the sge
+ * index location.
+ **/
+void
+lpfc_sli4_mbx_sge_get(struct lpfcMboxq *mbox, uint32_t sgentry,
+		      struct lpfc_mbx_sge *sge)
+{
+	struct lpfc_mbx_nembed_cmd *nembed_sge;
+
+	nembed_sge = (struct lpfc_mbx_nembed_cmd *)
+				&mbox->u.mqe.un.nembed_cmd;
+	sge->pa_lo = nembed_sge->sge[sgentry].pa_lo;
+	sge->pa_hi = nembed_sge->sge[sgentry].pa_hi;
+	sge->length = nembed_sge->sge[sgentry].length;
+}
+
+/**
+ * lpfc_sli4_mbox_cmd_free - Free a sli4 mailbox command
+ * @phba: pointer to lpfc hba data structure.
+ * @mbox: pointer to lpfc mbox command.
+ *
+ * This routine frees an SLI4-specific mailbox command used for sending an
+ * IOCTL command.
+ **/
+void
+lpfc_sli4_mbox_cmd_free(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
+{
+	struct lpfc_mbx_sli4_config *sli4_cfg;
+	struct lpfc_mbx_sge sge;
+	dma_addr_t phyaddr;
+	uint32_t sgecount, sgentry;
+
+	sli4_cfg = &mbox->u.mqe.un.sli4_config;
+
+	/* For embedded mbox command, just free the mbox command */
+	if (bf_get(lpfc_mbox_hdr_emb, &sli4_cfg->header.cfg_mhdr)) {
+		mempool_free(mbox, phba->mbox_mem_pool);
+		return;
+	}
+
+	/* For non-embedded mbox command, we need to free the pages first */
+	sgecount = bf_get(lpfc_mbox_hdr_sge_cnt, &sli4_cfg->header.cfg_mhdr);
+	/* There is nothing we can do if there is no sge address array */
+	if (unlikely(!mbox->sge_array)) {
+		mempool_free(mbox, phba->mbox_mem_pool);
+		return;
+	}
+	/* Each non-embedded DMA memory was allocated in the length of a page */
+	for (sgentry = 0; sgentry < sgecount; sgentry++) {
+		lpfc_sli4_mbx_sge_get(mbox, sgentry, &sge);
+		phyaddr = getPaddr(sge.pa_hi, sge.pa_lo);
+		dma_free_coherent(&phba->pcidev->dev, PAGE_SIZE,
+				  mbox->sge_array->addr[sgentry], phyaddr);
+	}
+	/* Free the sge address array memory */
+	kfree(mbox->sge_array);
+	/* Finally, free the mailbox command itself */
+	mempool_free(mbox, phba->mbox_mem_pool);
+}
+
+/**
+ * lpfc_sli4_config - Initialize the SLI4 Config Mailbox command
+ * @phba: pointer to lpfc hba data structure.
+ * @mbox: pointer to lpfc mbox command.
+ * @subsystem: The sli4 config sub mailbox subsystem.
+ * @opcode: The sli4 config sub mailbox command opcode.
+ * @length: Length of the sli4 config mailbox command.
+ *
+ * This routine sets up the header fields of an SLI4-specific mailbox command
+ * for sending an IOCTL command.
+ *
+ * Return: the actual length of the mbox command allocated (mostly useful
+ *         for non-embedded mailbox commands).
+ **/
+int
+lpfc_sli4_config(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
+		 uint8_t subsystem, uint8_t opcode, uint32_t length, bool emb)
+{
+	struct lpfc_mbx_sli4_config *sli4_config;
+	union lpfc_sli4_cfg_shdr *cfg_shdr = NULL;
+	uint32_t alloc_len;
+	uint32_t resid_len;
+	uint32_t pagen, pcount;
+	void *viraddr;
+	dma_addr_t phyaddr;
+
+	/* Set up SLI4 mailbox command header fields */
+	memset(mbox, 0, sizeof(*mbox));
+	bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_SLI4_CONFIG);
+
+	/* Set up SLI4 ioctl command header fields */
+	sli4_config = &mbox->u.mqe.un.sli4_config;
+
+	/* Setup for the embedded mbox command */
+	if (emb) {
+		/* Set up main header fields */
+		bf_set(lpfc_mbox_hdr_emb, &sli4_config->header.cfg_mhdr, 1);
+		sli4_config->header.cfg_mhdr.payload_length =
+					LPFC_MBX_CMD_HDR_LENGTH + length;
+		/* Set up sub-header fields following main header */
+		bf_set(lpfc_mbox_hdr_opcode,
+			&sli4_config->header.cfg_shdr.request, opcode);
+		bf_set(lpfc_mbox_hdr_subsystem,
+			&sli4_config->header.cfg_shdr.request, subsystem);
+		sli4_config->header.cfg_shdr.request.request_length = length;
+		return length;
+	}
+
+	/* Setup for the non-embedded mbox command */
+	pcount = (PAGE_ALIGN(length))/PAGE_SIZE;
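+	/* Cap the page count at what a single sge address array can hold */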
+	pcount = (pcount > LPFC_SLI4_MBX_SGE_MAX_PAGES) ?
+				LPFC_SLI4_MBX_SGE_MAX_PAGES : pcount;
+	/* Allocate record for keeping SGE virtual addresses */
+	mbox->sge_array = kmalloc(sizeof(struct lpfc_mbx_nembed_sge_virt),
+				  GFP_KERNEL);
+	if (!mbox->sge_array)
+		return 0;
+
+	for (pagen = 0, alloc_len = 0; pagen < pcount; pagen++) {
+		/* The DMA memory is always allocated in the length of a
+		 * page even though the last SGE might not fill up to a
+		 * page; this gives the later DMA memory free a known
+		 * a priori size of PAGE_SIZE.
+		 */
+		viraddr = dma_alloc_coherent(&phba->pcidev->dev, PAGE_SIZE,
+					     &phyaddr, GFP_KERNEL);
+		/* If the allocation fails, proceed with whatever we have */
+		if (!viraddr)
+			break;
+		mbox->sge_array->addr[pagen] = viraddr;
+		/* Keep the first page for later sub-header construction */
+		if (pagen == 0)
+			cfg_shdr = (union lpfc_sli4_cfg_shdr *)viraddr;
+		resid_len = length - alloc_len;
+		if (resid_len > PAGE_SIZE) {
+			lpfc_sli4_mbx_sge_set(mbox, pagen, phyaddr,
+					      PAGE_SIZE);
+			alloc_len += PAGE_SIZE;
+		} else {
+			lpfc_sli4_mbx_sge_set(mbox, pagen, phyaddr,
+					      resid_len);
+			alloc_len = length;
+		}
+	}
+
+	/* Set up main header fields in mailbox command */
+	sli4_config->header.cfg_mhdr.payload_length = alloc_len;
+	bf_set(lpfc_mbox_hdr_sge_cnt, &sli4_config->header.cfg_mhdr, pagen);
+
+	/* Set up sub-header fields into the first page */
+	if (pagen > 0) {
+		bf_set(lpfc_mbox_hdr_opcode, &cfg_shdr->request, opcode);
+		bf_set(lpfc_mbox_hdr_subsystem, &cfg_shdr->request, subsystem);
+		cfg_shdr->request.request_length =
+				alloc_len - sizeof(union  lpfc_sli4_cfg_shdr);
+	}
+	/* The sub-header is in DMA memory, which needs endian conversion */
+	lpfc_sli_pcimem_bcopy(cfg_shdr, cfg_shdr,
+			      sizeof(union  lpfc_sli4_cfg_shdr));
+
+	return alloc_len;
+}
+
+/**
+ * lpfc_sli4_mbox_opcode_get - Get the opcode from a sli4 mailbox command
+ * @phba: pointer to lpfc hba data structure.
+ * @mbox: pointer to lpfc mbox command.
+ *
+ * This routine gets the opcode from an SLI4-specific mailbox command used
+ * for sending an IOCTL command. If the mailbox command is not MBX_SLI4_CONFIG
+ * (0x9B) or if the IOCTL sub-header is not present, opcode 0x0 shall be
+ * returned.
+ **/
+uint8_t
+lpfc_sli4_mbox_opcode_get(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
+{
+	struct lpfc_mbx_sli4_config *sli4_cfg;
+	union lpfc_sli4_cfg_shdr *cfg_shdr;
+
+	if (mbox->u.mb.mbxCommand != MBX_SLI4_CONFIG)
+		return 0;
+	sli4_cfg = &mbox->u.mqe.un.sli4_config;
+
+	/* For embedded mbox command, get opcode from embedded sub-header*/
+	if (bf_get(lpfc_mbox_hdr_emb, &sli4_cfg->header.cfg_mhdr)) {
+		cfg_shdr = &mbox->u.mqe.un.sli4_config.header.cfg_shdr;
+		return bf_get(lpfc_mbox_hdr_opcode, &cfg_shdr->request);
+	}
+
+	/* For non-embedded mbox command, get opcode from first dma page */
+	if (unlikely(!mbox->sge_array))
+		return 0;
+	cfg_shdr = (union lpfc_sli4_cfg_shdr *)mbox->sge_array->addr[0];
+	return bf_get(lpfc_mbox_hdr_opcode, &cfg_shdr->request);
+}
+
+/**
+ * lpfc_request_features - Configure the SLI4 REQUEST_FEATURES mailbox
+ * @phba: pointer to lpfc hba data structure.
+ * @mboxq: pointer to lpfc mbox command.
+ *
+ * This routine sets up the mailbox for an SLI4 REQUEST_FEATURES
+ * mailbox command.
+ **/
+void
+lpfc_request_features(struct lpfc_hba *phba, struct lpfcMboxq *mboxq)
+{
+	/* Set up SLI4 mailbox command header fields */
+	memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
+	bf_set(lpfc_mqe_command, &mboxq->u.mqe, MBX_SLI4_REQ_FTRS);
+
+	/* Set up host requested features. */
+	bf_set(lpfc_mbx_rq_ftr_rq_fcpi, &mboxq->u.mqe.un.req_ftrs, 1);
+
+	/* Virtual fabrics and FIPs are not supported yet. */
+	bf_set(lpfc_mbx_rq_ftr_rq_ifip, &mboxq->u.mqe.un.req_ftrs, 0);
+
+	/* Enable DIF (block guard) only if configured to do so. */
+	if (phba->cfg_enable_bg)
+		bf_set(lpfc_mbx_rq_ftr_rq_dif, &mboxq->u.mqe.un.req_ftrs, 1);
+
+	/* Enable NPIV only if configured to do so. */
+	if (phba->max_vpi && phba->cfg_enable_npiv)
+		bf_set(lpfc_mbx_rq_ftr_rq_npiv, &mboxq->u.mqe.un.req_ftrs, 1);
+
+	return;
+}
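+
+/*
+ * Minimal usage sketch (illustrative; assumes a process context where a
+ * sleeping allocation is allowed, as in HBA setup):
+ *
+ *	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ *	if (!mboxq)
+ *		return -ENOMEM;
+ *	lpfc_request_features(phba, mboxq);
+ *	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+ *	status = bf_get(lpfc_mqe_status, &mboxq->u.mqe);
+ */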
+
+/**
+ * lpfc_init_vfi - Initialize the INIT_VFI mailbox command
+ * @mbox: pointer to lpfc mbox command to initialize.
+ * @vport: Vport associated with the VF.
+ *
+ * This routine initializes @mbox to all zeros and then fills in the mailbox
+ * fields from @vport. INIT_VFI configures virtual fabrics identified by VFI
+ * in the context of an FCF. The driver issues this command to set up a VFI
+ * before issuing a FLOGI to log in to the VSAN. The driver should also issue
+ * a REG_VFI after a successful VSAN login.
+ **/
+void
+lpfc_init_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport)
+{
+	struct lpfc_mbx_init_vfi *init_vfi;
+
+	memset(mbox, 0, sizeof(*mbox));
+	init_vfi = &mbox->u.mqe.un.init_vfi;
+	bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_INIT_VFI);
+	bf_set(lpfc_init_vfi_vr, init_vfi, 1);
+	bf_set(lpfc_init_vfi_vt, init_vfi, 1);
+	bf_set(lpfc_init_vfi_vfi, init_vfi, vport->vfi + vport->phba->vfi_base);
+	bf_set(lpfc_init_vfi_fcfi, init_vfi, vport->phba->fcf.fcfi);
+}
+
+/**
+ * lpfc_reg_vfi - Initialize the REG_VFI mailbox command
+ * @mbox: pointer to lpfc mbox command to initialize.
+ * @vport: vport associated with the VF.
+ * @phys: BDE DMA bus address used to send the service parameters to the HBA.
+ *
+ * This routine initializes @mbox to all zeros and then fills in the mailbox
+ * fields from @vport, and uses the DMAable buffer at @phys to send the
+ * vport's fc service parameters to the HBA for this VFI. REG_VFI configures
+ * virtual fabrics identified by VFI in the context of an FCF.
+ **/
+void
+lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys)
+{
+	struct lpfc_mbx_reg_vfi *reg_vfi;
+
+	memset(mbox, 0, sizeof(*mbox));
+	reg_vfi = &mbox->u.mqe.un.reg_vfi;
+	bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_REG_VFI);
+	bf_set(lpfc_reg_vfi_vp, reg_vfi, 1);
+	bf_set(lpfc_reg_vfi_vfi, reg_vfi, vport->vfi + vport->phba->vfi_base);
+	bf_set(lpfc_reg_vfi_fcfi, reg_vfi, vport->phba->fcf.fcfi);
+	bf_set(lpfc_reg_vfi_vpi, reg_vfi, vport->vpi + vport->phba->vpi_base);
+	reg_vfi->bde.addrHigh = putPaddrHigh(phys);
+	reg_vfi->bde.addrLow = putPaddrLow(phys);
+	reg_vfi->bde.tus.f.bdeSize = sizeof(vport->fc_sparam);
+	reg_vfi->bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+	bf_set(lpfc_reg_vfi_nport_id, reg_vfi, vport->fc_myDID);
+}
+
+/**
+ * lpfc_init_vpi - Initialize the INIT_VPI mailbox command
+ * @mbox: pointer to lpfc mbox command to initialize.
+ * @vpi: VPI to be initialized.
+ *
+ * The INIT_VPI mailbox command supports virtual N_Ports. The driver uses the
+ * command to activate a virtual N_Port. The HBA assigns a MAC address to use
+ * with the virtual N_Port. The SLI Host issues this command before issuing an
+ * FDISC to connect to the Fabric. The SLI Host should issue a REG_VPI after a
+ * successful virtual N_Port login.
+ **/
+void
+lpfc_init_vpi(struct lpfcMboxq *mbox, uint16_t vpi)
+{
+	memset(mbox, 0, sizeof(*mbox));
+	bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_INIT_VPI);
+	bf_set(lpfc_init_vpi_vpi, &mbox->u.mqe.un.init_vpi, vpi);
+}
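+
+/*
+ * Usage sketch (illustrative): a vport would be activated before FDISC
+ * roughly as follows, passing the base-adjusted VPI as done elsewhere in
+ * the driver:
+ *
+ *	lpfc_init_vpi(mbox, vport->vpi + phba->vpi_base);
+ *	mbox->vport = vport;
+ *	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
+ *	if (rc == MBX_NOT_FINISHED)
+ *		mempool_free(mbox, phba->mbox_mem_pool);
+ */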
+
+/**
+ * lpfc_unreg_vfi - Initialize the UNREG_VFI mailbox command
+ * @mbox: pointer to lpfc mbox command to initialize.
+ * @vfi: VFI to be unregistered.
+ *
+ * The UNREG_VFI mailbox command causes the SLI Host to put a virtual fabric
+ * (logical NPort) into the inactive state. The SLI Host must have logged out
+ * and unregistered all remote N_Ports to abort any activity on the virtual
+ * fabric. The SLI Port posts the mailbox response after marking the virtual
+ * fabric inactive.
+ **/
+void
+lpfc_unreg_vfi(struct lpfcMboxq *mbox, uint16_t vfi)
+{
+	memset(mbox, 0, sizeof(*mbox));
+	bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_UNREG_VFI);
+	bf_set(lpfc_unreg_vfi_vfi, &mbox->u.mqe.un.unreg_vfi, vfi);
+}
+
+/**
+ * lpfc_dump_fcoe_param - Dump config region 23 to get FCoE parameters.
+ * @phba: pointer to the hba structure.
+ * @mbox: pointer to lpfc mbox command to initialize.
+ *
+ * This function creates a SLI4 dump mailbox command to dump FCoE
+ * parameters stored in region 23.
+ **/
+int
+lpfc_dump_fcoe_param(struct lpfc_hba *phba,
+		struct lpfcMboxq *mbox)
+{
+	struct lpfc_dmabuf *mp = NULL;
+	MAILBOX_t *mb;
+
+	memset(mbox, 0, sizeof(*mbox));
+	mb = &mbox->u.mb;
+
+	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+	if (mp)
+		mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
+
+	if (!mp || !mp->virt) {
+		kfree(mp);
+		/* dump_fcoe_param failed to allocate memory */
+		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
+			"2569 lpfc_dump_fcoe_param: memory"
+			" allocation failed\n");
+		return 1;
+	}
+
+	memset(mp->virt, 0, LPFC_BPL_SIZE);
+	INIT_LIST_HEAD(&mp->list);
+
+	/* save address for completion */
+	mbox->context1 = (uint8_t *) mp;
+
+	mb->mbxCommand = MBX_DUMP_MEMORY;
+	mb->un.varDmp.type = DMP_NV_PARAMS;
+	mb->un.varDmp.region_id = DMP_REGION_FCOEPARAM;
+	mb->un.varDmp.sli4_length = DMP_FCOEPARAM_RGN_SIZE;
+	mb->un.varWords[3] = putPaddrLow(mp->phys);
+	mb->un.varWords[4] = putPaddrHigh(mp->phys);
+	return 0;
+}
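+
+/*
+ * Expected calling sequence (illustrative sketch): the DMA buffer saved
+ * in mbox->context1 carries region 23 back from the port, and the caller
+ * owns freeing it once the data has been parsed:
+ *
+ *	if (lpfc_dump_fcoe_param(phba, mboxq))
+ *		return -ENOMEM;
+ *	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+ *	mp = (struct lpfc_dmabuf *)mboxq->context1;
+ *	(parse the FCoE parameters from mp->virt here)
+ *	lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ *	kfree(mp);
+ */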
+
+/**
+ * lpfc_reg_fcfi - Initialize the REG_FCFI mailbox command
+ * @phba: pointer to the hba structure containing the FCF index and RQ ID.
+ * @mbox: pointer to lpfc mbox command to initialize.
+ *
+ * The REG_FCFI mailbox command supports Fibre Channel Forwarders (FCFs). The
+ * SLI Host uses the command to activate an FCF after it has acquired FCF
+ * information via a READ_FCF mailbox command. This mailbox command is also
+ * used to indicate where received unsolicited frames from this FCF will be
+ * sent. By default this routine will set up the FCF to forward all
+ * unsolicited frames to the RQ ID passed in @phba. This can be overridden
+ * by the caller for more complicated setups.
+ **/
+void
+lpfc_reg_fcfi(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
+{
+	struct lpfc_mbx_reg_fcfi *reg_fcfi;
+
+	memset(mbox, 0, sizeof(*mbox));
+	reg_fcfi = &mbox->u.mqe.un.reg_fcfi;
+	bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_REG_FCFI);
+	bf_set(lpfc_reg_fcfi_rq_id0, reg_fcfi, phba->sli4_hba.hdr_rq->queue_id);
+	bf_set(lpfc_reg_fcfi_rq_id1, reg_fcfi, REG_FCF_INVALID_QID);
+	bf_set(lpfc_reg_fcfi_rq_id2, reg_fcfi, REG_FCF_INVALID_QID);
+	bf_set(lpfc_reg_fcfi_rq_id3, reg_fcfi, REG_FCF_INVALID_QID);
+	bf_set(lpfc_reg_fcfi_info_index, reg_fcfi, phba->fcf.fcf_indx);
+	/* reg_fcf addr mode is bit wise inverted value of fcf addr_mode */
+	bf_set(lpfc_reg_fcfi_mam, reg_fcfi,
+		(~phba->fcf.addr_mode) & 0x3);
+	if (phba->fcf.fcf_flag & FCF_VALID_VLAN) {
+		bf_set(lpfc_reg_fcfi_vv, reg_fcfi, 1);
+		bf_set(lpfc_reg_fcfi_vlan_tag, reg_fcfi, phba->fcf.vlan_id);
+	}
+}
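+
+/*
+ * Override sketch (illustrative; "extra_rq" is a hypothetical second
+ * receive queue): a caller needing a more complicated setup can adjust
+ * the command after the defaults are filled in:
+ *
+ *	lpfc_reg_fcfi(phba, mbox);
+ *	bf_set(lpfc_reg_fcfi_rq_id1, &mbox->u.mqe.un.reg_fcfi,
+ *	       extra_rq->queue_id);
+ *	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
+ */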
+
+/**
+ * lpfc_unreg_fcfi - Initialize the UNREG_FCFI mailbox command
+ * @mbox: pointer to lpfc mbox command to initialize.
+ * @fcfi: FCFI to be unregistered.
+ *
+ * The UNREG_FCFI mailbox command supports Fibre Channel Forwarders (FCFs).
+ * The SLI Host uses the command to inactivate an FCFI.
+ **/
+void
+lpfc_unreg_fcfi(struct lpfcMboxq *mbox, uint16_t fcfi)
+{
+	memset(mbox, 0, sizeof(*mbox));
+	bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_UNREG_FCFI);
+	bf_set(lpfc_unreg_fcfi, &mbox->u.mqe.un.unreg_fcfi, fcfi);
+}
+
+/**
+ * lpfc_resume_rpi - Initialize the RESUME_RPI mailbox command
+ * @mbox: pointer to lpfc mbox command to initialize.
+ * @ndlp: The nodelist structure that describes the RPI to resume.
+ *
+ * The RESUME_RPI mailbox command is used to restart I/O to an RPI after a
+ * link event.
+ **/
+void
+lpfc_resume_rpi(struct lpfcMboxq *mbox, struct lpfc_nodelist *ndlp)
+{
+	struct lpfc_mbx_resume_rpi *resume_rpi;
+
+	memset(mbox, 0, sizeof(*mbox));
+	resume_rpi = &mbox->u.mqe.un.resume_rpi;
+	bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_RESUME_RPI);
+	bf_set(lpfc_resume_rpi_rpi, resume_rpi, ndlp->nlp_rpi);
+	bf_set(lpfc_resume_rpi_vpi, resume_rpi,
+	       ndlp->vport->vpi + ndlp->vport->phba->vpi_base);
+	bf_set(lpfc_resume_rpi_vfi, resume_rpi,
+	       ndlp->vport->vfi + ndlp->vport->phba->vfi_base);
+}
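+
+/*
+ * Usage sketch (illustrative): restarting I/O to a node after a link
+ * event follows the usual mailbox pattern:
+ *
+ *	lpfc_resume_rpi(mbox, ndlp);
+ *	mbox->vport = ndlp->vport;
+ *	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
+ *	if (rc == MBX_NOT_FINISHED)
+ *		mempool_free(mbox, phba->mbox_mem_pool);
+ */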
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 6ba5a72f604..6efe459e8dd 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -1192,7 +1192,7 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
 
 	/* cleanup any ndlp on mbox q waiting for reglogin cmpl */
 	if ((mb = phba->sli.mbox_active)) {
-		if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
+		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
 		   (ndlp == (struct lpfc_nodelist *) mb->context2)) {
 			lpfc_nlp_put(ndlp);
 			mb->context2 = NULL;
@@ -1202,7 +1202,7 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
 
 	spin_lock_irq(&phba->hbalock);
 	list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
-		if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
+		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
 		   (ndlp == (struct lpfc_nodelist *) mb->context2)) {
 			mp = (struct lpfc_dmabuf *) (mb->context1);
 			if (mp) {
@@ -1253,7 +1253,7 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
 {
 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
 	LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
-	MAILBOX_t *mb = &pmb->mb;
+	MAILBOX_t *mb = &pmb->u.mb;
 	uint32_t did  = mb->un.varWords[1];
 
 	if (mb->mbxStatus) {
@@ -1880,11 +1880,12 @@ lpfc_cmpl_reglogin_npr_node(struct lpfc_vport *vport,
 			    void *arg, uint32_t evt)
 {
 	LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
-	MAILBOX_t    *mb = &pmb->mb;
+	MAILBOX_t    *mb = &pmb->u.mb;
 
-	if (!mb->mbxStatus)
+	if (!mb->mbxStatus) {
 		ndlp->nlp_rpi = mb->un.varWords[0];
-	else {
+		ndlp->nlp_flag |= NLP_RPI_VALID;
+	} else {
 		if (ndlp->nlp_flag & NLP_NODEV_REMOVE) {
 			lpfc_drop_node(vport, ndlp);
 			return NLP_STE_FREED_NODE;
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index cf42ada3ffc..b53af993628 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -43,24 +43,7 @@
 #include "lpfc_logmsg.h"
 #include "lpfc_compat.h"
 #include "lpfc_debugfs.h"
-
-/*
- * Define macro to log: Mailbox command x%x cannot issue Data
- * This allows multiple uses of lpfc_msgBlk0311
- * w/o perturbing log msg utility.
- */
-#define LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag) \
-			lpfc_printf_log(phba, \
-				KERN_INFO, \
-				LOG_MBOX | LOG_SLI, \
-				"(%d):0311 Mailbox command x%x cannot " \
-				"issue Data: x%x x%x x%x\n", \
-				pmbox->vport ? pmbox->vport->vpi : 0, \
-				pmbox->mb.mbxCommand,		\
-				phba->pport->port_state,	\
-				psli->sli_flag,	\
-				flag)
-
+#include "lpfc_vport.h"
 
 /* There are only four IOCB completion types. */
 typedef enum _lpfc_iocb_type {
@@ -843,7 +826,7 @@ lpfc_sli_ring_map(struct lpfc_hba *phba)
 	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 	if (!pmb)
 		return -ENOMEM;
-	pmbox = &pmb->mb;
+	pmbox = &pmb->u.mb;
 	phba->link_state = LPFC_INIT_MBX_CMDS;
 	for (i = 0; i < psli->num_rings; i++) {
 		lpfc_config_ring(phba, i, pmb);
@@ -1652,6 +1635,15 @@ lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
 	case MBX_HEARTBEAT:
 	case MBX_PORT_CAPABILITIES:
 	case MBX_PORT_IOV_CONTROL:
+	case MBX_SLI4_CONFIG:
+	case MBX_SLI4_REQ_FTRS:
+	case MBX_REG_FCFI:
+	case MBX_UNREG_FCFI:
+	case MBX_REG_VFI:
+	case MBX_UNREG_VFI:
+	case MBX_INIT_VPI:
+	case MBX_INIT_VFI:
+	case MBX_RESUME_RPI:
 		ret = mbxCommand;
 		break;
 	default:
@@ -1672,7 +1664,7 @@ lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
  * will wake up thread waiting on the wait queue pointed by context1
  * of the mailbox.
  **/
-static void
+void
 lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
 {
 	wait_queue_head_t *pdone_q;
@@ -1706,7 +1698,7 @@ void
 lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 {
 	struct lpfc_dmabuf *mp;
-	uint16_t rpi;
+	uint16_t rpi, vpi;
 	int rc;
 
 	mp = (struct lpfc_dmabuf *) (pmb->context1);
@@ -1716,24 +1708,30 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 		kfree(mp);
 	}
 
+	if ((pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) &&
+	    (phba->sli_rev == LPFC_SLI_REV4))
+		lpfc_sli4_free_rpi(phba, pmb->u.mb.un.varUnregLogin.rpi);
+
 	/*
 	 * If a REG_LOGIN succeeded  after node is destroyed or node
 	 * is in re-discovery driver need to cleanup the RPI.
 	 */
 	if (!(phba->pport->load_flag & FC_UNLOADING) &&
-	    pmb->mb.mbxCommand == MBX_REG_LOGIN64 &&
-	    !pmb->mb.mbxStatus) {
-
-		rpi = pmb->mb.un.varWords[0];
-		lpfc_unreg_login(phba, pmb->mb.un.varRegLogin.vpi, rpi, pmb);
+	    pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
+	    !pmb->u.mb.mbxStatus) {
+		rpi = pmb->u.mb.un.varWords[0];
+		vpi = pmb->u.mb.un.varRegLogin.vpi - phba->vpi_base;
+		lpfc_unreg_login(phba, vpi, rpi, pmb);
 		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
 		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
 		if (rc != MBX_NOT_FINISHED)
 			return;
 	}
 
-	mempool_free(pmb, phba->mbox_mem_pool);
-	return;
+	if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
+		lpfc_sli4_mbox_cmd_free(phba, pmb);
+	else
+		mempool_free(pmb, phba->mbox_mem_pool);
 }
 
 /**
@@ -1770,7 +1768,7 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
 		if (pmb == NULL)
 			break;
 
-		pmbox = &pmb->mb;
+		pmbox = &pmb->u.mb;
 
 		if (pmbox->mbxCommand != MBX_HEARTBEAT) {
 			if (pmb->vport) {
@@ -1799,9 +1797,10 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
 			/* Unknown mailbox command compl */
 			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
 					"(%d):0323 Unknown Mailbox command "
-					"%x Cmpl\n",
+					"x%x (x%x) Cmpl\n",
 					pmb->vport ? pmb->vport->vpi : 0,
-					pmbox->mbxCommand);
+					pmbox->mbxCommand,
+					lpfc_sli4_mbox_opcode_get(phba, pmb));
 			phba->link_state = LPFC_HBA_ERROR;
 			phba->work_hs = HS_FFER3;
 			lpfc_handle_eratt(phba);
@@ -1816,29 +1815,29 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
 						LOG_MBOX | LOG_SLI,
 						"(%d):0305 Mbox cmd cmpl "
 						"error - RETRYing Data: x%x "
-						"x%x x%x x%x\n",
+						"(x%x) x%x x%x x%x\n",
 						pmb->vport ? pmb->vport->vpi :0,
 						pmbox->mbxCommand,
+						lpfc_sli4_mbox_opcode_get(phba,
+									  pmb),
 						pmbox->mbxStatus,
 						pmbox->un.varWords[0],
 						pmb->vport->port_state);
 				pmbox->mbxStatus = 0;
 				pmbox->mbxOwner = OWN_HOST;
-				spin_lock_irq(&phba->hbalock);
-				phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
-				spin_unlock_irq(&phba->hbalock);
 				rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
-				if (rc == MBX_SUCCESS)
+				if (rc != MBX_NOT_FINISHED)
 					continue;
 			}
 		}
 
 		/* Mailbox cmd <cmd> Cmpl <cmpl> */
 		lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
-				"(%d):0307 Mailbox cmd x%x Cmpl x%p "
+				"(%d):0307 Mailbox cmd x%x (x%x) Cmpl x%p "
 				"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n",
 				pmb->vport ? pmb->vport->vpi : 0,
 				pmbox->mbxCommand,
+				lpfc_sli4_mbox_opcode_get(phba, pmb),
 				pmb->mbox_cmpl,
 				*((uint32_t *) pmbox),
 				pmbox->un.varWords[0],
@@ -3377,10 +3376,10 @@ lpfc_sli_brdkill(struct lpfc_hba *phba)
 	}
 	spin_lock_irq(&phba->hbalock);
 	psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+	psli->mbox_active = NULL;
 	phba->link_flag &= ~LS_IGNORE_ERATT;
 	spin_unlock_irq(&phba->hbalock);
 
-	psli->mbox_active = NULL;
 	lpfc_hba_down_post(phba);
 	phba->link_state = LPFC_HBA_ERROR;
 
@@ -3790,7 +3789,7 @@ lpfc_sli_hbq_setup(struct lpfc_hba *phba)
 	if (!pmb)
 		return -ENOMEM;
 
-	pmbox = &pmb->mb;
+	pmbox = &pmb->u.mb;
 
 	/* Initialize the struct lpfc_sli_hbq structure for each hbq */
 	phba->link_state = LPFC_INIT_MBX_CMDS;
@@ -3917,33 +3916,43 @@ lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"0442 Adapter failed to init, mbxCmd x%x "
 				"CONFIG_PORT, mbxStatus x%x Data: x%x\n",
-				pmb->mb.mbxCommand, pmb->mb.mbxStatus, 0);
+				pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0);
 			spin_lock_irq(&phba->hbalock);
-			phba->sli.sli_flag &= ~LPFC_SLI2_ACTIVE;
+			phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
 			spin_unlock_irq(&phba->hbalock);
 			rc = -ENXIO;
-		} else
+		} else {
+			/* Allow asynchronous mailbox command to go through */
+			spin_lock_irq(&phba->hbalock);
+			phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
+			spin_unlock_irq(&phba->hbalock);
 			done = 1;
+		}
 	}
 	if (!done) {
 		rc = -EINVAL;
 		goto do_prep_failed;
 	}
-	if (pmb->mb.un.varCfgPort.sli_mode == 3) {
-		if (!pmb->mb.un.varCfgPort.cMA) {
+	if (pmb->u.mb.un.varCfgPort.sli_mode == 3) {
+		if (!pmb->u.mb.un.varCfgPort.cMA) {
 			rc = -ENXIO;
 			goto do_prep_failed;
 		}
-		if (phba->max_vpi && pmb->mb.un.varCfgPort.gmv) {
+		if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) {
 			phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
-			phba->max_vpi = pmb->mb.un.varCfgPort.max_vpi;
+			phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi;
+			phba->max_vports = (phba->max_vpi > phba->max_vports) ?
+				phba->max_vpi : phba->max_vports;
+
 		} else
 			phba->max_vpi = 0;
-		if (pmb->mb.un.varCfgPort.gerbm)
+		if (pmb->u.mb.un.varCfgPort.gdss)
+			phba->sli3_options |= LPFC_SLI3_DSS_ENABLED;
+		if (pmb->u.mb.un.varCfgPort.gerbm)
 			phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
-		if (pmb->mb.un.varCfgPort.gcrp)
+		if (pmb->u.mb.un.varCfgPort.gcrp)
 			phba->sli3_options |= LPFC_SLI3_CRP_ENABLED;
-		if (pmb->mb.un.varCfgPort.ginb) {
+		if (pmb->u.mb.un.varCfgPort.ginb) {
 			phba->sli3_options |= LPFC_SLI3_INB_ENABLED;
 			phba->hbq_get = phba->mbox->us.s3_inb_pgp.hbq_get;
 			phba->port_gp = phba->mbox->us.s3_inb_pgp.port;
@@ -3959,7 +3968,7 @@ lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
 		}
 
 		if (phba->cfg_enable_bg) {
-			if (pmb->mb.un.varCfgPort.gbg)
+			if (pmb->u.mb.un.varCfgPort.gbg)
 				phba->sli3_options |= LPFC_SLI3_BG_ENABLED;
 			else
 				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -4054,8 +4063,9 @@ lpfc_sli_hba_setup(struct lpfc_hba *phba)
 		if (rc)
 			goto lpfc_sli_hba_setup_error;
 	}
-
+	spin_lock_irq(&phba->hbalock);
 	phba->sli.sli_flag |= LPFC_PROCESS_LA;
+	spin_unlock_irq(&phba->hbalock);
 
 	rc = lpfc_config_port_post(phba);
 	if (rc)
@@ -4596,7 +4606,7 @@ void
 lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
 {
 	LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
-	MAILBOX_t *mb = &pmbox->mb;
+	MAILBOX_t *mb = &pmbox->u.mb;
 	struct lpfc_sli *psli = &phba->sli;
 	struct lpfc_sli_ring *pring;
 
@@ -6413,6 +6423,52 @@ lpfc_sli_queue_setup(struct lpfc_hba *phba)
 	return 1;
 }
 
+/**
+ * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system
+ * @phba: Pointer to HBA context object.
+ *
+ * This routine flushes the mailbox command sub-system. It will unconditionally
+ * flush all the mailbox commands in the three possible stages in the mailbox
+ * command sub-system: the pending mailbox command queue; the outstanding
+ * mailbox command; and the completed mailbox command queue. It is the
+ * caller's responsibility to make sure that the driver is in the proper state
+ * to flush the mailbox command sub-system. Namely, the posting of mailbox
+ * commands into the pending mailbox command queue from the various clients
+ * must be stopped; either the HBA is in a state that it will never work on
+ * the outstanding mailbox command (such as in EEH or ERATT conditions) or the
+ * outstanding mailbox command has been completed.
+ **/
+static void
+lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba)
+{
+	LIST_HEAD(completions);
+	struct lpfc_sli *psli = &phba->sli;
+	LPFC_MBOXQ_t *pmb;
+	unsigned long iflag;
+
+	/* Flush all the mailbox commands in the mbox system */
+	spin_lock_irqsave(&phba->hbalock, iflag);
+	/* The pending mailbox command queue */
+	list_splice_init(&phba->sli.mboxq, &completions);
+	/* The outstanding active mailbox command */
+	if (psli->mbox_active) {
+		list_add_tail(&psli->mbox_active->list, &completions);
+		psli->mbox_active = NULL;
+		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+	}
+	/* The completed mailbox command queue */
+	list_splice_init(&phba->sli.mboxq_cmpl, &completions);
+	spin_unlock_irqrestore(&phba->hbalock, iflag);
+
+	/* Return all flushed mailbox commands with MBX_NOT_FINISHED status */
+	while (!list_empty(&completions)) {
+		list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
+		pmb->u.mb.mbxStatus = MBX_NOT_FINISHED;
+		if (pmb->mbox_cmpl)
+			pmb->mbox_cmpl(phba, pmb);
+	}
+}
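+
+/*
+ * Caller-side sketch (illustrative; the shutdown path shown is an
+ * assumption, not code from this patch): posting must be blocked and the
+ * active command quiesced before the flush, e.g.:
+ *
+ *	spin_lock_irq(&phba->hbalock);
+ *	phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
+ *	spin_unlock_irq(&phba->hbalock);
+ *	(wait for or time out the outstanding mailbox command)
+ *	lpfc_sli_mbox_sys_flush(phba);
+ */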
+
 /**
  * lpfc_sli_host_down - Vport cleanup function
  * @vport: Pointer to virtual port object.
@@ -6506,9 +6562,11 @@ lpfc_sli_hba_down(struct lpfc_hba *phba)
 	struct lpfc_sli *psli = &phba->sli;
 	struct lpfc_sli_ring *pring;
 	struct lpfc_dmabuf *buf_ptr;
-	LPFC_MBOXQ_t *pmb;
-	int i;
 	unsigned long flags = 0;
+	int i;
+
+	/* Shutdown the mailbox command sub-system */
+	lpfc_sli_mbox_sys_shutdown(phba);
 
 	lpfc_hba_down_prep(phba);
 
@@ -7773,7 +7831,7 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id)
 
 		if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) {
 			pmb = phba->sli.mbox_active;
-			pmbox = &pmb->mb;
+			pmbox = &pmb->u.mb;
 			mbox = phba->mbox;
 			vport = pmb->vport;
 
@@ -8169,6 +8227,183 @@ lpfc_sli4_iocb_param_transfer(struct lpfc_iocbq *pIocbIn,
 	}
 }
 
+/**
+ * lpfc_sli4_sp_handle_async_event - Handle an asynchronous event
+ * @phba: Pointer to HBA context object.
+ * @mcqe: Pointer to mailbox completion queue entry.
+ *
+ * This routine processes a mailbox completion queue entry carrying an
+ * asynchronous event.
+ *
+ * Return: true if work posted to worker thread, otherwise false.
+ **/
+static bool
+lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
+{
+	struct lpfc_cq_event *cq_event;
+	unsigned long iflags;
+
+	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+			"0392 Async Event: word0:x%x, word1:x%x, "
+			"word2:x%x, word3:x%x\n", mcqe->word0,
+			mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer);
+
+	/* Allocate a new internal CQ_EVENT entry */
+	cq_event = lpfc_sli4_cq_event_alloc(phba);
+	if (!cq_event) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"0394 Failed to allocate CQ_EVENT entry\n");
+		return false;
+	}
+
+	/* Move the CQE into an asynchronous event entry */
+	memcpy(&cq_event->cqe, mcqe, sizeof(struct lpfc_mcqe));
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
+	/* Set the async event flag */
+	phba->hba_flag |= ASYNC_EVENT;
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+
+	return true;
+}
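+
+/*
+ * Consumer-side sketch (illustrative; the real worker-thread code lives
+ * elsewhere in the driver): events queued above are expected to be
+ * drained under hbalock, e.g.:
+ *
+ *	spin_lock_irqsave(&phba->hbalock, iflags);
+ *	list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
+ *			 cq_event, struct lpfc_cq_event, list);
+ *	spin_unlock_irqrestore(&phba->hbalock, iflags);
+ *	(dispatch on the trailer in cq_event->cqe, then free the entry)
+ */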
+
+/**
+ * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event
+ * @phba: Pointer to HBA context object.
+ * @mcqe: Pointer to mailbox completion queue entry.
+ *
+ * This routine processes a mailbox completion queue entry carrying a
+ * mailbox completion event.
+ *
+ * Return: true if work posted to worker thread, otherwise false.
+ **/
+static bool
+lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
+{
+	uint32_t mcqe_status;
+	MAILBOX_t *mbox, *pmbox;
+	struct lpfc_mqe *mqe;
+	struct lpfc_vport *vport;
+	struct lpfc_nodelist *ndlp;
+	struct lpfc_dmabuf *mp;
+	unsigned long iflags;
+	LPFC_MBOXQ_t *pmb;
+	bool workposted = false;
+	int rc;
+
+	/* If not a mailbox completion MCQE, bail out to the consumed check */
+	if (!bf_get(lpfc_trailer_completed, mcqe))
+		goto out_no_mqe_complete;
+
+	/* Get the reference to the active mbox command */
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	pmb = phba->sli.mbox_active;
+	if (unlikely(!pmb)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+				"1832 No pending MBOX command to handle\n");
+		spin_unlock_irqrestore(&phba->hbalock, iflags);
+		goto out_no_mqe_complete;
+	}
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+	mqe = &pmb->u.mqe;
+	pmbox = (MAILBOX_t *)&pmb->u.mqe;
+	mbox = phba->mbox;
+	vport = pmb->vport;
+
+	/* Reset heartbeat timer */
+	phba->last_completion_time = jiffies;
+	del_timer(&phba->sli.mbox_tmo);
+
+	/* Move mbox data to caller's mailbox region, do endian swapping */
+	if (pmb->mbox_cmpl && mbox)
+		lpfc_sli_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe));
+	/* Set the mailbox status with SLI4 range 0x4000 */
+	mcqe_status = bf_get(lpfc_mcqe_status, mcqe);
+	if (mcqe_status != MB_CQE_STATUS_SUCCESS)
+		bf_set(lpfc_mqe_status, mqe,
+		       (LPFC_MBX_ERROR_RANGE | mcqe_status));
+
+	if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
+		pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
+		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT,
+				      "MBOX dflt rpi: status:x%x rpi:x%x",
+				      mcqe_status,
+				      pmbox->un.varWords[0], 0);
+		if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
+			mp = (struct lpfc_dmabuf *)(pmb->context1);
+			ndlp = (struct lpfc_nodelist *)pmb->context2;
+			/* Reg_LOGIN of dflt RPI was successful. Now let's get
+			 * rid of the RPI using the same mbox buffer.
+			 */
+			lpfc_unreg_login(phba, vport->vpi,
+					 pmbox->un.varWords[0], pmb);
+			pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
+			pmb->context1 = mp;
+			pmb->context2 = ndlp;
+			pmb->vport = vport;
+			rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
+			if (rc != MBX_BUSY)
+				lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
+						LOG_SLI, "0385 rc should "
+						"have been MBX_BUSY\n");
+			if (rc != MBX_NOT_FINISHED)
+				goto send_current_mbox;
+		}
+	}
+	spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
+	phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
+	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
+
+	/* There is mailbox completion work to do */
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	__lpfc_mbox_cmpl_put(phba, pmb);
+	phba->work_ha |= HA_MBATT;
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+	workposted = true;
+
+send_current_mbox:
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	/* Release the mailbox command posting token */
+	phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+	/* Setting the active mailbox pointer must be in sync with flag clear */
+	phba->sli.mbox_active = NULL;
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+	/* Wake up worker thread to post the next pending mailbox command */
+	lpfc_worker_wake_up(phba);
+out_no_mqe_complete:
+	if (bf_get(lpfc_trailer_consumed, mcqe))
+		lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
+	return workposted;
+}
+
+/**
+ * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry
+ * @phba: Pointer to HBA context object.
+ * @cqe: Pointer to mailbox completion queue entry.
+ *
+ * This routine processes a mailbox completion queue entry; it invokes the
+ * proper mailbox completion handling or asynchronous event handling routine
+ * according to the MCQE's async bit.
+ *
+ * Return: true if work posted to worker thread, otherwise false.
+ **/
+static bool
+lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe)
+{
+	struct lpfc_mcqe mcqe;
+	bool workposted;
+
+	/* Copy the mailbox MCQE and convert endian order as needed */
+	lpfc_sli_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe));
+
+	/* Invoke the proper event handling routine */
+	if (!bf_get(lpfc_trailer_async, &mcqe))
+		workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe);
+	else
+		workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe);
+	return workposted;
+}
+
 /**
  * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event
  * @phba: Pointer to HBA context object.
@@ -9246,6 +9481,112 @@ out:
 	return status;
 }
 
+/**
+ * lpfc_mq_create - Create a mailbox Queue on the HBA
+ * @phba: HBA structure that indicates port to create a queue on.
+ * @mq: The queue structure to use to create the mailbox queue.
+ * @cq: The completion queue to associate with this queue.
+ * @subtype: The queue's subtype.
+ *
+ * This function creates a mailbox queue, as detailed in @mq, on a port,
+ * described by @phba by sending a MQ_CREATE mailbox command to the HBA.
+ *
+ * The @phba struct is used to send a mailbox command to the HBA. The @mq
+ * struct is used to get the entry count and entry size that are necessary to
+ * determine the number of pages to allocate and use for this queue. This
+ * function will send the MQ_CREATE mailbox command to the HBA to set up the
+ * mailbox queue. This function is synchronous and will wait for the mailbox
+ * command to finish before continuing.
+ *
+ * On success this function will return a zero. If unable to allocate enough
+ * memory this function will return -ENOMEM. If the queue create mailbox
+ * command fails this function will return -ENXIO.
+ **/
+uint32_t
+lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
+	       struct lpfc_queue *cq, uint32_t subtype)
+{
+	struct lpfc_mbx_mq_create *mq_create;
+	struct lpfc_dmabuf *dmabuf;
+	LPFC_MBOXQ_t *mbox;
+	int rc, length, status = 0;
+	uint32_t shdr_status, shdr_add_status;
+	union lpfc_sli4_cfg_shdr *shdr;
+
+	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mbox)
+		return -ENOMEM;
+	length = (sizeof(struct lpfc_mbx_mq_create) -
+		  sizeof(struct lpfc_sli4_cfg_mhdr));
+	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+			 LPFC_MBOX_OPCODE_MQ_CREATE,
+			 length, LPFC_SLI4_MBX_EMBED);
+	mq_create = &mbox->u.mqe.un.mq_create;
+	bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
+		    mq->page_count);
+	bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
+		    cq->queue_id);
+	bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
+	switch (mq->entry_count) {
+	default:
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"0362 Unsupported MQ count. (%d)\n",
+				mq->entry_count);
+		if (mq->entry_count < 16) {
+			mempool_free(mbox, phba->mbox_mem_pool);
+			return -EINVAL;
+		}
+		/* otherwise default to smallest count (fall through) */
+	case 16:
+		bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
+		       LPFC_MQ_CNT_16);
+		break;
+	case 32:
+		bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
+		       LPFC_MQ_CNT_32);
+		break;
+	case 64:
+		bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
+		       LPFC_MQ_CNT_64);
+		break;
+	case 128:
+		bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
+		       LPFC_MQ_CNT_128);
+		break;
+	}
+	list_for_each_entry(dmabuf, &mq->page_list, list) {
+		mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
+					putPaddrLow(dmabuf->phys);
+		mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
+					putPaddrHigh(dmabuf->phys);
+	}
+	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+	/* The IOCTL status is embedded in the mailbox subheader. */
+	shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr;
+	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+	if (shdr_status || shdr_add_status || rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2502 MQ_CREATE mailbox failed with "
+				"status x%x add_status x%x, mbx status x%x\n",
+				shdr_status, shdr_add_status, rc);
+		status = -ENXIO;
+		goto out;
+	}
+	mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id, &mq_create->u.response);
+	if (mq->queue_id == 0xFFFF) {
+		status = -ENXIO;
+		goto out;
+	}
+	mq->type = LPFC_MQ;
+	mq->subtype = subtype;
+	mq->host_index = 0;
+	mq->hba_index = 0;
+
+	/* link the mq onto the parent cq child list */
+	list_add_tail(&mq->list, &cq->child_list);
+out:
+	if (rc != MBX_TIMEOUT)
+		mempool_free(mbox, phba->mbox_mem_pool);
+	return status;
+}
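+
+/*
+ * Setup sketch (illustrative; assumes the MQ's parent CQ was created
+ * first, and that LPFC_MBOX is the subtype used for the mailbox queue):
+ *
+ *	rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq,
+ *			    phba->sli4_hba.mbx_cq, LPFC_MBOX);
+ *	if (rc)
+ *		goto out_destroy_mbx_cq;
+ */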
+
 /**
  * lpfc_wq_create - Create a Work Queue on the HBA
  * @phba: HBA structure that indicates port to create a queue on.
@@ -9614,6 +9955,60 @@ lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
 	return status;
 }
 
+/**
+ * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA
+ * @phba: Pointer to HBA context object.
+ * @mq: The queue structure associated with the queue to destroy.
+ *
+ * This function destroys a queue, as detailed in @mq, by sending a mailbox
+ * command, specific to the type of queue, to the HBA.
+ *
+ * The @mq struct is used to get the queue ID of the queue to destroy.
+ *
+ * On success this function will return a zero. If the queue destroy mailbox
+ * command fails this function will return -ENXIO.
+ **/
+uint32_t
+lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
+{
+	LPFC_MBOXQ_t *mbox;
+	int rc, length, status = 0;
+	uint32_t shdr_status, shdr_add_status;
+	union lpfc_sli4_cfg_shdr *shdr;
+
+	if (!mq)
+		return -ENODEV;
+	mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mbox)
+		return -ENOMEM;
+	length = (sizeof(struct lpfc_mbx_mq_destroy) -
+		  sizeof(struct lpfc_sli4_cfg_mhdr));
+	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+			 LPFC_MBOX_OPCODE_MQ_DESTROY,
+			 length, LPFC_SLI4_MBX_EMBED);
+	bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request,
+	       mq->queue_id);
+	mbox->vport = mq->phba->pport;
+	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+	rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL);
+	/* The IOCTL status is embedded in the mailbox subheader. */
+	shdr = (union lpfc_sli4_cfg_shdr *)
+		&mbox->u.mqe.un.mq_destroy.header.cfg_shdr;
+	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+	if (shdr_status || shdr_add_status || rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2507 MQ_DESTROY mailbox failed with "
+				"status x%x add_status x%x, mbx status x%x\n",
+				shdr_status, shdr_add_status, rc);
+		status = -ENXIO;
+	}
+	/* Remove mq from any list */
+	list_del_init(&mq->list);
+	if (rc != MBX_TIMEOUT)
+		mempool_free(mbox, mq->phba->mbox_mem_pool);
+	return status;
+}
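+
+/*
+ * Teardown sketch (illustrative), pairing with lpfc_mq_create() above;
+ * the routine itself unlinks the queue from its parent CQ's child list:
+ *
+ *	lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
+ */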
+
 /**
  * lpfc_wq_destroy - Destroy a Work Queue on the HBA
  * @wq: The queue structure associated with the queue to destroy.
-- 
cgit v1.2.3-70-g09d2


From 6fb120a7ed882aae9636545142a51cf3182a3ace Mon Sep 17 00:00:00 2001
From: James Smart <James.Smart@Emulex.Com>
Date: Fri, 22 May 2009 14:52:59 -0400
Subject: [SCSI] lpfc 8.3.2 : Addition of SLI4 Interface - FCOE Discovery
 support

SLI4 supports both FC and FCOE, with some extended topology objects.
This patch adds support for the objects, and updates the discovery
engines for their use.

Signed-off-by: James Smart <james.smart@emulex.com>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
---
 drivers/scsi/lpfc/lpfc_attr.c      |   32 +-
 drivers/scsi/lpfc/lpfc_crtn.h      |    3 +
 drivers/scsi/lpfc/lpfc_els.c       |  179 +++++-
 drivers/scsi/lpfc/lpfc_hbadisc.c   | 1055 +++++++++++++++++++++++++++++++++++-
 drivers/scsi/lpfc/lpfc_nportdisc.c |   34 +-
 drivers/scsi/lpfc/lpfc_sli.c       |  517 ++++++++++++++++++
 6 files changed, 1756 insertions(+), 64 deletions(-)

(limited to 'drivers/scsi/lpfc')

diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 463104d9686..270a4c6cd3a 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -2924,6 +2924,14 @@ LPFC_ATTR_R(enable_hba_heartbeat, 1, 0, 1, "Enable HBA Heartbeat.");
 */
 LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support");
 
+/*
+# lpfc_enable_fip: When set, FIP is required to start discovery. If not
+# set, the driver will add an FCF record manually if the port has no
+# FCF records available, and start discovery.
+# Value range is [0,1]. Default value is 0 (disabled)
+*/
+LPFC_ATTR_RW(enable_fip, 0, 0, 1, "Enable FIP Discovery");
+
 
 /*
 # lpfc_prot_mask: i
@@ -2990,6 +2998,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
 	&dev_attr_lpfc_peer_port_login,
 	&dev_attr_lpfc_nodev_tmo,
 	&dev_attr_lpfc_devloss_tmo,
+	&dev_attr_lpfc_enable_fip,
 	&dev_attr_lpfc_fcp_class,
 	&dev_attr_lpfc_use_adisc,
 	&dev_attr_lpfc_ack0,
@@ -3042,6 +3051,7 @@ struct device_attribute *lpfc_vport_attrs[] = {
 	&dev_attr_lpfc_lun_queue_depth,
 	&dev_attr_lpfc_nodev_tmo,
 	&dev_attr_lpfc_devloss_tmo,
+	&dev_attr_lpfc_enable_fip,
 	&dev_attr_lpfc_hba_queue_depth,
 	&dev_attr_lpfc_peer_port_login,
 	&dev_attr_lpfc_restrict_login,
@@ -4167,26 +4177,10 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
 	phba->cfg_soft_wwpn = 0L;
 	lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt);
 	lpfc_prot_sg_seg_cnt_init(phba, lpfc_prot_sg_seg_cnt);
-	/*
-	 * Since the sg_tablesize is module parameter, the sg_dma_buf_size
-	 * used to create the sg_dma_buf_pool must be dynamically calculated.
-	 * 2 segments are added since the IOCB needs a command and response bde.
-	 */
-	phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
-			sizeof(struct fcp_rsp) +
-			((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));
-
-	if (phba->cfg_enable_bg) {
-		phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT;
-		phba->cfg_sg_dma_buf_size +=
-			phba->cfg_prot_sg_seg_cnt * sizeof(struct ulp_bde64);
-	}
-
-	/* Also reinitialize the host templates with new values. */
-	lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
-	lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
-
 	lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth);
+	lpfc_enable_fip_init(phba, lpfc_enable_fip);
+	lpfc_hba_log_verbose_init(phba, lpfc_log_verbose);
+
 	return;
 }
 
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index e0f1cd4b3d3..d2a922997c0 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -23,6 +23,8 @@ typedef int (*node_filter)(struct lpfc_nodelist *, void *);
 struct fc_rport;
 void lpfc_dump_mem(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t);
 void lpfc_dump_wakeup_param(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_dump_static_vport(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t);
+int lpfc_dump_fcoe_param(struct lpfc_hba *, struct lpfcMboxq *);
 void lpfc_read_nv(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_config_async(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
 
@@ -108,6 +110,7 @@ int lpfc_issue_els_adisc(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t);
 int lpfc_issue_els_logo(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t);
 int lpfc_issue_els_npiv_logo(struct lpfc_vport *, struct lpfc_nodelist *);
 int lpfc_issue_els_scr(struct lpfc_vport *, uint32_t, uint8_t);
+int lpfc_issue_fabric_reglogin(struct lpfc_vport *);
 int lpfc_els_free_iocb(struct lpfc_hba *, struct lpfc_iocbq *);
 int lpfc_ct_free_iocb(struct lpfc_hba *, struct lpfc_iocbq *);
 int lpfc_els_rsp_acc(struct lpfc_vport *, uint32_t, struct lpfc_iocbq *,
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 2c034a554c8..a3b56d7f72f 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -387,6 +387,75 @@ fail:
 	return -ENXIO;
 }
 
+/**
+ * lpfc_issue_reg_vfi - Register VFI for this vport's fabric login
+ * @vport: pointer to a host virtual N_Port data structure.
+ *
+ * This routine issues a REG_VFI mailbox for the vfi, vpi, fcfi triplet for
+ * the @vport. This mailbox command is necessary for FCoE only.
+ *
+ * Return code
+ *   0 - successfully issued REG_VFI for @vport
+ *   A failure code otherwise.
+ **/
+static int
+lpfc_issue_reg_vfi(struct lpfc_vport *vport)
+{
+	struct lpfc_hba  *phba = vport->phba;
+	LPFC_MBOXQ_t *mboxq;
+	struct lpfc_nodelist *ndlp;
+	struct serv_parm *sp;
+	struct lpfc_dmabuf *dmabuf;
+	int rc = 0;
+
+	sp = &phba->fc_fabparam;
+	ndlp = lpfc_findnode_did(vport, Fabric_DID);
+	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
+		rc = -ENODEV;
+		goto fail;
+	}
+
+	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+	if (!dmabuf) {
+		rc = -ENOMEM;
+		goto fail;
+	}
+	dmabuf->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &dmabuf->phys);
+	if (!dmabuf->virt) {
+		rc = -ENOMEM;
+		goto fail_free_dmabuf;
+	}
+	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mboxq) {
+		rc = -ENOMEM;
+		goto fail_free_coherent;
+	}
+	vport->port_state = LPFC_FABRIC_CFG_LINK;
+	memcpy(dmabuf->virt, &phba->fc_fabparam, sizeof(vport->fc_sparam));
+	lpfc_reg_vfi(mboxq, vport, dmabuf->phys);
+	mboxq->mbox_cmpl = lpfc_mbx_cmpl_reg_vfi;
+	mboxq->vport = vport;
+	mboxq->context1 = dmabuf;
+	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
+	if (rc == MBX_NOT_FINISHED) {
+		rc = -ENXIO;
+		goto fail_free_mbox;
+	}
+	return 0;
+
+fail_free_mbox:
+	mempool_free(mboxq, phba->mbox_mem_pool);
+fail_free_coherent:
+	lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
+fail_free_dmabuf:
+	kfree(dmabuf);
+fail:
+	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+	lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+		"0289 Issue Register VFI failed: Err %d\n", rc);
+	return rc;
+}
+
 /**
  * lpfc_cmpl_els_flogi_fabric - Completion function for flogi to a fabric port
  * @vport: pointer to a host virtual N_Port data structure.
@@ -499,17 +568,24 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 		}
 	}
 
-	lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE);
-
-	if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED &&
-	    vport->fc_flag & FC_VPORT_NEEDS_REG_VPI) {
-		lpfc_register_new_vport(phba, vport, ndlp);
-		return 0;
+	if (phba->sli_rev < LPFC_SLI_REV4) {
+		lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE);
+		if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED &&
+		    vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
+			lpfc_register_new_vport(phba, vport, ndlp);
+		else
+			lpfc_issue_fabric_reglogin(vport);
+	} else {
+		ndlp->nlp_type |= NLP_FABRIC;
+		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
+		if (vport->vfi_state & LPFC_VFI_REGISTERED) {
+			lpfc_start_fdiscs(phba);
+			lpfc_do_scr_ns_plogi(phba, vport);
+		} else
+			lpfc_issue_reg_vfi(vport);
 	}
-	lpfc_issue_fabric_reglogin(vport);
 	return 0;
 }
-
 /**
  * lpfc_cmpl_els_flogi_nport - Completion function for flogi to an N_Port
  * @vport: pointer to a host virtual N_Port data structure.
@@ -817,9 +893,14 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	if (sp->cmn.fcphHigh < FC_PH3)
 		sp->cmn.fcphHigh = FC_PH3;
 
-	if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
+	if  (phba->sli_rev == LPFC_SLI_REV4) {
+		elsiocb->iocb.ulpCt_h = ((SLI4_CT_FCFI >> 1) & 1);
+		elsiocb->iocb.ulpCt_l = (SLI4_CT_FCFI & 1);
+		/* FLOGI needs to be 3 for WQE FCFI */
+		/* Set the fcfi to the fcfi we registered with */
+		elsiocb->iocb.ulpContext = phba->fcf.fcfi;
+	} else if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
 		sp->cmn.request_multiple_Nport = 1;
-
 		/* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */
 		icmd->ulpCt_h = 1;
 		icmd->ulpCt_l = 0;
@@ -932,6 +1013,8 @@ lpfc_initial_flogi(struct lpfc_vport *vport)
 		if (!ndlp)
 			return 0;
 		lpfc_nlp_init(vport, ndlp, Fabric_DID);
+		/* Set the node type */
+		ndlp->nlp_type |= NLP_FABRIC;
 		/* Put ndlp onto node list */
 		lpfc_enqueue_node(vport, ndlp);
 	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
@@ -1604,7 +1687,8 @@ lpfc_adisc_done(struct lpfc_vport *vport)
 	 * and continue discovery.
 	 */
 	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
-	    !(vport->fc_flag & FC_RSCN_MODE)) {
+	    !(vport->fc_flag & FC_RSCN_MODE) &&
+	    (phba->sli_rev < LPFC_SLI_REV4)) {
 		lpfc_issue_reg_vpi(phba, vport);
 		return;
 	}
@@ -2937,6 +3021,14 @@ lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
 	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
 
+	/*
+	 * This routine is used to register and unregister in previous SLI
+	 * modes.
+	 */
+	if ((pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) &&
+	    (phba->sli_rev == LPFC_SLI_REV4))
+		lpfc_sli4_free_rpi(phba, pmb->u.mb.un.varUnregLogin.rpi);
+
 	pmb->context1 = NULL;
 	lpfc_mbuf_free(phba, mp->virt, mp->phys);
 	kfree(mp);
@@ -3816,7 +3908,9 @@ lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did)
 			payload_len -= sizeof(uint32_t);
 			switch (rscn_did.un.b.resv & RSCN_ADDRESS_FORMAT_MASK) {
 			case RSCN_ADDRESS_FORMAT_PORT:
-				if (ns_did.un.word == rscn_did.un.word)
+				if ((ns_did.un.b.domain == rscn_did.un.b.domain)
+				    && (ns_did.un.b.area == rscn_did.un.b.area)
+				    && (ns_did.un.b.id == rscn_did.un.b.id))
 					goto return_did_out;
 				break;
 			case RSCN_ADDRESS_FORMAT_AREA:
@@ -4857,7 +4951,10 @@ lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
 		} else {
 			/* FAN verified - skip FLOGI */
 			vport->fc_myDID = vport->fc_prevDID;
-			lpfc_issue_fabric_reglogin(vport);
+			if (phba->sli_rev < LPFC_SLI_REV4)
+				lpfc_issue_fabric_reglogin(vport);
+			else
+				lpfc_issue_reg_vfi(vport);
 		}
 	}
 	return 0;
@@ -5540,11 +5637,10 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 
 dropit:
 	if (vport && !(vport->load_flag & FC_UNLOADING))
-		lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
-			"(%d):0111 Dropping received ELS cmd "
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+			"0111 Dropping received ELS cmd "
 			"Data: x%x x%x x%x\n",
-			vport->vpi, icmd->ulpStatus,
-			icmd->un.ulpWord[4], icmd->ulpTimeout);
+			icmd->ulpStatus, icmd->un.ulpWord[4], icmd->ulpTimeout);
 	phba->fc_stat.elsRcvDrop++;
 }
 
@@ -5620,10 +5716,9 @@ lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 	     icmd->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
 		if (icmd->unsli3.rcvsli3.vpi == 0xffff)
 			vport = phba->pport;
-		else {
-			uint16_t vpi = icmd->unsli3.rcvsli3.vpi;
-			vport = lpfc_find_vport_by_vpid(phba, vpi);
-		}
+		else
+			vport = lpfc_find_vport_by_vpid(phba,
+				icmd->unsli3.rcvsli3.vpi - phba->vpi_base);
 	}
 	/* If there are no BDEs associated
 	 * with this IOCB, there is nothing to do.
@@ -5792,7 +5887,10 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 
 	} else {
 		if (vport == phba->pport)
-			lpfc_issue_fabric_reglogin(vport);
+			if (phba->sli_rev < LPFC_SLI_REV4)
+				lpfc_issue_fabric_reglogin(vport);
+			else
+				lpfc_issue_reg_vfi(vport);
 		else
 			lpfc_do_scr_ns_plogi(phba, vport);
 	}
@@ -5824,7 +5922,7 @@ lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport,
 
 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 	if (mbox) {
-		lpfc_reg_vpi(phba, vport->vpi, vport->fc_myDID, mbox);
+		lpfc_reg_vpi(vport, mbox);
 		mbox->vport = vport;
 		mbox->context2 = lpfc_nlp_get(ndlp);
 		mbox->mbox_cmpl = lpfc_cmpl_reg_new_vport;
@@ -6496,3 +6594,38 @@ void lpfc_fabric_abort_hba(struct lpfc_hba *phba)
 	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
 			      IOERR_SLI_ABORTED);
 }
+
+/**
+ * lpfc_sli4_els_xri_aborted - Slow-path process of els xri abort
+ * @phba: pointer to lpfc hba data structure.
+ * @axri: pointer to the els xri abort wcqe structure.
+ *
+ * This routine is invoked by the worker thread to process a SLI4 slow-path
+ * ELS aborted xri.
+ **/
+void
+lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
+			  struct sli4_wcqe_xri_aborted *axri)
+{
+	uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
+	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
+	unsigned long iflag = 0;
+
+	spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock, iflag);
+	list_for_each_entry_safe(sglq_entry, sglq_next,
+			&phba->sli4_hba.lpfc_abts_els_sgl_list, list) {
+		if (sglq_entry->sli4_xritag == xri) {
+			list_del(&sglq_entry->list);
+			spin_unlock_irqrestore(
+					&phba->sli4_hba.abts_sgl_list_lock,
+					 iflag);
+			spin_lock_irqsave(&phba->hbalock, iflag);
+
+			list_add_tail(&sglq_entry->list,
+				&phba->sli4_hba.lpfc_sgl_list);
+			spin_unlock_irqrestore(&phba->hbalock, iflag);
+			return;
+		}
+	}
+	spin_unlock_irqrestore(&phba->sli4_hba.abts_sgl_list_lock, iflag);
+}
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 2270d9a7c8e..9bd7a8927a3 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -275,6 +275,8 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
 	    !(ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
 	    (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE))
 		lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
+
+	lpfc_unregister_unused_fcf(phba);
 }
 
 /**
@@ -297,10 +299,11 @@ lpfc_alloc_fast_evt(struct lpfc_hba *phba) {
 
 	ret = kzalloc(sizeof(struct lpfc_fast_path_event),
 			GFP_ATOMIC);
-	if (ret)
+	if (ret) {
 		atomic_inc(&phba->fast_event_count);
-	INIT_LIST_HEAD(&ret->work_evt.evt_listp);
-	ret->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
+		INIT_LIST_HEAD(&ret->work_evt.evt_listp);
+		ret->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
+	}
 	return ret;
 }
 
@@ -741,6 +744,7 @@ lpfc_linkdown(struct lpfc_hba *phba)
 	if (phba->link_state == LPFC_LINK_DOWN)
 		return 0;
 	spin_lock_irq(&phba->hbalock);
+	phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_DISCOVERED);
 	if (phba->link_state > LPFC_LINK_DOWN) {
 		phba->link_state = LPFC_LINK_DOWN;
 		phba->pport->fc_flag &= ~FC_LBIT;
@@ -748,7 +752,7 @@ lpfc_linkdown(struct lpfc_hba *phba)
 	spin_unlock_irq(&phba->hbalock);
 	vports = lpfc_create_vport_work_array(phba);
 	if (vports != NULL)
-		for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
+		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
 			/* Issue a LINK DOWN event to all nodes */
 			lpfc_linkdown_port(vports[i]);
 		}
@@ -858,10 +862,11 @@ lpfc_linkup(struct lpfc_hba *phba)
 
 	vports = lpfc_create_vport_work_array(phba);
 	if (vports != NULL)
-		for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++)
+		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
 			lpfc_linkup_port(vports[i]);
 	lpfc_destroy_vport_work_array(phba, vports);
-	if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
+	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
+	    (phba->sli_rev < LPFC_SLI_REV4))
 		lpfc_issue_clear_la(phba, phba->pport);
 
 	return 0;
@@ -983,10 +988,593 @@ out:
 	return;
 }
 
+static void
+lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+{
+	struct lpfc_vport *vport = mboxq->vport;
+	unsigned long flags;
+
+	if (mboxq->u.mb.mbxStatus) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
+			 "2017 REG_FCFI mbxStatus error x%x "
+			 "HBA state x%x\n",
+			 mboxq->u.mb.mbxStatus, vport->port_state);
+		mempool_free(mboxq, phba->mbox_mem_pool);
+		return;
+	}
+
+	/* Start FCoE discovery by sending a FLOGI. */
+	phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi, &mboxq->u.mqe.un.reg_fcfi);
+	/* Set the FCFI registered flag */
+	spin_lock_irqsave(&phba->hbalock, flags);
+	phba->fcf.fcf_flag |= FCF_REGISTERED;
+	spin_unlock_irqrestore(&phba->hbalock, flags);
+	if (vport->port_state != LPFC_FLOGI) {
+		spin_lock_irqsave(&phba->hbalock, flags);
+		phba->fcf.fcf_flag |= (FCF_DISCOVERED | FCF_IN_USE);
+		spin_unlock_irqrestore(&phba->hbalock, flags);
+		lpfc_initial_flogi(vport);
+	}
+
+	mempool_free(mboxq, phba->mbox_mem_pool);
+	return;
+}
+
+/**
+ * lpfc_fab_name_match - Check if the fcf fabric names match.
+ * @fab_name: pointer to fabric name.
+ * @new_fcf_record: pointer to fcf record.
+ *
+ * This routine compares the fcf record's fabric name with the provided
+ * fabric name. If the fabric names are identical this function
+ * returns 1, else it returns 0.
+ **/
+static uint32_t
+lpfc_fab_name_match(uint8_t *fab_name, struct fcf_record *new_fcf_record)
+{
+	if ((fab_name[0] ==
+		bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record)) &&
+	    (fab_name[1] ==
+		bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record)) &&
+	    (fab_name[2] ==
+		bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record)) &&
+	    (fab_name[3] ==
+		bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record)) &&
+	    (fab_name[4] ==
+		bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record)) &&
+	    (fab_name[5] ==
+		bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record)) &&
+	    (fab_name[6] ==
+		bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record)) &&
+	    (fab_name[7] ==
+		bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record)))
+		return 1;
+	else
+		return 0;
+}
+
+/**
+ * lpfc_mac_addr_match - Check if the fcf mac addresses match.
+ * @phba: pointer to lpfc hba data structure.
+ * @new_fcf_record: pointer to fcf record.
+ *
+ * This routine compares the fcf record's mac address with the HBA's
+ * FCF mac address. If the mac addresses are identical this function
+ * returns 1, else it returns 0.
+ **/
+static uint32_t
+lpfc_mac_addr_match(struct lpfc_hba *phba, struct fcf_record *new_fcf_record)
+{
+	if ((phba->fcf.mac_addr[0] ==
+		bf_get(lpfc_fcf_record_mac_0, new_fcf_record)) &&
+	    (phba->fcf.mac_addr[1] ==
+		bf_get(lpfc_fcf_record_mac_1, new_fcf_record)) &&
+	    (phba->fcf.mac_addr[2] ==
+		bf_get(lpfc_fcf_record_mac_2, new_fcf_record)) &&
+	    (phba->fcf.mac_addr[3] ==
+		bf_get(lpfc_fcf_record_mac_3, new_fcf_record)) &&
+	    (phba->fcf.mac_addr[4] ==
+		bf_get(lpfc_fcf_record_mac_4, new_fcf_record)) &&
+	    (phba->fcf.mac_addr[5] ==
+		bf_get(lpfc_fcf_record_mac_5, new_fcf_record)))
+		return 1;
+	else
+		return 0;
+}
+
+/**
+ * lpfc_copy_fcf_record - Copy fcf information to lpfc_hba.
+ * @phba: pointer to lpfc hba data structure.
+ * @new_fcf_record: pointer to fcf record.
+ *
+ * This routine copies the FCF information from the FCF
+ * record to the lpfc_hba data structure.
+ **/
+static void
+lpfc_copy_fcf_record(struct lpfc_hba *phba, struct fcf_record *new_fcf_record)
+{
+	phba->fcf.fabric_name[0] =
+		bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record);
+	phba->fcf.fabric_name[1] =
+		bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record);
+	phba->fcf.fabric_name[2] =
+		bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record);
+	phba->fcf.fabric_name[3] =
+		bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record);
+	phba->fcf.fabric_name[4] =
+		bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record);
+	phba->fcf.fabric_name[5] =
+		bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record);
+	phba->fcf.fabric_name[6] =
+		bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record);
+	phba->fcf.fabric_name[7] =
+		bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record);
+	phba->fcf.mac_addr[0] =
+		bf_get(lpfc_fcf_record_mac_0, new_fcf_record);
+	phba->fcf.mac_addr[1] =
+		bf_get(lpfc_fcf_record_mac_1, new_fcf_record);
+	phba->fcf.mac_addr[2] =
+		bf_get(lpfc_fcf_record_mac_2, new_fcf_record);
+	phba->fcf.mac_addr[3] =
+		bf_get(lpfc_fcf_record_mac_3, new_fcf_record);
+	phba->fcf.mac_addr[4] =
+		bf_get(lpfc_fcf_record_mac_4, new_fcf_record);
+	phba->fcf.mac_addr[5] =
+		bf_get(lpfc_fcf_record_mac_5, new_fcf_record);
+	phba->fcf.fcf_indx = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
+	phba->fcf.priority = new_fcf_record->fip_priority;
+}
+
+/**
+ * lpfc_register_fcf - Register the FCF with hba.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine issues a register fcfi mailbox command to register
+ * the fcf with HBA.
+ **/
+static void
+lpfc_register_fcf(struct lpfc_hba *phba)
+{
+	LPFC_MBOXQ_t *fcf_mbxq;
+	int rc;
+	unsigned long flags;
+
+	spin_lock_irqsave(&phba->hbalock, flags);
+
+	/* If the FCF is not available, do nothing. */
+	if (!(phba->fcf.fcf_flag & FCF_AVAILABLE)) {
+		spin_unlock_irqrestore(&phba->hbalock, flags);
+		return;
+	}
+
+	/* The FCF is already registered, start discovery */
+	if (phba->fcf.fcf_flag & FCF_REGISTERED) {
+		phba->fcf.fcf_flag |= (FCF_DISCOVERED | FCF_IN_USE);
+		spin_unlock_irqrestore(&phba->hbalock, flags);
+		if (phba->pport->port_state != LPFC_FLOGI)
+			lpfc_initial_flogi(phba->pport);
+		return;
+	}
+	spin_unlock_irqrestore(&phba->hbalock, flags);
+
+	fcf_mbxq = mempool_alloc(phba->mbox_mem_pool,
+		GFP_KERNEL);
+	if (!fcf_mbxq)
+		return;
+
+	lpfc_reg_fcfi(phba, fcf_mbxq);
+	fcf_mbxq->vport = phba->pport;
+	fcf_mbxq->mbox_cmpl = lpfc_mbx_cmpl_reg_fcfi;
+	rc = lpfc_sli_issue_mbox(phba, fcf_mbxq, MBX_NOWAIT);
+	if (rc == MBX_NOT_FINISHED)
+		mempool_free(fcf_mbxq, phba->mbox_mem_pool);
+
+	return;
+}
+
+/**
+ * lpfc_match_fcf_conn_list - Check if the FCF record can be used for discovery.
+ * @phba: pointer to lpfc hba data structure.
+ * @new_fcf_record: pointer to fcf record.
+ * @boot_flag: Indicates if this record is used by the boot BIOS.
+ * @addr_mode: The address mode to be used by this FCF.
+ * @vlan_id: The vlan id to be used with this FCF.
+ *
+ * This routine compares the fcf record with the connect list obtained from
+ * the config region to decide if this FCF can be used for SAN discovery. It
+ * returns 1 if this record can be used for SAN discovery, else it returns
+ * zero. If this FCF record can be used for SAN discovery, boot_flag will
+ * indicate if this FCF is used by the boot BIOS and addr_mode will indicate
+ * the addressing mode to be used for this FCF when the function returns.
+ * If the FCF record needs to be used with a particular vlan id, the vlan id
+ * is set in vlan_id on return from the function. If VLAN tagging is not
+ * needed with the FCF, vlan_id will be set to 0xFFFF.
+ **/
+static int
+lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
+			struct fcf_record *new_fcf_record,
+			uint32_t *boot_flag, uint32_t *addr_mode,
+			uint16_t *vlan_id)
+{
+	struct lpfc_fcf_conn_entry *conn_entry;
+
+	if (!phba->cfg_enable_fip) {
+		*boot_flag = 0;
+		*addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
+				new_fcf_record);
+		if (phba->valid_vlan)
+			*vlan_id = phba->vlan_id;
+		else
+			*vlan_id = 0xFFFF;
+		return 1;
+	}
+
+	/*
+	 * If there are no FCF connection table entries, the driver connects
+	 * to all FCFs.
+	 */
+	if (list_empty(&phba->fcf_conn_rec_list)) {
+		*boot_flag = 0;
+		*addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
+			new_fcf_record);
+		*vlan_id = 0xFFFF;
+		return 1;
+	}
+
+	list_for_each_entry(conn_entry, &phba->fcf_conn_rec_list, list) {
+		if (!(conn_entry->conn_rec.flags & FCFCNCT_VALID))
+			continue;
+
+		if ((conn_entry->conn_rec.flags & FCFCNCT_FBNM_VALID) &&
+			!lpfc_fab_name_match(conn_entry->conn_rec.fabric_name,
+				new_fcf_record))
+			continue;
+
+		if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID) {
+			/*
+			 * If the vlan bit map does not have the bit set for the
+			 * vlan id to be used, then it is not a match.
+			 */
+			if (!(new_fcf_record->vlan_bitmap
+				[conn_entry->conn_rec.vlan_tag / 8] &
+				(1 << (conn_entry->conn_rec.vlan_tag % 8))))
+				continue;
+		}
+
+		/*
+		 * Check if the connection record specifies a required
+		 * addressing mode.
+		 */
+		if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
+			!(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED)) {
+
+			/*
+			 * If SPMA is required but the FCF does not
+			 * support it, continue.
+			 */
+			if ((conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
+				!(bf_get(lpfc_fcf_record_mac_addr_prov,
+					new_fcf_record) & LPFC_FCF_SPMA))
+				continue;
+
+			/*
+			 * If FPMA is required but the FCF does not
+			 * support it, continue.
+			 */
+			if (!(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
+				!(bf_get(lpfc_fcf_record_mac_addr_prov,
+				new_fcf_record) & LPFC_FCF_FPMA))
+				continue;
+		}
+
+		/*
+		 * This fcf record matches filtering criteria.
+		 */
+		if (conn_entry->conn_rec.flags & FCFCNCT_BOOT)
+			*boot_flag = 1;
+		else
+			*boot_flag = 0;
+
+		*addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
+				new_fcf_record);
+		/*
+		 * If the user specified a required address mode, assign
+		 * that address mode.
+		 */
+		if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
+			(!(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED)))
+			*addr_mode = (conn_entry->conn_rec.flags &
+				FCFCNCT_AM_SPMA) ?
+				LPFC_FCF_SPMA : LPFC_FCF_FPMA;
+		/*
+		 * If the user specified a preferred address mode, use that
+		 * address mode only if the FCF supports it.
+		 */
+		else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
+			(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) &&
+			(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
+			(*addr_mode & LPFC_FCF_SPMA))
+				*addr_mode = LPFC_FCF_SPMA;
+		else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
+			(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) &&
+			!(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
+			(*addr_mode & LPFC_FCF_FPMA))
+				*addr_mode = LPFC_FCF_FPMA;
+		/*
+		 * If the user did not specify an addressing mode, use
+		 * FPMA if possible; otherwise use SPMA.
+		 */
+		else if (*addr_mode & LPFC_FCF_FPMA)
+			*addr_mode = LPFC_FCF_FPMA;
+
+		if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID)
+			*vlan_id = conn_entry->conn_rec.vlan_tag;
+		else
+			*vlan_id = 0xFFFF;
+
+		return 1;
+	}
+
+	return 0;
+}
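As an illustration of the routine's contract, a minimal caller sketch follows; the helper use_tagged_frames() is hypothetical, and only the out-parameter semantics come from the routine above:

	uint32_t boot_flag, addr_mode;
	uint16_t vlan_id;

	/* Returns 1 when the record passes the connect-list filters. */
	if (lpfc_match_fcf_conn_list(phba, new_fcf_record,
				     &boot_flag, &addr_mode, &vlan_id)) {
		/* addr_mode carries LPFC_FCF_SPMA and/or LPFC_FCF_FPMA. */
		if (vlan_id != 0xFFFF)		/* 0xFFFF means untagged */
			use_tagged_frames(vlan_id);	/* hypothetical */
	}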
+
+/**
+ * lpfc_mbx_cmpl_read_fcf_record - Completion handler for read_fcf mbox.
+ * @phba: pointer to lpfc hba data structure.
+ * @mboxq: pointer to mailbox object.
+ *
+ * This function iterates through all the FCF records available in the
+ * HBA and chooses the optimal FCF record for discovery. After finding
+ * the FCF for discovery, it registers the FCF record and kick-starts
+ * discovery.
+ * If the FCF_IN_USE flag is set in the currently used FCF, the routine
+ * tries to use an FCF record that matches the fabric name and mac
+ * address of the currently used FCF record.
+ * If the driver supports only one FCF, it will try to use the FCF
+ * record used by the boot BIOS.
+ */
+void
+lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+{
+	void *virt_addr;
+	dma_addr_t phys_addr;
+	uint8_t *bytep;
+	struct lpfc_mbx_sge sge;
+	struct lpfc_mbx_read_fcf_tbl *read_fcf;
+	uint32_t shdr_status, shdr_add_status;
+	union lpfc_sli4_cfg_shdr *shdr;
+	struct fcf_record *new_fcf_record;
+	int rc;
+	uint32_t boot_flag, addr_mode;
+	uint32_t next_fcf_index;
+	unsigned long flags;
+	uint16_t vlan_id;
+
+	/* Get the first SGE entry from the non-embedded DMA memory. This
+	 * routine only uses a single SGE.
+	 */
+	lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
+	phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
+	if (unlikely(!mboxq->sge_array)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+				"2524 Failed to get the non-embedded SGE "
+				"virtual address\n");
+		goto out;
+	}
+	virt_addr = mboxq->sge_array->addr[0];
+
+	shdr = (union lpfc_sli4_cfg_shdr *)virt_addr;
+	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
+				 &shdr->response);
+	/*
+	 * The FCF Record was read and there is no reason for the driver
+	 * to maintain the FCF record data or memory. Instead, the driver
+	 * only needs to keep track of which FCFIs can be used.
+	 */
+	if (shdr_status || shdr_add_status) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2521 READ_FCF_RECORD mailbox failed "
+				"with status x%x add_status x%x, mbx\n",
+				shdr_status, shdr_add_status);
+		goto out;
+	}
+	/* Interpreting the returned information of FCF records */
+	read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr;
+	lpfc_sli_pcimem_bcopy(read_fcf, read_fcf,
+			      sizeof(struct lpfc_mbx_read_fcf_tbl));
+	next_fcf_index = bf_get(lpfc_mbx_read_fcf_tbl_nxt_vindx, read_fcf);
+
+	new_fcf_record = (struct fcf_record *)(virt_addr +
+			  sizeof(struct lpfc_mbx_read_fcf_tbl));
+	lpfc_sli_pcimem_bcopy(new_fcf_record, new_fcf_record,
+			      sizeof(struct fcf_record));
+	bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
+
+	rc = lpfc_match_fcf_conn_list(phba, new_fcf_record,
+				      &boot_flag, &addr_mode,
+					&vlan_id);
+	/*
+	 * If the FCF record does not match the connect list entries,
+	 * read the next entry.
+	 */
+	if (!rc)
+		goto read_next_fcf;
+	/*
+	 * If this is not the first FCF discovery of the HBA, use the
+	 * currently in-use FCF record for the discovery.
+	 */
+	spin_lock_irqsave(&phba->hbalock, flags);
+	if (phba->fcf.fcf_flag & FCF_IN_USE) {
+		if (lpfc_fab_name_match(phba->fcf.fabric_name,
+			new_fcf_record) &&
+		    lpfc_mac_addr_match(phba, new_fcf_record)) {
+			phba->fcf.fcf_flag |= FCF_AVAILABLE;
+			spin_unlock_irqrestore(&phba->hbalock, flags);
+			goto out;
+		}
+		spin_unlock_irqrestore(&phba->hbalock, flags);
+		goto read_next_fcf;
+	}
+	if (phba->fcf.fcf_flag & FCF_AVAILABLE) {
+		/*
+		 * If the current FCF record does not have boot flag
+		 * set and new fcf record has boot flag set, use the
+		 * new fcf record.
+		 */
+		if (boot_flag && !(phba->fcf.fcf_flag & FCF_BOOT_ENABLE)) {
+			/* Use this FCF record */
+			lpfc_copy_fcf_record(phba, new_fcf_record);
+			phba->fcf.addr_mode = addr_mode;
+			phba->fcf.fcf_flag |= FCF_BOOT_ENABLE;
+			if (vlan_id != 0xFFFF) {
+				phba->fcf.fcf_flag |= FCF_VALID_VLAN;
+				phba->fcf.vlan_id = vlan_id;
+			}
+			spin_unlock_irqrestore(&phba->hbalock, flags);
+			goto read_next_fcf;
+		}
+		/*
+		 * If the current FCF record has boot flag set and the
+		 * new FCF record does not have boot flag, read the next
+		 * FCF record.
+		 */
+		if (!boot_flag && (phba->fcf.fcf_flag & FCF_BOOT_ENABLE)) {
+			spin_unlock_irqrestore(&phba->hbalock, flags);
+			goto read_next_fcf;
+		}
+		/*
+		 * If there is a record with lower priority value for
+		 * the current FCF, use that record.
+		 */
+		if (lpfc_fab_name_match(phba->fcf.fabric_name, new_fcf_record)
+			&& (new_fcf_record->fip_priority <
+				phba->fcf.priority)) {
+			/* Use this FCF record */
+			lpfc_copy_fcf_record(phba, new_fcf_record);
+			phba->fcf.addr_mode = addr_mode;
+			if (vlan_id != 0xFFFF) {
+				phba->fcf.fcf_flag |= FCF_VALID_VLAN;
+				phba->fcf.vlan_id = vlan_id;
+			}
+			spin_unlock_irqrestore(&phba->hbalock, flags);
+			goto read_next_fcf;
+		}
+		spin_unlock_irqrestore(&phba->hbalock, flags);
+		goto read_next_fcf;
+	}
+	/*
+	 * This is the first available FCF record, use this
+	 * record.
+	 */
+	lpfc_copy_fcf_record(phba, new_fcf_record);
+	phba->fcf.addr_mode = addr_mode;
+	if (boot_flag)
+		phba->fcf.fcf_flag |= FCF_BOOT_ENABLE;
+	phba->fcf.fcf_flag |= FCF_AVAILABLE;
+	if (vlan_id != 0xFFFF) {
+		phba->fcf.fcf_flag |= FCF_VALID_VLAN;
+		phba->fcf.vlan_id = vlan_id;
+	}
+	spin_unlock_irqrestore(&phba->hbalock, flags);
+	goto read_next_fcf;
+
+read_next_fcf:
+	lpfc_sli4_mbox_cmd_free(phba, mboxq);
+	if (next_fcf_index == LPFC_FCOE_FCF_NEXT_NONE || next_fcf_index == 0)
+		lpfc_register_fcf(phba);
+	else
+		lpfc_sli4_read_fcf_record(phba, next_fcf_index);
+	return;
+
+out:
+	lpfc_sli4_mbox_cmd_free(phba, mboxq);
+	lpfc_register_fcf(phba);
+
+	return;
+}
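The table scan above is driven entirely by mailbox completions; condensed from the handler itself, the control flow is:

	/* Kick off the scan, e.g. from link-up handling: */
	lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST);

	/* In the completion handler, after evaluating one record: */
	if (next_fcf_index == LPFC_FCOE_FCF_NEXT_NONE || next_fcf_index == 0)
		lpfc_register_fcf(phba);	/* scan done, register choice */
	else
		lpfc_sli4_read_fcf_record(phba, next_fcf_index);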
+
+/**
+ * lpfc_start_fdiscs - send fdiscs for each vport on this port.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This function loops through the list of vports on the @phba and issues an
+ * FDISC if possible.
+ */
+void
+lpfc_start_fdiscs(struct lpfc_hba *phba)
+{
+	struct lpfc_vport **vports;
+	int i;
+
+	vports = lpfc_create_vport_work_array(phba);
+	if (vports != NULL) {
+		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
+			if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
+				continue;
+			/* There is no vpi available for this vport */
+			if (vports[i]->vpi > phba->max_vpi) {
+				lpfc_vport_set_state(vports[i],
+						     FC_VPORT_FAILED);
+				continue;
+			}
+			if (phba->fc_topology == TOPOLOGY_LOOP) {
+				lpfc_vport_set_state(vports[i],
+						     FC_VPORT_LINKDOWN);
+				continue;
+			}
+			if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
+				lpfc_initial_fdisc(vports[i]);
+			else {
+				lpfc_vport_set_state(vports[i],
+						     FC_VPORT_NO_FABRIC_SUPP);
+				lpfc_printf_vlog(vports[i], KERN_ERR,
+						 LOG_ELS,
+						 "0259 No NPIV "
+						 "Fabric support\n");
+			}
+		}
+	}
+	lpfc_destroy_vport_work_array(phba, vports);
+}
+
+void
+lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+{
+	struct lpfc_dmabuf *dmabuf = mboxq->context1;
+	struct lpfc_vport *vport = mboxq->vport;
+
+	if (mboxq->u.mb.mbxStatus) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
+			 "2018 REG_VFI mbxStatus error x%x "
+			 "HBA state x%x\n",
+			 mboxq->u.mb.mbxStatus, vport->port_state);
+		if (phba->fc_topology == TOPOLOGY_LOOP) {
+			/* FLOGI failed, use loop map to make discovery list */
+			lpfc_disc_list_loopmap(vport);
+			/* Start discovery */
+			lpfc_disc_start(vport);
+			goto fail_free_mem;
+		}
+		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+		goto fail_free_mem;
+	}
+	/* Mark the vport as registered with its VFI */
+	vport->vfi_state |= LPFC_VFI_REGISTERED;
+
+	if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
+		lpfc_start_fdiscs(phba);
+		lpfc_do_scr_ns_plogi(phba, vport);
+	}
+
+fail_free_mem:
+	mempool_free(mboxq, phba->mbox_mem_pool);
+	lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
+	kfree(dmabuf);
+	return;
+}
+
 static void
 lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 {
-	MAILBOX_t *mb = &pmb->mb;
+	MAILBOX_t *mb = &pmb->u.mb;
 	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) pmb->context1;
 	struct lpfc_vport  *vport = pmb->vport;
 
@@ -1037,13 +1625,13 @@ static void
 lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
 {
 	struct lpfc_vport *vport = phba->pport;
-	LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox;
+	LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox = NULL;
 	int i;
 	struct lpfc_dmabuf *mp;
 	int rc;
+	struct fcf_record *fcf_record;
 
 	sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
-	cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 
 	spin_lock_irq(&phba->hbalock);
 	switch (la->UlnkSpeed) {
@@ -1140,22 +1728,66 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
 			lpfc_mbuf_free(phba, mp->virt, mp->phys);
 			kfree(mp);
 			mempool_free(sparam_mbox, phba->mbox_mem_pool);
-			if (cfglink_mbox)
-				mempool_free(cfglink_mbox, phba->mbox_mem_pool);
 			goto out;
 		}
 	}
 
-	if (cfglink_mbox) {
+	if (!(phba->hba_flag & HBA_FCOE_SUPPORT)) {
+		cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+		if (!cfglink_mbox)
+			goto out;
 		vport->port_state = LPFC_LOCAL_CFG_LINK;
 		lpfc_config_link(phba, cfglink_mbox);
 		cfglink_mbox->vport = vport;
 		cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
 		rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, MBX_NOWAIT);
-		if (rc != MBX_NOT_FINISHED)
-			return;
-		mempool_free(cfglink_mbox, phba->mbox_mem_pool);
+		if (rc == MBX_NOT_FINISHED) {
+			mempool_free(cfglink_mbox, phba->mbox_mem_pool);
+			goto out;
+		}
+	} else {
+		/*
+		 * Add the driver's default FCF record at FCF index 0 now. This
+		 * is a phase 1 implementation that supports FCF index 0 and
+		 * driver defaults.
+		 */
+		if (phba->cfg_enable_fip == 0) {
+			fcf_record = kzalloc(sizeof(struct fcf_record),
+					GFP_KERNEL);
+			if (unlikely(!fcf_record)) {
+				lpfc_printf_log(phba, KERN_ERR,
+					LOG_MBOX | LOG_SLI,
+					"2554 Could not allocate memory for "
+					"fcf record\n");
+				rc = -ENODEV;
+				goto out;
+			}
+
+			lpfc_sli4_build_dflt_fcf_record(phba, fcf_record,
+						LPFC_FCOE_FCF_DEF_INDEX);
+			rc = lpfc_sli4_add_fcf_record(phba, fcf_record);
+			if (unlikely(rc)) {
+				lpfc_printf_log(phba, KERN_ERR,
+					LOG_MBOX | LOG_SLI,
+					"2013 Could not manually add FCF "
+					"record 0, status %d\n", rc);
+				rc = -ENODEV;
+				kfree(fcf_record);
+				goto out;
+			}
+			kfree(fcf_record);
+		}
+		/*
+		 * The driver is expected to do FIP/FCF. Call the port
+		 * and get the FCF Table.
+		 */
+		rc = lpfc_sli4_read_fcf_record(phba,
+					LPFC_FCOE_FCF_GET_FIRST);
+		if (rc)
+			goto out;
 	}
+
+	return;
 out:
 	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
 	lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
@@ -1186,6 +1818,7 @@ lpfc_mbx_issue_link_down(struct lpfc_hba *phba)
 {
 	lpfc_linkdown(phba);
 	lpfc_enable_la(phba);
+	lpfc_unregister_unused_fcf(phba);
 	/* turn on Link Attention interrupts - no CLEAR_LA needed */
 }
 
@@ -3330,3 +3963,395 @@ lpfc_nlp_not_used(struct lpfc_nodelist *ndlp)
 			return 1;
 	return 0;
 }
+
+/**
+ * lpfc_fcf_inuse - Check if FCF can be unregistered.
+ * @phba: Pointer to hba context object.
+ *
+ * This function iterates through all FC nodes associated
+ * with all vports to check if there is any node with an
+ * fc_rport associated with it. If there is an fc_rport
+ * associated with the node, then the node is either in
+ * discovered state or its devloss_timer is pending.
+ */
+static int
+lpfc_fcf_inuse(struct lpfc_hba *phba)
+{
+	struct lpfc_vport **vports;
+	int i, ret = 0;
+	struct lpfc_nodelist *ndlp;
+	struct Scsi_Host  *shost;
+
+	vports = lpfc_create_vport_work_array(phba);
+
+	for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
+		shost = lpfc_shost_from_vport(vports[i]);
+		spin_lock_irq(shost->host_lock);
+		list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
+			if (NLP_CHK_NODE_ACT(ndlp) && ndlp->rport &&
+			  (ndlp->rport->roles & FC_RPORT_ROLE_FCP_TARGET)) {
+				ret = 1;
+				spin_unlock_irq(shost->host_lock);
+				goto out;
+			}
+		}
+		spin_unlock_irq(shost->host_lock);
+	}
+out:
+	lpfc_destroy_vport_work_array(phba, vports);
+	return ret;
+}
+
+/**
+ * lpfc_unregister_vfi_cmpl - Completion handler for unreg vfi.
+ * @phba: Pointer to hba context object.
+ * @mboxq: Pointer to mailbox object.
+ *
+ * This function frees memory associated with the mailbox command.
+ */
+static void
+lpfc_unregister_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+{
+	struct lpfc_vport *vport = mboxq->vport;
+
+	if (mboxq->u.mb.mbxStatus) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
+			"2555 UNREG_VFI mbxStatus error x%x "
+			"HBA state x%x\n",
+			mboxq->u.mb.mbxStatus, vport->port_state);
+	}
+	mempool_free(mboxq, phba->mbox_mem_pool);
+	return;
+}
+
+/**
+ * lpfc_unregister_fcfi_cmpl - Completion handler for unreg fcfi.
+ * @phba: Pointer to hba context object.
+ * @mboxq: Pointer to mailbox object.
+ *
+ * This function frees memory associated with the mailbox command.
+ */
+static void
+lpfc_unregister_fcfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+{
+	struct lpfc_vport *vport = mboxq->vport;
+
+	if (mboxq->u.mb.mbxStatus) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
+			"2550 UNREG_FCFI mbxStatus error x%x "
+			"HBA state x%x\n",
+			mboxq->u.mb.mbxStatus, vport->port_state);
+	}
+	mempool_free(mboxq, phba->mbox_mem_pool);
+	return;
+}
+
+/**
+ * lpfc_unregister_unused_fcf - Unregister FCF if all devices are disconnected.
+ * @phba: Pointer to hba context object.
+ *
+ * This function checks if there are any connected remote ports for the FCF
+ * and, if all the devices are disconnected, unregisters the FCFI.
+ * This function also tries to use another FCF for discovery.
+ */
+void
+lpfc_unregister_unused_fcf(struct lpfc_hba *phba)
+{
+	LPFC_MBOXQ_t *mbox;
+	int rc;
+	struct lpfc_vport **vports;
+	int i;
+
+	spin_lock_irq(&phba->hbalock);
+	/*
+	 * Do nothing if:
+	 * - the HBA is not running in FIP mode, or
+	 * - the HBA does not support FCoE, or
+	 * - the FCF is not registered.
+	 */
+	if (!(phba->hba_flag & HBA_FCOE_SUPPORT) ||
+		!(phba->fcf.fcf_flag & FCF_REGISTERED) ||
+		(phba->cfg_enable_fip == 0)) {
+		spin_unlock_irq(&phba->hbalock);
+		return;
+	}
+	spin_unlock_irq(&phba->hbalock);
+
+	if (lpfc_fcf_inuse(phba))
+		return;
+
+
+	/* Unregister VPIs */
+	vports = lpfc_create_vport_work_array(phba);
+	if (vports &&
+		(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))
+		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
+			lpfc_mbx_unreg_vpi(vports[i]);
+			vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
+			vports[i]->vfi_state &= ~LPFC_VFI_REGISTERED;
+		}
+	lpfc_destroy_vport_work_array(phba, vports);
+
+	/* Unregister VFI */
+	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mbox) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
+			"2556 UNREG_VFI mbox allocation failed "
+			"HBA state x%x\n",
+			phba->pport->port_state);
+		return;
+	}
+
+	lpfc_unreg_vfi(mbox, phba->pport->vfi);
+	mbox->vport = phba->pport;
+	mbox->mbox_cmpl = lpfc_unregister_vfi_cmpl;
+
+	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
+	if (rc == MBX_NOT_FINISHED) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
+			"2557 UNREG_VFI issue mbox failed rc x%x "
+			"HBA state x%x\n",
+			rc, phba->pport->port_state);
+		mempool_free(mbox, phba->mbox_mem_pool);
+		return;
+	}
+
+	/* Unregister FCF */
+	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mbox) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
+			"2551 UNREG_FCFI mbox allocation failed "
+			"HBA state x%x\n",
+			phba->pport->port_state);
+		return;
+	}
+
+	lpfc_unreg_fcfi(mbox, phba->fcf.fcfi);
+	mbox->vport = phba->pport;
+	mbox->mbox_cmpl = lpfc_unregister_fcfi_cmpl;
+	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
+
+	if (rc == MBX_NOT_FINISHED) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
+			"2552 UNREG_FCFI issue mbox failed rc x%x "
+			"HBA state x%x\n",
+			rc, phba->pport->port_state);
+		mempool_free(mbox, phba->mbox_mem_pool);
+		return;
+	}
+
+	spin_lock_irq(&phba->hbalock);
+	phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_REGISTERED |
+		FCF_DISCOVERED | FCF_BOOT_ENABLE | FCF_IN_USE |
+		FCF_VALID_VLAN);
+	spin_unlock_irq(&phba->hbalock);
+
+	/*
+	 * If the driver is not unloading and the link is up, check if
+	 * there is any other FCF record that can be used for discovery.
+	 */
+	if ((phba->pport->load_flag & FC_UNLOADING) ||
+		(phba->link_state < LPFC_LINK_UP))
+		return;
+
+	rc = lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST);
+
+	if (rc)
+		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
+			"2553 lpfc_unregister_unused_fcf failed to read FCF"
+			" record HBA state x%x\n",
+			phba->pport->port_state);
+}
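Condensed from the routine above, the teardown runs in a fixed order:

	/* 1. UNREG_VPI for every vport (when NPIV is enabled)
	 * 2. UNREG_VFI for the physical port
	 * 3. UNREG_FCFI for the registered FCF
	 * 4. clear the FCF_* flags and, unless unloading or link-down,
	 *    rescan the table via LPFC_FCOE_FCF_GET_FIRST
	 */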
+
+/**
+ * lpfc_read_fcf_conn_tbl - Create driver FCF connection table.
+ * @phba: Pointer to hba context object.
+ * @buff: Buffer containing the FCF connection table as in the config
+ *        region.
+ *
+ * This function creates the driver data structures for the FCF
+ * connection record table read from config region 23.
+ */
+static void
+lpfc_read_fcf_conn_tbl(struct lpfc_hba *phba,
+	uint8_t *buff)
+{
+	struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
+	struct lpfc_fcf_conn_hdr *conn_hdr;
+	struct lpfc_fcf_conn_rec *conn_rec;
+	uint32_t record_count;
+	int i;
+
+	/* Free the current connect table */
+	list_for_each_entry_safe(conn_entry, next_conn_entry,
+		&phba->fcf_conn_rec_list, list)
+		kfree(conn_entry);
+
+	conn_hdr = (struct lpfc_fcf_conn_hdr *) buff;
+	record_count = conn_hdr->length * sizeof(uint32_t)/
+		sizeof(struct lpfc_fcf_conn_rec);
+
+	conn_rec = (struct lpfc_fcf_conn_rec *)
+		(buff + sizeof(struct lpfc_fcf_conn_hdr));
+
+	for (i = 0; i < record_count; i++) {
+		if (!(conn_rec[i].flags & FCFCNCT_VALID))
+			continue;
+		conn_entry = kzalloc(sizeof(struct lpfc_fcf_conn_entry),
+			GFP_KERNEL);
+		if (!conn_entry) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2566 Failed to allocate connection"
+				" table entry\n");
+			return;
+		}
+
+		memcpy(&conn_entry->conn_rec, &conn_rec[i],
+			sizeof(struct lpfc_fcf_conn_rec));
+		conn_entry->conn_rec.vlan_tag =
+			le16_to_cpu(conn_entry->conn_rec.vlan_tag) & 0xFFF;
+		conn_entry->conn_rec.flags =
+			le16_to_cpu(conn_entry->conn_rec.flags);
+		list_add_tail(&conn_entry->list,
+			&phba->fcf_conn_rec_list);
+	}
+}
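The record count above comes from the header's length field, which is in 32-bit words. A worked example, assuming a hypothetical 32-byte struct lpfc_fcf_conn_rec:

	uint32_t record_count;

	/* conn_hdr->length = 24 words -> 24 * 4 = 96 bytes of records;
	 * 96 / 32 = 3 connection records follow the header.
	 */
	record_count = conn_hdr->length * sizeof(uint32_t) /
		sizeof(struct lpfc_fcf_conn_rec);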
+
+/**
+ * lpfc_read_fcoe_param - Read FCoE parameters from config region 23.
+ * @phba: Pointer to hba context object.
+ * @buff: Buffer containing the FCoE parameter data structure.
+ *
+ * This function updates the driver data structure with the config
+ * parameters read from config region 23.
+ */
+static void
+lpfc_read_fcoe_param(struct lpfc_hba *phba,
+			uint8_t *buff)
+{
+	struct lpfc_fip_param_hdr *fcoe_param_hdr;
+	struct lpfc_fcoe_params *fcoe_param;
+
+	fcoe_param_hdr = (struct lpfc_fip_param_hdr *)buff;
+	/* Offset in bytes before casting, so the addition is not scaled
+	 * by sizeof(struct lpfc_fcoe_params).
+	 */
+	fcoe_param = (struct lpfc_fcoe_params *)
+		(buff + sizeof(struct lpfc_fip_param_hdr));
+
+	if ((fcoe_param_hdr->parm_version != FIPP_VERSION) ||
+		(fcoe_param_hdr->length != FCOE_PARAM_LENGTH))
+		return;
+
+	if (bf_get(lpfc_fip_param_hdr_fipp_mode, fcoe_param_hdr) ==
+			FIPP_MODE_ON)
+		phba->cfg_enable_fip = 1;
+
+	if (bf_get(lpfc_fip_param_hdr_fipp_mode, fcoe_param_hdr) ==
+		FIPP_MODE_OFF)
+		phba->cfg_enable_fip = 0;
+
+	if (fcoe_param_hdr->parm_flags & FIPP_VLAN_VALID) {
+		phba->valid_vlan = 1;
+		phba->vlan_id = le16_to_cpu(fcoe_param->vlan_tag) &
+			0xFFF;
+	}
+
+	phba->fc_map[0] = fcoe_param->fc_map[0];
+	phba->fc_map[1] = fcoe_param->fc_map[1];
+	phba->fc_map[2] = fcoe_param->fc_map[2];
+	return;
+}
+
+/**
+ * lpfc_get_rec_conf23 - Get a record type in config region data.
+ * @buff: Buffer containing config region 23 data.
+ * @size: Size of the data buffer.
+ * @rec_type: Record type to be searched.
+ *
+ * This function searches the config region data to find the beginning
+ * of the record specified by @rec_type. If the record is found, this
+ * function returns a pointer to the record, else it returns NULL.
+ */
+static uint8_t *
+lpfc_get_rec_conf23(uint8_t *buff, uint32_t size, uint8_t rec_type)
+{
+	uint32_t offset = 0, rec_length;
+
+	if ((buff[0] == LPFC_REGION23_LAST_REC) ||
+		(size < sizeof(uint32_t)))
+		return NULL;
+
+	rec_length = buff[offset + 1];
+
+	/*
+	 * One TLV record has one word header and number of data words
+	 * specified in the rec_length field of the record header.
+	 */
+	while ((offset + rec_length * sizeof(uint32_t) + sizeof(uint32_t))
+		<= size) {
+		if (buff[offset] == rec_type)
+			return &buff[offset];
+
+		if (buff[offset] == LPFC_REGION23_LAST_REC)
+			return NULL;
+
+		offset += rec_length * sizeof(uint32_t) + sizeof(uint32_t);
+		rec_length = buff[offset + 1];
+	}
+	return NULL;
+}
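Each region 23 record is a TLV whose one-word header keeps the type in byte 0 and the data length, in words, in byte 1. A usage sketch, mirroring the calls in lpfc_parse_fcoe_conf() below (buff and size as in that caller):

	uint8_t *rec;

	/* A record with length 5 occupies (1 + 5) * sizeof(uint32_t)
	 * = 24 bytes, so the next header starts 24 bytes later.
	 */
	rec = lpfc_get_rec_conf23(buff, size, FCOE_PARAM_TYPE);
	if (rec)
		lpfc_read_fcoe_param(phba, rec);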
+
+/**
+ * lpfc_parse_fcoe_conf - Parse FCoE config data read from config region 23.
+ * @phba: Pointer to lpfc_hba data structure.
+ * @buff: Buffer containing config region 23 data.
+ * @size: Size of the data buffer.
+ *
+ * This function parses the FCoE config parameters in config region 23 and
+ * populates the driver data structures with the parameters.
+ */
+void
+lpfc_parse_fcoe_conf(struct lpfc_hba *phba,
+		uint8_t *buff,
+		uint32_t size)
+{
+	uint32_t offset = 0, rec_length;
+	uint8_t *rec_ptr;
+
+	/*
+	 * If the data size is less than 2 words, the signature and
+	 * version cannot be verified.
+	 */
+	if (size < 2*sizeof(uint32_t))
+		return;
+
+	/* Check the region signature first */
+	if (memcmp(buff, LPFC_REGION23_SIGNATURE, 4)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"2567 Config region 23 has bad signature\n");
+		return;
+	}
+
+	offset += 4;
+
+	/* Check the data structure version */
+	if (buff[offset] != LPFC_REGION23_VERSION) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"2568 Config region 23 has bad version\n");
+		return;
+	}
+	offset += 4;
+
+	rec_length = buff[offset + 1];
+
+	/* Read FCoE param record */
+	rec_ptr = lpfc_get_rec_conf23(&buff[offset],
+			size - offset, FCOE_PARAM_TYPE);
+	if (rec_ptr)
+		lpfc_read_fcoe_param(phba, rec_ptr);
+
+	/* Read FCF connection table */
+	rec_ptr = lpfc_get_rec_conf23(&buff[offset],
+		size - offset, FCOE_CONN_TBL_TYPE);
+	if (rec_ptr)
+		lpfc_read_fcf_conn_tbl(phba, rec_ptr);
+
+}
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 6efe459e8dd..2c7eba68626 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -363,7 +363,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	if (!mbox)
 		goto out;
 
-	rc = lpfc_reg_login(phba, vport->vpi, icmd->un.rcvels.remoteID,
+	rc = lpfc_reg_rpi(phba, vport->vpi, icmd->un.rcvels.remoteID,
 			    (uint8_t *) sp, mbox, 0);
 	if (rc) {
 		mempool_free(mbox, phba->mbox_mem_pool);
@@ -497,11 +497,19 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 		lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
 	else
 		lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
+	if ((ndlp->nlp_type & NLP_FABRIC) &&
+		vport->port_type == LPFC_NPIV_PORT) {
+		lpfc_linkdown_port(vport);
+		mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
+		spin_lock_irq(shost->host_lock);
+		ndlp->nlp_flag |= NLP_DELAY_TMO;
+		spin_unlock_irq(shost->host_lock);
 
-	if ((!(ndlp->nlp_type & NLP_FABRIC) &&
-	     ((ndlp->nlp_type & NLP_FCP_TARGET) ||
-	      !(ndlp->nlp_type & NLP_FCP_INITIATOR))) ||
-	    (ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) {
+		ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
+	} else if ((!(ndlp->nlp_type & NLP_FABRIC) &&
+		((ndlp->nlp_type & NLP_FCP_TARGET) ||
+		!(ndlp->nlp_type & NLP_FCP_INITIATOR))) ||
+		(ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) {
 		/* Only try to re-login if this is NOT a Fabric Node */
 		mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
 		spin_lock_irq(shost->host_lock);
@@ -569,7 +577,7 @@ lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 {
 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
 
-	if (!ndlp->nlp_rpi) {
+	if (!(ndlp->nlp_flag & NLP_RPI_VALID)) {
 		ndlp->nlp_flag &= ~NLP_NPR_ADISC;
 		return 0;
 	}
@@ -859,7 +867,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
 
 	lpfc_unreg_rpi(vport, ndlp);
 
-	if (lpfc_reg_login(phba, vport->vpi, irsp->un.elsreq64.remoteID,
+	if (lpfc_reg_rpi(phba, vport->vpi, irsp->un.elsreq64.remoteID,
 			   (uint8_t *) sp, mbox, 0) == 0) {
 		switch (ndlp->nlp_DID) {
 		case NameServer_DID:
@@ -1070,6 +1078,7 @@ lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport,
 	struct lpfc_iocbq *cmdiocb, *rspiocb;
 	IOCB_t *irsp;
 	ADISC *ap;
+	int rc;
 
 	cmdiocb = (struct lpfc_iocbq *) arg;
 	rspiocb = cmdiocb->context_un.rsp_iocb;
@@ -1095,6 +1104,15 @@ lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport,
 		return ndlp->nlp_state;
 	}
 
+	if (phba->sli_rev == LPFC_SLI_REV4) {
+		rc = lpfc_sli4_resume_rpi(ndlp);
+		if (rc) {
+			/* Stay in state and retry. */
+			ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
+			return ndlp->nlp_state;
+		}
+	}
+
 	if (ndlp->nlp_type & NLP_FCP_TARGET) {
 		ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
@@ -1102,6 +1120,7 @@ lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport,
 		ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
 	}
+
 	return ndlp->nlp_state;
 }
 
@@ -1285,6 +1304,7 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
 	}
 
 	ndlp->nlp_rpi = mb->un.varWords[0];
+	ndlp->nlp_flag |= NLP_RPI_VALID;
 
 	/* Only if we are not a fabric nport do we issue PRLI */
 	if (!(ndlp->nlp_type & NLP_FABRIC)) {
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index b53af993628..9645d32e6f8 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -10929,3 +10929,520 @@ lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba)
 	};
 	return 0;
 }
+
+/**
+ * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to post rpi header templates to the
+ * HBA consistent with the SLI-4 interface spec.  This routine
+ * posts a PAGE_SIZE memory region to the port to hold up to
+ * PAGE_SIZE / 64 rpi context headers.
+ *
+ * This routine does not require any locks.  Its usage is expected
+ * to be driver load or reset recovery, when driver execution is
+ * sequential.
+ *
+ * Return codes
+ * 	0 - successful
+ *      EIO - The mailbox failed to complete successfully.
+ * 	When this error occurs, the driver is not guaranteed
+ *	to have any rpi regions posted to the device and
+ *	must either attempt to repost the regions or take a
+ *	fatal error.
+ **/
+int
+lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
+{
+	struct lpfc_rpi_hdr *rpi_page;
+	uint32_t rc = 0;
+
+	/* Post all rpi memory regions to the port. */
+	list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
+		rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page);
+		if (rc != MBX_SUCCESS) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+					"2008 Error %d posting all rpi "
+					"headers\n", rc);
+			rc = -EIO;
+			break;
+		}
+	}
+
+	return rc;
+}
+
+/**
+ * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port
+ * @phba: pointer to lpfc hba data structure.
+ * @rpi_page:  pointer to the rpi memory region.
+ *
+ * This routine is invoked to post a single rpi header to the
+ * HBA consistent with the SLI-4 interface spec.  This memory region
+ * maps up to 64 rpi context regions.
+ *
+ * Return codes
+ * 	0 - successful
+ * 	ENOMEM - No available memory
+ *      EIO - The mailbox failed to complete successfully.
+ **/
+int
+lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
+{
+	LPFC_MBOXQ_t *mboxq;
+	struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl;
+	uint32_t rc = 0;
+	uint32_t mbox_tmo;
+	uint32_t shdr_status, shdr_add_status;
+	union lpfc_sli4_cfg_shdr *shdr;
+
+	/* The port is notified of the header region via a mailbox command. */
+	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mboxq) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"2001 Unable to allocate memory for issuing "
+				"SLI_CONFIG_SPECIAL mailbox command\n");
+		return -ENOMEM;
+	}
+
+	/* Set up the POST_HDR_TEMPLATE mailbox command for this page. */
+	hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl;
+	mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
+	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
+			 LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
+			 sizeof(struct lpfc_mbx_post_hdr_tmpl) -
+			 sizeof(struct mbox_header), LPFC_SLI4_MBX_EMBED);
+	bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
+	       hdr_tmpl, rpi_page->page_count);
+	bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
+	       rpi_page->start_rpi);
+	hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
+	hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
+	if (!phba->sli4_hba.intr_enable)
+		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+	else
+		rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
+	shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
+	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+	if (rc != MBX_TIMEOUT)
+		mempool_free(mboxq, phba->mbox_mem_pool);
+	if (shdr_status || shdr_add_status || rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2514 POST_RPI_HDR mailbox failed with "
+				"status x%x add_status x%x, mbx status x%x\n",
+				shdr_status, shdr_add_status, rc);
+		rc = -ENXIO;
+	}
+	return rc;
+}
+
+/**
+ * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to allocate an rpi from the driver's rpi
+ * bitmask.  If the pool of remaining rpis runs low, it also posts an
+ * additional rpi header page to the port.
+ *
+ * Returns
+ * 	A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
+ * 	LPFC_RPI_ALLOC_ERROR if no rpis are available.
+ **/
+int
+lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
+{
+	int rpi;
+	uint16_t max_rpi, rpi_base, rpi_limit;
+	uint16_t rpi_remaining;
+	struct lpfc_rpi_hdr *rpi_hdr;
+
+	max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
+	rpi_base = phba->sli4_hba.max_cfg_param.rpi_base;
+	rpi_limit = phba->sli4_hba.next_rpi;
+
+	/*
+	 * The valid rpi range is not guaranteed to be zero-based.  Start
+	 * the search at the rpi_base as reported by the port.
+	 */
+	spin_lock_irq(&phba->hbalock);
+	rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, rpi_base);
+	if (rpi >= rpi_limit || rpi < rpi_base)
+		rpi = LPFC_RPI_ALLOC_ERROR;
+	else {
+		set_bit(rpi, phba->sli4_hba.rpi_bmask);
+		phba->sli4_hba.max_cfg_param.rpi_used++;
+		phba->sli4_hba.rpi_count++;
+	}
+
+	/*
+	 * Don't try to allocate more rpi header regions if the device's
+	 * limit on available rpis has been exhausted.
+	 */
+	if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
+	    (phba->sli4_hba.rpi_count >= max_rpi)) {
+		spin_unlock_irq(&phba->hbalock);
+		return rpi;
+	}
+
+	/*
+	 * If the driver is running low on rpi resources, allocate another
+	 * page now.  Note that the next_rpi value is used because
+	 * it represents how many are actually in use whereas max_rpi notes
+	 * the maximum number supported by the device.
+	 */
+	rpi_remaining = phba->sli4_hba.next_rpi - rpi_base -
+		phba->sli4_hba.rpi_count;
+	spin_unlock_irq(&phba->hbalock);
+	if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
+		rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
+		if (!rpi_hdr) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+					"2002 Error Could not grow rpi "
+					"count\n");
+		} else {
+			lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
+		}
+	}
+
+	return rpi;
+}
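A minimal sketch of how the rpi helpers pair up; the caller context is hypothetical:

	int rpi;

	rpi = lpfc_sli4_alloc_rpi(phba);
	if (rpi == LPFC_RPI_ALLOC_ERROR)
		return -ENOMEM;		/* pool exhausted */

	/* ... use the rpi for a remote-port login ... */

	lpfc_sli4_free_rpi(phba, rpi);	/* return it to the bitmask */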
+
+/**
+ * lpfc_sli4_free_rpi - Release an rpi for reuse.
+ * @phba: pointer to lpfc hba data structure.
+ * @rpi: rpi to release.
+ *
+ * This routine is invoked to release an rpi to the pool of
+ * available rpis maintained by the driver.
+ **/
+void
+lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
+{
+	spin_lock_irq(&phba->hbalock);
+	clear_bit(rpi, phba->sli4_hba.rpi_bmask);
+	phba->sli4_hba.rpi_count--;
+	phba->sli4_hba.max_cfg_param.rpi_used--;
+	spin_unlock_irq(&phba->hbalock);
+}
+
+/**
+ * lpfc_sli4_remove_rpis - Remove the rpi bitmask region
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to free the memory region that tracks
+ * rpi allocations via a bitmask.
+ **/
+void
+lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
+{
+	kfree(phba->sli4_hba.rpi_bmask);
+}
+
+/**
+ * lpfc_sli4_resume_rpi - Resume an rpi on the port
+ * @ndlp: pointer to the lpfc nodelist whose rpi is to be resumed.
+ *
+ * This routine is invoked to issue a RESUME_RPI mailbox command to
+ * resume I/O on the rpi associated with @ndlp.
+ **/
+int
+lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp)
+{
+	LPFC_MBOXQ_t *mboxq;
+	struct lpfc_hba *phba = ndlp->phba;
+	int rc;
+
+	/* Allocate a mailbox for the RESUME_RPI command. */
+	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mboxq)
+		return -ENOMEM;
+
+	/* Build and issue the RESUME_RPI mailbox command. */
+	lpfc_resume_rpi(mboxq, ndlp);
+	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
+	if (rc == MBX_NOT_FINISHED) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"2010 Resume RPI Mailbox failed "
+				"status %d, mbxStatus x%x\n", rc,
+				bf_get(lpfc_mqe_status, &mboxq->u.mqe));
+		mempool_free(mboxq, phba->mbox_mem_pool);
+		return -EIO;
+	}
+	return 0;
+}
+
+/**
+ * lpfc_sli4_init_vpi - Initialize a vpi with the port
+ * @phba: pointer to lpfc hba data structure.
+ * @vpi: vpi value to activate with the port.
+ *
+ * This routine is invoked to activate a vpi with the
+ * port when the host intends to use vports with a
+ * nonzero vpi.
+ *
+ * Returns:
+ *    0 success
+ *    -Evalue otherwise
+ **/
+int
+lpfc_sli4_init_vpi(struct lpfc_hba *phba, uint16_t vpi)
+{
+	LPFC_MBOXQ_t *mboxq;
+	int rc = 0;
+	uint32_t mbox_tmo;
+
+	if (vpi == 0)
+		return -EINVAL;
+	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mboxq)
+		return -ENOMEM;
+	lpfc_init_vpi(mboxq, vpi);
+	mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_INIT_VPI);
+	rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
+	if (rc != MBX_TIMEOUT)
+		mempool_free(mboxq, phba->mbox_mem_pool);
+	if (rc != MBX_SUCCESS) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"2022 INIT VPI Mailbox failed "
+				"status %d, mbxStatus x%x\n", rc,
+				bf_get(lpfc_mqe_status, &mboxq->u.mqe));
+		rc = -EIO;
+	}
+	return rc;
+}
+
+/**
+ * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler.
+ * @phba: pointer to lpfc hba data structure.
+ * @mboxq: Pointer to mailbox object.
+ *
+ * This is the completion handler for the ADD_FCF_RECORD non-embedded
+ * mailbox command.  It checks the mailbox status and frees the
+ * non-embedded mailbox resources.
+ **/
+static void
+lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+{
+	void *virt_addr;
+	union lpfc_sli4_cfg_shdr *shdr;
+	uint32_t shdr_status, shdr_add_status;
+
+	virt_addr = mboxq->sge_array->addr[0];
+	/* The IOCTL status is embedded in the mailbox subheader. */
+	shdr = (union lpfc_sli4_cfg_shdr *) virt_addr;
+	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+
+	if ((shdr_status || shdr_add_status) &&
+		(shdr_status != STATUS_FCF_IN_USE))
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"2558 ADD_FCF_RECORD mailbox failed with "
+			"status x%x add_status x%x\n",
+			shdr_status, shdr_add_status);
+
+	lpfc_sli4_mbox_cmd_free(phba, mboxq);
+}
+
+/**
+ * lpfc_sli4_add_fcf_record - Manually add an FCF Record.
+ * @phba: pointer to lpfc hba data structure.
+ * @fcf_record:  pointer to the initialized fcf record to add.
+ *
+ * This routine is invoked to manually add a single FCF record. The caller
+ * must pass a completely initialized FCF_Record.  This routine takes
+ * care of the nonembedded mailbox operations.
+ **/
+int
+lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record)
+{
+	int rc = 0;
+	LPFC_MBOXQ_t *mboxq;
+	uint8_t *bytep;
+	void *virt_addr;
+	dma_addr_t phys_addr;
+	struct lpfc_mbx_sge sge;
+	uint32_t alloc_len, req_len;
+	uint32_t fcfindex;
+
+	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mboxq) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"2009 Failed to allocate mbox for ADD_FCF cmd\n");
+		return -ENOMEM;
+	}
+
+	req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) +
+		  sizeof(uint32_t);
+
+	/* Allocate DMA memory and set up the non-embedded mailbox command */
+	alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
+				     LPFC_MBOX_OPCODE_FCOE_ADD_FCF,
+				     req_len, LPFC_SLI4_MBX_NEMBED);
+	if (alloc_len < req_len) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"2523 Allocated DMA memory size (x%x) is "
+			"less than the requested DMA memory "
+			"size (x%x)\n", alloc_len, req_len);
+		lpfc_sli4_mbox_cmd_free(phba, mboxq);
+		return -ENOMEM;
+	}
+
+	/*
+	 * Get the first SGE entry from the non-embedded DMA memory.  This
+	 * routine only uses a single SGE.
+	 */
+	lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
+	phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
+	if (unlikely(!mboxq->sge_array)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+				"2526 Failed to get the non-embedded SGE "
+				"virtual address\n");
+		lpfc_sli4_mbox_cmd_free(phba, mboxq);
+		return -ENOMEM;
+	}
+	virt_addr = mboxq->sge_array->addr[0];
+	/*
+	 * Configure the FCF record for FCFI 0.  This is the driver's
+	 * hardcoded default and gets used in non-FIP mode.
+	 */
+	fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record);
+	bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
+	lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t));
+
+	/*
+	 * Copy the fcf_index and the FCF Record Data. The data starts after
+	 * the FCoE header plus word10. The data copy needs to be endian
+	 * correct.
+	 */
+	bytep += sizeof(uint32_t);
+	lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record));
+	mboxq->vport = phba->pport;
+	mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record;
+	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
+	if (rc == MBX_NOT_FINISHED) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"2515 ADD_FCF_RECORD mailbox failed with "
+			"status 0x%x\n", rc);
+		lpfc_sli4_mbox_cmd_free(phba, mboxq);
+		rc = -EIO;
+	} else
+		rc = 0;
+
+	return rc;
+}
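The non-embedded payload assembled above has a fixed layout, implied by the two copies in the routine:

	/* SGE payload for ADD_FCF, as assembled above:
	 *
	 *   union lpfc_sli4_cfg_shdr     config header
	 *   uint32_t fcfindex            FCF index word
	 *   struct fcf_record            the record itself
	 *
	 * Both copies go through lpfc_sli_pcimem_bcopy() so the data
	 * lands in the DMA buffer with correct endianness.
	 */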
+
+/**
+ * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record.
+ * @phba: pointer to lpfc hba data structure.
+ * @fcf_record:  pointer to the fcf record to write the default data.
+ * @fcf_index: FCF table entry index.
+ *
+ * This routine is invoked to build the driver's default FCF record.  The
+ * values used are hardcoded.  This routine handles memory initialization.
+ *
+ **/
+void
+lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
+				struct fcf_record *fcf_record,
+				uint16_t fcf_index)
+{
+	memset(fcf_record, 0, sizeof(struct fcf_record));
+	fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE;
+	fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER;
+	fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY;
+	bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]);
+	bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]);
+	bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]);
+	bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3);
+	bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4);
+	bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5);
+	bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]);
+	bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]);
+	bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]);
+	bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1);
+	bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index);
+	bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record,
+		LPFC_FCF_FPMA | LPFC_FCF_SPMA);
+	/* Set the VLAN bit map */
+	if (phba->valid_vlan) {
+		fcf_record->vlan_bitmap[phba->vlan_id / 8]
+			= 1 << (phba->vlan_id % 8);
+	}
+}
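The VLAN bitmap stores one bit per VLAN id, eight ids per byte; for example, with vlan_id = 100:

	/* 100 / 8 = 12 and 100 % 8 = 4, so byte 12 gets bit 4: */
	fcf_record->vlan_bitmap[12] = 1 << 4;	/* 0x10 */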
+
+/**
+ * lpfc_sli4_read_fcf_record - Read an FCF record from the port.
+ * @phba: pointer to lpfc hba data structure.
+ * @fcf_index: FCF table entry offset.
+ *
+ * This routine is invoked to read the FCF record from the device
+ * starting at the given @fcf_index.
+ **/
+int
+lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index)
+{
+	int rc = 0, error;
+	LPFC_MBOXQ_t *mboxq;
+	void *virt_addr;
+	dma_addr_t phys_addr;
+	uint8_t *bytep;
+	struct lpfc_mbx_sge sge;
+	uint32_t alloc_len, req_len;
+	struct lpfc_mbx_read_fcf_tbl *read_fcf;
+
+	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mboxq) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2000 Failed to allocate mbox for "
+				"READ_FCF cmd\n");
+		return -ENOMEM;
+	}
+
+	req_len = sizeof(struct fcf_record) +
+		  sizeof(union lpfc_sli4_cfg_shdr) + 2 * sizeof(uint32_t);
+
+	/* Set up READ_FCF SLI4_CONFIG mailbox-ioctl command */
+	alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
+			 LPFC_MBOX_OPCODE_FCOE_READ_FCF_TABLE, req_len,
+			 LPFC_SLI4_MBX_NEMBED);
+
+	if (alloc_len < req_len) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0291 Allocated DMA memory size (x%x) is "
+				"less than the requested DMA memory "
+				"size (x%x)\n", alloc_len, req_len);
+		lpfc_sli4_mbox_cmd_free(phba, mboxq);
+		return -ENOMEM;
+	}
+
+	/* Get the first SGE entry from the non-embedded DMA memory. This
+	 * routine only uses a single SGE.
+	 */
+	lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
+	phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
+	if (unlikely(!mboxq->sge_array)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+				"2527 Failed to get the non-embedded SGE "
+				"virtual address\n");
+		lpfc_sli4_mbox_cmd_free(phba, mboxq);
+		return -ENOMEM;
+	}
+	virt_addr = mboxq->sge_array->addr[0];
+	read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr;
+
+	/* Set up command fields */
+	bf_set(lpfc_mbx_read_fcf_tbl_indx, &read_fcf->u.request, fcf_index);
+	/* Perform necessary endian conversion */
+	bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
+	lpfc_sli_pcimem_bcopy(bytep, bytep, sizeof(uint32_t));
+	mboxq->vport = phba->pport;
+	mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_record;
+	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
+	if (rc == MBX_NOT_FINISHED) {
+		lpfc_sli4_mbox_cmd_free(phba, mboxq);
+		error = -EIO;
+	} else
+		error = 0;
+	return error;
+}
-- 
cgit v1.2.3-70-g09d2


From d8e93df13c8f7bde45a7756944aab528c58df4cf Mon Sep 17 00:00:00 2001
From: James Smart <James.Smart@Emulex.Com>
Date: Fri, 22 May 2009 14:53:05 -0400
Subject: [SCSI] lpfc 8.3.2 : Update of copyrights

Update of copyrights on modified files

Signed-off-by: James Smart <james.smart@emulex.com>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
---
 drivers/scsi/lpfc/lpfc.h           | 2 +-
 drivers/scsi/lpfc/lpfc_attr.c      | 2 +-
 drivers/scsi/lpfc/lpfc_ct.c        | 2 +-
 drivers/scsi/lpfc/lpfc_debugfs.c   | 2 +-
 drivers/scsi/lpfc/lpfc_els.c       | 2 +-
 drivers/scsi/lpfc/lpfc_hbadisc.c   | 2 +-
 drivers/scsi/lpfc/lpfc_hw.h        | 2 +-
 drivers/scsi/lpfc/lpfc_init.c      | 2 +-
 drivers/scsi/lpfc/lpfc_logmsg.h    | 2 +-
 drivers/scsi/lpfc/lpfc_mbox.c      | 2 +-
 drivers/scsi/lpfc/lpfc_mem.c       | 2 +-
 drivers/scsi/lpfc/lpfc_nportdisc.c | 2 +-
 drivers/scsi/lpfc/lpfc_scsi.c      | 2 +-
 drivers/scsi/lpfc/lpfc_sli.c       | 2 +-
 14 files changed, 14 insertions(+), 14 deletions(-)

(limited to 'drivers/scsi/lpfc')

diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 13ac108a244..54056984909 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2008 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2009 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 270a4c6cd3a..f032d8b7bb4 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2008 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2009 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 51990787796..38c18518a5e 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2008 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2009 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  *                                                                 *
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 42ef258c7d5..2b02b1fb39a 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2007-2008 Emulex.  All rights reserved.           *
+ * Copyright (C) 2007-2009 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  *                                                                 *
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index a3b56d7f72f..6bdeb14878a 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2008 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2009 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 9bd7a8927a3..d507a581b99 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2008 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2009 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index a9d64cfbe5c..02aa016b93e 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2008 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2009 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  *                                                                 *
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index ff821bb7716..8c30f5707f9 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2008 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2009 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
diff --git a/drivers/scsi/lpfc/lpfc_logmsg.h b/drivers/scsi/lpfc/lpfc_logmsg.h
index 1aa85709b01..db1ba22b801 100644
--- a/drivers/scsi/lpfc/lpfc_logmsg.h
+++ b/drivers/scsi/lpfc/lpfc_logmsg.h
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2008 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2009 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  *                                                                 *
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index 6aeb1c668e2..29fe5c17f4e 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2008 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2009 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index 516f4802f84..e198c917c13 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2008 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2009 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 2c7eba68626..09f659f77bb 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -1,7 +1,7 @@
  /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2008 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2009 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 9af2db355bc..1a7659c8f38 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2008 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2009 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 9645d32e6f8..a53c267a067 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2008 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2009 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
-- 
cgit v1.2.3-70-g09d2


From f4b4c68f74dcd5da03df851090cad28ad4e8d7cc Mon Sep 17 00:00:00 2001
From: James Smart <James.Smart@Emulex.Com>
Date: Fri, 22 May 2009 14:53:12 -0400
Subject: [SCSI] lpfc 8.3.2 : Miscellaneous Changes

Miscellaneous Changes:
- Convert from the SLI2_ACTIVE flag to the more correct, generic SLI_ACTIVE flag
- Reposition the log verbose messaging definitions and widen the bit-mask
  to 32 bits (fallback behavior sketched below)
- Update naming: use the vpi object name rather than the vport slang name
- Handle the deferred error attention condition
- Add 10G link support
- Small bug fixes
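
For the logging rework, here is a minimal, self-contained sketch (not
driver code) of the verbose-mask fallback this patch introduces in
lpfc_printf_log(): before any port or vport exists, the hba-level
cfg_log_verbose is consulted; once the physical port is up, its
per-vport mask takes precedence. The struct and mask names mirror the
patch; everything else is invented for illustration.

	#include <stdio.h>
	#include <stdint.h>

	#define LOG_INIT	0x00000008	/* Initialization events */
	#define LOG_LINK_EVENT	0x00000010	/* Link events */

	struct demo_vport { uint32_t cfg_log_verbose; };
	struct demo_hba {
		struct demo_vport *pport;	/* NULL until port creation */
		uint32_t cfg_log_verbose;	/* module-level setting */
	};

	/* Same test lpfc_printf_log() now performs (severity check omitted) */
	static int should_log(struct demo_hba *phba, uint32_t mask)
	{
		uint32_t log_verbose = phba->pport ?
				       phba->pport->cfg_log_verbose :
				       phba->cfg_log_verbose;
		return (mask & log_verbose) != 0;
	}

	int main(void)
	{
		struct demo_hba hba = { NULL, LOG_INIT };
		struct demo_vport port = { LOG_LINK_EVENT };

		printf("pre-port INIT logged: %d\n", should_log(&hba, LOG_INIT));
		hba.pport = &port;	/* port created; vport mask now wins */
		printf("post-port INIT logged: %d\n", should_log(&hba, LOG_INIT));
		return 0;
	}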

Signed-off-by: James Smart <james.smart@emulex.com>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
---
 drivers/scsi/lpfc/lpfc_attr.c    | 55 ++++++++++++++++++++++------------------
 drivers/scsi/lpfc/lpfc_ct.c      |  7 +++--
 drivers/scsi/lpfc/lpfc_disc.h    |  1 +
 drivers/scsi/lpfc/lpfc_hbadisc.c |  3 +++
 drivers/scsi/lpfc/lpfc_init.c    | 20 ++++++++++++---
 drivers/scsi/lpfc/lpfc_logmsg.h  | 52 ++++++++++++++++++++-----------------
 drivers/scsi/lpfc/lpfc_sli.c     |  8 +++---
 drivers/scsi/lpfc/lpfc_sli.h     |  2 +-
 drivers/scsi/lpfc/lpfc_vport.c   |  2 +-
 9 files changed, 91 insertions(+), 59 deletions(-)

(limited to 'drivers/scsi/lpfc')

diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index f032d8b7bb4..46e032aa0be 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -805,7 +805,7 @@ lpfc_get_hba_info(struct lpfc_hba *phba,
 	 */
 	if (phba->link_state < LPFC_LINK_DOWN ||
 	    !phba->mbox_mem_pool ||
-	    (phba->sli.sli_flag & LPFC_SLI2_ACTIVE) == 0)
+	    (phba->sli.sli_flag & LPFC_SLI_ACTIVE) == 0)
 		return 0;
 
 	if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO)
@@ -822,7 +822,7 @@ lpfc_get_hba_info(struct lpfc_hba *phba,
 	pmboxq->context1 = NULL;
 
 	if ((phba->pport->fc_flag & FC_OFFLINE_MODE) ||
-		(!(psli->sli_flag & LPFC_SLI2_ACTIVE)))
+		(!(psli->sli_flag & LPFC_SLI_ACTIVE)))
 		rc = MBX_NOT_FINISHED;
 	else
 		rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
@@ -2045,22 +2045,9 @@ static DEVICE_ATTR(lpfc_devloss_tmo, S_IRUGO | S_IWUSR,
 # lpfc_log_verbose: Only turn this flag on if you are willing to risk being
 # deluged with LOTS of information.
 # You can set a bit mask to record specific types of verbose messages:
-#
-# LOG_ELS                       0x1        ELS events
-# LOG_DISCOVERY                 0x2        Link discovery events
-# LOG_MBOX                      0x4        Mailbox events
-# LOG_INIT                      0x8        Initialization events
-# LOG_LINK_EVENT                0x10       Link events
-# LOG_FCP                       0x40       FCP traffic history
-# LOG_NODE                      0x80       Node table events
-# LOG_BG                        0x200      BlockBuard events
-# LOG_MISC                      0x400      Miscellaneous events
-# LOG_SLI                       0x800      SLI events
-# LOG_FCP_ERROR                 0x1000     Only log FCP errors
-# LOG_LIBDFC                    0x2000     LIBDFC events
-# LOG_ALL_MSG                   0xffff     LOG all messages
+# See lpfc_logmsg.h for definitions.
 */
-LPFC_VPORT_ATTR_HEX_RW(log_verbose, 0x0, 0x0, 0xffff,
+LPFC_VPORT_ATTR_HEX_RW(log_verbose, 0x0, 0x0, 0xffffffff,
 		       "Verbose logging bit-mask");
 
 /*
@@ -2365,7 +2352,7 @@ lpfc_stat_data_ctrl_store(struct device *dev, struct device_attribute *attr,
 		if (vports == NULL)
 			return -ENOMEM;
 
-		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
+		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
 			v_shost = lpfc_shost_from_vport(vports[i]);
 			spin_lock_irq(v_shost->host_lock);
 			/* Block and reset data collection */
@@ -2380,7 +2367,7 @@ lpfc_stat_data_ctrl_store(struct device *dev, struct device_attribute *attr,
 		phba->bucket_base = base;
 		phba->bucket_step = step;
 
-		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
+		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
 			v_shost = lpfc_shost_from_vport(vports[i]);
 
 			/* Unblock data collection */
@@ -2397,7 +2384,7 @@ lpfc_stat_data_ctrl_store(struct device *dev, struct device_attribute *attr,
 		if (vports == NULL)
 			return -ENOMEM;
 
-		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
+		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
 			v_shost = lpfc_shost_from_vport(vports[i]);
 			spin_lock_irq(shost->host_lock);
 			vports[i]->stat_data_blocked = 1;
@@ -3418,7 +3405,7 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
 		}
 
 		if ((vport->fc_flag & FC_OFFLINE_MODE) ||
-		    (!(phba->sli.sli_flag & LPFC_SLI2_ACTIVE))){
+		    (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))) {
 
 			spin_unlock_irq(&phba->hbalock);
 			rc = lpfc_sli_issue_mbox (phba,
@@ -3646,6 +3633,9 @@ lpfc_get_host_speed(struct Scsi_Host *shost)
 			case LA_8GHZ_LINK:
 				fc_host_speed(shost) = FC_PORTSPEED_8GBIT;
 			break;
+			case LA_10GHZ_LINK:
+				fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
+			break;
 			default:
 				fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
 			break;
@@ -3713,7 +3703,7 @@ lpfc_get_stats(struct Scsi_Host *shost)
 	 */
 	if (phba->link_state < LPFC_LINK_DOWN ||
 	    !phba->mbox_mem_pool ||
-	    (phba->sli.sli_flag & LPFC_SLI2_ACTIVE) == 0)
+	    (phba->sli.sli_flag & LPFC_SLI_ACTIVE) == 0)
 		return NULL;
 
 	if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO)
@@ -3756,7 +3746,7 @@ lpfc_get_stats(struct Scsi_Host *shost)
 	pmboxq->vport = vport;
 
 	if ((vport->fc_flag & FC_OFFLINE_MODE) ||
-	    (!(psli->sli_flag & LPFC_SLI2_ACTIVE)))
+	    (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
 		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
 	else
 		rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
@@ -3838,7 +3828,7 @@ lpfc_reset_stats(struct Scsi_Host *shost)
 	pmboxq->vport = vport;
 
 	if ((vport->fc_flag & FC_OFFLINE_MODE) ||
-		(!(psli->sli_flag & LPFC_SLI2_ACTIVE)))
+		(!(psli->sli_flag & LPFC_SLI_ACTIVE)))
 		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
 	else
 		rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
@@ -3856,7 +3846,7 @@ lpfc_reset_stats(struct Scsi_Host *shost)
 	pmboxq->vport = vport;
 
 	if ((vport->fc_flag & FC_OFFLINE_MODE) ||
-	    (!(psli->sli_flag & LPFC_SLI2_ACTIVE)))
+	    (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
 		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
 	else
 		rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
@@ -4023,6 +4013,21 @@ lpfc_set_vport_symbolic_name(struct fc_vport *fc_vport)
 		lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0);
 }
 
+/**
+ * lpfc_hba_log_verbose_init - Set hba's log verbose level
+ * @phba: Pointer to lpfc_hba struct.
+ * @verbose: Verbose logging bit-mask to set.
+ *
+ * This function is called by the lpfc_get_cfgparam() routine to store the
+ * module's lpfc_log_verbose parameter in @phba cfg_log_verbose so that log
+ * messages can be filtered before the hba port or any vports are created.
+ **/
+static void
+lpfc_hba_log_verbose_init(struct lpfc_hba *phba, uint32_t verbose)
+{
+	phba->cfg_log_verbose = verbose;
+}
+
 struct fc_function_template lpfc_transport_functions = {
 	/* fixed attributes the driver supports */
 	.show_host_node_name = 1,
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 38c18518a5e..1dbccfd3d02 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -1578,6 +1578,9 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, int cmdcode)
 				case LA_8GHZ_LINK:
 					ae->un.PortSpeed = HBA_PORTSPEED_8GBIT;
 				break;
+				case LA_10GHZ_LINK:
+					ae->un.PortSpeed = HBA_PORTSPEED_10GBIT;
+				break;
 				default:
 					ae->un.PortSpeed =
 						HBA_PORTSPEED_UNKNOWN;
@@ -1730,7 +1733,7 @@ lpfc_decode_firmware_rev(struct lpfc_hba *phba, char *fwrevision, int flag)
 	uint8_t *fwname;
 
 	if (vp->rev.rBit) {
-		if (psli->sli_flag & LPFC_SLI2_ACTIVE)
+		if (psli->sli_flag & LPFC_SLI_ACTIVE)
 			rev = vp->rev.sli2FwRev;
 		else
 			rev = vp->rev.sli1FwRev;
@@ -1756,7 +1759,7 @@ lpfc_decode_firmware_rev(struct lpfc_hba *phba, char *fwrevision, int flag)
 		}
 		b4 = (rev & 0x0000000f);
 
-		if (psli->sli_flag & LPFC_SLI2_ACTIVE)
+		if (psli->sli_flag & LPFC_SLI_ACTIVE)
 			fwname = vp->rev.sli2FwName;
 		else
 			fwname = vp->rev.sli1FwName;
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index ffd10897207..1142070e948 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -135,6 +135,7 @@ struct lpfc_nodelist {
 #define NLP_NODEV_REMOVE   0x08000000	/* Defer removal till discovery ends */
 #define NLP_TARGET_REMOVE  0x10000000   /* Target remove in process */
 #define NLP_SC_REQ         0x20000000	/* Target requires authentication */
+#define NLP_RPI_VALID      0x80000000	/* nlp_rpi is valid */
 
 /* ndlp usage management macros */
 #define NLP_CHK_NODE_ACT(ndlp)		(((ndlp)->nlp_usg_map \
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index d507a581b99..126323a4dce 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -1647,6 +1647,9 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
 	case LA_8GHZ_LINK:
 		phba->fc_linkspeed = LA_8GHZ_LINK;
 		break;
+	case LA_10GHZ_LINK:
+		phba->fc_linkspeed = LA_10GHZ_LINK;
+		break;
 	default:
 		phba->fc_linkspeed = LA_UNKNW_LINK;
 		break;
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 8c30f5707f9..65cd3fe6220 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -906,7 +906,7 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
 					"taking this port offline.\n");
 
 			spin_lock_irq(&phba->hbalock);
-			psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
+			psli->sli_flag &= ~LPFC_SLI_ACTIVE;
 			spin_unlock_irq(&phba->hbalock);
 
 			lpfc_offline_prep(phba);
@@ -931,13 +931,15 @@ lpfc_offline_eratt(struct lpfc_hba *phba)
 	struct lpfc_sli   *psli = &phba->sli;
 
 	spin_lock_irq(&phba->hbalock);
-	psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
+	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
 	spin_unlock_irq(&phba->hbalock);
 	lpfc_offline_prep(phba);
 
 	lpfc_offline(phba);
 	lpfc_reset_barrier(phba);
+	spin_lock_irq(&phba->hbalock);
 	lpfc_sli_brdreset(phba);
+	spin_unlock_irq(&phba->hbalock);
 	lpfc_hba_down_post(phba);
 	lpfc_sli_brdready(phba, HS_MBRDY);
 	lpfc_unblock_mgmt_io(phba);
@@ -980,6 +982,16 @@ lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
 	struct lpfc_sli_ring  *pring;
 	struct lpfc_sli *psli = &phba->sli;
 
+	/* If the pci channel is offline, ignore possible errors,
+	 * since we cannot communicate with the pci card anyway.
+	 */
+	if (pci_channel_offline(phba->pcidev)) {
+		spin_lock_irq(&phba->hbalock);
+		phba->hba_flag &= ~DEFER_ERATT;
+		spin_unlock_irq(&phba->hbalock);
+		return;
+	}
+
 	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 		"0479 Deferred Adapter Hardware Error "
 		"Data: x%x x%x x%x\n",
@@ -987,7 +999,7 @@ lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
 		phba->work_status[0], phba->work_status[1]);
 
 	spin_lock_irq(&phba->hbalock);
-	psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
+	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
 	spin_unlock_irq(&phba->hbalock);
 
 
@@ -1097,7 +1109,7 @@ lpfc_handle_eratt_s3(struct lpfc_hba *phba)
 				phba->work_status[0], phba->work_status[1]);
 
 		spin_lock_irq(&phba->hbalock);
-		psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
+		psli->sli_flag &= ~LPFC_SLI_ACTIVE;
 		spin_unlock_irq(&phba->hbalock);
 
 		/*
diff --git a/drivers/scsi/lpfc/lpfc_logmsg.h b/drivers/scsi/lpfc/lpfc_logmsg.h
index db1ba22b801..954ba57970a 100644
--- a/drivers/scsi/lpfc/lpfc_logmsg.h
+++ b/drivers/scsi/lpfc/lpfc_logmsg.h
@@ -18,33 +18,39 @@
  * included with this package.                                     *
  *******************************************************************/
 
-#define LOG_ELS                       0x1	/* ELS events */
-#define LOG_DISCOVERY                 0x2	/* Link discovery events */
-#define LOG_MBOX                      0x4	/* Mailbox events */
-#define LOG_INIT                      0x8	/* Initialization events */
-#define LOG_LINK_EVENT                0x10	/* Link events */
-#define LOG_IP                        0x20	/* IP traffic history */
-#define LOG_FCP                       0x40	/* FCP traffic history */
-#define LOG_NODE                      0x80	/* Node table events */
-#define LOG_TEMP                      0x100	/* Temperature sensor events */
-#define LOG_BG			      0x200	/* BlockGuard events */
-#define LOG_MISC                      0x400	/* Miscellaneous events */
-#define LOG_SLI                       0x800	/* SLI events */
-#define LOG_FCP_ERROR                 0x1000	/* log errors, not underruns */
-#define LOG_LIBDFC                    0x2000	/* Libdfc events */
-#define LOG_VPORT                     0x4000	/* NPIV events */
-#define LOG_ALL_MSG                   0xffff	/* LOG all messages */
+#define LOG_ELS		0x00000001	/* ELS events */
+#define LOG_DISCOVERY	0x00000002	/* Link discovery events */
+#define LOG_MBOX	0x00000004	/* Mailbox events */
+#define LOG_INIT	0x00000008	/* Initialization events */
+#define LOG_LINK_EVENT	0x00000010	/* Link events */
+#define LOG_IP		0x00000020	/* IP traffic history */
+#define LOG_FCP		0x00000040	/* FCP traffic history */
+#define LOG_NODE	0x00000080	/* Node table events */
+#define LOG_TEMP	0x00000100	/* Temperature sensor events */
+#define LOG_BG		0x00000200	/* BlockGuard events */
+#define LOG_MISC	0x00000400	/* Miscellaneous events */
+#define LOG_SLI		0x00000800	/* SLI events */
+#define LOG_FCP_ERROR	0x00001000	/* log errors, not underruns */
+#define LOG_LIBDFC	0x00002000	/* Libdfc events */
+#define LOG_VPORT	0x00004000	/* NPIV events */
+#define LOG_SECURITY	0x00008000	/* Security events */
+#define LOG_EVENT	0x00010000	/* CT, TEMP, DUMP logging */
+#define LOG_ALL_MSG	0xffffffff	/* LOG all messages */
 
 #define lpfc_printf_vlog(vport, level, mask, fmt, arg...) \
-	do { \
-	{ if (((mask) &(vport)->cfg_log_verbose) || (level[1] <= '3')) \
+do { \
+	{ if (((mask) & (vport)->cfg_log_verbose) || (level[1] <= '3')) \
 		dev_printk(level, &((vport)->phba->pcidev)->dev, "%d:(%d):" \
 			   fmt, (vport)->phba->brd_no, vport->vpi, ##arg); } \
-	} while (0)
+} while (0)
 
 #define lpfc_printf_log(phba, level, mask, fmt, arg...) \
-	do { \
-	{ if (((mask) &(phba)->pport->cfg_log_verbose) || (level[1] <= '3')) \
+do { \
+	{ uint32_t log_verbose = (phba)->pport ? \
+				 (phba)->pport->cfg_log_verbose : \
+				 (phba)->cfg_log_verbose; \
+	  if (((mask) & log_verbose) || (level[1] <= '3')) \
 		dev_printk(level, &((phba)->pcidev)->dev, "%d:" \
-			   fmt, phba->brd_no, ##arg); } \
-	} while (0)
+			   fmt, phba->brd_no, ##arg); \
+	} \
+} while (0)
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index a53c267a067..ff04daf18f4 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -3272,7 +3272,7 @@ void lpfc_reset_barrier(struct lpfc_hba *phba)
 		mdelay(1);
 
 	if (readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN)) {
-		if (phba->sli.sli_flag & LPFC_SLI2_ACTIVE ||
+		if (phba->sli.sli_flag & LPFC_SLI_ACTIVE ||
 		    phba->pport->stopped)
 			goto restore_hc;
 		else
@@ -3353,7 +3353,9 @@ lpfc_sli_brdkill(struct lpfc_hba *phba)
 		return 1;
 	}
 
-	psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
+	spin_lock_irq(&phba->hbalock);
+	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
+	spin_unlock_irq(&phba->hbalock);
 
 	mempool_free(pmb, phba->mbox_mem_pool);
 
@@ -4643,7 +4645,7 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
 	spin_unlock_irq(&phba->pport->work_port_lock);
 	spin_lock_irq(&phba->hbalock);
 	phba->link_state = LPFC_LINK_UNKNOWN;
-	psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
+	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
 	spin_unlock_irq(&phba->hbalock);
 
 	pring = &psli->ring[psli->fcp_ring];
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index e6c88ee8ee9..7d37eb7459b 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -244,7 +244,7 @@ struct lpfc_sli {
 
 	/* Additional sli_flags */
 #define LPFC_SLI_MBOX_ACTIVE      0x100	/* HBA mailbox is currently active */
-#define LPFC_SLI2_ACTIVE          0x200	/* SLI2 overlay in firmware is active */
+#define LPFC_SLI_ACTIVE           0x200	/* SLI in firmware is active */
 #define LPFC_PROCESS_LA           0x400	/* Able to process link attention */
 #define LPFC_BLOCK_MGMT_IO        0x800	/* Don't allow mgmt mbx or iocb cmds */
 #define LPFC_MENLO_MAINT          0x1000 /* need for menl fw download */
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index 59e67f7ee53..a415ec0b9a8 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -121,7 +121,7 @@ lpfc_vport_sparm(struct lpfc_hba *phba, struct lpfc_vport *vport)
 	if (!pmb) {
 		return -ENOMEM;
 	}
-	mb = &pmb->mb;
+	mb = &pmb->u.mb;
 
 	lpfc_read_sparam(phba, pmb, vport->vpi);
 	/*
-- 
cgit v1.2.3-70-g09d2


From 21e9a0a5fbd2b7cb3ae29f6d491a30bc0e688422 Mon Sep 17 00:00:00 2001
From: James Smart <James.Smart@Emulex.Com>
Date: Fri, 22 May 2009 14:53:21 -0400
Subject: [SCSI] lpfc 8.3.2 : Persistent Vport Support

Add support for persistent vport definitions that are created at boot time
(the slot-scan pattern is sketched below)

Also includes a few misc fixes for:
- conversion to the vpi name from the vport slang name
- a couple of small mailbox reference fixes
- some additional discovery mods
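
For illustration, a minimal standalone sketch (not driver code) of the
slot-scan pattern lpfc_create_static_vport() applies to the persisted
vport table: an all-zero WWPN or WWNN marks an unused slot and is
skipped, and every populated slot becomes an NPIV vport. The table
contents and the wwn_to_u64() stand-in are invented for the demo.

	#include <stdio.h>
	#include <stdint.h>

	#define MAX_STATIC_VPORT_COUNT 16

	struct demo_vport_entry {
		uint8_t wwpn[8];
		uint8_t wwnn[8];
	};

	/* Big-endian fold, mirroring the kernel's wwn_to_u64() semantics */
	static uint64_t demo_wwn_to_u64(const uint8_t wwn[8])
	{
		uint64_t v = 0;
		int i;

		for (i = 0; i < 8; i++)
			v = (v << 8) | wwn[i];
		return v;
	}

	int main(void)
	{
		struct demo_vport_entry list[MAX_STATIC_VPORT_COUNT] = {
			/* slot 0 populated; remaining slots zero => unused */
			{ { 0x10, 0, 0, 0, 0xc9, 0x12, 0x34, 0x56 },
			  { 0x20, 0, 0, 0, 0xc9, 0x12, 0x34, 0x56 } },
		};
		int i, created = 0;

		for (i = 0; i < MAX_STATIC_VPORT_COUNT; i++) {
			if (!demo_wwn_to_u64(list[i].wwpn) ||
			    !demo_wwn_to_u64(list[i].wwnn))
				continue;	/* unused slot */
			created++;	/* driver calls fc_vport_create() here */
		}
		printf("static vports to create: %d\n", created);
		return 0;
	}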

Signed-off-by: James Smart <james.smart@emulex.com>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
---
 drivers/scsi/lpfc/lpfc_attr.c    |  31 +++++++
 drivers/scsi/lpfc/lpfc_hbadisc.c | 180 ++++++++++++++++++++++++++++++---------
 drivers/scsi/lpfc/lpfc_init.c    |   1 +
 drivers/scsi/lpfc/lpfc_mbox.c    |  38 +++++++++
 drivers/scsi/lpfc/lpfc_scsi.c    |   6 +-
 drivers/scsi/lpfc/lpfc_vport.c   |  37 ++++----
 6 files changed, 236 insertions(+), 57 deletions(-)

(limited to 'drivers/scsi/lpfc')

diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 46e032aa0be..d73e677201f 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -2277,6 +2277,36 @@ lpfc_param_init(topology, 0, 0, 6)
 static DEVICE_ATTR(lpfc_topology, S_IRUGO | S_IWUSR,
 		lpfc_topology_show, lpfc_topology_store);
 
+/**
+ * lpfc_static_vport_show - Read callback function for the
+ *   lpfc_static_vport sysfs file.
+ * @dev: Pointer to class device object.
+ * @attr: device attribute structure.
+ * @buf: Data buffer.
+ *
+ * This function is the read callback function for the
+ * lpfc_static_vport sysfs file. It reports whether the
+ * vport is a static vport ("1") or not ("0").
+ **/
+static ssize_t
+lpfc_static_vport_show(struct device *dev, struct device_attribute *attr,
+			 char *buf)
+{
+	struct Scsi_Host  *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	if (vport->vport_flag & STATIC_VPORT)
+		sprintf(buf, "1\n");
+	else
+		sprintf(buf, "0\n");
+
+	return strlen(buf);
+}
+
+/*
+ * Sysfs attribute reporting whether the vport is a static vport.
+ */
+static DEVICE_ATTR(lpfc_static_vport, S_IRUGO,
+		   lpfc_static_vport_show, NULL);
 
 /**
  * lpfc_stat_data_ctrl_store - write call back for lpfc_stat_data_ctrl sysfs file
@@ -3051,6 +3081,7 @@ struct device_attribute *lpfc_vport_attrs[] = {
 	&dev_attr_lpfc_enable_da_id,
 	&dev_attr_lpfc_max_scsicmpl_time,
 	&dev_attr_lpfc_stat_data_ctrl,
+	&dev_attr_lpfc_static_vport,
 	NULL,
 };
 
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 126323a4dce..35c41ae75be 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -2079,6 +2079,128 @@ out:
 	return;
 }
 
+/**
+ * lpfc_create_static_vport - Read HBA config region to create static vports.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine issues a DUMP mailbox command for config region 22 to get
+ * the list of static vports to be created. The function creates vports
+ * based on the information returned from the HBA.
+ **/
+void
+lpfc_create_static_vport(struct lpfc_hba *phba)
+{
+	LPFC_MBOXQ_t *pmb = NULL;
+	MAILBOX_t *mb;
+	struct static_vport_info *vport_info;
+	int rc, i;
+	struct fc_vport_identifiers vport_id;
+	struct fc_vport *new_fc_vport;
+	struct Scsi_Host *shost;
+	struct lpfc_vport *vport;
+	uint16_t offset = 0;
+	uint8_t *vport_buff;
+
+	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!pmb) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0542 lpfc_create_static_vport failed to"
+				" allocate mailbox memory\n");
+		return;
+	}
+
+	mb = &pmb->u.mb;
+
+	vport_info = kzalloc(sizeof(struct static_vport_info), GFP_KERNEL);
+	if (!vport_info) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0543 lpfc_create_static_vport failed to"
+				" allocate vport_info\n");
+		mempool_free(pmb, phba->mbox_mem_pool);
+		return;
+	}
+
+	vport_buff = (uint8_t *) vport_info;
+	do {
+		lpfc_dump_static_vport(phba, pmb, offset);
+		pmb->vport = phba->pport;
+		rc = lpfc_sli_issue_mbox_wait(phba, pmb, LPFC_MBOX_TMO);
+
+		if ((rc != MBX_SUCCESS) || mb->mbxStatus) {
+			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+				"0544 lpfc_create_static_vport failed to"
+				" issue dump mailbox command ret 0x%x "
+				"status 0x%x\n",
+				rc, mb->mbxStatus);
+			goto out;
+		}
+
+		if (mb->un.varDmp.word_cnt >
+			sizeof(struct static_vport_info) - offset)
+			mb->un.varDmp.word_cnt =
+			sizeof(struct static_vport_info) - offset;
+
+		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
+			vport_buff + offset,
+			mb->un.varDmp.word_cnt);
+		offset += mb->un.varDmp.word_cnt;
+
+	} while (mb->un.varDmp.word_cnt &&
+		offset < sizeof(struct static_vport_info));
+
+
+	if ((le32_to_cpu(vport_info->signature) != VPORT_INFO_SIG) ||
+		((le32_to_cpu(vport_info->rev) & VPORT_INFO_REV_MASK)
+			!= VPORT_INFO_REV)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"0545 lpfc_create_static_vport bad"
+			" information header 0x%x 0x%x\n",
+			le32_to_cpu(vport_info->signature),
+			le32_to_cpu(vport_info->rev) & VPORT_INFO_REV_MASK);
+
+		goto out;
+	}
+
+	shost = lpfc_shost_from_vport(phba->pport);
+
+	for (i = 0; i < MAX_STATIC_VPORT_COUNT; i++) {
+		memset(&vport_id, 0, sizeof(vport_id));
+		vport_id.port_name = wwn_to_u64(vport_info->vport_list[i].wwpn);
+		vport_id.node_name = wwn_to_u64(vport_info->vport_list[i].wwnn);
+		if (!vport_id.port_name || !vport_id.node_name)
+			continue;
+
+		vport_id.roles = FC_PORT_ROLE_FCP_INITIATOR;
+		vport_id.vport_type = FC_PORTTYPE_NPIV;
+		vport_id.disable = false;
+		new_fc_vport = fc_vport_create(shost, 0, &vport_id);
+
+		if (!new_fc_vport) {
+			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+				"0546 lpfc_create_static_vport failed to"
+				" create vport \n");
+			continue;
+		}
+
+		vport = *(struct lpfc_vport **)new_fc_vport->dd_data;
+		vport->vport_flag |= STATIC_VPORT;
+	}
+
+out:
+	/*
+	 * If this is a timed-out command, setting context2 to NULL tells the
+	 * SLI layer not to use this buffer.
+	 */
+	spin_lock_irq(&phba->hbalock);
+	pmb->context2 = NULL;
+	spin_unlock_irq(&phba->hbalock);
+	kfree(vport_info);
+	if (rc != MBX_TIMEOUT)
+		mempool_free(pmb, phba->mbox_mem_pool);
+
+	return;
+}
+
 /*
  * This routine handles processing a Fabric REG_LOGIN mailbox
  * command upon completion. It is setup in the LPFC_MBOXQ
@@ -2089,16 +2211,17 @@ void
 lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 {
 	struct lpfc_vport *vport = pmb->vport;
-	MAILBOX_t *mb = &pmb->mb;
+	MAILBOX_t *mb = &pmb->u.mb;
 	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
 	struct lpfc_nodelist *ndlp;
-	struct lpfc_vport **vports;
-	int i;
 
 	ndlp = (struct lpfc_nodelist *) pmb->context2;
 	pmb->context1 = NULL;
 	pmb->context2 = NULL;
 	if (mb->mbxStatus) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
+				 "0258 Register Fabric login error: 0x%x\n",
+				 mb->mbxStatus);
 		lpfc_mbuf_free(phba, mp->virt, mp->phys);
 		kfree(mp);
 		mempool_free(pmb, phba->mbox_mem_pool);
@@ -2117,9 +2240,6 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 		}
 
 		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
-		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
-				 "0258 Register Fabric login error: 0x%x\n",
-				 mb->mbxStatus);
 		/* Decrement the reference count to ndlp after the reference
 		 * to the ndlp are done.
 		 */
@@ -2128,34 +2248,12 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 	}
 
 	ndlp->nlp_rpi = mb->un.varWords[0];
+	ndlp->nlp_flag |= NLP_RPI_VALID;
 	ndlp->nlp_type |= NLP_FABRIC;
 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
 
 	if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
-		vports = lpfc_create_vport_work_array(phba);
-		if (vports != NULL)
-			for(i = 0;
-			    i <= phba->max_vpi && vports[i] != NULL;
-			    i++) {
-				if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
-					continue;
-				if (phba->fc_topology == TOPOLOGY_LOOP) {
-					lpfc_vport_set_state(vports[i],
-							FC_VPORT_LINKDOWN);
-					continue;
-				}
-				if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
-					lpfc_initial_fdisc(vports[i]);
-				else {
-					lpfc_vport_set_state(vports[i],
-						FC_VPORT_NO_FABRIC_SUPP);
-					lpfc_printf_vlog(vport, KERN_ERR,
-							 LOG_ELS,
-							"0259 No NPIV "
-							"Fabric support\n");
-				}
-			}
-		lpfc_destroy_vport_work_array(phba, vports);
+		lpfc_start_fdiscs(phba);
 		lpfc_do_scr_ns_plogi(phba, vport);
 	}
 
@@ -2179,13 +2277,16 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 void
 lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 {
-	MAILBOX_t *mb = &pmb->mb;
+	MAILBOX_t *mb = &pmb->u.mb;
 	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
 	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
 	struct lpfc_vport *vport = pmb->vport;
 
 	if (mb->mbxStatus) {
 out:
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+				 "0260 Register NameServer error: 0x%x\n",
+				 mb->mbxStatus);
 		/* decrement the node reference count held for this
 		 * callback function.
 		 */
@@ -2209,15 +2310,13 @@ out:
 			return;
 		}
 		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
-		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
-				 "0260 Register NameServer error: 0x%x\n",
-				 mb->mbxStatus);
 		return;
 	}
 
 	pmb->context1 = NULL;
 
 	ndlp->nlp_rpi = mb->un.varWords[0];
+	ndlp->nlp_flag |= NLP_RPI_VALID;
 	ndlp->nlp_type |= NLP_FABRIC;
 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
 
@@ -2718,7 +2817,7 @@ lpfc_check_sli_ndlp(struct lpfc_hba *phba,
 	if (pring->ringno == LPFC_ELS_RING) {
 		switch (icmd->ulpCommand) {
 		case CMD_GEN_REQUEST64_CR:
-			if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi)
+			if (iocb->context_un.ndlp == ndlp)
 				return 1;
 		case CMD_ELS_REQUEST64_CR:
 			if (icmd->un.elsreq64.remoteID == ndlp->nlp_DID)
@@ -2765,7 +2864,7 @@ lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
 	 */
 	psli = &phba->sli;
 	rpi = ndlp->nlp_rpi;
-	if (rpi) {
+	if (ndlp->nlp_flag & NLP_RPI_VALID) {
 		/* Now process each ring */
 		for (i = 0; i < psli->num_rings; i++) {
 			pring = &psli->ring[i];
@@ -2813,7 +2912,7 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 	LPFC_MBOXQ_t    *mbox;
 	int rc;
 
-	if (ndlp->nlp_rpi) {
+	if (ndlp->nlp_flag & NLP_RPI_VALID) {
 		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 		if (mbox) {
 			lpfc_unreg_login(phba, vport->vpi, ndlp->nlp_rpi, mbox);
@@ -2825,6 +2924,7 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 		}
 		lpfc_no_rpi(phba, ndlp);
 		ndlp->nlp_rpi = 0;
+		ndlp->nlp_flag &= ~NLP_RPI_VALID;
 		return 1;
 	}
 	return 0;
@@ -2972,13 +3072,14 @@ lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 	int rc;
 
 	lpfc_cancel_retry_delay_tmo(vport, ndlp);
-	if (ndlp->nlp_flag & NLP_DEFER_RM && !ndlp->nlp_rpi) {
+	if ((ndlp->nlp_flag & NLP_DEFER_RM) &&
+	    !(ndlp->nlp_flag & NLP_RPI_VALID)) {
 		/* For this case we need to cleanup the default rpi
 		 * allocated by the firmware.
 		 */
 		if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))
 			!= NULL) {
-			rc = lpfc_reg_login(phba, vport->vpi, ndlp->nlp_DID,
+			rc = lpfc_reg_rpi(phba, vport->vpi, ndlp->nlp_DID,
 			    (uint8_t *) &vport->fc_sparam, mbox, 0);
 			if (rc) {
 				mempool_free(mbox, phba->mbox_mem_pool);
@@ -3713,6 +3814,7 @@ lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 	pmb->context1 = NULL;
 
 	ndlp->nlp_rpi = mb->un.varWords[0];
+	ndlp->nlp_flag |= NLP_RPI_VALID;
 	ndlp->nlp_type |= NLP_FABRIC;
 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
 
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 65cd3fe6220..2f5907f92ee 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -385,6 +385,7 @@ lpfc_config_port_post(struct lpfc_hba *phba)
 	/* Update the fc_host data structures with new wwn. */
 	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
 	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
+	fc_host_max_npiv_vports(shost) = phba->max_vpi;
 
 	/* If no serial number in VPD data, use low 6 bytes of WWNN */
 	/* This should be consolidated into parse_vpd ? - mr */
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index 29fe5c17f4e..b9b451c0901 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -40,6 +40,44 @@
 #include "lpfc_crtn.h"
 #include "lpfc_compat.h"
 
+/**
+ * lpfc_dump_static_vport - Dump HBA's static vport information.
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ * @offset: offset for dumping vport info.
+ *
+ * The dump mailbox command provides a method for the device driver to obtain
+ * various types of information from the HBA device.
+ *
+ * This routine prepares the mailbox command for dumping the list of static
+ * vports to be created.
+ **/
+void
+lpfc_dump_static_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb,
+		uint16_t offset)
+{
+	MAILBOX_t *mb;
+	void *ctx;
+
+	mb = &pmb->u.mb;
+	ctx = pmb->context2;
+
+	/* Setup to dump vport info region */
+	memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
+	mb->mbxCommand = MBX_DUMP_MEMORY;
+	mb->un.varDmp.cv = 1;
+	mb->un.varDmp.type = DMP_NV_PARAMS;
+	mb->un.varDmp.entry_index = offset;
+	mb->un.varDmp.region_id = DMP_REGION_VPORT;
+	mb->un.varDmp.word_cnt = DMP_RSP_SIZE/sizeof(uint32_t);
+	mb->un.varDmp.co = 0;
+	mb->un.varDmp.resp_offset = 0;
+	pmb->context2 = ctx;
+	mb->mbxOwner = OWN_HOST;
+
+	return;
+}
+
 /**
  * lpfc_dump_mem - Prepare a mailbox command for retrieving HBA's VPD memory
  * @phba: pointer to lpfc hba data structure.
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 1a7659c8f38..ccbde41c153 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -329,7 +329,7 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
 
 	vports = lpfc_create_vport_work_array(phba);
 	if (vports != NULL)
-		for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
+		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
 			shost = lpfc_shost_from_vport(vports[i]);
 			shost_for_each_device(sdev, shost) {
 				new_queue_depth =
@@ -383,7 +383,7 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
 
 	vports = lpfc_create_vport_work_array(phba);
 	if (vports != NULL)
-		for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
+		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
 			shost = lpfc_shost_from_vport(vports[i]);
 			shost_for_each_device(sdev, shost) {
 				if (vports[i]->cfg_lun_queue_depth <=
@@ -431,7 +431,7 @@ lpfc_scsi_dev_block(struct lpfc_hba *phba)
 
 	vports = lpfc_create_vport_work_array(phba);
 	if (vports != NULL)
-		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
+		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
 			shost = lpfc_shost_from_vport(vports[i]);
 			shost_for_each_device(sdev, shost) {
 				rport = starget_to_rport(scsi_target(sdev));
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index a415ec0b9a8..a6313ee84ac 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -251,23 +251,22 @@ static void lpfc_discovery_wait(struct lpfc_vport *vport)
 		    (vport->fc_flag & wait_flags)  ||
 		    ((vport->port_state > LPFC_VPORT_FAILED) &&
 		     (vport->port_state < LPFC_VPORT_READY))) {
-			lpfc_printf_log(phba, KERN_INFO, LOG_VPORT,
+			lpfc_printf_vlog(vport, KERN_INFO, LOG_VPORT,
 					"1833 Vport discovery quiesce Wait:"
-					" vpi x%x state x%x fc_flags x%x"
+					" state x%x fc_flags x%x"
 					" num_nodes x%x, waiting 1000 msecs"
 					" total wait msecs x%x\n",
-					vport->vpi, vport->port_state,
-					vport->fc_flag, vport->num_disc_nodes,
+					vport->port_state, vport->fc_flag,
+					vport->num_disc_nodes,
 					jiffies_to_msecs(jiffies - start_time));
 			msleep(1000);
 		} else {
 			/* Base case.  Wait variants satisfied.  Break out */
-			lpfc_printf_log(phba, KERN_INFO, LOG_VPORT,
+			lpfc_printf_vlog(vport, KERN_INFO, LOG_VPORT,
 					 "1834 Vport discovery quiesced:"
-					 " vpi x%x state x%x fc_flags x%x"
+					 " state x%x fc_flags x%x"
 					 " wait msecs x%x\n",
-					 vport->vpi, vport->port_state,
-					 vport->fc_flag,
+					 vport->port_state, vport->fc_flag,
 					 jiffies_to_msecs(jiffies
 						- start_time));
 			break;
@@ -275,12 +274,10 @@ static void lpfc_discovery_wait(struct lpfc_vport *vport)
 	}
 
 	if (time_after(jiffies, wait_time_max))
-		lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
 				"1835 Vport discovery quiesce failed:"
-				" vpi x%x state x%x fc_flags x%x"
-				" wait msecs x%x\n",
-				vport->vpi, vport->port_state,
-				vport->fc_flag,
+				" state x%x fc_flags x%x wait msecs x%x\n",
+				vport->port_state, vport->fc_flag,
 				jiffies_to_msecs(jiffies - start_time));
 }
 
@@ -558,6 +555,16 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
 				 "physical host\n");
 		return VPORT_ERROR;
 	}
+
+	/* If the vport is a static vport fail the deletion. */
+	if ((vport->vport_flag & STATIC_VPORT) &&
+		!(phba->pport->load_flag & FC_UNLOADING)) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
+				 "1837 vport_delete failed: Cannot delete "
+				 "static vport.\n");
+		return VPORT_ERROR;
+	}
+
 	/*
 	 * If we are not unloading the driver then prevent the vport_delete
 	 * from happening until after this vport's discovery is finished.
@@ -733,7 +740,7 @@ lpfc_create_vport_work_array(struct lpfc_hba *phba)
 	struct lpfc_vport *port_iterator;
 	struct lpfc_vport **vports;
 	int index = 0;
-	vports = kzalloc((phba->max_vpi + 1) * sizeof(struct lpfc_vport *),
+	vports = kzalloc((phba->max_vports + 1) * sizeof(struct lpfc_vport *),
 			 GFP_KERNEL);
 	if (vports == NULL)
 		return NULL;
@@ -757,7 +764,7 @@ lpfc_destroy_vport_work_array(struct lpfc_hba *phba, struct lpfc_vport **vports)
 	int i;
 	if (vports == NULL)
 		return;
-	for (i=0; vports[i] != NULL && i <= phba->max_vpi; i++)
+	for (i = 0; vports[i] != NULL && i <= phba->max_vports; i++)
 		scsi_host_put(lpfc_shost_from_vport(vports[i]));
 	kfree(vports);
 }
-- 
cgit v1.2.3-70-g09d2


From 53331aa1c721336b661567e4c0aacc04ab9725d8 Mon Sep 17 00:00:00 2001
From: James Smart <James.Smart@Emulex.Com>
Date: Fri, 22 May 2009 14:53:27 -0400
Subject: [SCSI] lpfc 8.3.2 : Update the lpfc driver version to 8.3.2

Signed-off-by: James Smart <james.smart@emulex.com>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
---
 drivers/scsi/lpfc/lpfc_version.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'drivers/scsi/lpfc')

diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index e599519e307..6b8a148f0a5 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
  * included with this package.                                     *
  *******************************************************************/
 
-#define LPFC_DRIVER_VERSION "8.3.1"
+#define LPFC_DRIVER_VERSION "8.3.2"
 
 #define LPFC_DRIVER_NAME		"lpfc"
 #define LPFC_SP_DRIVER_HANDLER_NAME	"lpfc:sp"
-- 
cgit v1.2.3-70-g09d2


From 98a1708de1bfa5fe1c490febba850d6043d3c7fa Mon Sep 17 00:00:00 2001
From: Martin Olsson <martin@minimum.se>
Date: Wed, 22 Apr 2009 18:21:29 +0200
Subject: trivial: fix typos s/paramter/parameter/ and s/excute/execute/ in
 documentation and source comments.

Signed-off-by: Martin Olsson <martin@minimum.se>
Signed-off-by: Jiri Kosina <jkosina@suse.cz>
---
 Documentation/powerpc/dts-bindings/fsl/board.txt       | 2 +-
 Documentation/powerpc/dts-bindings/fsl/cpm_qe/gpio.txt | 2 +-
 arch/s390/kernel/head.S                                | 2 +-
 drivers/ata/libata-acpi.c                              | 4 ++--
 drivers/ata/libata-eh.c                                | 2 +-
 drivers/edac/e752x_edac.c                              | 2 +-
 drivers/isdn/divert/isdn_divert.c                      | 2 +-
 drivers/net/appletalk/ltpc.c                           | 2 +-
 drivers/net/e1000e/e1000.h                             | 2 +-
 drivers/net/ehea/ehea.h                                | 2 +-
 drivers/net/igbvf/igbvf.h                              | 2 +-
 drivers/net/mlx4/en_netdev.c                           | 2 +-
 drivers/net/qlge/qlge_mpi.c                            | 6 +++---
 drivers/net/skfp/h/smt.h                               | 2 +-
 drivers/net/tokenring/3c359.c                          | 2 +-
 drivers/net/tokenring/lanstreamer.c                    | 2 +-
 drivers/net/tokenring/olympic.c                        | 2 +-
 drivers/net/ucc_geth_ethtool.c                         | 2 +-
 drivers/net/wireless/rt2x00/rt2x00lib.h                | 2 +-
 drivers/net/wireless/wavelan_cs.c                      | 2 +-
 drivers/scsi/lpfc/lpfc_scsi.c                          | 4 ++--
 sound/pci/sis7019.h                                    | 4 ++--
 22 files changed, 27 insertions(+), 27 deletions(-)

(limited to 'drivers/scsi/lpfc')

diff --git a/Documentation/powerpc/dts-bindings/fsl/board.txt b/Documentation/powerpc/dts-bindings/fsl/board.txt
index 6c974d28eeb..e8b5bc24d0a 100644
--- a/Documentation/powerpc/dts-bindings/fsl/board.txt
+++ b/Documentation/powerpc/dts-bindings/fsl/board.txt
@@ -38,7 +38,7 @@ Required properities:
 - reg : Should contain the address and the length of the GPIO bank
   register.
 - #gpio-cells : Should be two. The first cell is the pin number and the
-  second cell is used to specify optional paramters (currently unused).
+  second cell is used to specify optional parameters (currently unused).
 - gpio-controller : Marks the port as GPIO controller.
 
 Example:
diff --git a/Documentation/powerpc/dts-bindings/fsl/cpm_qe/gpio.txt b/Documentation/powerpc/dts-bindings/fsl/cpm_qe/gpio.txt
index 1815dfede1b..349f79fd707 100644
--- a/Documentation/powerpc/dts-bindings/fsl/cpm_qe/gpio.txt
+++ b/Documentation/powerpc/dts-bindings/fsl/cpm_qe/gpio.txt
@@ -11,7 +11,7 @@ Required properties:
   "fsl,cpm1-pario-bank-c", "fsl,cpm1-pario-bank-d",
   "fsl,cpm1-pario-bank-e", "fsl,cpm2-pario-bank"
 - #gpio-cells : Should be two. The first cell is the pin number and the
-  second cell is used to specify optional paramters (currently unused).
+  second cell is used to specify optional parameters (currently unused).
 - gpio-controller : Marks the port as GPIO controller.
 
 Example of three SOC GPIO banks defined as gpio-controller nodes:
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S
index 22596d70fc2..2c54e970d0d 100644
--- a/arch/s390/kernel/head.S
+++ b/arch/s390/kernel/head.S
@@ -64,7 +64,7 @@ __HEAD
 	.org   0x100
 #
 # subroutine for loading from tape
-# Paramters:
+# Parameters:
 #  R1 = device number
 #  R2 = load address
 .Lloader:
diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
index 6273d98d00e..ac176da1f94 100644
--- a/drivers/ata/libata-acpi.c
+++ b/drivers/ata/libata-acpi.c
@@ -748,9 +748,9 @@ static int ata_acpi_run_tf(struct ata_device *dev,
 /**
  * ata_acpi_exec_tfs - get then write drive taskfile settings
  * @dev: target ATA device
- * @nr_executed: out paramter for the number of executed commands
+ * @nr_executed: out parameter for the number of executed commands
  *
- * Evaluate _GTF and excute returned taskfiles.
+ * Evaluate _GTF and execute returned taskfiles.
  *
  * LOCKING:
  * EH context.
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 94919ad03df..fa22f94ca41 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -2864,7 +2864,7 @@ static int ata_eh_revalidate_and_attach(struct ata_link *link,
 /**
  *	ata_set_mode - Program timings and issue SET FEATURES - XFER
  *	@link: link on which timings will be programmed
- *	@r_failed_dev: out paramter for failed device
+ *	@r_failed_dev: out parameter for failed device
  *
  *	Set ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
  *	ata_set_mode() fails, pointer to the failing device is
diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
index facfdb1fa71..d205d493a68 100644
--- a/drivers/edac/e752x_edac.c
+++ b/drivers/edac/e752x_edac.c
@@ -1084,7 +1084,7 @@ static void e752x_init_sysbus_parity_mask(struct e752x_pvt *pvt)
 	struct pci_dev *dev = pvt->dev_d0f1;
 	int enable = 1;
 
-	/* Allow module paramter override, else see if CPU supports parity */
+	/* Allow module parameter override, else see if CPU supports parity */
 	if (sysbus_parity != -1) {
 		enable = sysbus_parity;
 	} else if (cpu_id[0] &&
diff --git a/drivers/isdn/divert/isdn_divert.c b/drivers/isdn/divert/isdn_divert.c
index 7d97d54588d..77e9fdda059 100644
--- a/drivers/isdn/divert/isdn_divert.c
+++ b/drivers/isdn/divert/isdn_divert.c
@@ -183,7 +183,7 @@ int cf_command(int drvid, int mode,
           (mode != 1) ? "" : " 0 ",
           (mode != 1) ? "" : fwd_nr);
  
-  retval = divert_if.ll_cmd(&cs->ics); /* excute command */
+  retval = divert_if.ll_cmd(&cs->ics); /* execute command */
 
   if (!retval)
    { cs->prev = NULL;
diff --git a/drivers/net/appletalk/ltpc.c b/drivers/net/appletalk/ltpc.c
index 78cc7146913..b642647170b 100644
--- a/drivers/net/appletalk/ltpc.c
+++ b/drivers/net/appletalk/ltpc.c
@@ -1220,7 +1220,7 @@ static int __init ltpc_setup(char *str)
 		if (ints[0] > 2) {
 			dma = ints[3];
 		}
-		/* ignore any other paramters */
+		/* ignore any other parameters */
 	}
 	return 1;
 }
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index f37360aa12a..44f0bf23daf 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -62,7 +62,7 @@ struct e1000_info;
 	e_printk(KERN_NOTICE, adapter, format, ## arg)
 
 
-/* Interrupt modes, as used by the IntMode paramter */
+/* Interrupt modes, as used by the IntMode parameter */
 #define E1000E_INT_MODE_LEGACY		0
 #define E1000E_INT_MODE_MSI		1
 #define E1000E_INT_MODE_MSIX		2
diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h
index 16a41389575..78952f8324e 100644
--- a/drivers/net/ehea/ehea.h
+++ b/drivers/net/ehea/ehea.h
@@ -268,7 +268,7 @@ struct ehea_qp_init_attr {
 };
 
 /*
- * Event Queue attributes, passed as paramter
+ * Event Queue attributes, passed as parameter
  */
 struct ehea_eq_attr {
 	u32 type;
diff --git a/drivers/net/igbvf/igbvf.h b/drivers/net/igbvf/igbvf.h
index 4bff35e4687..d488733893a 100644
--- a/drivers/net/igbvf/igbvf.h
+++ b/drivers/net/igbvf/igbvf.h
@@ -45,7 +45,7 @@ struct igbvf_adapter;
 /* Interrupt defines */
 #define IGBVF_START_ITR                 648 /* ~6000 ints/sec */
 
-/* Interrupt modes, as used by the IntMode paramter */
+/* Interrupt modes, as used by the IntMode parameter */
 #define IGBVF_INT_MODE_LEGACY           0
 #define IGBVF_INT_MODE_MSI              1
 #define IGBVF_INT_MODE_MSIX             2
diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
index 7bcc49de163..e8eeef0c9c9 100644
--- a/drivers/net/mlx4/en_netdev.c
+++ b/drivers/net/mlx4/en_netdev.c
@@ -371,7 +371,7 @@ static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
 	int i;
 
 	/* If we haven't received a specific coalescing setting
-	 * (module param), we set the moderation paramters as follows:
+	 * (module param), we set the moderation parameters as follows:
 	 * - moder_cnt is set to the number of mtu sized packets to
 	 *   satisfy our coelsing target.
 	 * - moder_time is set to a fixed value.
diff --git a/drivers/net/qlge/qlge_mpi.c b/drivers/net/qlge/qlge_mpi.c
index 9f81b797f10..ac9493f6c1a 100644
--- a/drivers/net/qlge/qlge_mpi.c
+++ b/drivers/net/qlge/qlge_mpi.c
@@ -141,7 +141,7 @@ end:
 /* We are being asked by firmware to accept
  * a change to the port.  This is only
  * a change to max frame sizes (Tx/Rx), pause
- * paramters, or loopback mode. We wake up a worker
+ * parameters, or loopback mode. We wake up a worker
  * to handler processing this since a mailbox command
  * will need to be sent to ACK the request.
  */
@@ -371,7 +371,7 @@ static int ql_mpi_handler(struct ql_adapter *qdev, struct mbox_params *mbcp)
 	/* We are being asked by firmware to accept
 	 * a change to the port.  This is only
 	 * a change to max frame sizes (Tx/Rx), pause
-	 * paramters, or loopback mode.
+	 * parameters, or loopback mode.
 	 */
 	case AEN_IDC_REQ:
 		status = ql_idc_req_aen(qdev);
@@ -380,7 +380,7 @@ static int ql_mpi_handler(struct ql_adapter *qdev, struct mbox_params *mbcp)
 	/* Process and inbound IDC event.
 	 * This will happen when we're trying to
 	 * change tx/rx max frame size, change pause
-	 * paramters or loopback mode.
+	 * parameters or loopback mode.
 	 */
 	case AEN_IDC_CMPLT:
 	case AEN_IDC_EXT:
diff --git a/drivers/net/skfp/h/smt.h b/drivers/net/skfp/h/smt.h
index 1ff589988d1..2976757a36f 100644
--- a/drivers/net/skfp/h/smt.h
+++ b/drivers/net/skfp/h/smt.h
@@ -413,7 +413,7 @@ struct smt_p_reason {
 #define SMT_RDF_SUCCESS	0x00000003	/* success (PMF) */
 #define SMT_RDF_BADSET	0x00000004	/* bad set count (PMF) */
 #define SMT_RDF_ILLEGAL 0x00000005	/* read only (PMF) */
-#define SMT_RDF_NOPARAM	0x6		/* paramter not supported (PMF) */
+#define SMT_RDF_NOPARAM	0x6		/* parameter not supported (PMF) */
 #define SMT_RDF_RANGE	0x8		/* out of range */
 #define SMT_RDF_AUTHOR	0x9		/* not autohorized */
 #define SMT_RDF_LENGTH	0x0a		/* length error */
diff --git a/drivers/net/tokenring/3c359.c b/drivers/net/tokenring/3c359.c
index 534c0f38483..0337b9d673f 100644
--- a/drivers/net/tokenring/3c359.c
+++ b/drivers/net/tokenring/3c359.c
@@ -79,7 +79,7 @@ MODULE_AUTHOR("Mike Phillips <mikep@linuxtr.net>") ;
 MODULE_DESCRIPTION("3Com 3C359 Velocity XL Token Ring Adapter Driver \n") ;
 MODULE_FIRMWARE(FW_NAME);
 
-/* Module paramters */
+/* Module parameters */
 
 /* Ring Speed 0,4,16 
  * 0 = Autosense   
diff --git a/drivers/net/tokenring/lanstreamer.c b/drivers/net/tokenring/lanstreamer.c
index 2e70ee8f145..46a2cc92d97 100644
--- a/drivers/net/tokenring/lanstreamer.c
+++ b/drivers/net/tokenring/lanstreamer.c
@@ -169,7 +169,7 @@ static char *open_min_error[] = {
 	"Monitor Contention failer for RPL", "FDX Protocol Error"
 };
 
-/* Module paramters */
+/* Module parameters */
 
 /* Ring Speed 0,4,16
  * 0 = Autosense         
diff --git a/drivers/net/tokenring/olympic.c b/drivers/net/tokenring/olympic.c
index d068a9d3688..2d819fc8558 100644
--- a/drivers/net/tokenring/olympic.c
+++ b/drivers/net/tokenring/olympic.c
@@ -132,7 +132,7 @@ static char *open_min_error[] = {"No error", "Function Failure", "Signal Lost",
 				   "Reserved", "Reserved", "No Monitor Detected for RPL", 
 				   "Monitor Contention failer for RPL", "FDX Protocol Error"};
 
-/* Module paramters */
+/* Module parameters */
 
 MODULE_AUTHOR("Mike Phillips <mikep@linuxtr.net>") ; 
 MODULE_DESCRIPTION("Olympic PCI/Cardbus Chipset Driver") ; 
diff --git a/drivers/net/ucc_geth_ethtool.c b/drivers/net/ucc_geth_ethtool.c
index 6fcb500257b..61fe80dda3e 100644
--- a/drivers/net/ucc_geth_ethtool.c
+++ b/drivers/net/ucc_geth_ethtool.c
@@ -7,7 +7,7 @@
  *
  * Limitation:
  * Can only get/set setttings of the first queue.
- * Need to re-open the interface manually after changing some paramters.
+ * Need to re-open the interface manually after changing some parameters.
  *
  * This program is free software; you can redistribute  it and/or modify it
  * under  the terms of  the GNU General  Public License as published by the
diff --git a/drivers/net/wireless/rt2x00/rt2x00lib.h b/drivers/net/wireless/rt2x00/rt2x00lib.h
index a631613177d..d83e3794d34 100644
--- a/drivers/net/wireless/rt2x00/rt2x00lib.h
+++ b/drivers/net/wireless/rt2x00/rt2x00lib.h
@@ -235,7 +235,7 @@ void rt2x00link_reset_tuner(struct rt2x00_dev *rt2x00dev, bool antenna);
  * @rt2x00dev: Pointer to &struct rt2x00_dev.
  *
  * Initialize work structure and all link tuning related
- * paramters. This will not start the link tuning process itself.
+ * parameters. This will not start the link tuning process itself.
  */
 void rt2x00link_register(struct rt2x00_dev *rt2x00dev);
 
diff --git a/drivers/net/wireless/wavelan_cs.c b/drivers/net/wireless/wavelan_cs.c
index e55b33961ae..fa2821be44c 100644
--- a/drivers/net/wireless/wavelan_cs.c
+++ b/drivers/net/wireless/wavelan_cs.c
@@ -138,7 +138,7 @@ psa_read(struct net_device *	dev,
 
 /*------------------------------------------------------------------*/
 /*
- * Write the Paramter Storage Area to the WaveLAN card's memory
+ * Write the Parameter Storage Area to the WaveLAN card's memory
  */
 static void
 psa_write(struct net_device *	dev,
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 8032c5adb6a..679adfff0bf 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -827,8 +827,8 @@ lpfc_cmd_blksize(struct scsi_cmnd *sc)
  * @reftag:         out: ref tag (reference tag)
  *
  * Description:
- *   Extract DIF paramters from the command if possible.  Otherwise,
- *   use default paratmers.
+ *   Extract DIF parameters from the command if possible.  Otherwise,
+ *   use default parameters.
  *
  **/
 static inline void
diff --git a/sound/pci/sis7019.h b/sound/pci/sis7019.h
index 013b6739a74..bc8c7681940 100644
--- a/sound/pci/sis7019.h
+++ b/sound/pci/sis7019.h
@@ -203,7 +203,7 @@
 #define SIS_WEISR_B	0xac
 
 
-/* Playback DMA parameters (paramter RAM) */
+/* Playback DMA parameters (parameter RAM) */
 #define SIS_PLAY_DMA_OFFSET	0x0000
 #define SIS_PLAY_DMA_SIZE	0x10
 #define SIS_PLAY_DMA_ADDR(addr, num) \
@@ -228,7 +228,7 @@
 #define		SIS_PLAY_DMA_SSO_MASK		0xffff0000
 #define		SIS_PLAY_DMA_ESO_MASK		0x0000ffff
 
-/* Capture DMA parameters (paramter RAM) */
+/* Capture DMA parameters (parameter RAM) */
 #define SIS_CAPTURE_DMA_OFFSET	0x0800
 #define SIS_CAPTURE_DMA_SIZE	0x10
 #define SIS_CAPTURE_DMA_ADDR(addr, num) \
-- 
cgit v1.2.3-70-g09d2