From 5df6d737dd4b0fe9eccf943abb3677cfea05a6c4 Mon Sep 17 00:00:00 2001
From: Abhijeet Joglekar <abjoglek@cisco.com>
Date: Fri, 17 Apr 2009 18:33:26 -0700
Subject: [SCSI] fnic: Add new Cisco PCI-Express FCoE HBA

fnic is a driver for the Cisco PCI-Express FCoE HBA

Signed-off-by: Abhijeet Joglekar <abjoglek@cisco.com>
Signed-off-by: Joe Eykholt <jeykholt@cisco.com>
Signed-off-by: Mike Christie <michaelc@cs.wisc.edu>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
---
 drivers/scsi/Kconfig | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 8ed2990c826..fb2740789b6 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -628,6 +628,17 @@ config FCOE
 	---help---
 	  Fibre Channel over Ethernet module
 
+config FCOE_FNIC
+	tristate "Cisco FNIC Driver"
+	depends on PCI && X86
+	select LIBFC
+	help
+	  This is support for the Cisco PCI-Express FCoE HBA.
+
+	  To compile this driver as a module, choose M here and read
+	  <file:Documentation/scsi/scsi.txt>.
+	  The module will be called fnic.
+
 config SCSI_DMX3191D
 	tristate "DMX3191D SCSI support"
 	depends on PCI && SCSI


From 2ad52f473bbc1aa5b33c4a329b8a359f125e19d1 Mon Sep 17 00:00:00 2001
From: Jeff Garzik <jeff@garzik.org>
Date: Fri, 8 May 2009 16:35:37 -0400
Subject: [SCSI] mvsas: move into new directory drivers/scsi/mvsas/

Zero functional changes, just file movement.

This commit prepares for the upcoming integration of the
Marvell-provided driver update that splits the driver into support
for both 64xx and 94xx chip families.

Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
---
 drivers/scsi/Kconfig        |   11 +-
 drivers/scsi/Makefile       |    2 +-
 drivers/scsi/mvsas.c        | 3222 -------------------------------------------
 drivers/scsi/mvsas/Kconfig  |   35 +
 drivers/scsi/mvsas/Makefile |   26 +
 drivers/scsi/mvsas/mv_sas.c | 3222 +++++++++++++++++++++++++++++++++++++++++++
 6 files changed, 3285 insertions(+), 3233 deletions(-)
 delete mode 100644 drivers/scsi/mvsas.c
 create mode 100644 drivers/scsi/mvsas/Kconfig
 create mode 100644 drivers/scsi/mvsas/Makefile
 create mode 100644 drivers/scsi/mvsas/mv_sas.c

diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index fb2740789b6..6e8106a70b3 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -508,6 +508,7 @@ config SCSI_AIC7XXX_OLD
 
 source "drivers/scsi/aic7xxx/Kconfig.aic79xx"
 source "drivers/scsi/aic94xx/Kconfig"
+source "drivers/scsi/mvsas/Kconfig"
 
 config SCSI_DPT_I2O
 	tristate "Adaptec I2O RAID support "
@@ -1050,16 +1051,6 @@ config SCSI_IZIP_SLOW_CTR
 
 	  Generally, saying N is fine.
 
-config SCSI_MVSAS
-	tristate "Marvell 88SE6440 SAS/SATA support"
-	depends on PCI && SCSI
-	select SCSI_SAS_LIBSAS
-	help
-	  This driver supports Marvell SAS/SATA PCI devices.
-
-	  To compile this driver as a module, choose M here: the module
-	  will be called mvsas.
-
 config SCSI_NCR53C406A
 	tristate "NCR53c406a SCSI support"
 	depends on ISA && SCSI
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index a5049cfb40e..8795c309963 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -126,7 +126,7 @@ obj-$(CONFIG_SCSI_IBMVSCSIS)	+= ibmvscsi/
 obj-$(CONFIG_SCSI_IBMVFC)	+= ibmvscsi/
 obj-$(CONFIG_SCSI_HPTIOP)	+= hptiop.o
 obj-$(CONFIG_SCSI_STEX)		+= stex.o
-obj-$(CONFIG_SCSI_MVSAS)	+= mvsas.o
+obj-$(CONFIG_SCSI_MVSAS)	+= mvsas/
 obj-$(CONFIG_PS3_ROM)		+= ps3rom.o
 obj-$(CONFIG_SCSI_CXGB3_ISCSI)	+= libiscsi.o libiscsi_tcp.o cxgb3i/
 
diff --git a/drivers/scsi/mvsas.c b/drivers/scsi/mvsas.c
deleted file mode 100644
index e4acebd10d1..00000000000
--- a/drivers/scsi/mvsas.c
+++ /dev/null
@@ -1,3222 +0,0 @@
-/*
-	mvsas.c - Marvell 88SE6440 SAS/SATA support
-
-	Copyright 2007 Red Hat, Inc.
-	Copyright 2008 Marvell. <kewei@marvell.com>
-
-	This program is free software; you can redistribute it and/or
-	modify it under the terms of the GNU General Public License as
-	published by the Free Software Foundation; either version 2,
-	or (at your option) any later version.
-
-	This program is distributed in the hope that it will be useful,
-	but WITHOUT ANY WARRANTY; without even the implied warranty
-	of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-	See the GNU General Public License for more details.
-
-	You should have received a copy of the GNU General Public
-	License along with this program; see the file COPYING.	If not,
-	write to the Free Software Foundation, 675 Mass Ave, Cambridge,
-	MA 02139, USA.
-
-	---------------------------------------------------------------
-
-	Random notes:
-	* hardware supports controlling the endianness of data
-	  structures.  This permits elimination of all the le32_to_cpu()
-	  and cpu_to_le32() conversions.
-
- */
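/* Illustrative sketch (not part of the original driver): the note above
 * suggests the chip can byte-swap on the host's behalf.  Using the
 * CCTL_ENDIAN_* bits and the mr32()/mw32() accessors defined in this
 * file, an init-time endian policy might look like the following.
 */
static void mvs_set_endian_example(void __iomem *regs)
{
	u32 tmp = mr32(CTL);

#ifdef __BIG_ENDIAN
	/* MSB first: let hardware swap, so cpu_to_le32()/le32_to_cpu()
	 * conversions could be dropped */
	tmp |= CCTL_ENDIAN_CMD | CCTL_ENDIAN_OPEN |
	       CCTL_ENDIAN_RSP | CCTL_ENDIAN_DATA;
#else
	/* LSB first: descriptors stay little-endian, as this driver assumes */
	tmp &= ~(CCTL_ENDIAN_CMD | CCTL_ENDIAN_OPEN |
		 CCTL_ENDIAN_RSP | CCTL_ENDIAN_DATA);
#endif
	mw32(CTL, tmp);
}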
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/interrupt.h>
-#include <linux/spinlock.h>
-#include <linux/delay.h>
-#include <linux/dma-mapping.h>
-#include <linux/ctype.h>
-#include <scsi/libsas.h>
-#include <scsi/scsi_tcq.h>
-#include <scsi/sas_ata.h>
-#include <asm/io.h>
-
-#define DRV_NAME	"mvsas"
-#define DRV_VERSION	"0.5.2"
-#define _MV_DUMP	0
-#define MVS_DISABLE_NVRAM
-#define MVS_DISABLE_MSI
-
-#define mr32(reg)	readl(regs + MVS_##reg)
-#define mw32(reg,val)	writel((val), regs + MVS_##reg)
-#define mw32_f(reg,val)	do {			\
-	writel((val), regs + MVS_##reg);	\
-	readl(regs + MVS_##reg);		\
-	} while (0)
-
-#define MVS_ID_NOT_MAPPED	0x7f
-#define MVS_CHIP_SLOT_SZ	(1U << mvi->chip->slot_width)
-
-/* offset for D2H FIS in the Received FIS List Structure */
-#define SATA_RECEIVED_D2H_FIS(reg_set)	\
-	((void *) mvi->rx_fis + 0x400 + 0x100 * reg_set + 0x40)
-#define SATA_RECEIVED_PIO_FIS(reg_set)	\
-	((void *) mvi->rx_fis + 0x400 + 0x100 * reg_set + 0x20)
-#define UNASSOC_D2H_FIS(id)		\
-	((void *) mvi->rx_fis + 0x100 * id)
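/* Illustrative worked example: for SATA register set 2, the D2H macro
 * above resolves to rx_fis + 0x400 + 2 * 0x100 + 0x40 = rx_fis + 0x640;
 * the per-register-set FIS areas start at offset 0x400, each set
 * occupies 0x100 bytes, and the D2H FIS sits 0x40 bytes into its set.
 */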
-
-#define for_each_phy(__lseq_mask, __mc, __lseq, __rest)			\
-	for ((__mc) = (__lseq_mask), (__lseq) = 0;			\
-					(__mc) != 0 && __rest;		\
-					(++__lseq), (__mc) >>= 1)
-
-/* driver compile-time configuration */
-enum driver_configuration {
-	MVS_TX_RING_SZ		= 1024,	/* TX ring size (12-bit) */
-	MVS_RX_RING_SZ		= 1024, /* RX ring size (12-bit) */
-					/* software requires power-of-2
-					   ring size */
-
-	MVS_SLOTS		= 512,	/* command slots */
-	MVS_SLOT_BUF_SZ		= 8192, /* cmd tbl + IU + status + PRD */
-	MVS_SSP_CMD_SZ		= 64,	/* SSP command table buffer size */
-	MVS_ATA_CMD_SZ		= 96,	/* SATA command table buffer size */
-	MVS_OAF_SZ		= 64,	/* Open address frame buffer size */
-
-	MVS_RX_FIS_COUNT	= 17,	/* Optional rx'd FISs (max 17) */
-
-	MVS_QUEUE_SIZE		= 30,	/* Support Queue depth */
-	MVS_CAN_QUEUE		= MVS_SLOTS - 1,	/* SCSI Queue depth */
-};
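/* Illustrative note on the power-of-2 requirement above: ring indices
 * are wrapped with a mask instead of a modulo, e.g.
 *
 *	next = (idx + 1) & (MVS_RX_RING_SZ - 1);
 *
 * which is only equivalent to "% MVS_RX_RING_SZ" when the ring size is
 * a power of two.
 */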
-
-/* unchangeable hardware details */
-enum hardware_details {
-	MVS_MAX_PHYS		= 8,	/* max. possible phys */
-	MVS_MAX_PORTS		= 8,	/* max. possible ports */
-	MVS_RX_FISL_SZ		= 0x400 + (MVS_RX_FIS_COUNT * 0x100),
-};
-
-/* peripheral registers (BAR2) */
-enum peripheral_registers {
-	SPI_CTL			= 0x10,	/* EEPROM control */
-	SPI_CMD			= 0x14,	/* EEPROM command */
-	SPI_DATA		= 0x18, /* EEPROM data */
-};
-
-enum peripheral_register_bits {
-	TWSI_RDY		= (1U << 7),	/* EEPROM interface ready */
-	TWSI_RD			= (1U << 4),	/* EEPROM read access */
-
-	SPI_ADDR_MASK		= 0x3ffff,	/* bits 17:0 */
-};
-
-/* enhanced mode registers (BAR4) */
-enum hw_registers {
-	MVS_GBL_CTL		= 0x04,  /* global control */
-	MVS_GBL_INT_STAT	= 0x08,  /* global irq status */
-	MVS_GBL_PI		= 0x0C,  /* ports implemented bitmask */
-	MVS_GBL_PORT_TYPE	= 0xa0,  /* port type */
-
-	MVS_CTL			= 0x100, /* SAS/SATA port configuration */
-	MVS_PCS			= 0x104, /* SAS/SATA port control/status */
-	MVS_CMD_LIST_LO		= 0x108, /* cmd list addr */
-	MVS_CMD_LIST_HI		= 0x10C,
-	MVS_RX_FIS_LO		= 0x110, /* RX FIS list addr */
-	MVS_RX_FIS_HI		= 0x114,
-
-	MVS_TX_CFG		= 0x120, /* TX configuration */
-	MVS_TX_LO		= 0x124, /* TX (delivery) ring addr */
-	MVS_TX_HI		= 0x128,
-
-	MVS_TX_PROD_IDX		= 0x12C, /* TX producer pointer */
-	MVS_TX_CONS_IDX		= 0x130, /* TX consumer pointer (RO) */
-	MVS_RX_CFG		= 0x134, /* RX configuration */
-	MVS_RX_LO		= 0x138, /* RX (completion) ring addr */
-	MVS_RX_HI		= 0x13C,
-	MVS_RX_CONS_IDX		= 0x140, /* RX consumer pointer (RO) */
-
-	MVS_INT_COAL		= 0x148, /* Int coalescing config */
-	MVS_INT_COAL_TMOUT	= 0x14C, /* Int coalescing timeout */
-	MVS_INT_STAT		= 0x150, /* Central int status */
-	MVS_INT_MASK		= 0x154, /* Central int enable */
-	MVS_INT_STAT_SRS	= 0x158, /* SATA register set status */
-	MVS_INT_MASK_SRS	= 0x15C,
-
-					 /* ports 1-3 follow after this */
-	MVS_P0_INT_STAT		= 0x160, /* port0 interrupt status */
-	MVS_P0_INT_MASK		= 0x164, /* port0 interrupt mask */
-	MVS_P4_INT_STAT		= 0x200, /* Port 4 interrupt status */
-	MVS_P4_INT_MASK		= 0x204, /* Port 4 interrupt enable mask */
-
-					 /* ports 1-3 follow after this */
-	MVS_P0_SER_CTLSTAT	= 0x180, /* port0 serial control/status */
-	MVS_P4_SER_CTLSTAT	= 0x220, /* port4 serial control/status */
-
-	MVS_CMD_ADDR		= 0x1B8, /* Command register port (addr) */
-	MVS_CMD_DATA		= 0x1BC, /* Command register port (data) */
-
-					 /* ports 1-3 follow after this */
-	MVS_P0_CFG_ADDR		= 0x1C0, /* port0 phy register address */
-	MVS_P0_CFG_DATA		= 0x1C4, /* port0 phy register data */
-	MVS_P4_CFG_ADDR		= 0x230, /* Port 4 config address */
-	MVS_P4_CFG_DATA		= 0x234, /* Port 4 config data */
-
-					 /* ports 1-3 follow after this */
-	MVS_P0_VSR_ADDR		= 0x1E0, /* port0 VSR address */
-	MVS_P0_VSR_DATA		= 0x1E4, /* port0 VSR data */
-	MVS_P4_VSR_ADDR		= 0x250, /* port 4 VSR addr */
-	MVS_P4_VSR_DATA		= 0x254, /* port 4 VSR data */
-};
-
-enum hw_register_bits {
-	/* MVS_GBL_CTL */
-	INT_EN			= (1U << 1),	/* Global int enable */
-	HBA_RST			= (1U << 0),	/* HBA reset */
-
-	/* MVS_GBL_INT_STAT */
-	INT_XOR			= (1U << 4),	/* XOR engine event */
-	INT_SAS_SATA		= (1U << 0),	/* SAS/SATA event */
-
-	/* MVS_GBL_PORT_TYPE */			/* shl for ports 1-3 */
-	SATA_TARGET		= (1U << 16),	/* port0 SATA target enable */
-	MODE_AUTO_DET_PORT7 = (1U << 15),	/* per-port SAS/SATA autodetect */
-	MODE_AUTO_DET_PORT6 = (1U << 14),
-	MODE_AUTO_DET_PORT5 = (1U << 13),
-	MODE_AUTO_DET_PORT4 = (1U << 12),
-	MODE_AUTO_DET_PORT3 = (1U << 11),
-	MODE_AUTO_DET_PORT2 = (1U << 10),
-	MODE_AUTO_DET_PORT1 = (1U << 9),
-	MODE_AUTO_DET_PORT0 = (1U << 8),
-	MODE_AUTO_DET_EN    =	MODE_AUTO_DET_PORT0 | MODE_AUTO_DET_PORT1 |
-				MODE_AUTO_DET_PORT2 | MODE_AUTO_DET_PORT3 |
-				MODE_AUTO_DET_PORT4 | MODE_AUTO_DET_PORT5 |
-				MODE_AUTO_DET_PORT6 | MODE_AUTO_DET_PORT7,
-	MODE_SAS_PORT7_MASK = (1U << 7),  /* per-port SAS(1), SATA(0) mode */
-	MODE_SAS_PORT6_MASK = (1U << 6),
-	MODE_SAS_PORT5_MASK = (1U << 5),
-	MODE_SAS_PORT4_MASK = (1U << 4),
-	MODE_SAS_PORT3_MASK = (1U << 3),
-	MODE_SAS_PORT2_MASK = (1U << 2),
-	MODE_SAS_PORT1_MASK = (1U << 1),
-	MODE_SAS_PORT0_MASK = (1U << 0),
-	MODE_SAS_SATA	=	MODE_SAS_PORT0_MASK | MODE_SAS_PORT1_MASK |
-				MODE_SAS_PORT2_MASK | MODE_SAS_PORT3_MASK |
-				MODE_SAS_PORT4_MASK | MODE_SAS_PORT5_MASK |
-				MODE_SAS_PORT6_MASK | MODE_SAS_PORT7_MASK,
-
-				/* SAS_MODE value may be
-				 * dictated (in hw) by values
-				 * of SATA_TARGET & AUTO_DET
-				 */
-
-	/* MVS_TX_CFG */
-	TX_EN			= (1U << 16),	/* Enable TX */
-	TX_RING_SZ_MASK		= 0xfff,	/* TX ring size, bits 11:0 */
-
-	/* MVS_RX_CFG */
-	RX_EN			= (1U << 16),	/* Enable RX */
-	RX_RING_SZ_MASK		= 0xfff,	/* RX ring size, bits 11:0 */
-
-	/* MVS_INT_COAL */
-	COAL_EN			= (1U << 16),	/* Enable int coalescing */
-
-	/* MVS_INT_STAT, MVS_INT_MASK */
-	CINT_I2C		= (1U << 31),	/* I2C event */
-	CINT_SW0		= (1U << 30),	/* software event 0 */
-	CINT_SW1		= (1U << 29),	/* software event 1 */
-	CINT_PRD_BC		= (1U << 28),	/* PRD BC err for read cmd */
-	CINT_DMA_PCIE		= (1U << 27),	/* DMA to PCIE timeout */
-	CINT_MEM		= (1U << 26),	/* int mem parity err */
-	CINT_I2C_SLAVE		= (1U << 25),	/* slave I2C event */
-	CINT_SRS		= (1U << 3),	/* SRS event */
-	CINT_CI_STOP		= (1U << 1),	/* cmd issue stopped */
-	CINT_DONE		= (1U << 0),	/* cmd completion */
-
-						/* shl for ports 1-3 */
-	CINT_PORT_STOPPED	= (1U << 16),	/* port0 stopped */
-	CINT_PORT		= (1U << 8),	/* port0 event */
-	CINT_PORT_MASK_OFFSET	= 8,
-	CINT_PORT_MASK		= (0xFF << CINT_PORT_MASK_OFFSET),
-
-	/* TX (delivery) ring bits */
-	TXQ_CMD_SHIFT		= 29,
-	TXQ_CMD_SSP		= 1,		/* SSP protocol */
-	TXQ_CMD_SMP		= 2,		/* SMP protocol */
-	TXQ_CMD_STP		= 3,		/* STP/SATA protocol */
-	TXQ_CMD_SSP_FREE_LIST	= 4,		/* add to SSP targ free list */
-	TXQ_CMD_SLOT_RESET	= 7,		/* reset command slot */
-	TXQ_MODE_I		= (1U << 28),	/* mode: 0=target,1=initiator */
-	TXQ_PRIO_HI		= (1U << 27),	/* priority: 0=normal, 1=high */
-	TXQ_SRS_SHIFT		= 20,		/* SATA register set */
-	TXQ_SRS_MASK		= 0x7f,
-	TXQ_PHY_SHIFT		= 12,		/* PHY bitmap */
-	TXQ_PHY_MASK		= 0xff,
-	TXQ_SLOT_MASK		= 0xfff,	/* slot number */
-
-	/* RX (completion) ring bits */
-	RXQ_GOOD		= (1U << 23),	/* Response good */
-	RXQ_SLOT_RESET		= (1U << 21),	/* Slot reset complete */
-	RXQ_CMD_RX		= (1U << 20),	/* target cmd received */
-	RXQ_ATTN		= (1U << 19),	/* attention */
-	RXQ_RSP			= (1U << 18),	/* response frame xfer'd */
-	RXQ_ERR			= (1U << 17),	/* err info rec xfer'd */
-	RXQ_DONE		= (1U << 16),	/* cmd complete */
-	RXQ_SLOT_MASK		= 0xfff,	/* slot number */
-
-	/* mvs_cmd_hdr bits */
-	MCH_PRD_LEN_SHIFT	= 16,		/* 16-bit PRD table len */
-	MCH_SSP_FR_TYPE_SHIFT	= 13,		/* SSP frame type */
-
-						/* SSP initiator only */
-	MCH_SSP_FR_CMD		= 0x0,		/* COMMAND frame */
-
-						/* SSP initiator or target */
-	MCH_SSP_FR_TASK		= 0x1,		/* TASK frame */
-
-						/* SSP target only */
-	MCH_SSP_FR_XFER_RDY	= 0x4,		/* XFER_RDY frame */
-	MCH_SSP_FR_RESP		= 0x5,		/* RESPONSE frame */
-	MCH_SSP_FR_READ		= 0x6,		/* Read DATA frame(s) */
-	MCH_SSP_FR_READ_RESP	= 0x7,		/* ditto, plus RESPONSE */
-
-	MCH_PASSTHRU		= (1U << 12),	/* pass-through (SSP) */
-	MCH_FBURST		= (1U << 11),	/* first burst (SSP) */
-	MCH_CHK_LEN		= (1U << 10),	/* chk xfer len (SSP) */
-	MCH_RETRY		= (1U << 9),	/* tport layer retry (SSP) */
-	MCH_PROTECTION		= (1U << 8),	/* protection info rec (SSP) */
-	MCH_RESET		= (1U << 7),	/* Reset (STP/SATA) */
-	MCH_FPDMA		= (1U << 6),	/* First party DMA (STP/SATA) */
-	MCH_ATAPI		= (1U << 5),	/* ATAPI (STP/SATA) */
-	MCH_BIST		= (1U << 4),	/* BIST activate (STP/SATA) */
-	MCH_PMP_MASK		= 0xf,		/* PMP from cmd FIS (STP/SATA)*/
-
-	CCTL_RST		= (1U << 5),	/* port logic reset */
-
-						/* 0(LSB first), 1(MSB first) */
-	CCTL_ENDIAN_DATA	= (1U << 3),	/* PRD data */
-	CCTL_ENDIAN_RSP		= (1U << 2),	/* response frame */
-	CCTL_ENDIAN_OPEN	= (1U << 1),	/* open address frame */
-	CCTL_ENDIAN_CMD		= (1U << 0),	/* command table */
-
-	/* MVS_Px_SER_CTLSTAT (per-phy control) */
-	PHY_SSP_RST		= (1U << 3),	/* reset SSP link layer */
-	PHY_BCAST_CHG		= (1U << 2),	/* broadcast(change) notif */
-	PHY_RST_HARD		= (1U << 1),	/* hard reset + phy reset */
-	PHY_RST			= (1U << 0),	/* phy reset */
-	PHY_MIN_SPP_PHYS_LINK_RATE_MASK = (0xF << 8),
-	PHY_MAX_SPP_PHYS_LINK_RATE_MASK = (0xF << 12),
-	PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET = (16),
-	PHY_NEG_SPP_PHYS_LINK_RATE_MASK =
-			(0xF << PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET),
-	PHY_READY_MASK		= (1U << 20),
-
-	/* MVS_Px_INT_STAT, MVS_Px_INT_MASK (per-phy events) */
-	PHYEV_DEC_ERR		= (1U << 24),	/* Phy Decoding Error */
-	PHYEV_UNASSOC_FIS	= (1U << 19),	/* unassociated FIS rx'd */
-	PHYEV_AN		= (1U << 18),	/* SATA async notification */
-	PHYEV_BIST_ACT		= (1U << 17),	/* BIST activate FIS */
-	PHYEV_SIG_FIS		= (1U << 16),	/* signature FIS */
-	PHYEV_POOF		= (1U << 12),	/* phy ready from 1 -> 0 */
-	PHYEV_IU_BIG		= (1U << 11),	/* IU too long err */
-	PHYEV_IU_SMALL		= (1U << 10),	/* IU too short err */
-	PHYEV_UNK_TAG		= (1U << 9),	/* unknown tag */
-	PHYEV_BROAD_CH		= (1U << 8),	/* broadcast(CHANGE) */
-	PHYEV_COMWAKE		= (1U << 7),	/* COMWAKE rx'd */
-	PHYEV_PORT_SEL		= (1U << 6),	/* port selector present */
-	PHYEV_HARD_RST		= (1U << 5),	/* hard reset rx'd */
-	PHYEV_ID_TMOUT		= (1U << 4),	/* identify timeout */
-	PHYEV_ID_FAIL		= (1U << 3),	/* identify failed */
-	PHYEV_ID_DONE		= (1U << 2),	/* identify done */
-	PHYEV_HARD_RST_DONE	= (1U << 1),	/* hard reset done */
-	PHYEV_RDY_CH		= (1U << 0),	/* phy ready changed state */
-
-	/* MVS_PCS */
-	PCS_EN_SATA_REG_SHIFT	= (16),		/* Enable SATA Register Set */
-	PCS_EN_PORT_XMT_SHIFT	= (12),		/* Enable Port Transmit */
-	PCS_EN_PORT_XMT_SHIFT2	= (8),		/* For 6480 */
-	PCS_SATA_RETRY		= (1U << 8),	/* retry ctl FIS on R_ERR */
-	PCS_RSP_RX_EN		= (1U << 7),	/* raw response rx */
-	PCS_SELF_CLEAR		= (1U << 5),	/* self-clearing int mode */
-	PCS_FIS_RX_EN		= (1U << 4),	/* FIS rx enable */
-	PCS_CMD_STOP_ERR	= (1U << 3),	/* cmd stop-on-err enable */
-	PCS_CMD_RST		= (1U << 1),	/* reset cmd issue */
-	PCS_CMD_EN		= (1U << 0),	/* enable cmd issue */
-
-	/* Port n Attached Device Info */
-	PORT_DEV_SSP_TRGT	= (1U << 19),
-	PORT_DEV_SMP_TRGT	= (1U << 18),
-	PORT_DEV_STP_TRGT	= (1U << 17),
-	PORT_DEV_SSP_INIT	= (1U << 11),
-	PORT_DEV_SMP_INIT	= (1U << 10),
-	PORT_DEV_STP_INIT	= (1U << 9),
-	PORT_PHY_ID_MASK	= (0xFFU << 24),
-	PORT_DEV_TRGT_MASK	= (0x7U << 17),
-	PORT_DEV_INIT_MASK	= (0x7U << 9),
-	PORT_DEV_TYPE_MASK	= (0x7U << 0),
-
-	/* Port n PHY Status */
-	PHY_RDY			= (1U << 2),
-	PHY_DW_SYNC		= (1U << 1),
-	PHY_OOB_DTCTD		= (1U << 0),
-
-	/* VSR */
-	/* PHYMODE 6 (CDB) */
-	PHY_MODE6_LATECLK	= (1U << 29),	/* Lock Clock */
-	PHY_MODE6_DTL_SPEED	= (1U << 27),	/* Digital Loop Speed */
-	PHY_MODE6_FC_ORDER	= (1U << 26),	/* Fibre Channel Mode Order */
-	PHY_MODE6_MUCNT_EN	= (1U << 24),	/* u Count Enable */
-	PHY_MODE6_SEL_MUCNT_LEN	= (1U << 22),	/* Training Length Select */
-	PHY_MODE6_SELMUPI	= (1U << 20),	/* Phase Multi Select (init) */
-	PHY_MODE6_SELMUPF	= (1U << 18),	/* Phase Multi Select (final) */
-	PHY_MODE6_SELMUFF	= (1U << 16),	/* Freq Loop Multi Sel(final) */
-	PHY_MODE6_SELMUFI	= (1U << 14),	/* Freq Loop Multi Sel(init) */
-	PHY_MODE6_FREEZE_LOOP	= (1U << 12),	/* Freeze Rx CDR Loop */
-	PHY_MODE6_INT_RXFOFFS	= (1U << 3),	/* Rx CDR Freq Loop Enable */
-	PHY_MODE6_FRC_RXFOFFS	= (1U << 2),	/* Initial Rx CDR Offset */
-	PHY_MODE6_STAU_0D8	= (1U << 1),	/* Rx CDR Freq Loop Saturate */
-	PHY_MODE6_RXSAT_DIS	= (1U << 0),	/* Saturate Ctl */
-};
-
-enum mvs_info_flags {
-	MVF_MSI			= (1U << 0),	/* MSI is enabled */
-	MVF_PHY_PWR_FIX		= (1U << 1),	/* bug workaround */
-};
-
-enum sas_cmd_port_registers {
-	CMD_CMRST_OOB_DET	= 0x100, /* COMRESET OOB detect register */
-	CMD_CMWK_OOB_DET	= 0x104, /* COMWAKE OOB detect register */
-	CMD_CMSAS_OOB_DET	= 0x108, /* COMSAS OOB detect register */
-	CMD_BRST_OOB_DET	= 0x10c, /* burst OOB detect register */
-	CMD_OOB_SPACE		= 0x110, /* OOB space control register */
-	CMD_OOB_BURST		= 0x114, /* OOB burst control register */
-	CMD_PHY_TIMER		= 0x118, /* PHY timer control register */
-	CMD_PHY_CONFIG0		= 0x11c, /* PHY config register 0 */
-	CMD_PHY_CONFIG1		= 0x120, /* PHY config register 1 */
-	CMD_SAS_CTL0		= 0x124, /* SAS control register 0 */
-	CMD_SAS_CTL1		= 0x128, /* SAS control register 1 */
-	CMD_SAS_CTL2		= 0x12c, /* SAS control register 2 */
-	CMD_SAS_CTL3		= 0x130, /* SAS control register 3 */
-	CMD_ID_TEST		= 0x134, /* ID test register */
-	CMD_PL_TIMER		= 0x138, /* PL timer register */
-	CMD_WD_TIMER		= 0x13c, /* WD timer register */
-	CMD_PORT_SEL_COUNT	= 0x140, /* port selector count register */
-	CMD_APP_MEM_CTL		= 0x144, /* Application Memory Control */
-	CMD_XOR_MEM_CTL		= 0x148, /* XOR Block Memory Control */
-	CMD_DMA_MEM_CTL		= 0x14c, /* DMA Block Memory Control */
-	CMD_PORT_MEM_CTL0	= 0x150, /* Port Memory Control 0 */
-	CMD_PORT_MEM_CTL1	= 0x154, /* Port Memory Control 1 */
-	CMD_SATA_PORT_MEM_CTL0	= 0x158, /* SATA Port Memory Control 0 */
-	CMD_SATA_PORT_MEM_CTL1	= 0x15c, /* SATA Port Memory Control 1 */
-	CMD_XOR_MEM_BIST_CTL	= 0x160, /* XOR Memory BIST Control */
-	CMD_XOR_MEM_BIST_STAT	= 0x164, /* XOR Memory BIST Status */
-	CMD_DMA_MEM_BIST_CTL	= 0x168, /* DMA Memory BIST Control */
-	CMD_DMA_MEM_BIST_STAT	= 0x16c, /* DMA Memory BIST Status */
-	CMD_PORT_MEM_BIST_CTL	= 0x170, /* Port Memory BIST Control */
-	CMD_PORT_MEM_BIST_STAT0 = 0x174, /* Port Memory BIST Status 0 */
-	CMD_PORT_MEM_BIST_STAT1 = 0x178, /* Port Memory BIST Status 1 */
-	CMD_STP_MEM_BIST_CTL	= 0x17c, /* STP Memory BIST Control */
-	CMD_STP_MEM_BIST_STAT0	= 0x180, /* STP Memory BIST Status 0 */
-	CMD_STP_MEM_BIST_STAT1	= 0x184, /* STP Memory BIST Status 1 */
-	CMD_RESET_COUNT		= 0x188, /* Reset Count */
-	CMD_MONTR_DATA_SEL	= 0x18C, /* Monitor Data/Select */
-	CMD_PLL_PHY_CONFIG	= 0x190, /* PLL/PHY Configuration */
-	CMD_PHY_CTL		= 0x194, /* PHY Control and Status */
-	CMD_PHY_TEST_COUNT0	= 0x198, /* Phy Test Count 0 */
-	CMD_PHY_TEST_COUNT1	= 0x19C, /* Phy Test Count 1 */
-	CMD_PHY_TEST_COUNT2	= 0x1A0, /* Phy Test Count 2 */
-	CMD_APP_ERR_CONFIG	= 0x1A4, /* Application Error Configuration */
-	CMD_PND_FIFO_CTL0	= 0x1A8, /* Pending FIFO Control 0 */
-	CMD_HOST_CTL		= 0x1AC, /* Host Control Status */
-	CMD_HOST_WR_DATA	= 0x1B0, /* Host Write Data */
-	CMD_HOST_RD_DATA	= 0x1B4, /* Host Read Data */
-	CMD_PHY_MODE_21		= 0x1B8, /* Phy Mode 21 */
-	CMD_SL_MODE0		= 0x1BC, /* SL Mode 0 */
-	CMD_SL_MODE1		= 0x1C0, /* SL Mode 1 */
-	CMD_PND_FIFO_CTL1	= 0x1C4, /* Pending FIFO Control 1 */
-};
-
-/* SAS/SATA configuration port registers, aka phy registers */
-enum sas_sata_config_port_regs {
-	PHYR_IDENTIFY		= 0x00,	/* info for IDENTIFY frame */
-	PHYR_ADDR_LO		= 0x04,	/* my SAS address (low) */
-	PHYR_ADDR_HI		= 0x08,	/* my SAS address (high) */
-	PHYR_ATT_DEV_INFO	= 0x0C,	/* attached device info */
-	PHYR_ATT_ADDR_LO	= 0x10,	/* attached dev SAS addr (low) */
-	PHYR_ATT_ADDR_HI	= 0x14,	/* attached dev SAS addr (high) */
-	PHYR_SATA_CTL		= 0x18,	/* SATA control */
-	PHYR_PHY_STAT		= 0x1C,	/* PHY status */
-	PHYR_SATA_SIG0		= 0x20,	/*port SATA signature FIS(Byte 0-3) */
-	PHYR_SATA_SIG1		= 0x24,	/*port SATA signature FIS(Byte 4-7) */
-	PHYR_SATA_SIG2		= 0x28,	/*port SATA signature FIS(Byte 8-11) */
-	PHYR_SATA_SIG3		= 0x2c,	/*port SATA signature FIS(Byte 12-15) */
-	PHYR_R_ERR_COUNT	= 0x30, /* port R_ERR count register */
-	PHYR_CRC_ERR_COUNT	= 0x34, /* port CRC error count register */
-	PHYR_WIDE_PORT		= 0x38,	/* wide port participating */
-	PHYR_CURRENT0		= 0x80,	/* current connection info 0 */
-	PHYR_CURRENT1		= 0x84,	/* current connection info 1 */
-	PHYR_CURRENT2		= 0x88,	/* current connection info 2 */
-};
-
-/*  SAS/SATA Vendor Specific Port Registers */
-enum sas_sata_vsp_regs {
-	VSR_PHY_STAT		= 0x00, /* Phy Status */
-	VSR_PHY_MODE1		= 0x01, /* phy tx */
-	VSR_PHY_MODE2		= 0x02, /* tx scc */
-	VSR_PHY_MODE3		= 0x03, /* pll */
-	VSR_PHY_MODE4		= 0x04, /* VCO */
-	VSR_PHY_MODE5		= 0x05, /* Rx */
-	VSR_PHY_MODE6		= 0x06, /* CDR */
-	VSR_PHY_MODE7		= 0x07, /* Impedance */
-	VSR_PHY_MODE8		= 0x08, /* Voltage */
-	VSR_PHY_MODE9		= 0x09, /* Test */
-	VSR_PHY_MODE10		= 0x0A, /* Power */
-	VSR_PHY_MODE11		= 0x0B, /* Phy Mode */
-	VSR_PHY_VS0		= 0x0C, /* Vendor Specific 0 */
-	VSR_PHY_VS1		= 0x0D, /* Vendor Specific 1 */
-};
-
-enum pci_cfg_registers {
-	PCR_PHY_CTL	= 0x40,
-	PCR_PHY_CTL2	= 0x90,
-	PCR_DEV_CTRL	= 0xE8,
-};
-
-enum pci_cfg_register_bits {
-	PCTL_PWR_ON	= (0xFU << 24),
-	PCTL_OFF	= (0xFU << 12),
-	PRD_REQ_SIZE	= (0x4000),
-	PRD_REQ_MASK	= (0x00007000),
-};
-
-enum nvram_layout_offsets {
-	NVR_SIG		= 0x00,		/* 0xAA, 0x55 */
-	NVR_SAS_ADDR	= 0x02,		/* 8-byte SAS address */
-};
-
-enum chip_flavors {
-	chip_6320,
-	chip_6440,
-	chip_6480,
-};
-
-enum port_type {
-	PORT_TYPE_SAS	=  (1L << 1),
-	PORT_TYPE_SATA	=  (1L << 0),
-};
-
-/* Command Table Format */
-enum ct_format {
-	/* SSP */
-	SSP_F_H		=  0x00,
-	SSP_F_IU	=  0x18,
-	SSP_F_MAX	=  0x4D,
-	/* STP */
-	STP_CMD_FIS	=  0x00,
-	STP_ATAPI_CMD	=  0x40,
-	STP_F_MAX	=  0x10,
-	/* SMP */
-	SMP_F_T		=  0x00,
-	SMP_F_DEP	=  0x01,
-	SMP_F_MAX	=  0x101,
-};
-
-enum status_buffer {
-	SB_EIR_OFF	=  0x00,	/* Error Information Record */
-	SB_RFB_OFF	=  0x08,	/* Response Frame Buffer */
-	SB_RFB_MAX	=  0x400,	/* RFB size*/
-};
-
-enum error_info_rec {
-	CMD_ISS_STPD	= (1U << 31),	/* Cmd Issue Stopped */
-	CMD_PI_ERR	= (1U << 30),	/* Protection info error.  see flags2 */
-	RSP_OVER	= (1U << 29),	/* rsp buffer overflow */
-	RETRY_LIM	= (1U << 28),	/* FIS/frame retry limit exceeded */
-	UNK_FIS 	= (1U << 27),	/* unknown FIS */
-	DMA_TERM	= (1U << 26),	/* DMA terminate primitive rx'd */
-	SYNC_ERR	= (1U << 25),	/* SYNC rx'd during frame xmit */
-	TFILE_ERR	= (1U << 24),	/* SATA taskfile Error bit set */
-	R_ERR		= (1U << 23),	/* SATA returned R_ERR prim */
-	RD_OFS		= (1U << 20),	/* Read DATA frame invalid offset */
-	XFER_RDY_OFS	= (1U << 19),	/* XFER_RDY offset error */
-	UNEXP_XFER_RDY	= (1U << 18),	/* unexpected XFER_RDY error */
-	DATA_OVER_UNDER = (1U << 16),	/* data overflow/underflow */
-	INTERLOCK	= (1U << 15),	/* interlock error */
-	NAK		= (1U << 14),	/* NAK rx'd */
-	ACK_NAK_TO	= (1U << 13),	/* ACK/NAK timeout */
-	CXN_CLOSED	= (1U << 12),	/* cxn closed w/out ack/nak */
-	OPEN_TO 	= (1U << 11),	/* I_T nexus lost, open cxn timeout */
-	PATH_BLOCKED	= (1U << 10),	/* I_T nexus lost, pathway blocked */
-	NO_DEST 	= (1U << 9),	/* I_T nexus lost, no destination */
-	STP_RES_BSY	= (1U << 8),	/* STP resources busy */
-	BREAK		= (1U << 7),	/* break received */
-	BAD_DEST	= (1U << 6),	/* bad destination */
-	BAD_PROTO	= (1U << 5),	/* protocol not supported */
-	BAD_RATE	= (1U << 4),	/* cxn rate not supported */
-	WRONG_DEST	= (1U << 3),	/* wrong destination error */
-	CREDIT_TO	= (1U << 2),	/* credit timeout */
-	WDOG_TO 	= (1U << 1),	/* watchdog timeout */
-	BUF_PAR 	= (1U << 0),	/* buffer parity error */
-};
-
-enum error_info_rec_2 {
-	SLOT_BSY_ERR	= (1U << 31),	/* Slot Busy Error */
-	GRD_CHK_ERR	= (1U << 14),	/* Guard Check Error */
-	APP_CHK_ERR	= (1U << 13),	/* Application Check error */
-	REF_CHK_ERR	= (1U << 12),	/* Reference Check Error */
-	USR_BLK_NM	= (1U << 0),	/* User Block Number */
-};
-
-struct mvs_chip_info {
-	u32		n_phy;
-	u32		srs_sz;
-	u32		slot_width;
-};
-
-struct mvs_err_info {
-	__le32			flags;
-	__le32			flags2;
-};
-
-struct mvs_prd {
-	__le64			addr;		/* 64-bit buffer address */
-	__le32			reserved;
-	__le32			len;		/* 16-bit length */
-};
-
-struct mvs_cmd_hdr {
-	__le32			flags;		/* PRD tbl len; SAS, SATA ctl */
-	__le32			lens;		/* cmd, max resp frame len */
-	__le32			tags;		/* targ port xfer tag; tag */
-	__le32			data_len;	/* data xfer len */
-	__le64			cmd_tbl;	/* command table address */
-	__le64			open_frame;	/* open addr frame address */
-	__le64			status_buf;	/* status buffer address */
-	__le64			prd_tbl;	/* PRD tbl address */
-	__le32			reserved[4];
-};
-
-struct mvs_port {
-	struct asd_sas_port	sas_port;
-	u8			port_attached;
-	u8			taskfileset;
-	u8			wide_port_phymap;
-	struct list_head	list;
-};
-
-struct mvs_phy {
-	struct mvs_port		*port;
-	struct asd_sas_phy	sas_phy;
-	struct sas_identify	identify;
-	struct scsi_device	*sdev;
-	u64		dev_sas_addr;
-	u64		att_dev_sas_addr;
-	u32		att_dev_info;
-	u32		dev_info;
-	u32		phy_type;
-	u32		phy_status;
-	u32		irq_status;
-	u32		frame_rcvd_size;
-	u8		frame_rcvd[32];
-	u8		phy_attached;
-	enum sas_linkrate	minimum_linkrate;
-	enum sas_linkrate	maximum_linkrate;
-};
-
-struct mvs_slot_info {
-	struct list_head	list;
-	struct sas_task		*task;
-	u32			n_elem;
-	u32			tx;
-
-	/* DMA buffer for storing cmd tbl, open addr frame, status buffer,
-	 * and PRD table
-	 */
-	void			*buf;
-	dma_addr_t		buf_dma;
-#if _MV_DUMP
-	u32			cmd_size;
-#endif
-
-	void			*response;
-	struct mvs_port		*port;
-};
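/* Illustrative layout of the per-slot DMA buffer described above, as
 * arranged by the mvs_task_prep_*() routines later in this file
 * (region sizes come from the enums above; the command-table size
 * varies by protocol):
 *
 *	buf + 0:                       command table
 *	buf + cmd size:                open address frame (MVS_OAF_SZ)
 *	buf + cmd size + MVS_OAF_SZ:   PRD table (n_elem * sizeof(struct mvs_prd))
 *	remainder:                     status buffer (error info + response)
 */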
-
-struct mvs_info {
-	unsigned long		flags;
-
-	spinlock_t		lock;		/* host-wide lock */
-	struct pci_dev		*pdev;		/* our device */
-	void __iomem		*regs;		/* enhanced mode registers */
-	void __iomem		*peri_regs;	/* peripheral registers */
-
-	u8			sas_addr[SAS_ADDR_SIZE];
-	struct sas_ha_struct	sas;		/* SCSI/SAS glue */
-	struct Scsi_Host	*shost;
-
-	__le32			*tx;		/* TX (delivery) DMA ring */
-	dma_addr_t		tx_dma;
-	u32			tx_prod;	/* cached next-producer idx */
-
-	__le32			*rx;		/* RX (completion) DMA ring */
-	dma_addr_t		rx_dma;
-	u32			rx_cons;	/* RX consumer idx */
-
-	__le32			*rx_fis;	/* RX'd FIS area */
-	dma_addr_t		rx_fis_dma;
-
-	struct mvs_cmd_hdr	*slot;	/* DMA command header slots */
-	dma_addr_t		slot_dma;
-
-	const struct mvs_chip_info *chip;
-
-	u8			tags[MVS_SLOTS];
-	struct mvs_slot_info	slot_info[MVS_SLOTS];
-				/* further per-slot information */
-	struct mvs_phy		phy[MVS_MAX_PHYS];
-	struct mvs_port		port[MVS_MAX_PHYS];
-#ifdef MVS_USE_TASKLET
-	struct tasklet_struct	tasklet;
-#endif
-};
-
-static int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
-			   void *funcdata);
-static u32 mvs_read_phy_ctl(struct mvs_info *mvi, u32 port);
-static void mvs_write_phy_ctl(struct mvs_info *mvi, u32 port, u32 val);
-static u32 mvs_read_port_irq_stat(struct mvs_info *mvi, u32 port);
-static void mvs_write_port_irq_stat(struct mvs_info *mvi, u32 port, u32 val);
-static void mvs_write_port_irq_mask(struct mvs_info *mvi, u32 port, u32 val);
-static u32 mvs_read_port_irq_mask(struct mvs_info *mvi, u32 port);
-
-static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i);
-static void mvs_detect_porttype(struct mvs_info *mvi, int i);
-static void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st);
-static void mvs_release_task(struct mvs_info *mvi, int phy_no);
-
-static int mvs_scan_finished(struct Scsi_Host *, unsigned long);
-static void mvs_scan_start(struct Scsi_Host *);
-static int mvs_slave_configure(struct scsi_device *sdev);
-
-static struct scsi_transport_template *mvs_stt;
-
-static const struct mvs_chip_info mvs_chips[] = {
-	[chip_6320] =		{ 2, 16, 9  },
-	[chip_6440] =		{ 4, 16, 9  },
-	[chip_6480] =		{ 8, 32, 10 },
-};
-
-static struct scsi_host_template mvs_sht = {
-	.module			= THIS_MODULE,
-	.name			= DRV_NAME,
-	.queuecommand		= sas_queuecommand,
-	.target_alloc		= sas_target_alloc,
-	.slave_configure	= mvs_slave_configure,
-	.slave_destroy		= sas_slave_destroy,
-	.scan_finished		= mvs_scan_finished,
-	.scan_start		= mvs_scan_start,
-	.change_queue_depth	= sas_change_queue_depth,
-	.change_queue_type	= sas_change_queue_type,
-	.bios_param		= sas_bios_param,
-	.can_queue		= 1,
-	.cmd_per_lun		= 1,
-	.this_id		= -1,
-	.sg_tablesize		= SG_ALL,
-	.max_sectors		= SCSI_DEFAULT_MAX_SECTORS,
-	.use_clustering		= ENABLE_CLUSTERING,
-	.eh_device_reset_handler	= sas_eh_device_reset_handler,
-	.eh_bus_reset_handler	= sas_eh_bus_reset_handler,
-	.slave_alloc		= sas_slave_alloc,
-	.target_destroy		= sas_target_destroy,
-	.ioctl			= sas_ioctl,
-};
-
-static void mvs_hexdump(u32 size, u8 *data, u32 baseaddr)
-{
-	u32 i;
-	u32 run;
-	u32 offset;
-
-	offset = 0;
-	while (size) {
-		printk("%08X : ", baseaddr + offset);
-		if (size >= 16)
-			run = 16;
-		else
-			run = size;
-		size -= run;
-		for (i = 0; i < 16; i++) {
-			if (i < run)
-				printk("%02X ", (u32)data[i]);
-			else
-				printk("   ");
-		}
-		printk(": ");
-		for (i = 0; i < run; i++)
-			printk("%c", isalnum(data[i]) ? data[i] : '.');
-		printk("\n");
-		data = &data[16];
-		offset += run;
-	}
-	printk("\n");
-}
-
-#if _MV_DUMP
-static void mvs_hba_sb_dump(struct mvs_info *mvi, u32 tag,
-				   enum sas_protocol proto)
-{
-	u32 offset;
-	struct pci_dev *pdev = mvi->pdev;
-	struct mvs_slot_info *slot = &mvi->slot_info[tag];
-
-	offset = slot->cmd_size + MVS_OAF_SZ +
-	    sizeof(struct mvs_prd) * slot->n_elem;
-	dev_printk(KERN_DEBUG, &pdev->dev, "+---->Status buffer[%d] :\n",
-			tag);
-	mvs_hexdump(32, (u8 *) slot->response,
-		    (u32) slot->buf_dma + offset);
-}
-#endif
-
-static void mvs_hba_memory_dump(struct mvs_info *mvi, u32 tag,
-				enum sas_protocol proto)
-{
-#if _MV_DUMP
-	u32 sz, w_ptr;
-	u64 addr;
-	void __iomem *regs = mvi->regs;
-	struct pci_dev *pdev = mvi->pdev;
-	struct mvs_slot_info *slot = &mvi->slot_info[tag];
-
-	/*Delivery Queue */
-	sz = mr32(TX_CFG) & TX_RING_SZ_MASK;
-	w_ptr = slot->tx;
-	addr = mr32(TX_HI) << 16 << 16 | mr32(TX_LO);
-	dev_printk(KERN_DEBUG, &pdev->dev,
-		"Delivery Queue Size=%04d , WRT_PTR=%04X\n", sz, w_ptr);
-	dev_printk(KERN_DEBUG, &pdev->dev,
-		"Delivery Queue Base Address=0x%llX (PA)"
-		"(tx_dma=0x%llX), Entry=%04d\n",
-		addr, mvi->tx_dma, w_ptr);
-	mvs_hexdump(sizeof(u32), (u8 *)(&mvi->tx[mvi->tx_prod]),
-			(u32) mvi->tx_dma + sizeof(u32) * w_ptr);
-	/*Command List */
-	addr = mvi->slot_dma;
-	dev_printk(KERN_DEBUG, &pdev->dev,
-		"Command List Base Address=0x%llX (PA)"
-		"(slot_dma=0x%llX), Header=%03d\n",
-		addr, slot->buf_dma, tag);
-	dev_printk(KERN_DEBUG, &pdev->dev, "Command Header[%03d]:\n", tag);
-	/*mvs_cmd_hdr */
-	mvs_hexdump(sizeof(struct mvs_cmd_hdr), (u8 *)(&mvi->slot[tag]),
-		(u32) mvi->slot_dma + tag * sizeof(struct mvs_cmd_hdr));
-	/*1.command table area */
-	dev_printk(KERN_DEBUG, &pdev->dev, "+---->Command Table :\n");
-	mvs_hexdump(slot->cmd_size, (u8 *) slot->buf, (u32) slot->buf_dma);
-	/*2.open address frame area */
-	dev_printk(KERN_DEBUG, &pdev->dev, "+---->Open Address Frame :\n");
-	mvs_hexdump(MVS_OAF_SZ, (u8 *) slot->buf + slot->cmd_size,
-				(u32) slot->buf_dma + slot->cmd_size);
-	/*3.status buffer */
-	mvs_hba_sb_dump(mvi, tag, proto);
-	/*4.PRD table */
-	dev_printk(KERN_DEBUG, &pdev->dev, "+---->PRD table :\n");
-	mvs_hexdump(sizeof(struct mvs_prd) * slot->n_elem,
-		(u8 *) slot->buf + slot->cmd_size + MVS_OAF_SZ,
-		(u32) slot->buf_dma + slot->cmd_size + MVS_OAF_SZ);
-#endif
-}
-
-static void mvs_hba_cq_dump(struct mvs_info *mvi)
-{
-#if (_MV_DUMP > 2)
-	u64 addr;
-	void __iomem *regs = mvi->regs;
-	struct pci_dev *pdev = mvi->pdev;
-	u32 entry = mvi->rx_cons + 1;
-	u32 rx_desc = le32_to_cpu(mvi->rx[entry]);
-
-	/*Completion Queue */
-	addr = mr32(RX_HI) << 16 << 16 | mr32(RX_LO);
-	dev_printk(KERN_DEBUG, &pdev->dev, "Completion Task = 0x%p\n",
-		   mvi->slot_info[rx_desc & RXQ_SLOT_MASK].task);
-	dev_printk(KERN_DEBUG, &pdev->dev,
-		"Completion List Base Address=0x%llX (PA), "
-		"CQ_Entry=%04d, CQ_WP=0x%08X\n",
-		addr, entry - 1, mvi->rx[0]);
-	mvs_hexdump(sizeof(u32), (u8 *)(&rx_desc),
-		    mvi->rx_dma + sizeof(u32) * entry);
-#endif
-}
-
-static void mvs_hba_interrupt_enable(struct mvs_info *mvi)
-{
-	void __iomem *regs = mvi->regs;
-	u32 tmp;
-
-	tmp = mr32(GBL_CTL);
-
-	mw32(GBL_CTL, tmp | INT_EN);
-}
-
-static void mvs_hba_interrupt_disable(struct mvs_info *mvi)
-{
-	void __iomem *regs = mvi->regs;
-	u32 tmp;
-
-	tmp = mr32(GBL_CTL);
-
-	mw32(GBL_CTL, tmp & ~INT_EN);
-}
-
-static int mvs_int_rx(struct mvs_info *mvi, bool self_clear);
-
-/* move to PCI layer or libata core? */
-static int pci_go_64(struct pci_dev *pdev)
-{
-	int rc;
-
-	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
-		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
-		if (rc) {
-			rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
-			if (rc) {
-				dev_printk(KERN_ERR, &pdev->dev,
-					   "64-bit DMA enable failed\n");
-				return rc;
-			}
-		}
-	} else {
-		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
-		if (rc) {
-			dev_printk(KERN_ERR, &pdev->dev,
-				   "32-bit DMA enable failed\n");
-			return rc;
-		}
-		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
-		if (rc) {
-			dev_printk(KERN_ERR, &pdev->dev,
-				   "32-bit consistent DMA enable failed\n");
-			return rc;
-		}
-	}
-
-	return rc;
-}
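/* Illustrative note: the try-64-bit-then-fall-back-to-32-bit pattern
 * above is what later kernels condense into a single
 * dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) call; it is
 * open-coded here because that helper did not yet exist.
 */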
-
-static int mvs_find_tag(struct mvs_info *mvi, struct sas_task *task, u32 *tag)
-{
-	if (task->lldd_task) {
-		struct mvs_slot_info *slot;
-		slot = (struct mvs_slot_info *) task->lldd_task;
-		*tag = slot - mvi->slot_info;
-		return 1;
-	}
-	return 0;
-}
-
-static void mvs_tag_clear(struct mvs_info *mvi, u32 tag)
-{
-	void *bitmap = (void *) &mvi->tags;
-	clear_bit(tag, bitmap);
-}
-
-static void mvs_tag_free(struct mvs_info *mvi, u32 tag)
-{
-	mvs_tag_clear(mvi, tag);
-}
-
-static void mvs_tag_set(struct mvs_info *mvi, unsigned int tag)
-{
-	void *bitmap = (void *) &mvi->tags;
-	set_bit(tag, bitmap);
-}
-
-static int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out)
-{
-	unsigned int index, tag;
-	void *bitmap = (void *) &mvi->tags;
-
-	index = find_first_zero_bit(bitmap, MVS_SLOTS);
-	tag = index;
-	if (tag >= MVS_SLOTS)
-		return -SAS_QUEUE_FULL;
-	mvs_tag_set(mvi, tag);
-	*tag_out = tag;
-	return 0;
-}
-
-static void mvs_tag_init(struct mvs_info *mvi)
-{
-	int i;
-	for (i = 0; i < MVS_SLOTS; ++i)
-		mvs_tag_clear(mvi, i);
-}
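/* Illustrative usage of the tag helpers above (a sketch with error
 * handling elided; not taken from the original driver):
 *
 *	u32 tag;
 *
 *	if (mvs_tag_alloc(mvi, &tag))		/* nonzero: queue full */
 *		return -SAS_QUEUE_FULL;
 *	... fill in mvi->slot_info[tag] ...
 *	mvs_tag_free(mvi, tag);			/* on completion or error */
 */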
-
-#ifndef MVS_DISABLE_NVRAM
-static int mvs_eep_read(void __iomem *regs, u32 addr, u32 *data)
-{
-	int timeout = 1000;
-
-	if (addr & ~SPI_ADDR_MASK)
-		return -EINVAL;
-
-	writel(addr, regs + SPI_CMD);
-	writel(TWSI_RD, regs + SPI_CTL);
-
-	while (timeout-- > 0) {
-		if (readl(regs + SPI_CTL) & TWSI_RDY) {
-			*data = readl(regs + SPI_DATA);
-			return 0;
-		}
-
-		udelay(10);
-	}
-
-	return -EBUSY;
-}
-
-static int mvs_eep_read_buf(void __iomem *regs, u32 addr,
-			    void *buf, u32 buflen)
-{
-	u32 addr_end, tmp_addr, i, j;
-	u32 tmp = 0;
-	int rc;
-	u8 *tmp8, *buf8 = buf;
-
-	addr_end = addr + buflen;
-	tmp_addr = ALIGN(addr, 4);
-	if (addr > 0xff)
-		return -EINVAL;
-
-	j = addr & 0x3;
-	if (j) {
-		rc = mvs_eep_read(regs, tmp_addr, &tmp);
-		if (rc)
-			return rc;
-
-		tmp8 = (u8 *)&tmp;
-		for (i = j; i < 4; i++)
-			*buf8++ = tmp8[i];
-
-		tmp_addr += 4;
-	}
-
-	for (j = ALIGN(addr_end, 4); tmp_addr < j; tmp_addr += 4) {
-		rc = mvs_eep_read(regs, tmp_addr, &tmp);
-		if (rc)
-			return rc;
-
-		memcpy(buf8, &tmp, 4);
-		buf8 += 4;
-	}
-
-	if (tmp_addr < addr_end) {
-		rc = mvs_eep_read(regs, tmp_addr, &tmp);
-		if (rc)
-			return rc;
-
-		tmp8 = (u8 *)&tmp;
-		j = addr_end - tmp_addr;
-		for (i = 0; i < j; i++)
-			*buf8++ = tmp8[i];
-
-		tmp_addr += 4;
-	}
-
-	return 0;
-}
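/* Illustrative summary: mvs_eep_read_buf() splits a byte-granular read
 * into an optional unaligned head, a run of whole 32-bit words, and an
 * optional partial tail, because mvs_eep_read() can only fetch aligned
 * 32-bit words from the EEPROM interface.
 */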
-#endif
-
-static int mvs_nvram_read(struct mvs_info *mvi, u32 addr,
-			  void *buf, u32 buflen)
-{
-#ifndef MVS_DISABLE_NVRAM
-	void __iomem *regs = mvi->regs;
-	int rc, i;
-	u32 sum;
-	u8 hdr[2], *tmp;
-	const char *msg;
-
-	rc = mvs_eep_read_buf(regs, addr, &hdr, 2);
-	if (rc) {
-		msg = "nvram hdr read failed";
-		goto err_out;
-	}
-	rc = mvs_eep_read_buf(regs, addr + 2, buf, buflen);
-	if (rc) {
-		msg = "nvram read failed";
-		goto err_out;
-	}
-
-	if (hdr[0] != 0x5A) {
-		/* entry id */
-		msg = "invalid nvram entry id";
-		rc = -ENOENT;
-		goto err_out;
-	}
-
-	tmp = buf;
-	sum = ((u32)hdr[0]) + ((u32)hdr[1]);
-	for (i = 0; i < buflen; i++)
-		sum += ((u32)tmp[i]);
-
-	if (sum) {
-		msg = "nvram checksum failure";
-		rc = -EILSEQ;
-		goto err_out;
-	}
-
-	return 0;
-
-err_out:
-	dev_printk(KERN_ERR, &mvi->pdev->dev, "%s", msg);
-	return rc;
-#else
-	/* FIXME: for SAS target mode */
-	memcpy(buf, "\x50\x05\x04\x30\x11\xab\x00\x00", 8);
-	return 0;
-#endif
-}
-
-static void mvs_bytes_dmaed(struct mvs_info *mvi, int i)
-{
-	struct mvs_phy *phy = &mvi->phy[i];
-	struct asd_sas_phy *sas_phy = mvi->sas.sas_phy[i];
-
-	if (!phy->phy_attached)
-		return;
-
-	if (sas_phy->phy) {
-		struct sas_phy *sphy = sas_phy->phy;
-
-		sphy->negotiated_linkrate = sas_phy->linkrate;
-		sphy->minimum_linkrate = phy->minimum_linkrate;
-		sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
-		sphy->maximum_linkrate = phy->maximum_linkrate;
-		sphy->maximum_linkrate_hw = SAS_LINK_RATE_3_0_GBPS;
-	}
-
-	if (phy->phy_type & PORT_TYPE_SAS) {
-		struct sas_identify_frame *id;
-
-		id = (struct sas_identify_frame *)phy->frame_rcvd;
-		id->dev_type = phy->identify.device_type;
-		id->initiator_bits = SAS_PROTOCOL_ALL;
-		id->target_bits = phy->identify.target_port_protocols;
-	} else if (phy->phy_type & PORT_TYPE_SATA) {
-		/* TODO */
-	}
-	mvi->sas.sas_phy[i]->frame_rcvd_size = phy->frame_rcvd_size;
-	mvi->sas.notify_port_event(mvi->sas.sas_phy[i],
-				   PORTE_BYTES_DMAED);
-}
-
-static int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time)
-{
-	/* give the phy enabling interrupt event time to come in (1s
-	 * is empirically about all it takes) */
-	if (time < HZ)
-		return 0;
-	/* Wait for discovery to finish */
-	scsi_flush_work(shost);
-	return 1;
-}
-
-static void mvs_scan_start(struct Scsi_Host *shost)
-{
-	int i;
-	struct mvs_info *mvi = SHOST_TO_SAS_HA(shost)->lldd_ha;
-
-	for (i = 0; i < mvi->chip->n_phy; ++i) {
-		mvs_bytes_dmaed(mvi, i);
-	}
-}
-
-static int mvs_slave_configure(struct scsi_device *sdev)
-{
-	struct domain_device *dev = sdev_to_domain_dev(sdev);
-	int ret = sas_slave_configure(sdev);
-
-	if (ret)
-		return ret;
-
-	if (dev_is_sata(dev)) {
-		/* struct ata_port *ap = dev->sata_dev.ap; */
-		/* struct ata_device *adev = ap->link.device; */
-
-		/* clamp at no NCQ for the time being */
-		/* adev->flags |= ATA_DFLAG_NCQ_OFF; */
-		scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, 1);
-	}
-	return 0;
-}
-
-static void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events)
-{
-	struct pci_dev *pdev = mvi->pdev;
-	struct sas_ha_struct *sas_ha = &mvi->sas;
-	struct mvs_phy *phy = &mvi->phy[phy_no];
-	struct asd_sas_phy *sas_phy = &phy->sas_phy;
-
-	phy->irq_status = mvs_read_port_irq_stat(mvi, phy_no);
-	/*
-	* 'events' now carries the port-level event bits; we still need to
-	* check the per-port interrupt status.
-	*/
-	dev_printk(KERN_DEBUG, &pdev->dev,
-		"Port %d Event = %X\n",
-		phy_no, phy->irq_status);
-
-	if (phy->irq_status & (PHYEV_POOF | PHYEV_DEC_ERR)) {
-		mvs_release_task(mvi, phy_no);
-		if (!mvs_is_phy_ready(mvi, phy_no)) {
-			sas_phy_disconnected(sas_phy);
-			sas_ha->notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL);
-			dev_printk(KERN_INFO, &pdev->dev,
-				"Port %d Unplug Notice\n", phy_no);
-
-		} else
-			mvs_phy_control(sas_phy, PHY_FUNC_LINK_RESET, NULL);
-	}
-	if (!(phy->irq_status & PHYEV_DEC_ERR)) {
-		if (phy->irq_status & PHYEV_COMWAKE) {
-			u32 tmp = mvs_read_port_irq_mask(mvi, phy_no);
-			mvs_write_port_irq_mask(mvi, phy_no,
-						tmp | PHYEV_SIG_FIS);
-		}
-		if (phy->irq_status & (PHYEV_SIG_FIS | PHYEV_ID_DONE)) {
-			phy->phy_status = mvs_is_phy_ready(mvi, phy_no);
-			if (phy->phy_status) {
-				mvs_detect_porttype(mvi, phy_no);
-
-				if (phy->phy_type & PORT_TYPE_SATA) {
-					u32 tmp = mvs_read_port_irq_mask(mvi,
-								phy_no);
-					tmp &= ~PHYEV_SIG_FIS;
-					mvs_write_port_irq_mask(mvi,
-								phy_no, tmp);
-				}
-
-				mvs_update_phyinfo(mvi, phy_no, 0);
-				sas_ha->notify_phy_event(sas_phy,
-							PHYE_OOB_DONE);
-				mvs_bytes_dmaed(mvi, phy_no);
-			} else {
-				dev_printk(KERN_DEBUG, &pdev->dev,
-					"plugin interrupt but phy is gone\n");
-				mvs_phy_control(sas_phy, PHY_FUNC_LINK_RESET,
-							NULL);
-			}
-		} else if (phy->irq_status & PHYEV_BROAD_CH) {
-			mvs_release_task(mvi, phy_no);
-			sas_ha->notify_port_event(sas_phy,
-						PORTE_BROADCAST_RCVD);
-		}
-	}
-	mvs_write_port_irq_stat(mvi, phy_no, phy->irq_status);
-}
-
-static void mvs_int_sata(struct mvs_info *mvi)
-{
-	u32 tmp;
-	void __iomem *regs = mvi->regs;
-	tmp = mr32(INT_STAT_SRS);
-	mw32(INT_STAT_SRS, tmp & 0xFFFF);
-}
-
-static void mvs_slot_reset(struct mvs_info *mvi, struct sas_task *task,
-				u32 slot_idx)
-{
-	void __iomem *regs = mvi->regs;
-	struct domain_device *dev = task->dev;
-	struct asd_sas_port *sas_port = dev->port;
-	struct mvs_port *port = mvi->slot_info[slot_idx].port;
-	u32 reg_set, phy_mask;
-
-	if (!sas_protocol_ata(task->task_proto)) {
-		reg_set = 0;
-		phy_mask = (port->wide_port_phymap) ? port->wide_port_phymap :
-				sas_port->phy_mask;
-	} else {
-		reg_set = port->taskfileset;
-		phy_mask = sas_port->phy_mask;
-	}
-	mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | slot_idx |
-					(TXQ_CMD_SLOT_RESET << TXQ_CMD_SHIFT) |
-					(phy_mask << TXQ_PHY_SHIFT) |
-					(reg_set << TXQ_SRS_SHIFT));
-
-	mw32(TX_PROD_IDX, mvi->tx_prod);
-	mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1);
-}
-
-static int mvs_sata_done(struct mvs_info *mvi, struct sas_task *task,
-			u32 slot_idx, int err)
-{
-	struct mvs_port *port = mvi->slot_info[slot_idx].port;
-	struct task_status_struct *tstat = &task->task_status;
-	struct ata_task_resp *resp = (struct ata_task_resp *)tstat->buf;
-	int stat = SAM_GOOD;
-
-	resp->frame_len = sizeof(struct dev_to_host_fis);
-	memcpy(&resp->ending_fis[0],
-	       SATA_RECEIVED_D2H_FIS(port->taskfileset),
-	       sizeof(struct dev_to_host_fis));
-	tstat->buf_valid_size = sizeof(*resp);
-	if (unlikely(err))
-		stat = SAS_PROTO_RESPONSE;
-	return stat;
-}
-
-static void mvs_slot_free(struct mvs_info *mvi, u32 rx_desc)
-{
-	u32 slot_idx = rx_desc & RXQ_SLOT_MASK;
-	mvs_tag_clear(mvi, slot_idx);
-}
-
-static void mvs_slot_task_free(struct mvs_info *mvi, struct sas_task *task,
-			  struct mvs_slot_info *slot, u32 slot_idx)
-{
-	if (!sas_protocol_ata(task->task_proto))
-		if (slot->n_elem)
-			pci_unmap_sg(mvi->pdev, task->scatter,
-				     slot->n_elem, task->data_dir);
-
-	switch (task->task_proto) {
-	case SAS_PROTOCOL_SMP:
-		pci_unmap_sg(mvi->pdev, &task->smp_task.smp_resp, 1,
-			     PCI_DMA_FROMDEVICE);
-		pci_unmap_sg(mvi->pdev, &task->smp_task.smp_req, 1,
-			     PCI_DMA_TODEVICE);
-		break;
-
-	case SAS_PROTOCOL_SATA:
-	case SAS_PROTOCOL_STP:
-	case SAS_PROTOCOL_SSP:
-	default:
-		/* do nothing */
-		break;
-	}
-	list_del(&slot->list);
-	task->lldd_task = NULL;
-	slot->task = NULL;
-	slot->port = NULL;
-}
-
-static int mvs_slot_err(struct mvs_info *mvi, struct sas_task *task,
-			 u32 slot_idx)
-{
-	struct mvs_slot_info *slot = &mvi->slot_info[slot_idx];
-	u32 err_dw0 = le32_to_cpu(*(u32 *) (slot->response));
-	u32 err_dw1 = le32_to_cpu(*(u32 *) (slot->response + 4));
-	int stat = SAM_CHECK_COND;
-
-	if (err_dw1 & SLOT_BSY_ERR) {
-		stat = SAS_QUEUE_FULL;
-		mvs_slot_reset(mvi, task, slot_idx);
-	}
-	switch (task->task_proto) {
-	case SAS_PROTOCOL_SSP:
-		break;
-	case SAS_PROTOCOL_SMP:
-		break;
-	case SAS_PROTOCOL_SATA:
-	case SAS_PROTOCOL_STP:
-	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
-		if (err_dw0 & TFILE_ERR)
-			stat = mvs_sata_done(mvi, task, slot_idx, 1);
-		break;
-	default:
-		break;
-	}
-
-	mvs_hexdump(16, (u8 *) slot->response, 0);
-	return stat;
-}
-
-static int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
-{
-	u32 slot_idx = rx_desc & RXQ_SLOT_MASK;
-	struct mvs_slot_info *slot = &mvi->slot_info[slot_idx];
-	struct sas_task *task = slot->task;
-	struct task_status_struct *tstat;
-	struct mvs_port *port;
-	bool aborted;
-	void *to;
-
-	if (unlikely(!task || !task->lldd_task))
-		return -1;
-
-	mvs_hba_cq_dump(mvi);
-
-	spin_lock(&task->task_state_lock);
-	aborted = task->task_state_flags & SAS_TASK_STATE_ABORTED;
-	if (!aborted) {
-		task->task_state_flags &=
-		    ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
-		task->task_state_flags |= SAS_TASK_STATE_DONE;
-	}
-	spin_unlock(&task->task_state_lock);
-
-	if (aborted) {
-		mvs_slot_task_free(mvi, task, slot, slot_idx);
-		mvs_slot_free(mvi, rx_desc);
-		return -1;
-	}
-
-	port = slot->port;
-	tstat = &task->task_status;
-	memset(tstat, 0, sizeof(*tstat));
-	tstat->resp = SAS_TASK_COMPLETE;
-
-	if (unlikely(!port->port_attached || flags)) {
-		mvs_slot_err(mvi, task, slot_idx);
-		if (!sas_protocol_ata(task->task_proto))
-			tstat->stat = SAS_PHY_DOWN;
-		goto out;
-	}
-
-	/* error info record present */
-	if (unlikely((rx_desc & RXQ_ERR) && (*(u64 *) slot->response))) {
-		tstat->stat = mvs_slot_err(mvi, task, slot_idx);
-		goto out;
-	}
-
-	switch (task->task_proto) {
-	case SAS_PROTOCOL_SSP:
-		/* hw says status == 0, datapres == 0 */
-		if (rx_desc & RXQ_GOOD) {
-			tstat->stat = SAM_GOOD;
-			tstat->resp = SAS_TASK_COMPLETE;
-		}
-		/* response frame present */
-		else if (rx_desc & RXQ_RSP) {
-			struct ssp_response_iu *iu =
-			    slot->response + sizeof(struct mvs_err_info);
-			sas_ssp_task_response(&mvi->pdev->dev, task, iu);
-		}
-
-		/* should never happen? */
-		else
-			tstat->stat = SAM_CHECK_COND;
-		break;
-
-	case SAS_PROTOCOL_SMP: {
-			struct scatterlist *sg_resp = &task->smp_task.smp_resp;
-			tstat->stat = SAM_GOOD;
-			to = kmap_atomic(sg_page(sg_resp), KM_IRQ0);
-			memcpy(to + sg_resp->offset,
-				slot->response + sizeof(struct mvs_err_info),
-				sg_dma_len(sg_resp));
-			kunmap_atomic(to, KM_IRQ0);
-			break;
-		}
-
-	case SAS_PROTOCOL_SATA:
-	case SAS_PROTOCOL_STP:
-	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: {
-			tstat->stat = mvs_sata_done(mvi, task, slot_idx, 0);
-			break;
-		}
-
-	default:
-		tstat->stat = SAM_CHECK_COND;
-		break;
-	}
-
-out:
-	mvs_slot_task_free(mvi, task, slot, slot_idx);
-	if (unlikely(tstat->stat != SAS_QUEUE_FULL))
-		mvs_slot_free(mvi, rx_desc);
-
-	spin_unlock(&mvi->lock);
-	task->task_done(task);
-	spin_lock(&mvi->lock);
-	return tstat->stat;
-}
-
-static void mvs_release_task(struct mvs_info *mvi, int phy_no)
-{
-	struct list_head *pos, *n;
-	struct mvs_slot_info *slot;
-	struct mvs_phy *phy = &mvi->phy[phy_no];
-	struct mvs_port *port = phy->port;
-	u32 rx_desc;
-
-	if (!port)
-		return;
-
-	list_for_each_safe(pos, n, &port->list) {
-		slot = container_of(pos, struct mvs_slot_info, list);
-		rx_desc = (u32) (slot - mvi->slot_info);
-		mvs_slot_complete(mvi, rx_desc, 1);
-	}
-}
-
-static void mvs_int_full(struct mvs_info *mvi)
-{
-	void __iomem *regs = mvi->regs;
-	u32 tmp, stat;
-	int i;
-
-	stat = mr32(INT_STAT);
-
-	mvs_int_rx(mvi, false);
-
-	for (i = 0; i < MVS_MAX_PORTS; i++) {
-		tmp = (stat >> i) & (CINT_PORT | CINT_PORT_STOPPED);
-		if (tmp)
-			mvs_int_port(mvi, i, tmp);
-	}
-
-	if (stat & CINT_SRS)
-		mvs_int_sata(mvi);
-
-	mw32(INT_STAT, stat);
-}
-
-static int mvs_int_rx(struct mvs_info *mvi, bool self_clear)
-{
-	void __iomem *regs = mvi->regs;
-	u32 rx_prod_idx, rx_desc;
-	bool attn = false;
-	struct pci_dev *pdev = mvi->pdev;
-
-	/* the first dword in the RX ring is special: it contains
-	 * a mirror of the hardware's RX producer index, so that
-	 * we don't have to stall the CPU reading that register.
-	 * The actual RX ring is offset by one dword, due to this.
-	 */
-	rx_prod_idx = mvi->rx_cons;
-	mvi->rx_cons = le32_to_cpu(mvi->rx[0]);
-	if (mvi->rx_cons == 0xfff)	/* h/w hasn't touched RX ring yet */
-		return 0;
-
-	/* The completion entry may arrive late; read the consumer index
-	* from the register and try again.
-	* Note: if coalescing is enabled, the register must be read every time.
-	*/
-	if (mvi->rx_cons == rx_prod_idx)
-		mvi->rx_cons = mr32(RX_CONS_IDX) & RX_RING_SZ_MASK;
-
-	if (mvi->rx_cons == rx_prod_idx)
-		return 0;
-
-	while (mvi->rx_cons != rx_prod_idx) {
-
-		/* increment our internal RX consumer pointer */
-		rx_prod_idx = (rx_prod_idx + 1) & (MVS_RX_RING_SZ - 1);
-
-		rx_desc = le32_to_cpu(mvi->rx[rx_prod_idx + 1]);
-
-		if (likely(rx_desc & RXQ_DONE))
-			mvs_slot_complete(mvi, rx_desc, 0);
-		if (rx_desc & RXQ_ATTN) {
-			attn = true;
-			dev_printk(KERN_DEBUG, &pdev->dev, "ATTN %X\n",
-				rx_desc);
-		} else if (rx_desc & RXQ_ERR) {
-			if (!(rx_desc & RXQ_DONE))
-				mvs_slot_complete(mvi, rx_desc, 0);
-			dev_printk(KERN_DEBUG, &pdev->dev, "RXQ_ERR %X\n",
-				rx_desc);
-		} else if (rx_desc & RXQ_SLOT_RESET) {
-			dev_printk(KERN_DEBUG, &pdev->dev, "Slot reset[%X]\n",
-				rx_desc);
-			mvs_slot_free(mvi, rx_desc);
-		}
-	}
-
-	if (attn && self_clear)
-		mvs_int_full(mvi);
-
-	return 0;
-}
-
-#ifdef MVS_USE_TASKLET
-static void mvs_tasklet(unsigned long data)
-{
-	struct mvs_info *mvi = (struct mvs_info *) data;
-	unsigned long flags;
-
-	spin_lock_irqsave(&mvi->lock, flags);
-
-#ifdef MVS_DISABLE_MSI
-	mvs_int_full(mvi);
-#else
-	mvs_int_rx(mvi, true);
-#endif
-	spin_unlock_irqrestore(&mvi->lock, flags);
-}
-#endif
-
-static irqreturn_t mvs_interrupt(int irq, void *opaque)
-{
-	struct mvs_info *mvi = opaque;
-	void __iomem *regs = mvi->regs;
-	u32 stat;
-
-	stat = mr32(GBL_INT_STAT);
-
-	if (stat == 0 || stat == 0xffffffff)
-		return IRQ_NONE;
-
-	/* clear CMD_CMPLT ASAP */
-	mw32_f(INT_STAT, CINT_DONE);
-
-#ifndef MVS_USE_TASKLET
-	spin_lock(&mvi->lock);
-
-	mvs_int_full(mvi);
-
-	spin_unlock(&mvi->lock);
-#else
-	tasklet_schedule(&mvi->tasklet);
-#endif
-	return IRQ_HANDLED;
-}
-
-#ifndef MVS_DISABLE_MSI
-static irqreturn_t mvs_msi_interrupt(int irq, void *opaque)
-{
-	struct mvs_info *mvi = opaque;
-
-#ifndef MVS_USE_TASKLET
-	spin_lock(&mvi->lock);
-
-	mvs_int_rx(mvi, true);
-
-	spin_unlock(&mvi->lock);
-#else
-	tasklet_schedule(&mvi->tasklet);
-#endif
-	return IRQ_HANDLED;
-}
-#endif
-
-struct mvs_task_exec_info {
-	struct sas_task *task;
-	struct mvs_cmd_hdr *hdr;
-	struct mvs_port *port;
-	u32 tag;
-	int n_elem;
-};
-
-static int mvs_task_prep_smp(struct mvs_info *mvi,
-			     struct mvs_task_exec_info *tei)
-{
-	int elem, rc, i;
-	struct sas_task *task = tei->task;
-	struct mvs_cmd_hdr *hdr = tei->hdr;
-	struct scatterlist *sg_req, *sg_resp;
-	u32 req_len, resp_len, tag = tei->tag;
-	void *buf_tmp;
-	u8 *buf_oaf;
-	dma_addr_t buf_tmp_dma;
-	struct mvs_prd *buf_prd;
-	struct scatterlist *sg;
-	struct mvs_slot_info *slot = &mvi->slot_info[tag];
-	struct asd_sas_port *sas_port = task->dev->port;
-	u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
-#if _MV_DUMP
-	u8 *buf_cmd;
-	void *from;
-#endif
-	/*
-	 * DMA-map SMP request, response buffers
-	 */
-	sg_req = &task->smp_task.smp_req;
-	elem = pci_map_sg(mvi->pdev, sg_req, 1, PCI_DMA_TODEVICE);
-	if (!elem)
-		return -ENOMEM;
-	req_len = sg_dma_len(sg_req);
-
-	sg_resp = &task->smp_task.smp_resp;
-	elem = pci_map_sg(mvi->pdev, sg_resp, 1, PCI_DMA_FROMDEVICE);
-	if (!elem) {
-		rc = -ENOMEM;
-		goto err_out;
-	}
-	resp_len = sg_dma_len(sg_resp);
-
-	/* must be in dwords */
-	if ((req_len & 0x3) || (resp_len & 0x3)) {
-		rc = -EINVAL;
-		goto err_out_2;
-	}
-
-	/*
-	 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
-	 */
-
-	/* region 1: command table area (MVS_SSP_CMD_SZ bytes) ************** */
-	buf_tmp = slot->buf;
-	buf_tmp_dma = slot->buf_dma;
-
-#if _MV_DUMP
-	buf_cmd = buf_tmp;
-	hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);
-	buf_tmp += req_len;
-	buf_tmp_dma += req_len;
-	slot->cmd_size = req_len;
-#else
-	hdr->cmd_tbl = cpu_to_le64(sg_dma_address(sg_req));
-#endif
-
-	/* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
-	buf_oaf = buf_tmp;
-	hdr->open_frame = cpu_to_le64(buf_tmp_dma);
-
-	buf_tmp += MVS_OAF_SZ;
-	buf_tmp_dma += MVS_OAF_SZ;
-
-	/* region 3: PRD table ********************************************* */
-	buf_prd = buf_tmp;
-	if (tei->n_elem)
-		hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
-	else
-		hdr->prd_tbl = 0;
-
-	i = sizeof(struct mvs_prd) * tei->n_elem;
-	buf_tmp += i;
-	buf_tmp_dma += i;
-
-	/* region 4: status buffer (the larger the PRD table, the smaller this buffer) */
-	slot->response = buf_tmp;
-	hdr->status_buf = cpu_to_le64(buf_tmp_dma);
-
-	/*
-	 * Fill in TX ring and command slot header
-	 */
-	slot->tx = mvi->tx_prod;
-	mvi->tx[mvi->tx_prod] = cpu_to_le32((TXQ_CMD_SMP << TXQ_CMD_SHIFT) |
-					TXQ_MODE_I | tag |
-					(sas_port->phy_mask << TXQ_PHY_SHIFT));
-
-	hdr->flags |= flags;
-	hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | ((req_len - 4) / 4));
-	hdr->tags = cpu_to_le32(tag);
-	hdr->data_len = 0;
-
-	/* generate open address frame hdr (first 12 bytes) */
-	buf_oaf[0] = (1 << 7) | (0 << 4) | 0x01; /* initiator, SMP, ftype 1h */
-	buf_oaf[1] = task->dev->linkrate & 0xf;
-	*(u16 *)(buf_oaf + 2) = 0xFFFF;		/* SAS SPEC */
-	memcpy(buf_oaf + 4, task->dev->sas_addr, SAS_ADDR_SIZE);
-
-	/* fill in PRD (scatter/gather) table, if any */
-	for_each_sg(task->scatter, sg, tei->n_elem, i) {
-		buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
-		buf_prd->len = cpu_to_le32(sg_dma_len(sg));
-		buf_prd++;
-	}
-
-#if _MV_DUMP
-	/* copy cmd table */
-	from = kmap_atomic(sg_page(sg_req), KM_IRQ0);
-	memcpy(buf_cmd, from + sg_req->offset, req_len);
-	kunmap_atomic(from, KM_IRQ0);
-#endif
-	return 0;
-
-err_out_2:
-	pci_unmap_sg(mvi->pdev, &tei->task->smp_task.smp_resp, 1,
-		     PCI_DMA_FROMDEVICE);
-err_out:
-	pci_unmap_sg(mvi->pdev, &tei->task->smp_task.smp_req, 1,
-		     PCI_DMA_TODEVICE);
-	return rc;
-}
-
-static void mvs_free_reg_set(struct mvs_info *mvi, struct mvs_port *port)
-{
-	void __iomem *regs = mvi->regs;
-	u32 tmp, offs;
-	u8 *tfs = &port->taskfileset;
-
-	if (*tfs == MVS_ID_NOT_MAPPED)
-		return;
-
-	offs = 1U << ((*tfs & 0x0f) + PCS_EN_SATA_REG_SHIFT);
-	if (*tfs < 16) {
-		tmp = mr32(PCS);
-		mw32(PCS, tmp & ~offs);
-	} else {
-		tmp = mr32(CTL);
-		mw32(CTL, tmp & ~offs);
-	}
-
-	tmp = mr32(INT_STAT_SRS) & (1U << *tfs);
-	if (tmp)
-		mw32(INT_STAT_SRS, tmp);
-
-	*tfs = MVS_ID_NOT_MAPPED;
-}
-
-static u8 mvs_assign_reg_set(struct mvs_info *mvi, struct mvs_port *port)
-{
-	int i;
-	u32 tmp, offs;
-	void __iomem *regs = mvi->regs;
-
-	if (port->taskfileset != MVS_ID_NOT_MAPPED)
-		return 0;
-
-	tmp = mr32(PCS);
-
-	for (i = 0; i < mvi->chip->srs_sz; i++) {
-		if (i == 16)
-			tmp = mr32(CTL);
-		offs = 1U << ((i & 0x0f) + PCS_EN_SATA_REG_SHIFT);
-		if (!(tmp & offs)) {
-			port->taskfileset = i;
-
-			if (i < 16)
-				mw32(PCS, tmp | offs);
-			else
-				mw32(CTL, tmp | offs);
-			tmp = mr32(INT_STAT_SRS) & (1U << i);
-			if (tmp)
-				mw32(INT_STAT_SRS, tmp);
-			return 0;
-		}
-	}
-	return MVS_ID_NOT_MAPPED;
-}
-
-static u32 mvs_get_ncq_tag(struct sas_task *task, u32 *tag)
-{
-	struct ata_queued_cmd *qc = task->uldd_task;
-
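-	/* only queued FPDMA (NCQ) commands carry a hardware queue tag */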
-	if (qc) {
-		if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
-			qc->tf.command == ATA_CMD_FPDMA_READ) {
-			*tag = qc->tag;
-			return 1;
-		}
-	}
-
-	return 0;
-}
-
-static int mvs_task_prep_ata(struct mvs_info *mvi,
-			     struct mvs_task_exec_info *tei)
-{
-	struct sas_task *task = tei->task;
-	struct domain_device *dev = task->dev;
-	struct mvs_cmd_hdr *hdr = tei->hdr;
-	struct asd_sas_port *sas_port = dev->port;
-	struct mvs_slot_info *slot;
-	struct scatterlist *sg;
-	struct mvs_prd *buf_prd;
-	struct mvs_port *port = tei->port;
-	u32 tag = tei->tag;
-	u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
-	void *buf_tmp;
-	u8 *buf_cmd, *buf_oaf;
-	dma_addr_t buf_tmp_dma;
-	u32 i, req_len, resp_len;
-	const u32 max_resp_len = SB_RFB_MAX;
-
-	if (mvs_assign_reg_set(mvi, port) == MVS_ID_NOT_MAPPED)
-		return -EBUSY;
-
-	slot = &mvi->slot_info[tag];
-	slot->tx = mvi->tx_prod;
-	mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | tag |
-					(TXQ_CMD_STP << TXQ_CMD_SHIFT) |
-					(sas_port->phy_mask << TXQ_PHY_SHIFT) |
-					(port->taskfileset << TXQ_SRS_SHIFT));
-
-	if (task->ata_task.use_ncq)
-		flags |= MCH_FPDMA;
-	if (dev->sata_dev.command_set == ATAPI_COMMAND_SET) {
-		if (task->ata_task.fis.command != ATA_CMD_ID_ATAPI)
-			flags |= MCH_ATAPI;
-	}
-
-	/* FIXME: fill in port multiplier number */
-
-	hdr->flags = cpu_to_le32(flags);
-
-	/* FIXME: the low-order 5 bits carry the TAG when NCQ is enabled */
-	if (task->ata_task.use_ncq && mvs_get_ncq_tag(task, &hdr->tags))
-		task->ata_task.fis.sector_count |= hdr->tags << 3;
-	else
-		hdr->tags = cpu_to_le32(tag);
-	hdr->data_len = cpu_to_le32(task->total_xfer_len);
-
-	/*
-	 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
-	 */
-
-	/* region 1: command table area (MVS_ATA_CMD_SZ bytes) ************** */
-	buf_cmd = buf_tmp = slot->buf;
-	buf_tmp_dma = slot->buf_dma;
-
-	hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);
-
-	buf_tmp += MVS_ATA_CMD_SZ;
-	buf_tmp_dma += MVS_ATA_CMD_SZ;
-#if _MV_DUMP
-	slot->cmd_size = MVS_ATA_CMD_SZ;
-#endif
-
-	/* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
-	/* used for STP.  unused for SATA? */
-	buf_oaf = buf_tmp;
-	hdr->open_frame = cpu_to_le64(buf_tmp_dma);
-
-	buf_tmp += MVS_OAF_SZ;
-	buf_tmp_dma += MVS_OAF_SZ;
-
-	/* region 3: PRD table ********************************************* */
-	buf_prd = buf_tmp;
-	if (tei->n_elem)
-		hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
-	else
-		hdr->prd_tbl = 0;
-
-	i = sizeof(struct mvs_prd) * tei->n_elem;
-	buf_tmp += i;
-	buf_tmp_dma += i;
-
-	/* region 4: status buffer (the larger the PRD, the smaller this buf) */
-	/* FIXME: probably unused, for SATA.  kept here just in case
-	 * we get a STP/SATA error information record
-	 */
-	slot->response = buf_tmp;
-	hdr->status_buf = cpu_to_le64(buf_tmp_dma);
-
-	req_len = sizeof(struct host_to_dev_fis);
-	resp_len = MVS_SLOT_BUF_SZ - MVS_ATA_CMD_SZ -
-	    sizeof(struct mvs_err_info) - i;
-
-	/* request, response lengths */
-	resp_len = min(resp_len, max_resp_len);
-	hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4));
-
-	task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */
-	/* fill in command FIS and ATAPI CDB */
-	memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis));
-	if (dev->sata_dev.command_set == ATAPI_COMMAND_SET)
-		memcpy(buf_cmd + STP_ATAPI_CMD,
-			task->ata_task.atapi_packet, 16);
-
-	/* generate open address frame hdr (first 12 bytes) */
-	buf_oaf[0] = (1 << 7) | (2 << 4) | 0x1;	/* initiator, STP, ftype 1h */
-	buf_oaf[1] = task->dev->linkrate & 0xf;
-	*(u16 *)(buf_oaf + 2) = cpu_to_be16(tag);
-	memcpy(buf_oaf + 4, task->dev->sas_addr, SAS_ADDR_SIZE);
-
-	/* fill in PRD (scatter/gather) table, if any */
-	for_each_sg(task->scatter, sg, tei->n_elem, i) {
-		buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
-		buf_prd->len = cpu_to_le32(sg_dma_len(sg));
-		buf_prd++;
-	}
-
-	return 0;
-}
-
-static int mvs_task_prep_ssp(struct mvs_info *mvi,
-			     struct mvs_task_exec_info *tei)
-{
-	struct sas_task *task = tei->task;
-	struct mvs_cmd_hdr *hdr = tei->hdr;
-	struct mvs_port *port = tei->port;
-	struct mvs_slot_info *slot;
-	struct scatterlist *sg;
-	struct mvs_prd *buf_prd;
-	struct ssp_frame_hdr *ssp_hdr;
-	void *buf_tmp;
-	u8 *buf_cmd, *buf_oaf, fburst = 0;
-	dma_addr_t buf_tmp_dma;
-	u32 flags;
-	u32 resp_len, req_len, i, tag = tei->tag;
-	const u32 max_resp_len = SB_RFB_MAX;
-	u8 phy_mask;
-
-	slot = &mvi->slot_info[tag];
-
-	phy_mask = (port->wide_port_phymap) ? port->wide_port_phymap :
-		task->dev->port->phy_mask;
-	slot->tx = mvi->tx_prod;
-	mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | tag |
-				(TXQ_CMD_SSP << TXQ_CMD_SHIFT) |
-				(phy_mask << TXQ_PHY_SHIFT));
-
-	flags = MCH_RETRY;
-	if (task->ssp_task.enable_first_burst) {
-		flags |= MCH_FBURST;
-		fburst = (1 << 7);
-	}
-	hdr->flags = cpu_to_le32(flags |
-				 (tei->n_elem << MCH_PRD_LEN_SHIFT) |
-				 (MCH_SSP_FR_CMD << MCH_SSP_FR_TYPE_SHIFT));
-
-	hdr->tags = cpu_to_le32(tag);
-	hdr->data_len = cpu_to_le32(task->total_xfer_len);
-
-	/*
-	 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
-	 */
-
-	/* region 1: command table area (MVS_SSP_CMD_SZ bytes) ************** */
-	buf_cmd = buf_tmp = slot->buf;
-	buf_tmp_dma = slot->buf_dma;
-
-	hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);
-
-	buf_tmp += MVS_SSP_CMD_SZ;
-	buf_tmp_dma += MVS_SSP_CMD_SZ;
-#if _MV_DUMP
-	slot->cmd_size = MVS_SSP_CMD_SZ;
-#endif
-
-	/* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
-	buf_oaf = buf_tmp;
-	hdr->open_frame = cpu_to_le64(buf_tmp_dma);
-
-	buf_tmp += MVS_OAF_SZ;
-	buf_tmp_dma += MVS_OAF_SZ;
-
-	/* region 3: PRD table ********************************************* */
-	buf_prd = buf_tmp;
-	if (tei->n_elem)
-		hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
-	else
-		hdr->prd_tbl = 0;
-
-	i = sizeof(struct mvs_prd) * tei->n_elem;
-	buf_tmp += i;
-	buf_tmp_dma += i;
-
-	/* region 4: status buffer (the larger the PRD, the smaller this buf) */
-	slot->response = buf_tmp;
-	hdr->status_buf = cpu_to_le64(buf_tmp_dma);
-
-	resp_len = MVS_SLOT_BUF_SZ - MVS_SSP_CMD_SZ - MVS_OAF_SZ -
-	    sizeof(struct mvs_err_info) - i;
-	resp_len = min(resp_len, max_resp_len);
-
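-	/* 28 == size of the SSP COMMAND IU (8-byte LUN through 16-byte CDB) */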
-	req_len = sizeof(struct ssp_frame_hdr) + 28;
-
-	/* request, response lengths */
-	hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4));
-
-	/* generate open address frame hdr (first 12 bytes) */
-	buf_oaf[0] = (1 << 7) | (1 << 4) | 0x1;	/* initiator, SSP, ftype 1h */
-	buf_oaf[1] = task->dev->linkrate & 0xf;
-	*(u16 *)(buf_oaf + 2) = cpu_to_be16(tag);
-	memcpy(buf_oaf + 4, task->dev->sas_addr, SAS_ADDR_SIZE);
-
-	/* fill in SSP frame header (Command Table.SSP frame header) */
-	ssp_hdr = (struct ssp_frame_hdr *)buf_cmd;
-	ssp_hdr->frame_type = SSP_COMMAND;
-	memcpy(ssp_hdr->hashed_dest_addr, task->dev->hashed_sas_addr,
-	       HASHED_SAS_ADDR_SIZE);
-	memcpy(ssp_hdr->hashed_src_addr,
-	       task->dev->port->ha->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
-	ssp_hdr->tag = cpu_to_be16(tag);
-
-	/* fill in command frame IU */
-	buf_cmd += sizeof(*ssp_hdr);
-	memcpy(buf_cmd, &task->ssp_task.LUN, 8);
-	buf_cmd[9] = fburst | task->ssp_task.task_attr |
-			(task->ssp_task.task_prio << 3);
-	memcpy(buf_cmd + 12, &task->ssp_task.cdb, 16);
-
-	/* fill in PRD (scatter/gather) table, if any */
-	for_each_sg(task->scatter, sg, tei->n_elem, i) {
-		buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
-		buf_prd->len = cpu_to_le32(sg_dma_len(sg));
-		buf_prd++;
-	}
-
-	return 0;
-}
-
-static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags)
-{
-	struct domain_device *dev = task->dev;
-	struct mvs_info *mvi = dev->port->ha->lldd_ha;
-	struct pci_dev *pdev = mvi->pdev;
-	void __iomem *regs = mvi->regs;
-	struct mvs_task_exec_info tei;
-	struct sas_task *t = task;
-	struct mvs_slot_info *slot;
-	u32 tag = 0xdeadbeef, rc, n_elem = 0;
-	unsigned long flags;
-	u32 n = num, pass = 0;
-
-	spin_lock_irqsave(&mvi->lock, flags);
-	do {
-		dev = t->dev;
-		tei.port = &mvi->port[dev->port->id];
-
-		if (!tei.port->port_attached) {
-			if (sas_protocol_ata(t->task_proto)) {
-				rc = SAS_PHY_DOWN;
-				goto out_done;
-			} else {
-				struct task_status_struct *ts = &t->task_status;
-				ts->resp = SAS_TASK_UNDELIVERED;
-				ts->stat = SAS_PHY_DOWN;
-				t->task_done(t);
-				if (n > 1)
-					t = list_entry(t->list.next,
-							struct sas_task, list);
-				continue;
-			}
-		}
-
-		if (!sas_protocol_ata(t->task_proto)) {
-			if (t->num_scatter) {
-				n_elem = pci_map_sg(mvi->pdev, t->scatter,
-						    t->num_scatter,
-						    t->data_dir);
-				if (!n_elem) {
-					rc = -ENOMEM;
-					goto err_out;
-				}
-			}
-		} else {
-			n_elem = t->num_scatter;
-		}
-
-		rc = mvs_tag_alloc(mvi, &tag);
-		if (rc)
-			goto err_out;
-
-		slot = &mvi->slot_info[tag];
-		t->lldd_task = NULL;
-		slot->n_elem = n_elem;
-		memset(slot->buf, 0, MVS_SLOT_BUF_SZ);
-		tei.task = t;
-		tei.hdr = &mvi->slot[tag];
-		tei.tag = tag;
-		tei.n_elem = n_elem;
-
-		switch (t->task_proto) {
-		case SAS_PROTOCOL_SMP:
-			rc = mvs_task_prep_smp(mvi, &tei);
-			break;
-		case SAS_PROTOCOL_SSP:
-			rc = mvs_task_prep_ssp(mvi, &tei);
-			break;
-		case SAS_PROTOCOL_SATA:
-		case SAS_PROTOCOL_STP:
-		case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
-			rc = mvs_task_prep_ata(mvi, &tei);
-			break;
-		default:
-			dev_printk(KERN_ERR, &pdev->dev,
-				"unknown sas_task proto: 0x%x\n",
-				t->task_proto);
-			rc = -EINVAL;
-			break;
-		}
-
-		if (rc)
-			goto err_out_tag;
-
-		slot->task = t;
-		slot->port = tei.port;
-		t->lldd_task = (void *) slot;
-		list_add_tail(&slot->list, &slot->port->list);
-		/* TODO: select normal or high priority */
-
-		spin_lock(&t->task_state_lock);
-		t->task_state_flags |= SAS_TASK_AT_INITIATOR;
-		spin_unlock(&t->task_state_lock);
-
-		mvs_hba_memory_dump(mvi, tag, t->task_proto);
-
-		++pass;
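-		/* advance TX producer; power-of-two ring, the mask wraps it */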
-		mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1);
-		if (n > 1)
-			t = list_entry(t->list.next, struct sas_task, list);
-	} while (--n);
-
-	rc = 0;
-	goto out_done;
-
-err_out_tag:
-	mvs_tag_free(mvi, tag);
-err_out:
-	dev_printk(KERN_ERR, &pdev->dev, "mvsas exec failed[%d]!\n", rc);
-	if (!sas_protocol_ata(t->task_proto))
-		if (n_elem)
-			pci_unmap_sg(mvi->pdev, t->scatter, n_elem,
-				     t->data_dir);
-out_done:
-	if (pass)
-		mw32(TX_PROD_IDX, (mvi->tx_prod - 1) & (MVS_CHIP_SLOT_SZ - 1));
-	spin_unlock_irqrestore(&mvi->lock, flags);
-	return rc;
-}
-
-static int mvs_task_abort(struct sas_task *task)
-{
-	int rc;
-	unsigned long flags;
-	struct mvs_info *mvi = task->dev->port->ha->lldd_ha;
-	struct pci_dev *pdev = mvi->pdev;
-	int tag;
-
-	spin_lock_irqsave(&task->task_state_lock, flags);
-	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
-		rc = TMF_RESP_FUNC_COMPLETE;
-		spin_unlock_irqrestore(&task->task_state_lock, flags);
-		goto out_done;
-	}
-	spin_unlock_irqrestore(&task->task_state_lock, flags);
-
-	switch (task->task_proto) {
-	case SAS_PROTOCOL_SMP:
-		dev_printk(KERN_DEBUG, &pdev->dev, "SMP Abort! \n");
-		break;
-	case SAS_PROTOCOL_SSP:
-		dev_printk(KERN_DEBUG, &pdev->dev, "SSP Abort! \n");
-		break;
-	case SAS_PROTOCOL_SATA:
-	case SAS_PROTOCOL_STP:
-	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:{
-		dev_printk(KERN_DEBUG, &pdev->dev, "STP Abort! \n");
-#if _MV_DUMP
-		dev_printk(KERN_DEBUG, &pdev->dev, "Dump D2H FIS: \n");
-		mvs_hexdump(sizeof(struct host_to_dev_fis),
-				(void *)&task->ata_task.fis, 0);
-		dev_printk(KERN_DEBUG, &pdev->dev, "Dump ATAPI Cmd : \n");
-		mvs_hexdump(16, task->ata_task.atapi_packet, 0);
-#endif
-		spin_lock_irqsave(&task->task_state_lock, flags);
-		if (task->task_state_flags & SAS_TASK_NEED_DEV_RESET) {
-			/* TODO */
-			;
-		}
-		spin_unlock_irqrestore(&task->task_state_lock, flags);
-		break;
-	}
-	default:
-		break;
-	}
-
-	if (mvs_find_tag(mvi, task, &tag)) {
-		spin_lock_irqsave(&mvi->lock, flags);
-		mvs_slot_task_free(mvi, task, &mvi->slot_info[tag], tag);
-		spin_unlock_irqrestore(&mvi->lock, flags);
-	}
-	if (!mvs_task_exec(task, 1, GFP_ATOMIC))
-		rc = TMF_RESP_FUNC_COMPLETE;
-	else
-		rc = TMF_RESP_FUNC_FAILED;
-out_done:
-	return rc;
-}
-
-static void mvs_free(struct mvs_info *mvi)
-{
-	int i;
-
-	if (!mvi)
-		return;
-
-	for (i = 0; i < MVS_SLOTS; i++) {
-		struct mvs_slot_info *slot = &mvi->slot_info[i];
-
-		if (slot->buf)
-			dma_free_coherent(&mvi->pdev->dev, MVS_SLOT_BUF_SZ,
-					  slot->buf, slot->buf_dma);
-	}
-
-	if (mvi->tx)
-		dma_free_coherent(&mvi->pdev->dev,
-				  sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ,
-				  mvi->tx, mvi->tx_dma);
-	if (mvi->rx_fis)
-		dma_free_coherent(&mvi->pdev->dev, MVS_RX_FISL_SZ,
-				  mvi->rx_fis, mvi->rx_fis_dma);
-	if (mvi->rx)
-		dma_free_coherent(&mvi->pdev->dev,
-				  sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1),
-				  mvi->rx, mvi->rx_dma);
-	if (mvi->slot)
-		dma_free_coherent(&mvi->pdev->dev,
-				  sizeof(*mvi->slot) * MVS_SLOTS,
-				  mvi->slot, mvi->slot_dma);
-#ifdef MVS_ENABLE_PERI
-	if (mvi->peri_regs)
-		iounmap(mvi->peri_regs);
-#endif
-	if (mvi->regs)
-		iounmap(mvi->regs);
-	if (mvi->shost)
-		scsi_host_put(mvi->shost);
-	kfree(mvi->sas.sas_port);
-	kfree(mvi->sas.sas_phy);
-	kfree(mvi);
-}
-
-/* FIXME: locking? */
-static int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
-			   void *funcdata)
-{
-	struct mvs_info *mvi = sas_phy->ha->lldd_ha;
-	int rc = 0, phy_id = sas_phy->id;
-	u32 tmp;
-
-	tmp = mvs_read_phy_ctl(mvi, phy_id);
-
-	switch (func) {
-	case PHY_FUNC_SET_LINK_RATE:{
-			struct sas_phy_linkrates *rates = funcdata;
-			u32 lrmin = 0, lrmax = 0;
-
-			lrmin = (rates->minimum_linkrate << 8);
-			lrmax = (rates->maximum_linkrate << 12);
-
-			if (lrmin) {
-				tmp &= ~(0xf << 8);
-				tmp |= lrmin;
-			}
-			if (lrmax) {
-				tmp &= ~(0xf << 12);
-				tmp |= lrmax;
-			}
-			mvs_write_phy_ctl(mvi, phy_id, tmp);
-			break;
-		}
-
-	case PHY_FUNC_HARD_RESET:
-		if (tmp & PHY_RST_HARD)
-			break;
-		mvs_write_phy_ctl(mvi, phy_id, tmp | PHY_RST_HARD);
-		break;
-
-	case PHY_FUNC_LINK_RESET:
-		mvs_write_phy_ctl(mvi, phy_id, tmp | PHY_RST);
-		break;
-
-	case PHY_FUNC_DISABLE:
-	case PHY_FUNC_RELEASE_SPINUP_HOLD:
-	default:
-		rc = -EOPNOTSUPP;
-	}
-
-	return rc;
-}
-
-static void __devinit mvs_phy_init(struct mvs_info *mvi, int phy_id)
-{
-	struct mvs_phy *phy = &mvi->phy[phy_id];
-	struct asd_sas_phy *sas_phy = &phy->sas_phy;
-
-	sas_phy->enabled = (phy_id < mvi->chip->n_phy) ? 1 : 0;
-	sas_phy->class = SAS;
-	sas_phy->iproto = SAS_PROTOCOL_ALL;
-	sas_phy->tproto = 0;
-	sas_phy->type = PHY_TYPE_PHYSICAL;
-	sas_phy->role = PHY_ROLE_INITIATOR;
-	sas_phy->oob_mode = OOB_NOT_CONNECTED;
-	sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
-
-	sas_phy->id = phy_id;
-	sas_phy->sas_addr = &mvi->sas_addr[0];
-	sas_phy->frame_rcvd = &phy->frame_rcvd[0];
-	sas_phy->ha = &mvi->sas;
-	sas_phy->lldd_phy = phy;
-}
-
-static struct mvs_info *__devinit mvs_alloc(struct pci_dev *pdev,
-					    const struct pci_device_id *ent)
-{
-	struct mvs_info *mvi;
-	unsigned long res_start, res_len, res_flag;
-	struct asd_sas_phy **arr_phy;
-	struct asd_sas_port **arr_port;
-	const struct mvs_chip_info *chip = &mvs_chips[ent->driver_data];
-	int i;
-
-	/*
-	 * alloc and init our per-HBA mvs_info struct
-	 */
-
-	mvi = kzalloc(sizeof(*mvi), GFP_KERNEL);
-	if (!mvi)
-		return NULL;
-
-	spin_lock_init(&mvi->lock);
-#ifdef MVS_USE_TASKLET
-	tasklet_init(&mvi->tasklet, mvs_tasklet, (unsigned long)mvi);
-#endif
-	mvi->pdev = pdev;
-	mvi->chip = chip;
-
-	if (pdev->device == 0x6440 && pdev->revision == 0)
-		mvi->flags |= MVF_PHY_PWR_FIX;
-
-	/*
-	 * alloc and init SCSI, SAS glue
-	 */
-
-	mvi->shost = scsi_host_alloc(&mvs_sht, sizeof(void *));
-	if (!mvi->shost)
-		goto err_out;
-
-	arr_phy = kcalloc(MVS_MAX_PHYS, sizeof(void *), GFP_KERNEL);
-	arr_port = kcalloc(MVS_MAX_PHYS, sizeof(void *), GFP_KERNEL);
-	if (!arr_phy || !arr_port)
-		goto err_out;
-
-	for (i = 0; i < MVS_MAX_PHYS; i++) {
-		mvs_phy_init(mvi, i);
-		arr_phy[i] = &mvi->phy[i].sas_phy;
-		arr_port[i] = &mvi->port[i].sas_port;
-		mvi->port[i].taskfileset = MVS_ID_NOT_MAPPED;
-		mvi->port[i].wide_port_phymap = 0;
-		mvi->port[i].port_attached = 0;
-		INIT_LIST_HEAD(&mvi->port[i].list);
-	}
-
-	SHOST_TO_SAS_HA(mvi->shost) = &mvi->sas;
-	mvi->shost->transportt = mvs_stt;
-	mvi->shost->max_id = 21;
-	mvi->shost->max_lun = ~0;
-	mvi->shost->max_channel = 0;
-	mvi->shost->max_cmd_len = 16;
-
-	mvi->sas.sas_ha_name = DRV_NAME;
-	mvi->sas.dev = &pdev->dev;
-	mvi->sas.lldd_module = THIS_MODULE;
-	mvi->sas.sas_addr = &mvi->sas_addr[0];
-	mvi->sas.sas_phy = arr_phy;
-	mvi->sas.sas_port = arr_port;
-	mvi->sas.num_phys = chip->n_phy;
-	mvi->sas.lldd_max_execute_num = 1;
-	mvi->sas.lldd_queue_size = MVS_QUEUE_SIZE;
-	mvi->shost->can_queue = MVS_CAN_QUEUE;
-	mvi->shost->cmd_per_lun = MVS_SLOTS / mvi->sas.num_phys;
-	mvi->sas.lldd_ha = mvi;
-	mvi->sas.core.shost = mvi->shost;
-
-	mvs_tag_init(mvi);
-
-	/*
-	 * ioremap main and peripheral registers
-	 */
-
-#ifdef MVS_ENABLE_PERI
-	res_start = pci_resource_start(pdev, 2);
-	res_len = pci_resource_len(pdev, 2);
-	if (!res_start || !res_len)
-		goto err_out;
-
-	mvi->peri_regs = ioremap_nocache(res_start, res_len);
-	if (!mvi->peri_regs)
-		goto err_out;
-#endif
-
-	res_start = pci_resource_start(pdev, 4);
-	res_len = pci_resource_len(pdev, 4);
-	if (!res_start || !res_len)
-		goto err_out;
-
-	res_flag = pci_resource_flags(pdev, 4);
-	if (res_flag & IORESOURCE_CACHEABLE)
-		mvi->regs = ioremap(res_start, res_len);
-	else
-		mvi->regs = ioremap_nocache(res_start, res_len);
-
-	if (!mvi->regs)
-		goto err_out;
-
-	/*
-	 * alloc and init our DMA areas
-	 */
-
-	mvi->tx = dma_alloc_coherent(&pdev->dev,
-				     sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ,
-				     &mvi->tx_dma, GFP_KERNEL);
-	if (!mvi->tx)
-		goto err_out;
-	memset(mvi->tx, 0, sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ);
-
-	mvi->rx_fis = dma_alloc_coherent(&pdev->dev, MVS_RX_FISL_SZ,
-					 &mvi->rx_fis_dma, GFP_KERNEL);
-	if (!mvi->rx_fis)
-		goto err_out;
-	memset(mvi->rx_fis, 0, MVS_RX_FISL_SZ);
-
-	mvi->rx = dma_alloc_coherent(&pdev->dev,
-				     sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1),
-				     &mvi->rx_dma, GFP_KERNEL);
-	if (!mvi->rx)
-		goto err_out;
-	memset(mvi->rx, 0, sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1));
-
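-	/* element 0 apparently mirrors the chip's RX producer index;
-	 * start it and rx_cons at the 12-bit wrap value
-	 */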
-	mvi->rx[0] = cpu_to_le32(0xfff);
-	mvi->rx_cons = 0xfff;
-
-	mvi->slot = dma_alloc_coherent(&pdev->dev,
-				       sizeof(*mvi->slot) * MVS_SLOTS,
-				       &mvi->slot_dma, GFP_KERNEL);
-	if (!mvi->slot)
-		goto err_out;
-	memset(mvi->slot, 0, sizeof(*mvi->slot) * MVS_SLOTS);
-
-	for (i = 0; i < MVS_SLOTS; i++) {
-		struct mvs_slot_info *slot = &mvi->slot_info[i];
-
-		slot->buf = dma_alloc_coherent(&pdev->dev, MVS_SLOT_BUF_SZ,
-					       &slot->buf_dma, GFP_KERNEL);
-		if (!slot->buf)
-			goto err_out;
-		memset(slot->buf, 0, MVS_SLOT_BUF_SZ);
-	}
-
-	/* finally, read NVRAM to get our SAS address */
-	if (mvs_nvram_read(mvi, NVR_SAS_ADDR, &mvi->sas_addr, 8))
-		goto err_out;
-	return mvi;
-
-err_out:
-	mvs_free(mvi);
-	return NULL;
-}
-
-static u32 mvs_cr32(void __iomem *regs, u32 addr)
-{
-	mw32(CMD_ADDR, addr);
-	return mr32(CMD_DATA);
-}
-
-static void mvs_cw32(void __iomem *regs, u32 addr, u32 val)
-{
-	mw32(CMD_ADDR, addr);
-	mw32(CMD_DATA, val);
-}
-
-static u32 mvs_read_phy_ctl(struct mvs_info *mvi, u32 port)
-{
-	void __iomem *regs = mvi->regs;
-	return (port < 4) ? mr32(P0_SER_CTLSTAT + port * 4) :
-		mr32(P4_SER_CTLSTAT + (port - 4) * 4);
-}
-
-static void mvs_write_phy_ctl(struct mvs_info *mvi, u32 port, u32 val)
-{
-	void __iomem *regs = mvi->regs;
-	if (port < 4)
-		mw32(P0_SER_CTLSTAT + port * 4, val);
-	else
-		mw32(P4_SER_CTLSTAT + (port - 4) * 4, val);
-}
-
-static u32 mvs_read_port(struct mvs_info *mvi, u32 off, u32 off2, u32 port)
-{
-	void __iomem *regs = mvi->regs + off;
-	void __iomem *regs2 = mvi->regs + off2;
-	return (port < 4) ? readl(regs + port * 8) :
-		readl(regs2 + (port - 4) * 8);
-}
-
-static void mvs_write_port(struct mvs_info *mvi, u32 off, u32 off2,
-				u32 port, u32 val)
-{
-	void __iomem *regs = mvi->regs + off;
-	void __iomem *regs2 = mvi->regs + off2;
-	if (port < 4)
-		writel(val, regs + port * 8);
-	else
-		writel(val, regs2 + (port - 4) * 8);
-}
-
-static u32 mvs_read_port_cfg_data(struct mvs_info *mvi, u32 port)
-{
-	return mvs_read_port(mvi, MVS_P0_CFG_DATA, MVS_P4_CFG_DATA, port);
-}
-
-static void mvs_write_port_cfg_data(struct mvs_info *mvi, u32 port, u32 val)
-{
-	mvs_write_port(mvi, MVS_P0_CFG_DATA, MVS_P4_CFG_DATA, port, val);
-}
-
-static void mvs_write_port_cfg_addr(struct mvs_info *mvi, u32 port, u32 addr)
-{
-	mvs_write_port(mvi, MVS_P0_CFG_ADDR, MVS_P4_CFG_ADDR, port, addr);
-}
-
-static u32 mvs_read_port_vsr_data(struct mvs_info *mvi, u32 port)
-{
-	return mvs_read_port(mvi, MVS_P0_VSR_DATA, MVS_P4_VSR_DATA, port);
-}
-
-static void mvs_write_port_vsr_data(struct mvs_info *mvi, u32 port, u32 val)
-{
-	mvs_write_port(mvi, MVS_P0_VSR_DATA, MVS_P4_VSR_DATA, port, val);
-}
-
-static void mvs_write_port_vsr_addr(struct mvs_info *mvi, u32 port, u32 addr)
-{
-	mvs_write_port(mvi, MVS_P0_VSR_ADDR, MVS_P4_VSR_ADDR, port, addr);
-}
-
-static u32 mvs_read_port_irq_stat(struct mvs_info *mvi, u32 port)
-{
-	return mvs_read_port(mvi, MVS_P0_INT_STAT, MVS_P4_INT_STAT, port);
-}
-
-static void mvs_write_port_irq_stat(struct mvs_info *mvi, u32 port, u32 val)
-{
-	mvs_write_port(mvi, MVS_P0_INT_STAT, MVS_P4_INT_STAT, port, val);
-}
-
-static u32 mvs_read_port_irq_mask(struct mvs_info *mvi, u32 port)
-{
-	return mvs_read_port(mvi, MVS_P0_INT_MASK, MVS_P4_INT_MASK, port);
-}
-
-static void mvs_write_port_irq_mask(struct mvs_info *mvi, u32 port, u32 val)
-{
-	mvs_write_port(mvi, MVS_P0_INT_MASK, MVS_P4_INT_MASK, port, val);
-}
-
-static void __devinit mvs_phy_hacks(struct mvs_info *mvi)
-{
-	void __iomem *regs = mvi->regs;
-	u32 tmp;
-
-	/* workaround for SATA R-ERR, to ignore phy glitch */
-	tmp = mvs_cr32(regs, CMD_PHY_TIMER);
-	tmp &= ~(1 << 9);
-	tmp |= (1 << 10);
-	mvs_cw32(regs, CMD_PHY_TIMER, tmp);
-
-	/* enable retry 127 times */
-	mvs_cw32(regs, CMD_SAS_CTL1, 0x7f7f);
-
-	/* extend open frame timeout to max */
-	tmp = mvs_cr32(regs, CMD_SAS_CTL0);
-	tmp &= ~0xffff;
-	tmp |= 0x3fff;
-	mvs_cw32(regs, CMD_SAS_CTL0, tmp);
-
-	/* workaround for WDTIMEOUT, set to 550 ms */
-	mvs_cw32(regs, CMD_WD_TIMER, 0x86470);
-
-	/* not to halt for different port op during wideport link change */
-	mvs_cw32(regs, CMD_APP_ERR_CONFIG, 0xffefbf7d);
-
-	/* workaround for Seagate disk not-found OOB sequence, recv
-	 * COMINIT before sending out COMWAKE */
-	tmp = mvs_cr32(regs, CMD_PHY_MODE_21);
-	tmp &= 0x0000ffff;
-	tmp |= 0x00fa0000;
-	mvs_cw32(regs, CMD_PHY_MODE_21, tmp);
-
-	tmp = mvs_cr32(regs, CMD_PHY_TIMER);
-	tmp &= 0x1fffffff;
-	tmp |= (2U << 29);	/* 8 ms retry */
-	mvs_cw32(regs, CMD_PHY_TIMER, tmp);
-
-	/* TEST - for phy decoding error, adjust voltage levels */
-	mw32(P0_VSR_ADDR + 0, 0x8);
-	mw32(P0_VSR_DATA + 0, 0x2F0);
-
-	mw32(P0_VSR_ADDR + 8, 0x8);
-	mw32(P0_VSR_DATA + 8, 0x2F0);
-
-	mw32(P0_VSR_ADDR + 16, 0x8);
-	mw32(P0_VSR_DATA + 16, 0x2F0);
-
-	mw32(P0_VSR_ADDR + 24, 0x8);
-	mw32(P0_VSR_DATA + 24, 0x2F0);
-
-}
-
-static void mvs_enable_xmt(struct mvs_info *mvi, int PhyId)
-{
-	void __iomem *regs = mvi->regs;
-	u32 tmp;
-
-	tmp = mr32(PCS);
-	if (mvi->chip->n_phy <= 4)
-		tmp |= 1 << (PhyId + PCS_EN_PORT_XMT_SHIFT);
-	else
-		tmp |= 1 << (PhyId + PCS_EN_PORT_XMT_SHIFT2);
-	mw32(PCS, tmp);
-}
-
-static void mvs_detect_porttype(struct mvs_info *mvi, int i)
-{
-	void __iomem *regs = mvi->regs;
-	u32 reg;
-	struct mvs_phy *phy = &mvi->phy[i];
-
-	/* TODO check & save device type */
-	reg = mr32(GBL_PORT_TYPE);
-
-	if (reg & MODE_SAS_SATA & (1 << i))
-		phy->phy_type |= PORT_TYPE_SAS;
-	else
-		phy->phy_type |= PORT_TYPE_SATA;
-}
-
-static void *mvs_get_d2h_reg(struct mvs_info *mvi, int i, void *buf)
-{
-	u32 *s = (u32 *) buf;
-
-	if (!s)
-		return NULL;
-
-	mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG3);
-	s[3] = mvs_read_port_cfg_data(mvi, i);
-
-	mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG2);
-	s[2] = mvs_read_port_cfg_data(mvi, i);
-
-	mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG1);
-	s[1] = mvs_read_port_cfg_data(mvi, i);
-
-	mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG0);
-	s[0] = mvs_read_port_cfg_data(mvi, i);
-
-	return (void *)s;
-}
-
-static u32 mvs_is_sig_fis_received(u32 irq_status)
-{
-	return irq_status & PHYEV_SIG_FIS;
-}
-
-static void mvs_update_wideport(struct mvs_info *mvi, int i)
-{
-	struct mvs_phy *phy = &mvi->phy[i];
-	struct mvs_port *port = phy->port;
-	int j, no;
-
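-	/* write the wide-port bitmap to each member phy; 0 drops a phy out */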
-	for_each_phy(port->wide_port_phymap, j, no, mvi->chip->n_phy)
-		if (j & 1) {
-			mvs_write_port_cfg_addr(mvi, no, PHYR_WIDE_PORT);
-			mvs_write_port_cfg_data(mvi, no,
-						port->wide_port_phymap);
-		} else {
-			mvs_write_port_cfg_addr(mvi, no, PHYR_WIDE_PORT);
-			mvs_write_port_cfg_data(mvi, no, 0);
-		}
-}
-
-static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i)
-{
-	u32 tmp;
-	struct mvs_phy *phy = &mvi->phy[i];
-	struct mvs_port *port = phy->port;
-
-	tmp = mvs_read_phy_ctl(mvi, i);
-
-	if ((tmp & PHY_READY_MASK) && !(phy->irq_status & PHYEV_POOF)) {
-		if (!port)
-			phy->phy_attached = 1;
-		return tmp;
-	}
-
-	if (port) {
-		if (phy->phy_type & PORT_TYPE_SAS) {
-			port->wide_port_phymap &= ~(1U << i);
-			if (!port->wide_port_phymap)
-				port->port_attached = 0;
-			mvs_update_wideport(mvi, i);
-		} else if (phy->phy_type & PORT_TYPE_SATA)
-			port->port_attached = 0;
-		mvs_free_reg_set(mvi, phy->port);
-		phy->port = NULL;
-		phy->phy_attached = 0;
-		phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);
-	}
-	return 0;
-}
-
-static void mvs_update_phyinfo(struct mvs_info *mvi, int i,
-					int get_st)
-{
-	struct mvs_phy *phy = &mvi->phy[i];
-	struct pci_dev *pdev = mvi->pdev;
-	u32 tmp;
-	u64 tmp64;
-
-	mvs_write_port_cfg_addr(mvi, i, PHYR_IDENTIFY);
-	phy->dev_info = mvs_read_port_cfg_data(mvi, i);
-
-	mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_HI);
-	phy->dev_sas_addr = (u64) mvs_read_port_cfg_data(mvi, i) << 32;
-
-	mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_LO);
-	phy->dev_sas_addr |= mvs_read_port_cfg_data(mvi, i);
-
-	if (get_st) {
-		phy->irq_status = mvs_read_port_irq_stat(mvi, i);
-		phy->phy_status = mvs_is_phy_ready(mvi, i);
-	}
-
-	if (phy->phy_status) {
-		u32 phy_st;
-		struct asd_sas_phy *sas_phy = mvi->sas.sas_phy[i];
-
-		mvs_write_port_cfg_addr(mvi, i, PHYR_PHY_STAT);
-		phy_st = mvs_read_port_cfg_data(mvi, i);
-
-		sas_phy->linkrate =
-			(phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >>
-				PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET;
-		phy->minimum_linkrate =
-			(phy->phy_status &
-				PHY_MIN_SPP_PHYS_LINK_RATE_MASK) >> 8;
-		phy->maximum_linkrate =
-			(phy->phy_status &
-				PHY_MAX_SPP_PHYS_LINK_RATE_MASK) >> 12;
-
-		if (phy->phy_type & PORT_TYPE_SAS) {
-			/* Updated attached_sas_addr */
-			mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_HI);
-			phy->att_dev_sas_addr =
-				(u64) mvs_read_port_cfg_data(mvi, i) << 32;
-			mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_LO);
-			phy->att_dev_sas_addr |= mvs_read_port_cfg_data(mvi, i);
-			mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_DEV_INFO);
-			phy->att_dev_info = mvs_read_port_cfg_data(mvi, i);
-			phy->identify.device_type =
-			    phy->att_dev_info & PORT_DEV_TYPE_MASK;
-
-			if (phy->identify.device_type == SAS_END_DEV)
-				phy->identify.target_port_protocols =
-							SAS_PROTOCOL_SSP;
-			else if (phy->identify.device_type != NO_DEVICE)
-				phy->identify.target_port_protocols =
-							SAS_PROTOCOL_SMP;
-			if (phy_st & PHY_OOB_DTCTD)
-				sas_phy->oob_mode = SAS_OOB_MODE;
-			phy->frame_rcvd_size =
-			    sizeof(struct sas_identify_frame);
-		} else if (phy->phy_type & PORT_TYPE_SATA) {
-			phy->identify.target_port_protocols = SAS_PROTOCOL_STP;
-			if (mvs_is_sig_fis_received(phy->irq_status)) {
-				phy->att_dev_sas_addr = i;	/* temp */
-				if (phy_st & PHY_OOB_DTCTD)
-					sas_phy->oob_mode = SATA_OOB_MODE;
-				phy->frame_rcvd_size =
-				    sizeof(struct dev_to_host_fis);
-				mvs_get_d2h_reg(mvi, i,
-						(void *)sas_phy->frame_rcvd);
-			} else {
-				dev_printk(KERN_DEBUG, &pdev->dev,
-					"No sig fis\n");
-				phy->phy_type &= ~(PORT_TYPE_SATA);
-				goto out_done;
-			}
-		}
-		tmp64 = cpu_to_be64(phy->att_dev_sas_addr);
-		memcpy(sas_phy->attached_sas_addr, &tmp64, SAS_ADDR_SIZE);
-
-		dev_printk(KERN_DEBUG, &pdev->dev,
-			"phy[%d] Get Attached Address 0x%llX ,"
-			" SAS Address 0x%llX\n",
-			i,
-			(unsigned long long)phy->att_dev_sas_addr,
-			(unsigned long long)phy->dev_sas_addr);
-		dev_printk(KERN_DEBUG, &pdev->dev,
-			"Rate = %x , type = %d\n",
-			sas_phy->linkrate, phy->phy_type);
-
-		/* workaround for HW phy decoding error on 1.5g disk drive */
-		mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE6);
-		tmp = mvs_read_port_vsr_data(mvi, i);
-		if (((phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >>
-		     PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET) ==
-			SAS_LINK_RATE_1_5_GBPS)
-			tmp &= ~PHY_MODE6_LATECLK;
-		else
-			tmp |= PHY_MODE6_LATECLK;
-		mvs_write_port_vsr_data(mvi, i, tmp);
-
-	}
-out_done:
-	if (get_st)
-		mvs_write_port_irq_stat(mvi, i, phy->irq_status);
-}
-
-static void mvs_port_formed(struct asd_sas_phy *sas_phy)
-{
-	struct sas_ha_struct *sas_ha = sas_phy->ha;
-	struct mvs_info *mvi = sas_ha->lldd_ha;
-	struct asd_sas_port *sas_port = sas_phy->port;
-	struct mvs_phy *phy = sas_phy->lldd_phy;
-	struct mvs_port *port = &mvi->port[sas_port->id];
-	unsigned long flags;
-
-	spin_lock_irqsave(&mvi->lock, flags);
-	port->port_attached = 1;
-	phy->port = port;
-	port->taskfileset = MVS_ID_NOT_MAPPED;
-	if (phy->phy_type & PORT_TYPE_SAS) {
-		port->wide_port_phymap = sas_port->phy_mask;
-		mvs_update_wideport(mvi, sas_phy->id);
-	}
-	spin_unlock_irqrestore(&mvi->lock, flags);
-}
-
-static int mvs_I_T_nexus_reset(struct domain_device *dev)
-{
-	return TMF_RESP_FUNC_FAILED;
-}
-
-static int __devinit mvs_hw_init(struct mvs_info *mvi)
-{
-	void __iomem *regs = mvi->regs;
-	int i;
-	u32 tmp, cctl;
-
-	/* make sure interrupts are masked immediately (paranoia) */
-	mw32(GBL_CTL, 0);
-	tmp = mr32(GBL_CTL);
-
-	/* Reset Controller */
-	if (!(tmp & HBA_RST)) {
-		if (mvi->flags & MVF_PHY_PWR_FIX) {
-			pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp);
-			tmp &= ~PCTL_PWR_ON;
-			tmp |= PCTL_OFF;
-			pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp);
-
-			pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp);
-			tmp &= ~PCTL_PWR_ON;
-			tmp |= PCTL_OFF;
-			pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp);
-		}
-
-		/* global reset, incl. COMRESET/H_RESET_N (self-clearing) */
-		mw32_f(GBL_CTL, HBA_RST);
-	}
-
-	/* wait for reset to finish; timeout is just a guess */
-	i = 1000;
-	while (i-- > 0) {
-		msleep(10);
-
-		if (!(mr32(GBL_CTL) & HBA_RST))
-			break;
-	}
-	if (mr32(GBL_CTL) & HBA_RST) {
-		dev_printk(KERN_ERR, &mvi->pdev->dev, "HBA reset failed\n");
-		return -EBUSY;
-	}
-
-	/* Init Chip */
-	/* make sure RST is set; HBA_RST /should/ have done that for us */
-	cctl = mr32(CTL);
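-	/* if RST is already set, just drop it from our copy; else pulse it now */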
-	if (cctl & CCTL_RST)
-		cctl &= ~CCTL_RST;
-	else
-		mw32_f(CTL, cctl | CCTL_RST);
-
-	/* write to device control _AND_ device status register? - A.C. */
-	pci_read_config_dword(mvi->pdev, PCR_DEV_CTRL, &tmp);
-	tmp &= ~PRD_REQ_MASK;
-	tmp |= PRD_REQ_SIZE;
-	pci_write_config_dword(mvi->pdev, PCR_DEV_CTRL, tmp);
-
-	pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp);
-	tmp |= PCTL_PWR_ON;
-	tmp &= ~PCTL_OFF;
-	pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp);
-
-	pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp);
-	tmp |= PCTL_PWR_ON;
-	tmp &= ~PCTL_OFF;
-	pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp);
-
-	mw32_f(CTL, cctl);
-
-	/* reset control */
-	mw32(PCS, 0);		/*MVS_PCS */
-
-	mvs_phy_hacks(mvi);
-
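-	/* the double 16-bit shift extracts the high 32 bits without an
-	 * undefined ">> 32" when dma_addr_t is only 32 bits wide
-	 */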
-	mw32(CMD_LIST_LO, mvi->slot_dma);
-	mw32(CMD_LIST_HI, (mvi->slot_dma >> 16) >> 16);
-
-	mw32(RX_FIS_LO, mvi->rx_fis_dma);
-	mw32(RX_FIS_HI, (mvi->rx_fis_dma >> 16) >> 16);
-
-	mw32(TX_CFG, MVS_CHIP_SLOT_SZ);
-	mw32(TX_LO, mvi->tx_dma);
-	mw32(TX_HI, (mvi->tx_dma >> 16) >> 16);
-
-	mw32(RX_CFG, MVS_RX_RING_SZ);
-	mw32(RX_LO, mvi->rx_dma);
-	mw32(RX_HI, (mvi->rx_dma >> 16) >> 16);
-
-	/* enable auto port detection */
-	mw32(GBL_PORT_TYPE, MODE_AUTO_DET_EN);
-	msleep(1100);
-	/* init and reset phys */
-	for (i = 0; i < mvi->chip->n_phy; i++) {
-		u32 lo = be32_to_cpu(*(u32 *)&mvi->sas_addr[4]);
-		u32 hi = be32_to_cpu(*(u32 *)&mvi->sas_addr[0]);
-
-		mvs_detect_porttype(mvi, i);
-
-		/* set phy local SAS address */
-		mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_LO);
-		mvs_write_port_cfg_data(mvi, i, lo);
-		mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_HI);
-		mvs_write_port_cfg_data(mvi, i, hi);
-
-		/* reset phy */
-		tmp = mvs_read_phy_ctl(mvi, i);
-		tmp |= PHY_RST;
-		mvs_write_phy_ctl(mvi, i, tmp);
-	}
-
-	msleep(100);
-
-	for (i = 0; i < mvi->chip->n_phy; i++) {
-		/* clear phy int status */
-		tmp = mvs_read_port_irq_stat(mvi, i);
-		tmp &= ~PHYEV_SIG_FIS;
-		mvs_write_port_irq_stat(mvi, i, tmp);
-
-		/* set phy int mask */
-		tmp = PHYEV_RDY_CH | PHYEV_BROAD_CH | PHYEV_UNASSOC_FIS |
-			PHYEV_ID_DONE | PHYEV_DEC_ERR;
-		mvs_write_port_irq_mask(mvi, i, tmp);
-
-		msleep(100);
-		mvs_update_phyinfo(mvi, i, 1);
-		mvs_enable_xmt(mvi, i);
-	}
-
-	/* FIXME: update wide port bitmaps */
-
-	/* little endian for open address and command table, etc. */
-	/* A.C.
-	 * it seems that (from the spec) turning on big-endian won't
-	 * do us any good on big-endian machines; needs further confirmation
-	 */
-	cctl = mr32(CTL);
-	cctl |= CCTL_ENDIAN_CMD;
-	cctl |= CCTL_ENDIAN_DATA;
-	cctl &= ~CCTL_ENDIAN_OPEN;
-	cctl |= CCTL_ENDIAN_RSP;
-	mw32_f(CTL, cctl);
-
-	/* reset CMD queue */
-	tmp = mr32(PCS);
-	tmp |= PCS_CMD_RST;
-	mw32(PCS, tmp);
-	/* interrupt coalescing may cause missed HW interrupts in some cases:
-	 * the max coalescing count is 0x1ff while our max slot count is
-	 * 0x200, so disable it by setting the count to 0.
-	 */
-	tmp = 0;
-	mw32(INT_COAL, tmp);
-
-	tmp = 0x100;
-	mw32(INT_COAL_TMOUT, tmp);
-
-	/* ladies and gentlemen, start your engines */
-	mw32(TX_CFG, 0);
-	mw32(TX_CFG, MVS_CHIP_SLOT_SZ | TX_EN);
-	mw32(RX_CFG, MVS_RX_RING_SZ | RX_EN);
-	/* enable CMD/CMPL_Q/RESP mode */
-	mw32(PCS, PCS_SATA_RETRY | PCS_FIS_RX_EN | PCS_CMD_EN);
-
-	/* enable completion queue interrupt */
-	tmp = (CINT_PORT_MASK | CINT_DONE | CINT_MEM | CINT_SRS);
-	mw32(INT_MASK, tmp);
-
-	/* Enable SRS interrupt */
-	mw32(INT_MASK_SRS, 0xFF);
-	return 0;
-}
-
-static void __devinit mvs_print_info(struct mvs_info *mvi)
-{
-	struct pci_dev *pdev = mvi->pdev;
-	static int printed_version;
-
-	if (!printed_version++)
-		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
-
-	dev_printk(KERN_INFO, &pdev->dev, "%u phys, addr %llx\n",
-		   mvi->chip->n_phy, SAS_ADDR(mvi->sas_addr));
-}
-
-static int __devinit mvs_pci_init(struct pci_dev *pdev,
-				  const struct pci_device_id *ent)
-{
-	int rc;
-	struct mvs_info *mvi;
-	irq_handler_t irq_handler = mvs_interrupt;
-
-	rc = pci_enable_device(pdev);
-	if (rc)
-		return rc;
-
-	pci_set_master(pdev);
-
-	rc = pci_request_regions(pdev, DRV_NAME);
-	if (rc)
-		goto err_out_disable;
-
-	rc = pci_go_64(pdev);
-	if (rc)
-		goto err_out_regions;
-
-	mvi = mvs_alloc(pdev, ent);
-	if (!mvi) {
-		rc = -ENOMEM;
-		goto err_out_regions;
-	}
-
-	rc = mvs_hw_init(mvi);
-	if (rc)
-		goto err_out_mvi;
-
-#ifndef MVS_DISABLE_MSI
-	if (!pci_enable_msi(pdev)) {
-		u32 tmp;
-		void __iomem *regs = mvi->regs;
-		mvi->flags |= MVF_MSI;
-		irq_handler = mvs_msi_interrupt;
-		tmp = mr32(PCS);
-		mw32(PCS, tmp | PCS_SELF_CLEAR);
-	}
-#endif
-
-	rc = request_irq(pdev->irq, irq_handler, IRQF_SHARED, DRV_NAME, mvi);
-	if (rc)
-		goto err_out_msi;
-
-	rc = scsi_add_host(mvi->shost, &pdev->dev);
-	if (rc)
-		goto err_out_irq;
-
-	rc = sas_register_ha(&mvi->sas);
-	if (rc)
-		goto err_out_shost;
-
-	pci_set_drvdata(pdev, mvi);
-
-	mvs_print_info(mvi);
-
-	mvs_hba_interrupt_enable(mvi);
-
-	scsi_scan_host(mvi->shost);
-
-	return 0;
-
-err_out_shost:
-	scsi_remove_host(mvi->shost);
-err_out_irq:
-	free_irq(pdev->irq, mvi);
-err_out_msi:
-	if (mvi->flags & MVF_MSI)
-		pci_disable_msi(pdev);
-err_out_mvi:
-	mvs_free(mvi);
-err_out_regions:
-	pci_release_regions(pdev);
-err_out_disable:
-	pci_disable_device(pdev);
-	return rc;
-}
-
-static void __devexit mvs_pci_remove(struct pci_dev *pdev)
-{
-	struct mvs_info *mvi = pci_get_drvdata(pdev);
-
-	pci_set_drvdata(pdev, NULL);
-
-	if (mvi) {
-		sas_unregister_ha(&mvi->sas);
-		mvs_hba_interrupt_disable(mvi);
-		sas_remove_host(mvi->shost);
-		scsi_remove_host(mvi->shost);
-
-		free_irq(pdev->irq, mvi);
-		if (mvi->flags & MVF_MSI)
-			pci_disable_msi(pdev);
-		mvs_free(mvi);
-		pci_release_regions(pdev);
-	}
-	pci_disable_device(pdev);
-}
-
-static struct sas_domain_function_template mvs_transport_ops = {
-	.lldd_execute_task	= mvs_task_exec,
-	.lldd_control_phy	= mvs_phy_control,
-	.lldd_abort_task	= mvs_task_abort,
-	.lldd_port_formed	= mvs_port_formed,
-	.lldd_I_T_nexus_reset	= mvs_I_T_nexus_reset,
-};
-
-static struct pci_device_id __devinitdata mvs_pci_table[] = {
-	{ PCI_VDEVICE(MARVELL, 0x6320), chip_6320 },
-	{ PCI_VDEVICE(MARVELL, 0x6340), chip_6440 },
-	{
-		.vendor 	= PCI_VENDOR_ID_MARVELL,
-		.device 	= 0x6440,
-		.subvendor	= PCI_ANY_ID,
-		.subdevice	= 0x6480,
-		.class		= 0,
-		.class_mask	= 0,
-		.driver_data	= chip_6480,
-	},
-	{ PCI_VDEVICE(MARVELL, 0x6440), chip_6440 },
-	{ PCI_VDEVICE(MARVELL, 0x6480), chip_6480 },
-
-	{ }	/* terminate list */
-};
-
-static struct pci_driver mvs_pci_driver = {
-	.name		= DRV_NAME,
-	.id_table	= mvs_pci_table,
-	.probe		= mvs_pci_init,
-	.remove		= __devexit_p(mvs_pci_remove),
-};
-
-static int __init mvs_init(void)
-{
-	int rc;
-
-	mvs_stt = sas_domain_attach_transport(&mvs_transport_ops);
-	if (!mvs_stt)
-		return -ENOMEM;
-
-	rc = pci_register_driver(&mvs_pci_driver);
-	if (rc)
-		goto err_out;
-
-	return 0;
-
-err_out:
-	sas_release_transport(mvs_stt);
-	return rc;
-}
-
-static void __exit mvs_exit(void)
-{
-	pci_unregister_driver(&mvs_pci_driver);
-	sas_release_transport(mvs_stt);
-}
-
-module_init(mvs_init);
-module_exit(mvs_exit);
-
-MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
-MODULE_DESCRIPTION("Marvell 88SE6440 SAS/SATA controller driver");
-MODULE_VERSION(DRV_VERSION);
-MODULE_LICENSE("GPL");
-MODULE_DEVICE_TABLE(pci, mvs_pci_table);
diff --git a/drivers/scsi/mvsas/Kconfig b/drivers/scsi/mvsas/Kconfig
new file mode 100644
index 00000000000..f83f368e690
--- /dev/null
+++ b/drivers/scsi/mvsas/Kconfig
@@ -0,0 +1,35 @@
+#
+# Kernel configuration file for 88SE64XX SAS/SATA driver.
+#
+# Copyright 2007 Red Hat, Inc.
+# Copyright 2008 Marvell. <kewei@marvell.com>
+#
+# This file is licensed under GPLv2.
+#
+# This file is part of the 88SE64XX driver.
+#
+# The 88SE64XX driver is free software; you can redistribute
+# it and/or modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; version 2 of the
+# License.
+#
+# The 88SE64XX driver is distributed in the hope that it will be
+# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with 88SE64XX Driver; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+#
+#
+
+config SCSI_MVSAS
+	tristate "Marvell 88SE64XX SAS/SATA support"
+	depends on PCI
+	select SCSI_SAS_LIBSAS
+	select FW_LOADER
+	help
+		This driver supports Marvell's SAS/SATA 3Gb/s PCI-E 88SE64XX
+		chip based host adapters.
+
diff --git a/drivers/scsi/mvsas/Makefile b/drivers/scsi/mvsas/Makefile
new file mode 100644
index 00000000000..1ac6ed955a0
--- /dev/null
+++ b/drivers/scsi/mvsas/Makefile
@@ -0,0 +1,26 @@
+#
+# Makefile for Marvell 88SE64xx SAS/SATA driver.
+#
+# Copyright 2007 Red Hat, Inc.
+# Copyright 2008 Marvell. <kewei@marvell.com>
+#
+# This file is licensed under GPLv2.
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License as
+# published by the Free Software Foundation; version 2 of the
+# License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+# USA
+
+obj-$(CONFIG_SCSI_MVSAS) += mvsas.o
+mvsas-y += mv_sas.o
+
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
new file mode 100644
index 00000000000..e4acebd10d1
--- /dev/null
+++ b/drivers/scsi/mvsas/mv_sas.c
@@ -0,0 +1,3222 @@
+/*
+	mv_sas.c - Marvell 88SE6440 SAS/SATA support
+
+	Copyright 2007 Red Hat, Inc.
+	Copyright 2008 Marvell. <kewei@marvell.com>
+
+	This program is free software; you can redistribute it and/or
+	modify it under the terms of the GNU General Public License as
+	published by the Free Software Foundation; either version 2,
+	or (at your option) any later version.
+
+	This program is distributed in the hope that it will be useful,
+	but WITHOUT ANY WARRANTY; without even the implied warranty
+	of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+	See the GNU General Public License for more details.
+
+	You should have received a copy of the GNU General Public
+	License along with this program; see the file COPYING.	If not,
+	write to the Free Software Foundation, 675 Mass Ave, Cambridge,
+	MA 02139, USA.
+
+	---------------------------------------------------------------
+
+	Random notes:
+	* the hardware supports controlling the endianness of its data
+	  structures; this permits elimination of all the le32_to_cpu()
+	  and cpu_to_le32() conversions.
+
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/ctype.h>
+#include <scsi/libsas.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/sas_ata.h>
+#include <asm/io.h>
+
+#define DRV_NAME	"mvsas"
+#define DRV_VERSION	"0.5.2"
+#define _MV_DUMP	0
+#define MVS_DISABLE_NVRAM
+#define MVS_DISABLE_MSI
+
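+/* BAR4 register accessors; mw32_f reads back to flush the posted PCI write */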
+#define mr32(reg)	readl(regs + MVS_##reg)
+#define mw32(reg,val)	writel((val), regs + MVS_##reg)
+#define mw32_f(reg,val)	do {			\
+	writel((val), regs + MVS_##reg);	\
+	readl(regs + MVS_##reg);		\
+	} while (0)
+
+#define MVS_ID_NOT_MAPPED	0x7f
+#define MVS_CHIP_SLOT_SZ	(1U << mvi->chip->slot_width)
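+/* note: MVS_CHIP_SLOT_SZ implicitly uses a local "mvi" at each use site */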
+
+/* offset for D2H FIS in the Received FIS List Structure */
+#define SATA_RECEIVED_D2H_FIS(reg_set)	\
+	((void *) mvi->rx_fis + 0x400 + 0x100 * reg_set + 0x40)
+#define SATA_RECEIVED_PIO_FIS(reg_set)	\
+	((void *) mvi->rx_fis + 0x400 + 0x100 * reg_set + 0x20)
+#define UNASSOC_D2H_FIS(id)		\
+	((void *) mvi->rx_fis + 0x100 * id)
+
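+/* iterate phy index __lseq while right-shifting the mask copy in __mc */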
+#define for_each_phy(__lseq_mask, __mc, __lseq, __rest)			\
+	for ((__mc) = (__lseq_mask), (__lseq) = 0;			\
+					(__mc) != 0 && __rest;		\
+					(++__lseq), (__mc) >>= 1)
+
+/* driver compile-time configuration */
+enum driver_configuration {
+	MVS_TX_RING_SZ		= 1024,	/* TX ring size (12-bit) */
+	MVS_RX_RING_SZ		= 1024, /* RX ring size (12-bit) */
+					/* software requires power-of-2
+					   ring size */
+
+	MVS_SLOTS		= 512,	/* command slots */
+	MVS_SLOT_BUF_SZ		= 8192, /* cmd tbl + IU + status + PRD */
+	MVS_SSP_CMD_SZ		= 64,	/* SSP command table buffer size */
+	MVS_ATA_CMD_SZ		= 96,	/* SATA command table buffer size */
+	MVS_OAF_SZ		= 64,	/* Open address frame buffer size */
+
+	MVS_RX_FIS_COUNT	= 17,	/* Optional rx'd FISs (max 17) */
+
+	MVS_QUEUE_SIZE		= 30,	/* Support Queue depth */
+	MVS_CAN_QUEUE		= MVS_SLOTS - 1,	/* SCSI Queue depth */
+};
+
+/* unchangeable hardware details */
+enum hardware_details {
+	MVS_MAX_PHYS		= 8,	/* max. possible phys */
+	MVS_MAX_PORTS		= 8,	/* max. possible ports */
+	MVS_RX_FISL_SZ		= 0x400 + (MVS_RX_FIS_COUNT * 0x100),
+};
+
+/* peripheral registers (BAR2) */
+enum peripheral_registers {
+	SPI_CTL			= 0x10,	/* EEPROM control */
+	SPI_CMD			= 0x14,	/* EEPROM command */
+	SPI_DATA		= 0x18, /* EEPROM data */
+};
+
+enum peripheral_register_bits {
+	TWSI_RDY		= (1U << 7),	/* EEPROM interface ready */
+	TWSI_RD			= (1U << 4),	/* EEPROM read access */
+
+	SPI_ADDR_MASK		= 0x3ffff,	/* bits 17:0 */
+};
+
+/* enhanced mode registers (BAR4) */
+enum hw_registers {
+	MVS_GBL_CTL		= 0x04,  /* global control */
+	MVS_GBL_INT_STAT	= 0x08,  /* global irq status */
+	MVS_GBL_PI		= 0x0C,  /* ports implemented bitmask */
+	MVS_GBL_PORT_TYPE	= 0xa0,  /* port type */
+
+	MVS_CTL			= 0x100, /* SAS/SATA port configuration */
+	MVS_PCS			= 0x104, /* SAS/SATA port control/status */
+	MVS_CMD_LIST_LO		= 0x108, /* cmd list addr */
+	MVS_CMD_LIST_HI		= 0x10C,
+	MVS_RX_FIS_LO		= 0x110, /* RX FIS list addr */
+	MVS_RX_FIS_HI		= 0x114,
+
+	MVS_TX_CFG		= 0x120, /* TX configuration */
+	MVS_TX_LO		= 0x124, /* TX (delivery) ring addr */
+	MVS_TX_HI		= 0x128,
+
+	MVS_TX_PROD_IDX		= 0x12C, /* TX producer pointer */
+	MVS_TX_CONS_IDX		= 0x130, /* TX consumer pointer (RO) */
+	MVS_RX_CFG		= 0x134, /* RX configuration */
+	MVS_RX_LO		= 0x138, /* RX (completion) ring addr */
+	MVS_RX_HI		= 0x13C,
+	MVS_RX_CONS_IDX		= 0x140, /* RX consumer pointer (RO) */
+
+	MVS_INT_COAL		= 0x148, /* Int coalescing config */
+	MVS_INT_COAL_TMOUT	= 0x14C, /* Int coalescing timeout */
+	MVS_INT_STAT		= 0x150, /* Central int status */
+	MVS_INT_MASK		= 0x154, /* Central int enable */
+	MVS_INT_STAT_SRS	= 0x158, /* SATA register set status */
+	MVS_INT_MASK_SRS	= 0x15C,
+
+					 /* ports 1-3 follow after this */
+	MVS_P0_INT_STAT		= 0x160, /* port0 interrupt status */
+	MVS_P0_INT_MASK		= 0x164, /* port0 interrupt mask */
+	MVS_P4_INT_STAT		= 0x200, /* Port 4 interrupt status */
+	MVS_P4_INT_MASK		= 0x204, /* Port 4 interrupt enable mask */
+
+					 /* ports 1-3 follow after this */
+	MVS_P0_SER_CTLSTAT	= 0x180, /* port0 serial control/status */
+	MVS_P4_SER_CTLSTAT	= 0x220, /* port4 serial control/status */
+
+	MVS_CMD_ADDR		= 0x1B8, /* Command register port (addr) */
+	MVS_CMD_DATA		= 0x1BC, /* Command register port (data) */
+
+					 /* ports 1-3 follow after this */
+	MVS_P0_CFG_ADDR		= 0x1C0, /* port0 phy register address */
+	MVS_P0_CFG_DATA		= 0x1C4, /* port0 phy register data */
+	MVS_P4_CFG_ADDR		= 0x230, /* Port 4 config address */
+	MVS_P4_CFG_DATA		= 0x234, /* Port 4 config data */
+
+					 /* ports 1-3 follow after this */
+	MVS_P0_VSR_ADDR		= 0x1E0, /* port0 VSR address */
+	MVS_P0_VSR_DATA		= 0x1E4, /* port0 VSR data */
+	MVS_P4_VSR_ADDR		= 0x250, /* port 4 VSR addr */
+	MVS_P4_VSR_DATA		= 0x254, /* port 4 VSR data */
+};
+
+enum hw_register_bits {
+	/* MVS_GBL_CTL */
+	INT_EN			= (1U << 1),	/* Global int enable */
+	HBA_RST			= (1U << 0),	/* HBA reset */
+
+	/* MVS_GBL_INT_STAT */
+	INT_XOR			= (1U << 4),	/* XOR engine event */
+	INT_SAS_SATA		= (1U << 0),	/* SAS/SATA event */
+
+	/* MVS_GBL_PORT_TYPE */			/* shl for ports 1-3 */
+	SATA_TARGET		= (1U << 16),	/* port0 SATA target enable */
+	MODE_AUTO_DET_PORT7 = (1U << 15),	/* port0 SAS/SATA autodetect */
+	MODE_AUTO_DET_PORT6 = (1U << 14),
+	MODE_AUTO_DET_PORT5 = (1U << 13),
+	MODE_AUTO_DET_PORT4 = (1U << 12),
+	MODE_AUTO_DET_PORT3 = (1U << 11),
+	MODE_AUTO_DET_PORT2 = (1U << 10),
+	MODE_AUTO_DET_PORT1 = (1U << 9),
+	MODE_AUTO_DET_PORT0 = (1U << 8),
+	MODE_AUTO_DET_EN    =	MODE_AUTO_DET_PORT0 | MODE_AUTO_DET_PORT1 |
+				MODE_AUTO_DET_PORT2 | MODE_AUTO_DET_PORT3 |
+				MODE_AUTO_DET_PORT4 | MODE_AUTO_DET_PORT5 |
+				MODE_AUTO_DET_PORT6 | MODE_AUTO_DET_PORT7,
+	MODE_SAS_PORT7_MASK = (1U << 7),  /* port0 SAS(1), SATA(0) mode */
+	MODE_SAS_PORT6_MASK = (1U << 6),
+	MODE_SAS_PORT5_MASK = (1U << 5),
+	MODE_SAS_PORT4_MASK = (1U << 4),
+	MODE_SAS_PORT3_MASK = (1U << 3),
+	MODE_SAS_PORT2_MASK = (1U << 2),
+	MODE_SAS_PORT1_MASK = (1U << 1),
+	MODE_SAS_PORT0_MASK = (1U << 0),
+	MODE_SAS_SATA	=	MODE_SAS_PORT0_MASK | MODE_SAS_PORT1_MASK |
+				MODE_SAS_PORT2_MASK | MODE_SAS_PORT3_MASK |
+				MODE_SAS_PORT4_MASK | MODE_SAS_PORT5_MASK |
+				MODE_SAS_PORT6_MASK | MODE_SAS_PORT7_MASK,
+
+				/* SAS_MODE value may be
+				 * dictated (in hw) by values
+				 * of SATA_TARGET & AUTO_DET
+				 */
+
+	/* MVS_TX_CFG */
+	TX_EN			= (1U << 16),	/* Enable TX */
+	TX_RING_SZ_MASK		= 0xfff,	/* TX ring size, bits 11:0 */
+
+	/* MVS_RX_CFG */
+	RX_EN			= (1U << 16),	/* Enable RX */
+	RX_RING_SZ_MASK		= 0xfff,	/* RX ring size, bits 11:0 */
+
+	/* MVS_INT_COAL */
+	COAL_EN			= (1U << 16),	/* Enable int coalescing */
+
+	/* MVS_INT_STAT, MVS_INT_MASK */
+	CINT_I2C		= (1U << 31),	/* I2C event */
+	CINT_SW0		= (1U << 30),	/* software event 0 */
+	CINT_SW1		= (1U << 29),	/* software event 1 */
+	CINT_PRD_BC		= (1U << 28),	/* PRD BC err for read cmd */
+	CINT_DMA_PCIE		= (1U << 27),	/* DMA to PCIE timeout */
+	CINT_MEM		= (1U << 26),	/* int mem parity err */
+	CINT_I2C_SLAVE		= (1U << 25),	/* slave I2C event */
+	CINT_SRS		= (1U << 3),	/* SRS event */
+	CINT_CI_STOP		= (1U << 1),	/* cmd issue stopped */
+	CINT_DONE		= (1U << 0),	/* cmd completion */
+
+						/* shl for ports 1-3 */
+	CINT_PORT_STOPPED	= (1U << 16),	/* port0 stopped */
+	CINT_PORT		= (1U << 8),	/* port0 event */
+	CINT_PORT_MASK_OFFSET	= 8,
+	CINT_PORT_MASK		= (0xFF << CINT_PORT_MASK_OFFSET),
+
+	/* TX (delivery) ring bits */
+	TXQ_CMD_SHIFT		= 29,
+	TXQ_CMD_SSP		= 1,		/* SSP protocol */
+	TXQ_CMD_SMP		= 2,		/* SMP protocol */
+	TXQ_CMD_STP		= 3,		/* STP/SATA protocol */
+	TXQ_CMD_SSP_FREE_LIST	= 4,		/* add to SSP targ free list */
+	TXQ_CMD_SLOT_RESET	= 7,		/* reset command slot */
+	TXQ_MODE_I		= (1U << 28),	/* mode: 0=target,1=initiator */
+	TXQ_PRIO_HI		= (1U << 27),	/* priority: 0=normal, 1=high */
+	TXQ_SRS_SHIFT		= 20,		/* SATA register set */
+	TXQ_SRS_MASK		= 0x7f,
+	TXQ_PHY_SHIFT		= 12,		/* PHY bitmap */
+	TXQ_PHY_MASK		= 0xff,
+	TXQ_SLOT_MASK		= 0xfff,	/* slot number */
+
+	/* RX (completion) ring bits */
+	RXQ_GOOD		= (1U << 23),	/* Response good */
+	RXQ_SLOT_RESET		= (1U << 21),	/* Slot reset complete */
+	RXQ_CMD_RX		= (1U << 20),	/* target cmd received */
+	RXQ_ATTN		= (1U << 19),	/* attention */
+	RXQ_RSP			= (1U << 18),	/* response frame xfer'd */
+	RXQ_ERR			= (1U << 17),	/* err info rec xfer'd */
+	RXQ_DONE		= (1U << 16),	/* cmd complete */
+	RXQ_SLOT_MASK		= 0xfff,	/* slot number */
+
+	/* mvs_cmd_hdr bits */
+	MCH_PRD_LEN_SHIFT	= 16,		/* 16-bit PRD table len */
+	MCH_SSP_FR_TYPE_SHIFT	= 13,		/* SSP frame type */
+
+						/* SSP initiator only */
+	MCH_SSP_FR_CMD		= 0x0,		/* COMMAND frame */
+
+						/* SSP initiator or target */
+	MCH_SSP_FR_TASK		= 0x1,		/* TASK frame */
+
+						/* SSP target only */
+	MCH_SSP_FR_XFER_RDY	= 0x4,		/* XFER_RDY frame */
+	MCH_SSP_FR_RESP		= 0x5,		/* RESPONSE frame */
+	MCH_SSP_FR_READ		= 0x6,		/* Read DATA frame(s) */
+	MCH_SSP_FR_READ_RESP	= 0x7,		/* ditto, plus RESPONSE */
+
+	MCH_PASSTHRU		= (1U << 12),	/* pass-through (SSP) */
+	MCH_FBURST		= (1U << 11),	/* first burst (SSP) */
+	MCH_CHK_LEN		= (1U << 10),	/* chk xfer len (SSP) */
+	MCH_RETRY		= (1U << 9),	/* tport layer retry (SSP) */
+	MCH_PROTECTION		= (1U << 8),	/* protection info rec (SSP) */
+	MCH_RESET		= (1U << 7),	/* Reset (STP/SATA) */
+	MCH_FPDMA		= (1U << 6),	/* First party DMA (STP/SATA) */
+	MCH_ATAPI		= (1U << 5),	/* ATAPI (STP/SATA) */
+	MCH_BIST		= (1U << 4),	/* BIST activate (STP/SATA) */
+	MCH_PMP_MASK		= 0xf,		/* PMP from cmd FIS (STP/SATA)*/
+
+	CCTL_RST		= (1U << 5),	/* port logic reset */
+
+						/* 0(LSB first), 1(MSB first) */
+	CCTL_ENDIAN_DATA	= (1U << 3),	/* PRD data */
+	CCTL_ENDIAN_RSP		= (1U << 2),	/* response frame */
+	CCTL_ENDIAN_OPEN	= (1U << 1),	/* open address frame */
+	CCTL_ENDIAN_CMD		= (1U << 0),	/* command table */
+
+	/* MVS_Px_SER_CTLSTAT (per-phy control) */
+	PHY_SSP_RST		= (1U << 3),	/* reset SSP link layer */
+	PHY_BCAST_CHG		= (1U << 2),	/* broadcast(change) notif */
+	PHY_RST_HARD		= (1U << 1),	/* hard reset + phy reset */
+	PHY_RST			= (1U << 0),	/* phy reset */
+	PHY_MIN_SPP_PHYS_LINK_RATE_MASK = (0xF << 8),
+	PHY_MAX_SPP_PHYS_LINK_RATE_MASK = (0xF << 12),
+	PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET = (16),
+	PHY_NEG_SPP_PHYS_LINK_RATE_MASK =
+			(0xF << PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET),
+	PHY_READY_MASK		= (1U << 20),
+
+	/* MVS_Px_INT_STAT, MVS_Px_INT_MASK (per-phy events) */
+	PHYEV_DEC_ERR		= (1U << 24),	/* Phy Decoding Error */
+	PHYEV_UNASSOC_FIS	= (1U << 19),	/* unassociated FIS rx'd */
+	PHYEV_AN		= (1U << 18),	/* SATA async notification */
+	PHYEV_BIST_ACT		= (1U << 17),	/* BIST activate FIS */
+	PHYEV_SIG_FIS		= (1U << 16),	/* signature FIS */
+	PHYEV_POOF		= (1U << 12),	/* phy ready from 1 -> 0 */
+	PHYEV_IU_BIG		= (1U << 11),	/* IU too long err */
+	PHYEV_IU_SMALL		= (1U << 10),	/* IU too short err */
+	PHYEV_UNK_TAG		= (1U << 9),	/* unknown tag */
+	PHYEV_BROAD_CH		= (1U << 8),	/* broadcast(CHANGE) */
+	PHYEV_COMWAKE		= (1U << 7),	/* COMWAKE rx'd */
+	PHYEV_PORT_SEL		= (1U << 6),	/* port selector present */
+	PHYEV_HARD_RST		= (1U << 5),	/* hard reset rx'd */
+	PHYEV_ID_TMOUT		= (1U << 4),	/* identify timeout */
+	PHYEV_ID_FAIL		= (1U << 3),	/* identify failed */
+	PHYEV_ID_DONE		= (1U << 2),	/* identify done */
+	PHYEV_HARD_RST_DONE	= (1U << 1),	/* hard reset done */
+	PHYEV_RDY_CH		= (1U << 0),	/* phy ready changed state */
+
+	/* MVS_PCS */
+	PCS_EN_SATA_REG_SHIFT	= (16),		/* Enable SATA Register Set */
+	PCS_EN_PORT_XMT_SHIFT	= (12),		/* Enable Port Transmit */
+	PCS_EN_PORT_XMT_SHIFT2	= (8),		/* For 6480 */
+	PCS_SATA_RETRY		= (1U << 8),	/* retry ctl FIS on R_ERR */
+	PCS_RSP_RX_EN		= (1U << 7),	/* raw response rx */
+	PCS_SELF_CLEAR		= (1U << 5),	/* self-clearing int mode */
+	PCS_FIS_RX_EN		= (1U << 4),	/* FIS rx enable */
+	PCS_CMD_STOP_ERR	= (1U << 3),	/* cmd stop-on-err enable */
+	PCS_CMD_RST		= (1U << 1),	/* reset cmd issue */
+	PCS_CMD_EN		= (1U << 0),	/* enable cmd issue */
+
+	/* Port n Attached Device Info */
+	PORT_DEV_SSP_TRGT	= (1U << 19),
+	PORT_DEV_SMP_TRGT	= (1U << 18),
+	PORT_DEV_STP_TRGT	= (1U << 17),
+	PORT_DEV_SSP_INIT	= (1U << 11),
+	PORT_DEV_SMP_INIT	= (1U << 10),
+	PORT_DEV_STP_INIT	= (1U << 9),
+	PORT_PHY_ID_MASK	= (0xFFU << 24),
+	PORT_DEV_TRGT_MASK	= (0x7U << 17),
+	PORT_DEV_INIT_MASK	= (0x7U << 9),
+	PORT_DEV_TYPE_MASK	= (0x7U << 0),
+
+	/* Port n PHY Status */
+	PHY_RDY			= (1U << 2),
+	PHY_DW_SYNC		= (1U << 1),
+	PHY_OOB_DTCTD		= (1U << 0),
+
+	/* VSR */
+	/* PHYMODE 6 (CDB) */
+	PHY_MODE6_LATECLK	= (1U << 29),	/* Lock Clock */
+	PHY_MODE6_DTL_SPEED	= (1U << 27),	/* Digital Loop Speed */
+	PHY_MODE6_FC_ORDER	= (1U << 26),	/* Fibre Channel Mode Order*/
+	PHY_MODE6_MUCNT_EN	= (1U << 24),	/* u Count Enable */
+	PHY_MODE6_SEL_MUCNT_LEN	= (1U << 22),	/* Training Length Select */
+	PHY_MODE6_SELMUPI	= (1U << 20),	/* Phase Multi Select (init) */
+	PHY_MODE6_SELMUPF	= (1U << 18),	/* Phase Multi Select (final) */
+	PHY_MODE6_SELMUFF	= (1U << 16),	/* Freq Loop Multi Sel(final) */
+	PHY_MODE6_SELMUFI	= (1U << 14),	/* Freq Loop Multi Sel(init) */
+	PHY_MODE6_FREEZE_LOOP	= (1U << 12),	/* Freeze Rx CDR Loop */
+	PHY_MODE6_INT_RXFOFFS	= (1U << 3),	/* Rx CDR Freq Loop Enable */
+	PHY_MODE6_FRC_RXFOFFS	= (1U << 2),	/* Initial Rx CDR Offset */
+	PHY_MODE6_STAU_0D8	= (1U << 1),	/* Rx CDR Freq Loop Saturate */
+	PHY_MODE6_RXSAT_DIS	= (1U << 0),	/* Saturate Ctl */
+};
+
+enum mvs_info_flags {
+	MVF_MSI			= (1U << 0),	/* MSI is enabled */
+	MVF_PHY_PWR_FIX		= (1U << 1),	/* bug workaround */
+};
+
+enum sas_cmd_port_registers {
+	CMD_CMRST_OOB_DET	= 0x100, /* COMRESET OOB detect register */
+	CMD_CMWK_OOB_DET	= 0x104, /* COMWAKE OOB detect register */
+	CMD_CMSAS_OOB_DET	= 0x108, /* COMSAS OOB detect register */
+	CMD_BRST_OOB_DET	= 0x10c, /* burst OOB detect register */
+	CMD_OOB_SPACE		= 0x110, /* OOB space control register */
+	CMD_OOB_BURST		= 0x114, /* OOB burst control register */
+	CMD_PHY_TIMER		= 0x118, /* PHY timer control register */
+	CMD_PHY_CONFIG0		= 0x11c, /* PHY config register 0 */
+	CMD_PHY_CONFIG1		= 0x120, /* PHY config register 1 */
+	CMD_SAS_CTL0		= 0x124, /* SAS control register 0 */
+	CMD_SAS_CTL1		= 0x128, /* SAS control register 1 */
+	CMD_SAS_CTL2		= 0x12c, /* SAS control register 2 */
+	CMD_SAS_CTL3		= 0x130, /* SAS control register 3 */
+	CMD_ID_TEST		= 0x134, /* ID test register */
+	CMD_PL_TIMER		= 0x138, /* PL timer register */
+	CMD_WD_TIMER		= 0x13c, /* WD timer register */
+	CMD_PORT_SEL_COUNT	= 0x140, /* port selector count register */
+	CMD_APP_MEM_CTL		= 0x144, /* Application Memory Control */
+	CMD_XOR_MEM_CTL		= 0x148, /* XOR Block Memory Control */
+	CMD_DMA_MEM_CTL		= 0x14c, /* DMA Block Memory Control */
+	CMD_PORT_MEM_CTL0	= 0x150, /* Port Memory Control 0 */
+	CMD_PORT_MEM_CTL1	= 0x154, /* Port Memory Control 1 */
+	CMD_SATA_PORT_MEM_CTL0	= 0x158, /* SATA Port Memory Control 0 */
+	CMD_SATA_PORT_MEM_CTL1	= 0x15c, /* SATA Port Memory Control 1 */
+	CMD_XOR_MEM_BIST_CTL	= 0x160, /* XOR Memory BIST Control */
+	CMD_XOR_MEM_BIST_STAT	= 0x164, /* XOR Memory BIST Status */
+	CMD_DMA_MEM_BIST_CTL	= 0x168, /* DMA Memory BIST Control */
+	CMD_DMA_MEM_BIST_STAT	= 0x16c, /* DMA Memory BIST Status */
+	CMD_PORT_MEM_BIST_CTL	= 0x170, /* Port Memory BIST Control */
+	CMD_PORT_MEM_BIST_STAT0 = 0x174, /* Port Memory BIST Status 0 */
+	CMD_PORT_MEM_BIST_STAT1 = 0x178, /* Port Memory BIST Status 1 */
+	CMD_STP_MEM_BIST_CTL	= 0x17c, /* STP Memory BIST Control */
+	CMD_STP_MEM_BIST_STAT0	= 0x180, /* STP Memory BIST Status 0 */
+	CMD_STP_MEM_BIST_STAT1	= 0x184, /* STP Memory BIST Status 1 */
+	CMD_RESET_COUNT		= 0x188, /* Reset Count */
+	CMD_MONTR_DATA_SEL	= 0x18C, /* Monitor Data/Select */
+	CMD_PLL_PHY_CONFIG	= 0x190, /* PLL/PHY Configuration */
+	CMD_PHY_CTL		= 0x194, /* PHY Control and Status */
+	CMD_PHY_TEST_COUNT0	= 0x198, /* Phy Test Count 0 */
+	CMD_PHY_TEST_COUNT1	= 0x19C, /* Phy Test Count 1 */
+	CMD_PHY_TEST_COUNT2	= 0x1A0, /* Phy Test Count 2 */
+	CMD_APP_ERR_CONFIG	= 0x1A4, /* Application Error Configuration */
+	CMD_PND_FIFO_CTL0	= 0x1A8, /* Pending FIFO Control 0 */
+	CMD_HOST_CTL		= 0x1AC, /* Host Control Status */
+	CMD_HOST_WR_DATA	= 0x1B0, /* Host Write Data */
+	CMD_HOST_RD_DATA	= 0x1B4, /* Host Read Data */
+	CMD_PHY_MODE_21		= 0x1B8, /* Phy Mode 21 */
+	CMD_SL_MODE0		= 0x1BC, /* SL Mode 0 */
+	CMD_SL_MODE1		= 0x1C0, /* SL Mode 1 */
+	CMD_PND_FIFO_CTL1	= 0x1C4, /* Pending FIFO Control 1 */
+};
+
+/* SAS/SATA configuration port registers, aka phy registers */
+enum sas_sata_config_port_regs {
+	PHYR_IDENTIFY		= 0x00,	/* info for IDENTIFY frame */
+	PHYR_ADDR_LO		= 0x04,	/* my SAS address (low) */
+	PHYR_ADDR_HI		= 0x08,	/* my SAS address (high) */
+	PHYR_ATT_DEV_INFO	= 0x0C,	/* attached device info */
+	PHYR_ATT_ADDR_LO	= 0x10,	/* attached dev SAS addr (low) */
+	PHYR_ATT_ADDR_HI	= 0x14,	/* attached dev SAS addr (high) */
+	PHYR_SATA_CTL		= 0x18,	/* SATA control */
+	PHYR_PHY_STAT		= 0x1C,	/* PHY status */
+	PHYR_SATA_SIG0		= 0x20,	/* port SATA signature FIS (bytes 0-3) */
+	PHYR_SATA_SIG1		= 0x24,	/* port SATA signature FIS (bytes 4-7) */
+	PHYR_SATA_SIG2		= 0x28,	/* port SATA signature FIS (bytes 8-11) */
+	PHYR_SATA_SIG3		= 0x2c,	/* port SATA signature FIS (bytes 12-15) */
+	PHYR_R_ERR_COUNT	= 0x30, /* port R_ERR count register */
+	PHYR_CRC_ERR_COUNT	= 0x34, /* port CRC error count register */
+	PHYR_WIDE_PORT		= 0x38,	/* wide port participating */
+	PHYR_CURRENT0		= 0x80,	/* current connection info 0 */
+	PHYR_CURRENT1		= 0x84,	/* current connection info 1 */
+	PHYR_CURRENT2		= 0x88,	/* current connection info 2 */
+};
+
+/*  SAS/SATA Vendor Specific Port Registers */
+enum sas_sata_vsp_regs {
+	VSR_PHY_STAT		= 0x00, /* Phy Status */
+	VSR_PHY_MODE1		= 0x01, /* phy tx */
+	VSR_PHY_MODE2		= 0x02, /* tx scc */
+	VSR_PHY_MODE3		= 0x03, /* pll */
+	VSR_PHY_MODE4		= 0x04, /* VCO */
+	VSR_PHY_MODE5		= 0x05, /* Rx */
+	VSR_PHY_MODE6		= 0x06, /* CDR */
+	VSR_PHY_MODE7		= 0x07, /* Impedance */
+	VSR_PHY_MODE8		= 0x08, /* Voltage */
+	VSR_PHY_MODE9		= 0x09, /* Test */
+	VSR_PHY_MODE10		= 0x0A, /* Power */
+	VSR_PHY_MODE11		= 0x0B, /* Phy Mode */
+	VSR_PHY_VS0		= 0x0C, /* Vendor Specific 0 */
+	VSR_PHY_VS1		= 0x0D, /* Vendor Specific 1 */
+};
+
+enum pci_cfg_registers {
+	PCR_PHY_CTL	= 0x40,
+	PCR_PHY_CTL2	= 0x90,
+	PCR_DEV_CTRL	= 0xE8,
+};
+
+enum pci_cfg_register_bits {
+	PCTL_PWR_ON	= (0xFU << 24),
+	PCTL_OFF	= (0xFU << 12),
+	PRD_REQ_SIZE	= (0x4000),
+	PRD_REQ_MASK	= (0x00007000),
+};
+
+enum nvram_layout_offsets {
+	NVR_SIG		= 0x00,		/* 0xAA, 0x55 */
+	NVR_SAS_ADDR	= 0x02,		/* 8-byte SAS address */
+};
+
+enum chip_flavors {
+	chip_6320,
+	chip_6440,
+	chip_6480,
+};
+
+enum port_type {
+	PORT_TYPE_SAS	=  (1L << 1),
+	PORT_TYPE_SATA	=  (1L << 0),
+};
+
+/* Command Table Format */
+enum ct_format {
+	/* SSP */
+	SSP_F_H		=  0x00,
+	SSP_F_IU	=  0x18,
+	SSP_F_MAX	=  0x4D,
+	/* STP */
+	STP_CMD_FIS	=  0x00,
+	STP_ATAPI_CMD	=  0x40,
+	STP_F_MAX	=  0x10,
+	/* SMP */
+	SMP_F_T		=  0x00,
+	SMP_F_DEP	=  0x01,
+	SMP_F_MAX	=  0x101,
+};
+
+enum status_buffer {
+	SB_EIR_OFF	=  0x00,	/* Error Information Record */
+	SB_RFB_OFF	=  0x08,	/* Response Frame Buffer */
+	SB_RFB_MAX	=  0x400,	/* RFB size */
+};
+
+enum error_info_rec {
+	CMD_ISS_STPD	= (1U << 31),	/* Cmd Issue Stopped */
+	CMD_PI_ERR	= (1U << 30),	/* Protection info error.  see flags2 */
+	RSP_OVER	= (1U << 29),	/* rsp buffer overflow */
+	RETRY_LIM	= (1U << 28),	/* FIS/frame retry limit exceeded */
+	UNK_FIS 	= (1U << 27),	/* unknown FIS */
+	DMA_TERM	= (1U << 26),	/* DMA terminate primitive rx'd */
+	SYNC_ERR	= (1U << 25),	/* SYNC rx'd during frame xmit */
+	TFILE_ERR	= (1U << 24),	/* SATA taskfile Error bit set */
+	R_ERR		= (1U << 23),	/* SATA returned R_ERR prim */
+	RD_OFS		= (1U << 20),	/* Read DATA frame invalid offset */
+	XFER_RDY_OFS	= (1U << 19),	/* XFER_RDY offset error */
+	UNEXP_XFER_RDY	= (1U << 18),	/* unexpected XFER_RDY error */
+	DATA_OVER_UNDER = (1U << 16),	/* data overflow/underflow */
+	INTERLOCK	= (1U << 15),	/* interlock error */
+	NAK		= (1U << 14),	/* NAK rx'd */
+	ACK_NAK_TO	= (1U << 13),	/* ACK/NAK timeout */
+	CXN_CLOSED	= (1U << 12),	/* cxn closed w/out ack/nak */
+	OPEN_TO 	= (1U << 11),	/* I_T nexus lost, open cxn timeout */
+	PATH_BLOCKED	= (1U << 10),	/* I_T nexus lost, pathway blocked */
+	NO_DEST 	= (1U << 9),	/* I_T nexus lost, no destination */
+	STP_RES_BSY	= (1U << 8),	/* STP resources busy */
+	BREAK		= (1U << 7),	/* break received */
+	BAD_DEST	= (1U << 6),	/* bad destination */
+	BAD_PROTO	= (1U << 5),	/* protocol not supported */
+	BAD_RATE	= (1U << 4),	/* cxn rate not supported */
+	WRONG_DEST	= (1U << 3),	/* wrong destination error */
+	CREDIT_TO	= (1U << 2),	/* credit timeout */
+	WDOG_TO 	= (1U << 1),	/* watchdog timeout */
+	BUF_PAR 	= (1U << 0),	/* buffer parity error */
+};
+
+enum error_info_rec_2 {
+	SLOT_BSY_ERR	= (1U << 31),	/* Slot Busy Error */
+	GRD_CHK_ERR	= (1U << 14),	/* Guard Check Error */
+	APP_CHK_ERR	= (1U << 13),	/* Application Check error */
+	REF_CHK_ERR	= (1U << 12),	/* Reference Check Error */
+	USR_BLK_NM	= (1U << 0),	/* User Block Number */
+};
+
+struct mvs_chip_info {
+	u32		n_phy;
+	u32		srs_sz;
+	u32		slot_width;
+};
+
+struct mvs_err_info {
+	__le32			flags;
+	__le32			flags2;
+};
+
+struct mvs_prd {
+	__le64			addr;		/* 64-bit buffer address */
+	__le32			reserved;
+	__le32			len;		/* 16-bit length */
+};
+
+struct mvs_cmd_hdr {
+	__le32			flags;		/* PRD tbl len; SAS, SATA ctl */
+	__le32			lens;		/* cmd, max resp frame len */
+	__le32			tags;		/* targ port xfer tag; tag */
+	__le32			data_len;	/* data xfer len */
+	__le64			cmd_tbl;	/* command table address */
+	__le64			open_frame;	/* open addr frame address */
+	__le64			status_buf;	/* status buffer address */
+	__le64			prd_tbl;	/* PRD tbl address */
+	__le32			reserved[4];
+};
+
+struct mvs_port {
+	struct asd_sas_port	sas_port;
+	u8			port_attached;
+	u8			taskfileset;
+	u8			wide_port_phymap;
+	struct list_head	list;
+};
+
+struct mvs_phy {
+	struct mvs_port		*port;
+	struct asd_sas_phy	sas_phy;
+	struct sas_identify	identify;
+	struct scsi_device	*sdev;
+	u64		dev_sas_addr;
+	u64		att_dev_sas_addr;
+	u32		att_dev_info;
+	u32		dev_info;
+	u32		phy_type;
+	u32		phy_status;
+	u32		irq_status;
+	u32		frame_rcvd_size;
+	u8		frame_rcvd[32];
+	u8		phy_attached;
+	enum sas_linkrate	minimum_linkrate;
+	enum sas_linkrate	maximum_linkrate;
+};
+
+struct mvs_slot_info {
+	struct list_head	list;
+	struct sas_task		*task;
+	u32			n_elem;
+	u32			tx;
+
+	/* DMA buffer for storing cmd tbl, open addr frame, status buffer,
+	 * and PRD table
+	 */
+	void			*buf;
+	dma_addr_t		buf_dma;
+#if _MV_DUMP
+	u32			cmd_size;
+#endif
+
+	void			*response;
+	struct mvs_port		*port;
+};
+
+struct mvs_info {
+	unsigned long		flags;
+
+	spinlock_t		lock;		/* host-wide lock */
+	struct pci_dev		*pdev;		/* our device */
+	void __iomem		*regs;		/* enhanced mode registers */
+	void __iomem		*peri_regs;	/* peripheral registers */
+
+	u8			sas_addr[SAS_ADDR_SIZE];
+	struct sas_ha_struct	sas;		/* SCSI/SAS glue */
+	struct Scsi_Host	*shost;
+
+	__le32			*tx;		/* TX (delivery) DMA ring */
+	dma_addr_t		tx_dma;
+	u32			tx_prod;	/* cached next-producer idx */
+
+	__le32			*rx;		/* RX (completion) DMA ring */
+	dma_addr_t		rx_dma;
+	u32			rx_cons;	/* RX consumer idx */
+
+	__le32			*rx_fis;	/* RX'd FIS area */
+	dma_addr_t		rx_fis_dma;
+
+	struct mvs_cmd_hdr	*slot;	/* DMA command header slots */
+	dma_addr_t		slot_dma;
+
+	const struct mvs_chip_info *chip;
+
+	u8			tags[MVS_SLOTS];
+	struct mvs_slot_info	slot_info[MVS_SLOTS];
+				/* further per-slot information */
+	struct mvs_phy		phy[MVS_MAX_PHYS];
+	struct mvs_port		port[MVS_MAX_PHYS];
+#ifdef MVS_USE_TASKLET
+	struct tasklet_struct	tasklet;
+#endif
+};
+
+static int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
+			   void *funcdata);
+static u32 mvs_read_phy_ctl(struct mvs_info *mvi, u32 port);
+static void mvs_write_phy_ctl(struct mvs_info *mvi, u32 port, u32 val);
+static u32 mvs_read_port_irq_stat(struct mvs_info *mvi, u32 port);
+static void mvs_write_port_irq_stat(struct mvs_info *mvi, u32 port, u32 val);
+static void mvs_write_port_irq_mask(struct mvs_info *mvi, u32 port, u32 val);
+static u32 mvs_read_port_irq_mask(struct mvs_info *mvi, u32 port);
+
+static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i);
+static void mvs_detect_porttype(struct mvs_info *mvi, int i);
+static void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st);
+static void mvs_release_task(struct mvs_info *mvi, int phy_no);
+
+static int mvs_scan_finished(struct Scsi_Host *, unsigned long);
+static void mvs_scan_start(struct Scsi_Host *);
+static int mvs_slave_configure(struct scsi_device *sdev);
+
+static struct scsi_transport_template *mvs_stt;
+
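+/* Each entry is { n_phy, srs_sz, slot_width }, matching the fields of
+ * struct mvs_chip_info above.
+ */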
+static const struct mvs_chip_info mvs_chips[] = {
+	[chip_6320] =		{ 2, 16, 9  },
+	[chip_6440] =		{ 4, 16, 9  },
+	[chip_6480] =		{ 8, 32, 10 },
+};
+
+static struct scsi_host_template mvs_sht = {
+	.module			= THIS_MODULE,
+	.name			= DRV_NAME,
+	.queuecommand		= sas_queuecommand,
+	.target_alloc		= sas_target_alloc,
+	.slave_configure	= mvs_slave_configure,
+	.slave_destroy		= sas_slave_destroy,
+	.scan_finished		= mvs_scan_finished,
+	.scan_start		= mvs_scan_start,
+	.change_queue_depth	= sas_change_queue_depth,
+	.change_queue_type	= sas_change_queue_type,
+	.bios_param		= sas_bios_param,
+	.can_queue		= 1,
+	.cmd_per_lun		= 1,
+	.this_id		= -1,
+	.sg_tablesize		= SG_ALL,
+	.max_sectors		= SCSI_DEFAULT_MAX_SECTORS,
+	.use_clustering		= ENABLE_CLUSTERING,
+	.eh_device_reset_handler	= sas_eh_device_reset_handler,
+	.eh_bus_reset_handler	= sas_eh_bus_reset_handler,
+	.slave_alloc		= sas_slave_alloc,
+	.target_destroy		= sas_target_destroy,
+	.ioctl			= sas_ioctl,
+};
+
+static void mvs_hexdump(u32 size, u8 *data, u32 baseaddr)
+{
+	u32 i;
+	u32 run;
+	u32 offset;
+
+	offset = 0;
+	while (size) {
+		printk("%08X : ", baseaddr + offset);
+		if (size >= 16)
+			run = 16;
+		else
+			run = size;
+		size -= run;
+		for (i = 0; i < 16; i++) {
+			if (i < run)
+				printk("%02X ", (u32)data[i]);
+			else
+				printk("   ");
+		}
+		printk(": ");
+		for (i = 0; i < run; i++)
+			printk("%c", isalnum(data[i]) ? data[i] : '.');
+		printk("\n");
+		data = &data[16];
+		offset += run;
+	}
+	printk("\n");
+}
+
+#if _MV_DUMP
+static void mvs_hba_sb_dump(struct mvs_info *mvi, u32 tag,
+				   enum sas_protocol proto)
+{
+	u32 offset;
+	struct pci_dev *pdev = mvi->pdev;
+	struct mvs_slot_info *slot = &mvi->slot_info[tag];
+
+	offset = slot->cmd_size + MVS_OAF_SZ +
+	    sizeof(struct mvs_prd) * slot->n_elem;
+	dev_printk(KERN_DEBUG, &pdev->dev, "+---->Status buffer[%d] :\n",
+			tag);
+	mvs_hexdump(32, (u8 *) slot->response,
+		    (u32) slot->buf_dma + offset);
+}
+#endif
+
+static void mvs_hba_memory_dump(struct mvs_info *mvi, u32 tag,
+				enum sas_protocol proto)
+{
+#if _MV_DUMP
+	u32 sz, w_ptr;
+	u64 addr;
+	void __iomem *regs = mvi->regs;
+	struct pci_dev *pdev = mvi->pdev;
+	struct mvs_slot_info *slot = &mvi->slot_info[tag];
+
+	/* Delivery Queue */
+	sz = mr32(TX_CFG) & TX_RING_SZ_MASK;
+	w_ptr = slot->tx;
+	addr = ((u64)mr32(TX_HI) << 32) | mr32(TX_LO);
+	dev_printk(KERN_DEBUG, &pdev->dev,
+		"Delivery Queue Size=%04d , WRT_PTR=%04X\n", sz, w_ptr);
+	dev_printk(KERN_DEBUG, &pdev->dev,
+		"Delivery Queue Base Address=0x%llX (PA)"
+		"(tx_dma=0x%llX), Entry=%04d\n",
+		addr, mvi->tx_dma, w_ptr);
+	mvs_hexdump(sizeof(u32), (u8 *)(&mvi->tx[mvi->tx_prod]),
+			(u32) mvi->tx_dma + sizeof(u32) * w_ptr);
+	/* Command List */
+	addr = mvi->slot_dma;
+	dev_printk(KERN_DEBUG, &pdev->dev,
+		"Command List Base Address=0x%llX (PA)"
+		"(slot_dma=0x%llX), Header=%03d\n",
+		addr, slot->buf_dma, tag);
+	dev_printk(KERN_DEBUG, &pdev->dev, "Command Header[%03d]:\n", tag);
+	/* mvs_cmd_hdr */
+	mvs_hexdump(sizeof(struct mvs_cmd_hdr), (u8 *)(&mvi->slot[tag]),
+		(u32) mvi->slot_dma + tag * sizeof(struct mvs_cmd_hdr));
+	/* 1. command table area */
+	dev_printk(KERN_DEBUG, &pdev->dev, "+---->Command Table :\n");
+	mvs_hexdump(slot->cmd_size, (u8 *) slot->buf, (u32) slot->buf_dma);
+	/* 2. open address frame area */
+	dev_printk(KERN_DEBUG, &pdev->dev, "+---->Open Address Frame :\n");
+	mvs_hexdump(MVS_OAF_SZ, (u8 *) slot->buf + slot->cmd_size,
+				(u32) slot->buf_dma + slot->cmd_size);
+	/* 3. status buffer */
+	mvs_hba_sb_dump(mvi, tag, proto);
+	/* 4. PRD table */
+	dev_printk(KERN_DEBUG, &pdev->dev, "+---->PRD table :\n");
+	mvs_hexdump(sizeof(struct mvs_prd) * slot->n_elem,
+		(u8 *) slot->buf + slot->cmd_size + MVS_OAF_SZ,
+		(u32) slot->buf_dma + slot->cmd_size + MVS_OAF_SZ);
+#endif
+}
+
+static void mvs_hba_cq_dump(struct mvs_info *mvi)
+{
+#if (_MV_DUMP > 2)
+	u64 addr;
+	void __iomem *regs = mvi->regs;
+	struct pci_dev *pdev = mvi->pdev;
+	u32 entry = mvi->rx_cons + 1;
+	u32 rx_desc = le32_to_cpu(mvi->rx[entry]);
+
+	/* Completion Queue */
+	addr = ((u64)mr32(RX_HI) << 32) | mr32(RX_LO);
+	dev_printk(KERN_DEBUG, &pdev->dev, "Completion Task = 0x%p\n",
+		   mvi->slot_info[rx_desc & RXQ_SLOT_MASK].task);
+	dev_printk(KERN_DEBUG, &pdev->dev,
+		"Completion List Base Address=0x%llX (PA), "
+		"CQ_Entry=%04d, CQ_WP=0x%08X\n",
+		addr, entry - 1, mvi->rx[0]);
+	mvs_hexdump(sizeof(u32), (u8 *)(&rx_desc),
+		    mvi->rx_dma + sizeof(u32) * entry);
+#endif
+}
+
+static void mvs_hba_interrupt_enable(struct mvs_info *mvi)
+{
+	void __iomem *regs = mvi->regs;
+	u32 tmp;
+
+	tmp = mr32(GBL_CTL);
+
+	mw32(GBL_CTL, tmp | INT_EN);
+}
+
+static void mvs_hba_interrupt_disable(struct mvs_info *mvi)
+{
+	void __iomem *regs = mvi->regs;
+	u32 tmp;
+
+	tmp = mr32(GBL_CTL);
+
+	mw32(GBL_CTL, tmp & ~INT_EN);
+}
+
+static int mvs_int_rx(struct mvs_info *mvi, bool self_clear);
+
+/* move to PCI layer or libata core? */
+static int pci_go_64(struct pci_dev *pdev)
+{
+	int rc;
+
+	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+		if (rc) {
+			rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+			if (rc) {
+				dev_printk(KERN_ERR, &pdev->dev,
+					   "64-bit DMA enable failed\n");
+				return rc;
+			}
+		}
+	} else {
+		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		if (rc) {
+			dev_printk(KERN_ERR, &pdev->dev,
+				   "32-bit DMA enable failed\n");
+			return rc;
+		}
+		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+		if (rc) {
+			dev_printk(KERN_ERR, &pdev->dev,
+				   "32-bit consistent DMA enable failed\n");
+			return rc;
+		}
+	}
+
+	return rc;
+}
+
+static int mvs_find_tag(struct mvs_info *mvi, struct sas_task *task, u32 *tag)
+{
+	if (task->lldd_task) {
+		struct mvs_slot_info *slot;
+		slot = (struct mvs_slot_info *) task->lldd_task;
+		*tag = slot - mvi->slot_info;
+		return 1;
+	}
+	return 0;
+}
+
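+/* Slot tags are tracked in the mvi->tags bitmap.  An illustrative
+ * lifecycle sketch (not a literal call site):
+ *
+ *	u32 tag;
+ *	if (mvs_tag_alloc(mvi, &tag))	-- returns 0 on success
+ *		return -SAS_QUEUE_FULL;
+ *	... use mvi->slot_info[tag], then release it on completion or
+ *	error with mvs_tag_free(mvi, tag);
+ */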
+static void mvs_tag_clear(struct mvs_info *mvi, u32 tag)
+{
+	void *bitmap = (void *) &mvi->tags;
+	clear_bit(tag, bitmap);
+}
+
+static void mvs_tag_free(struct mvs_info *mvi, u32 tag)
+{
+	mvs_tag_clear(mvi, tag);
+}
+
+static void mvs_tag_set(struct mvs_info *mvi, unsigned int tag)
+{
+	void *bitmap = (void *) &mvi->tags;
+	set_bit(tag, bitmap);
+}
+
+static int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out)
+{
+	unsigned int index, tag;
+	void *bitmap = (void *) &mvi->tags;
+
+	index = find_first_zero_bit(bitmap, MVS_SLOTS);
+	tag = index;
+	if (tag >= MVS_SLOTS)
+		return -SAS_QUEUE_FULL;
+	mvs_tag_set(mvi, tag);
+	*tag_out = tag;
+	return 0;
+}
+
+static void mvs_tag_init(struct mvs_info *mvi)
+{
+	int i;
+	for (i = 0; i < MVS_SLOTS; ++i)
+		mvs_tag_clear(mvi, i);
+}
+
+#ifndef MVS_DISABLE_NVRAM
+static int mvs_eep_read(void __iomem *regs, u32 addr, u32 *data)
+{
+	int timeout = 1000;
+
+	if (addr & ~SPI_ADDR_MASK)
+		return -EINVAL;
+
+	writel(addr, regs + SPI_CMD);
+	writel(TWSI_RD, regs + SPI_CTL);
+
+	while (timeout-- > 0) {
+		if (readl(regs + SPI_CTL) & TWSI_RDY) {
+			*data = readl(regs + SPI_DATA);
+			return 0;
+		}
+
+		udelay(10);
+	}
+
+	return -EBUSY;
+}
+
+static int mvs_eep_read_buf(void __iomem *regs, u32 addr,
+			    void *buf, u32 buflen)
+{
+	u32 addr_end, tmp_addr, i, j;
+	u32 tmp = 0;
+	int rc;
+	u8 *tmp8, *buf8 = buf;
+
+	addr_end = addr + buflen;
+	tmp_addr = ALIGN(addr, 4);
+	if (addr > 0xff)
+		return -EINVAL;
+
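+	/* read the EEPROM one aligned dword at a time: any unaligned
+	 * leading bytes first, then whole dwords, then the trailing
+	 * partial dword
+	 */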
+	j = addr & 0x3;
+	if (j) {
+		rc = mvs_eep_read(regs, tmp_addr, &tmp);
+		if (rc)
+			return rc;
+
+		tmp8 = (u8 *)&tmp;
+		for (i = j; i < 4; i++)
+			*buf8++ = tmp8[i];
+
+		tmp_addr += 4;
+	}
+
+	for (j = ALIGN(addr_end, 4); tmp_addr < j; tmp_addr += 4) {
+		rc = mvs_eep_read(regs, tmp_addr, &tmp);
+		if (rc)
+			return rc;
+
+		memcpy(buf8, &tmp, 4);
+		buf8 += 4;
+	}
+
+	if (tmp_addr < addr_end) {
+		rc = mvs_eep_read(regs, tmp_addr, &tmp);
+		if (rc)
+			return rc;
+
+		tmp8 = (u8 *)&tmp;
+		j = addr_end - tmp_addr;
+		for (i = 0; i < j; i++)
+			*buf8++ = tmp8[i];
+
+		tmp_addr += 4;
+	}
+
+	return 0;
+}
+#endif
+
+static int mvs_nvram_read(struct mvs_info *mvi, u32 addr,
+			  void *buf, u32 buflen)
+{
+#ifndef MVS_DISABLE_NVRAM
+	void __iomem *regs = mvi->regs;
+	int rc, i;
+	u32 sum;
+	u8 hdr[2], *tmp;
+	const char *msg;
+
+	rc = mvs_eep_read_buf(regs, addr, &hdr, 2);
+	if (rc) {
+		msg = "nvram hdr read failed";
+		goto err_out;
+	}
+	rc = mvs_eep_read_buf(regs, addr + 2, buf, buflen);
+	if (rc) {
+		msg = "nvram read failed";
+		goto err_out;
+	}
+
+	if (hdr[0] != 0x5A) {
+		/* entry id */
+		msg = "invalid nvram entry id";
+		rc = -ENOENT;
+		goto err_out;
+	}
+
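+	/* validate the checksum: the entry is laid out so that the
+	 * byte-wise sum of the two header bytes plus the payload is zero
+	 */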
+	tmp = buf;
+	sum = ((u32)hdr[0]) + ((u32)hdr[1]);
+	for (i = 0; i < buflen; i++)
+		sum += ((u32)tmp[i]);
+
+	if (sum) {
+		msg = "nvram checksum failure";
+		rc = -EILSEQ;
+		goto err_out;
+	}
+
+	return 0;
+
+err_out:
+	dev_printk(KERN_ERR, &mvi->pdev->dev, "%s\n", msg);
+	return rc;
+#else
+	/* FIXME , For SAS target mode */
+	memcpy(buf, "\x50\x05\x04\x30\x11\xab\x00\x00", 8);
+	return 0;
+#endif
+}
+
+static void mvs_bytes_dmaed(struct mvs_info *mvi, int i)
+{
+	struct mvs_phy *phy = &mvi->phy[i];
+	struct asd_sas_phy *sas_phy = mvi->sas.sas_phy[i];
+
+	if (!phy->phy_attached)
+		return;
+
+	if (sas_phy->phy) {
+		struct sas_phy *sphy = sas_phy->phy;
+
+		sphy->negotiated_linkrate = sas_phy->linkrate;
+		sphy->minimum_linkrate = phy->minimum_linkrate;
+		sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
+		sphy->maximum_linkrate = phy->maximum_linkrate;
+		sphy->maximum_linkrate_hw = SAS_LINK_RATE_3_0_GBPS;
+	}
+
+	if (phy->phy_type & PORT_TYPE_SAS) {
+		struct sas_identify_frame *id;
+
+		id = (struct sas_identify_frame *)phy->frame_rcvd;
+		id->dev_type = phy->identify.device_type;
+		id->initiator_bits = SAS_PROTOCOL_ALL;
+		id->target_bits = phy->identify.target_port_protocols;
+	} else if (phy->phy_type & PORT_TYPE_SATA) {
+		/* TODO */
+	}
+	mvi->sas.sas_phy[i]->frame_rcvd_size = phy->frame_rcvd_size;
+	mvi->sas.notify_port_event(mvi->sas.sas_phy[i],
+				   PORTE_BYTES_DMAED);
+}
+
+static int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time)
+{
+	/* give the phy enabling interrupt event time to come in (1s
+	 * is empirically about all it takes) */
+	if (time < HZ)
+		return 0;
+	/* Wait for discovery to finish */
+	scsi_flush_work(shost);
+	return 1;
+}
+
+static void mvs_scan_start(struct Scsi_Host *shost)
+{
+	int i;
+	struct mvs_info *mvi = SHOST_TO_SAS_HA(shost)->lldd_ha;
+
+	for (i = 0; i < mvi->chip->n_phy; ++i) {
+		mvs_bytes_dmaed(mvi, i);
+	}
+}
+
+static int mvs_slave_configure(struct scsi_device *sdev)
+{
+	struct domain_device *dev = sdev_to_domain_dev(sdev);
+	int ret = sas_slave_configure(sdev);
+
+	if (ret)
+		return ret;
+
+	if (dev_is_sata(dev)) {
+		/* struct ata_port *ap = dev->sata_dev.ap; */
+		/* struct ata_device *adev = ap->link.device; */
+
+		/* clamp at no NCQ for the time being */
+		/* adev->flags |= ATA_DFLAG_NCQ_OFF; */
+		scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, 1);
+	}
+	return 0;
+}
+
+static void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events)
+{
+	struct pci_dev *pdev = mvi->pdev;
+	struct sas_ha_struct *sas_ha = &mvi->sas;
+	struct mvs_phy *phy = &mvi->phy[phy_no];
+	struct asd_sas_phy *sas_phy = &phy->sas_phy;
+
+	phy->irq_status = mvs_read_port_irq_stat(mvi, phy_no);
+	/*
+	 * "events" carries the port events; also check the per-port
+	 * interrupt status.
+	 */
+	dev_printk(KERN_DEBUG, &pdev->dev,
+		"Port %d Event = %X\n",
+		phy_no, phy->irq_status);
+
+	if (phy->irq_status & (PHYEV_POOF | PHYEV_DEC_ERR)) {
+		mvs_release_task(mvi, phy_no);
+		if (!mvs_is_phy_ready(mvi, phy_no)) {
+			sas_phy_disconnected(sas_phy);
+			sas_ha->notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL);
+			dev_printk(KERN_INFO, &pdev->dev,
+				"Port %d Unplug Notice\n", phy_no);
+
+		} else
+			mvs_phy_control(sas_phy, PHY_FUNC_LINK_RESET, NULL);
+	}
+	if (!(phy->irq_status & PHYEV_DEC_ERR)) {
+		if (phy->irq_status & PHYEV_COMWAKE) {
+			u32 tmp = mvs_read_port_irq_mask(mvi, phy_no);
+			mvs_write_port_irq_mask(mvi, phy_no,
+						tmp | PHYEV_SIG_FIS);
+		}
+		if (phy->irq_status & (PHYEV_SIG_FIS | PHYEV_ID_DONE)) {
+			phy->phy_status = mvs_is_phy_ready(mvi, phy_no);
+			if (phy->phy_status) {
+				mvs_detect_porttype(mvi, phy_no);
+
+				if (phy->phy_type & PORT_TYPE_SATA) {
+					u32 tmp = mvs_read_port_irq_mask(mvi,
+								phy_no);
+					tmp &= ~PHYEV_SIG_FIS;
+					mvs_write_port_irq_mask(mvi,
+								phy_no, tmp);
+				}
+
+				mvs_update_phyinfo(mvi, phy_no, 0);
+				sas_ha->notify_phy_event(sas_phy,
+							PHYE_OOB_DONE);
+				mvs_bytes_dmaed(mvi, phy_no);
+			} else {
+				dev_printk(KERN_DEBUG, &pdev->dev,
+					"plugin interrupt but phy is gone\n");
+				mvs_phy_control(sas_phy, PHY_FUNC_LINK_RESET,
+							NULL);
+			}
+		} else if (phy->irq_status & PHYEV_BROAD_CH) {
+			mvs_release_task(mvi, phy_no);
+			sas_ha->notify_port_event(sas_phy,
+						PORTE_BROADCAST_RCVD);
+		}
+	}
+	mvs_write_port_irq_stat(mvi, phy_no, phy->irq_status);
+}
+
+static void mvs_int_sata(struct mvs_info *mvi)
+{
+	u32 tmp;
+	void __iomem *regs = mvi->regs;
+	tmp = mr32(INT_STAT_SRS);
+	mw32(INT_STAT_SRS, tmp & 0xFFFF);
+}
+
+static void mvs_slot_reset(struct mvs_info *mvi, struct sas_task *task,
+				u32 slot_idx)
+{
+	void __iomem *regs = mvi->regs;
+	struct domain_device *dev = task->dev;
+	struct asd_sas_port *sas_port = dev->port;
+	struct mvs_port *port = mvi->slot_info[slot_idx].port;
+	u32 reg_set, phy_mask;
+
+	if (!sas_protocol_ata(task->task_proto)) {
+		reg_set = 0;
+		phy_mask = (port->wide_port_phymap) ? port->wide_port_phymap :
+				sas_port->phy_mask;
+	} else {
+		reg_set = port->taskfileset;
+		phy_mask = sas_port->phy_mask;
+	}
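+	/* queue a SLOT_RESET delivery-queue entry: the command code, PHY
+	 * mask and (for ATA) the SATA register set are packed into one
+	 * descriptor
+	 */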
+	mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | slot_idx |
+					(TXQ_CMD_SLOT_RESET << TXQ_CMD_SHIFT) |
+					(phy_mask << TXQ_PHY_SHIFT) |
+					(reg_set << TXQ_SRS_SHIFT));
+
+	mw32(TX_PROD_IDX, mvi->tx_prod);
+	mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1);
+}
+
+static int mvs_sata_done(struct mvs_info *mvi, struct sas_task *task,
+			u32 slot_idx, int err)
+{
+	struct mvs_port *port = mvi->slot_info[slot_idx].port;
+	struct task_status_struct *tstat = &task->task_status;
+	struct ata_task_resp *resp = (struct ata_task_resp *)tstat->buf;
+	int stat = SAM_GOOD;
+
+	resp->frame_len = sizeof(struct dev_to_host_fis);
+	memcpy(&resp->ending_fis[0],
+	       SATA_RECEIVED_D2H_FIS(port->taskfileset),
+	       sizeof(struct dev_to_host_fis));
+	tstat->buf_valid_size = sizeof(*resp);
+	if (unlikely(err))
+		stat = SAS_PROTO_RESPONSE;
+	return stat;
+}
+
+static void mvs_slot_free(struct mvs_info *mvi, u32 rx_desc)
+{
+	u32 slot_idx = rx_desc & RXQ_SLOT_MASK;
+	mvs_tag_clear(mvi, slot_idx);
+}
+
+static void mvs_slot_task_free(struct mvs_info *mvi, struct sas_task *task,
+			  struct mvs_slot_info *slot, u32 slot_idx)
+{
+	if (!sas_protocol_ata(task->task_proto))
+		if (slot->n_elem)
+			pci_unmap_sg(mvi->pdev, task->scatter,
+				     slot->n_elem, task->data_dir);
+
+	switch (task->task_proto) {
+	case SAS_PROTOCOL_SMP:
+		pci_unmap_sg(mvi->pdev, &task->smp_task.smp_resp, 1,
+			     PCI_DMA_FROMDEVICE);
+		pci_unmap_sg(mvi->pdev, &task->smp_task.smp_req, 1,
+			     PCI_DMA_TODEVICE);
+		break;
+
+	case SAS_PROTOCOL_SATA:
+	case SAS_PROTOCOL_STP:
+	case SAS_PROTOCOL_SSP:
+	default:
+		/* do nothing */
+		break;
+	}
+	list_del(&slot->list);
+	task->lldd_task = NULL;
+	slot->task = NULL;
+	slot->port = NULL;
+}
+
+static int mvs_slot_err(struct mvs_info *mvi, struct sas_task *task,
+			 u32 slot_idx)
+{
+	struct mvs_slot_info *slot = &mvi->slot_info[slot_idx];
+	u32 err_dw0 = le32_to_cpu(*(u32 *) (slot->response));
+	u32 err_dw1 = le32_to_cpu(*(u32 *) (slot->response + 4));
+	int stat = SAM_CHECK_COND;
+
+	if (err_dw1 & SLOT_BSY_ERR) {
+		stat = SAS_QUEUE_FULL;
+		mvs_slot_reset(mvi, task, slot_idx);
+	}
+	switch (task->task_proto) {
+	case SAS_PROTOCOL_SSP:
+		break;
+	case SAS_PROTOCOL_SMP:
+		break;
+	case SAS_PROTOCOL_SATA:
+	case SAS_PROTOCOL_STP:
+	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
+		if (err_dw0 & TFILE_ERR)
+			stat = mvs_sata_done(mvi, task, slot_idx, 1);
+		break;
+	default:
+		break;
+	}
+
+	mvs_hexdump(16, (u8 *) slot->response, 0);
+	return stat;
+}
+
+static int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
+{
+	u32 slot_idx = rx_desc & RXQ_SLOT_MASK;
+	struct mvs_slot_info *slot = &mvi->slot_info[slot_idx];
+	struct sas_task *task = slot->task;
+	struct task_status_struct *tstat;
+	struct mvs_port *port;
+	bool aborted;
+	void *to;
+
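+	/* a non-zero "flags" marks a forced completion (see
+	 * mvs_release_task()); such tasks are failed with SAS_PHY_DOWN
+	 * below unless they are ATA
+	 */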
+	if (unlikely(!task || !task->lldd_task))
+		return -1;
+
+	mvs_hba_cq_dump(mvi);
+
+	spin_lock(&task->task_state_lock);
+	aborted = task->task_state_flags & SAS_TASK_STATE_ABORTED;
+	if (!aborted) {
+		task->task_state_flags &=
+		    ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
+		task->task_state_flags |= SAS_TASK_STATE_DONE;
+	}
+	spin_unlock(&task->task_state_lock);
+
+	if (aborted) {
+		mvs_slot_task_free(mvi, task, slot, slot_idx);
+		mvs_slot_free(mvi, rx_desc);
+		return -1;
+	}
+
+	port = slot->port;
+	tstat = &task->task_status;
+	memset(tstat, 0, sizeof(*tstat));
+	tstat->resp = SAS_TASK_COMPLETE;
+
+	if (unlikely(!port->port_attached || flags)) {
+		mvs_slot_err(mvi, task, slot_idx);
+		if (!sas_protocol_ata(task->task_proto))
+			tstat->stat = SAS_PHY_DOWN;
+		goto out;
+	}
+
+	/* error info record present */
+	if (unlikely((rx_desc & RXQ_ERR) && (*(u64 *) slot->response))) {
+		tstat->stat = mvs_slot_err(mvi, task, slot_idx);
+		goto out;
+	}
+
+	switch (task->task_proto) {
+	case SAS_PROTOCOL_SSP:
+		/* hw says status == 0, datapres == 0 */
+		if (rx_desc & RXQ_GOOD) {
+			tstat->stat = SAM_GOOD;
+			tstat->resp = SAS_TASK_COMPLETE;
+		}
+		/* response frame present */
+		else if (rx_desc & RXQ_RSP) {
+			struct ssp_response_iu *iu =
+			    slot->response + sizeof(struct mvs_err_info);
+			sas_ssp_task_response(&mvi->pdev->dev, task, iu);
+		}
+
+		/* should never happen? */
+		else
+			tstat->stat = SAM_CHECK_COND;
+		break;
+
+	case SAS_PROTOCOL_SMP: {
+			struct scatterlist *sg_resp = &task->smp_task.smp_resp;
+			tstat->stat = SAM_GOOD;
+			to = kmap_atomic(sg_page(sg_resp), KM_IRQ0);
+			memcpy(to + sg_resp->offset,
+				slot->response + sizeof(struct mvs_err_info),
+				sg_dma_len(sg_resp));
+			kunmap_atomic(to, KM_IRQ0);
+			break;
+		}
+
+	case SAS_PROTOCOL_SATA:
+	case SAS_PROTOCOL_STP:
+	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: {
+			tstat->stat = mvs_sata_done(mvi, task, slot_idx, 0);
+			break;
+		}
+
+	default:
+		tstat->stat = SAM_CHECK_COND;
+		break;
+	}
+
+out:
+	mvs_slot_task_free(mvi, task, slot, slot_idx);
+	if (unlikely(tstat->stat != SAS_QUEUE_FULL))
+		mvs_slot_free(mvi, rx_desc);
+
+	spin_unlock(&mvi->lock);
+	task->task_done(task);
+	spin_lock(&mvi->lock);
+	return tstat->stat;
+}
+
+static void mvs_release_task(struct mvs_info *mvi, int phy_no)
+{
+	struct list_head *pos, *n;
+	struct mvs_slot_info *slot;
+	struct mvs_phy *phy = &mvi->phy[phy_no];
+	struct mvs_port *port = phy->port;
+	u32 rx_desc;
+
+	if (!port)
+		return;
+
+	list_for_each_safe(pos, n, &port->list) {
+		slot = container_of(pos, struct mvs_slot_info, list);
+		rx_desc = (u32) (slot - mvi->slot_info);
+		mvs_slot_complete(mvi, rx_desc, 1);
+	}
+}
+
+static void mvs_int_full(struct mvs_info *mvi)
+{
+	void __iomem *regs = mvi->regs;
+	u32 tmp, stat;
+	int i;
+
+	stat = mr32(INT_STAT);
+
+	mvs_int_rx(mvi, false);
+
+	for (i = 0; i < MVS_MAX_PORTS; i++) {
+		tmp = (stat >> i) & (CINT_PORT | CINT_PORT_STOPPED);
+		if (tmp)
+			mvs_int_port(mvi, i, tmp);
+	}
+
+	if (stat & CINT_SRS)
+		mvs_int_sata(mvi);
+
+	mw32(INT_STAT, stat);
+}
+
+static int mvs_int_rx(struct mvs_info *mvi, bool self_clear)
+{
+	void __iomem *regs = mvi->regs;
+	u32 rx_prod_idx, rx_desc;
+	bool attn = false;
+	struct pci_dev *pdev = mvi->pdev;
+
+	/* the first dword in the RX ring is special: it contains
+	 * a mirror of the hardware's RX producer index, so that
+	 * we don't have to stall the CPU reading that register.
+	 * The actual RX ring is offset by one dword, due to this.
+	 */
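+	/* Concretely: descriptor i is read from rx[1 + i], with i wrapping
+	 * at MVS_RX_RING_SZ, while rx[0] holds the mirrored producer index.
+	 */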
+	rx_prod_idx = mvi->rx_cons;
+	mvi->rx_cons = le32_to_cpu(mvi->rx[0]);
+	if (mvi->rx_cons == 0xfff)	/* h/w hasn't touched RX ring yet */
+		return 0;
+
+	/* The completion queue may be updated late; re-read the hardware
+	 * register and try again.  Note: with interrupt coalescing enabled,
+	 * the register must be read every time.
+	 */
+	if (mvi->rx_cons == rx_prod_idx)
+		mvi->rx_cons = mr32(RX_CONS_IDX) & RX_RING_SZ_MASK;
+
+	if (mvi->rx_cons == rx_prod_idx)
+		return 0;
+
+	while (mvi->rx_cons != rx_prod_idx) {
+
+		/* increment our internal RX consumer pointer */
+		rx_prod_idx = (rx_prod_idx + 1) & (MVS_RX_RING_SZ - 1);
+
+		rx_desc = le32_to_cpu(mvi->rx[rx_prod_idx + 1]);
+
+		if (likely(rx_desc & RXQ_DONE))
+			mvs_slot_complete(mvi, rx_desc, 0);
+		if (rx_desc & RXQ_ATTN) {
+			attn = true;
+			dev_printk(KERN_DEBUG, &pdev->dev, "ATTN %X\n",
+				rx_desc);
+		} else if (rx_desc & RXQ_ERR) {
+			if (!(rx_desc & RXQ_DONE))
+				mvs_slot_complete(mvi, rx_desc, 0);
+			dev_printk(KERN_DEBUG, &pdev->dev, "RXQ_ERR %X\n",
+				rx_desc);
+		} else if (rx_desc & RXQ_SLOT_RESET) {
+			dev_printk(KERN_DEBUG, &pdev->dev, "Slot reset[%X]\n",
+				rx_desc);
+			mvs_slot_free(mvi, rx_desc);
+		}
+	}
+
+	if (attn && self_clear)
+		mvs_int_full(mvi);
+
+	return 0;
+}
+
+#ifdef MVS_USE_TASKLET
+static void mvs_tasklet(unsigned long data)
+{
+	struct mvs_info *mvi = (struct mvs_info *) data;
+	unsigned long flags;
+
+	spin_lock_irqsave(&mvi->lock, flags);
+
+#ifdef MVS_DISABLE_MSI
+	mvs_int_full(mvi);
+#else
+	mvs_int_rx(mvi, true);
+#endif
+	spin_unlock_irqrestore(&mvi->lock, flags);
+}
+#endif
+
+static irqreturn_t mvs_interrupt(int irq, void *opaque)
+{
+	struct mvs_info *mvi = opaque;
+	void __iomem *regs = mvi->regs;
+	u32 stat;
+
+	stat = mr32(GBL_INT_STAT);
+
+	if (stat == 0 || stat == 0xffffffff)
+		return IRQ_NONE;
+
+	/* clear CMD_CMPLT ASAP */
+	mw32_f(INT_STAT, CINT_DONE);
+
+#ifndef MVS_USE_TASKLET
+	spin_lock(&mvi->lock);
+
+	mvs_int_full(mvi);
+
+	spin_unlock(&mvi->lock);
+#else
+	tasklet_schedule(&mvi->tasklet);
+#endif
+	return IRQ_HANDLED;
+}
+
+#ifndef MVS_DISABLE_MSI
+static irqreturn_t mvs_msi_interrupt(int irq, void *opaque)
+{
+	struct mvs_info *mvi = opaque;
+
+#ifndef MVS_USE_TASKLET
+	spin_lock(&mvi->lock);
+
+	mvs_int_rx(mvi, true);
+
+	spin_unlock(&mvi->lock);
+#else
+	tasklet_schedule(&mvi->tasklet);
+#endif
+	return IRQ_HANDLED;
+}
+#endif
+
+struct mvs_task_exec_info {
+	struct sas_task *task;
+	struct mvs_cmd_hdr *hdr;
+	struct mvs_port *port;
+	u32 tag;
+	int n_elem;
+};
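+/* Per-command context handed to the mvs_task_prep_*() helpers below. */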
+
+static int mvs_task_prep_smp(struct mvs_info *mvi,
+			     struct mvs_task_exec_info *tei)
+{
+	int elem, rc, i;
+	struct sas_task *task = tei->task;
+	struct mvs_cmd_hdr *hdr = tei->hdr;
+	struct scatterlist *sg_req, *sg_resp;
+	u32 req_len, resp_len, tag = tei->tag;
+	void *buf_tmp;
+	u8 *buf_oaf;
+	dma_addr_t buf_tmp_dma;
+	struct mvs_prd *buf_prd;
+	struct scatterlist *sg;
+	struct mvs_slot_info *slot = &mvi->slot_info[tag];
+	struct asd_sas_port *sas_port = task->dev->port;
+	u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
+#if _MV_DUMP
+	u8 *buf_cmd;
+	void *from;
+#endif
+	/*
+	 * DMA-map SMP request, response buffers
+	 */
+	sg_req = &task->smp_task.smp_req;
+	elem = pci_map_sg(mvi->pdev, sg_req, 1, PCI_DMA_TODEVICE);
+	if (!elem)
+		return -ENOMEM;
+	req_len = sg_dma_len(sg_req);
+
+	sg_resp = &task->smp_task.smp_resp;
+	elem = pci_map_sg(mvi->pdev, sg_resp, 1, PCI_DMA_FROMDEVICE);
+	if (!elem) {
+		rc = -ENOMEM;
+		goto err_out;
+	}
+	resp_len = sg_dma_len(sg_resp);
+
+	/* must be in dwords */
+	if ((req_len & 0x3) || (resp_len & 0x3)) {
+		rc = -EINVAL;
+		goto err_out_2;
+	}
+
+	/*
+	 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
+	 */
+
+	/* region 1: command table area (MVS_SSP_CMD_SZ bytes) ************** */
+	buf_tmp = slot->buf;
+	buf_tmp_dma = slot->buf_dma;
+
+#if _MV_DUMP
+	buf_cmd = buf_tmp;
+	hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);
+	buf_tmp += req_len;
+	buf_tmp_dma += req_len;
+	slot->cmd_size = req_len;
+#else
+	hdr->cmd_tbl = cpu_to_le64(sg_dma_address(sg_req));
+#endif
+
+	/* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
+	buf_oaf = buf_tmp;
+	hdr->open_frame = cpu_to_le64(buf_tmp_dma);
+
+	buf_tmp += MVS_OAF_SZ;
+	buf_tmp_dma += MVS_OAF_SZ;
+
+	/* region 3: PRD table ********************************************* */
+	buf_prd = buf_tmp;
+	if (tei->n_elem)
+		hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
+	else
+		hdr->prd_tbl = 0;
+
+	i = sizeof(struct mvs_prd) * tei->n_elem;
+	buf_tmp += i;
+	buf_tmp_dma += i;
+
+	/* region 4: status buffer (the larger the PRD, the smaller this buf) */
+	slot->response = buf_tmp;
+	hdr->status_buf = cpu_to_le64(buf_tmp_dma);
+
+	/*
+	 * Fill in TX ring and command slot header
+	 */
+	slot->tx = mvi->tx_prod;
+	mvi->tx[mvi->tx_prod] = cpu_to_le32((TXQ_CMD_SMP << TXQ_CMD_SHIFT) |
+					TXQ_MODE_I | tag |
+					(sas_port->phy_mask << TXQ_PHY_SHIFT));
+
+	hdr->flags |= flags;
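+	/* lens packs the max response length (upper 16 bits) and the request
+	 * length (lower 16 bits), both in dwords; the -4 presumably excludes
+	 * the trailing CRC dword of the SMP request frame
+	 */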
+	hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | ((req_len - 4) / 4));
+	hdr->tags = cpu_to_le32(tag);
+	hdr->data_len = 0;
+
+	/* generate open address frame hdr (first 12 bytes) */
+	buf_oaf[0] = (1 << 7) | (0 << 4) | 0x01; /* initiator, SMP, ftype 1h */
+	buf_oaf[1] = task->dev->linkrate & 0xf;
+	*(u16 *)(buf_oaf + 2) = 0xFFFF;		/* SAS SPEC */
+	memcpy(buf_oaf + 4, task->dev->sas_addr, SAS_ADDR_SIZE);
+
+	/* fill in PRD (scatter/gather) table, if any */
+	for_each_sg(task->scatter, sg, tei->n_elem, i) {
+		buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
+		buf_prd->len = cpu_to_le32(sg_dma_len(sg));
+		buf_prd++;
+	}
+
+#if _MV_DUMP
+	/* copy cmd table */
+	from = kmap_atomic(sg_page(sg_req), KM_IRQ0);
+	memcpy(buf_cmd, from + sg_req->offset, req_len);
+	kunmap_atomic(from, KM_IRQ0);
+#endif
+	return 0;
+
+err_out_2:
+	pci_unmap_sg(mvi->pdev, &tei->task->smp_task.smp_resp, 1,
+		     PCI_DMA_FROMDEVICE);
+err_out:
+	pci_unmap_sg(mvi->pdev, &tei->task->smp_task.smp_req, 1,
+		     PCI_DMA_TODEVICE);
+	return rc;
+}
+
+static void mvs_free_reg_set(struct mvs_info *mvi, struct mvs_port *port)
+{
+	void __iomem *regs = mvi->regs;
+	u32 tmp, offs;
+	u8 *tfs = &port->taskfileset;
+
+	if (*tfs == MVS_ID_NOT_MAPPED)
+		return;
+
+	offs = 1U << ((*tfs & 0x0f) + PCS_EN_SATA_REG_SHIFT);
+	if (*tfs < 16) {
+		tmp = mr32(PCS);
+		mw32(PCS, tmp & ~offs);
+	} else {
+		tmp = mr32(CTL);
+		mw32(CTL, tmp & ~offs);
+	}
+
+	tmp = mr32(INT_STAT_SRS) & (1U << *tfs);
+	if (tmp)
+		mw32(INT_STAT_SRS, tmp);
+
+	*tfs = MVS_ID_NOT_MAPPED;
+}
+
+static u8 mvs_assign_reg_set(struct mvs_info *mvi, struct mvs_port *port)
+{
+	int i;
+	u32 tmp, offs;
+	void __iomem *regs = mvi->regs;
+
+	if (port->taskfileset != MVS_ID_NOT_MAPPED)
+		return 0;
+
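+	/* the register-set enable bits for sets 0-15 live in the PCS
+	 * register, those for sets 16 and up in CTL; claim the first
+	 * free set
+	 */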
+	tmp = mr32(PCS);
+
+	for (i = 0; i < mvi->chip->srs_sz; i++) {
+		if (i == 16)
+			tmp = mr32(CTL);
+		offs = 1U << ((i & 0x0f) + PCS_EN_SATA_REG_SHIFT);
+		if (!(tmp & offs)) {
+			port->taskfileset = i;
+
+			if (i < 16)
+				mw32(PCS, tmp | offs);
+			else
+				mw32(CTL, tmp | offs);
+			tmp = mr32(INT_STAT_SRS) & (1U << i);
+			if (tmp)
+				mw32(INT_STAT_SRS, tmp);
+			return 0;
+		}
+	}
+	return MVS_ID_NOT_MAPPED;
+}
+
+static u32 mvs_get_ncq_tag(struct sas_task *task, u32 *tag)
+{
+	struct ata_queued_cmd *qc = task->uldd_task;
+
+	if (qc) {
+		if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
+			qc->tf.command == ATA_CMD_FPDMA_READ) {
+			*tag = qc->tag;
+			return 1;
+		}
+	}
+
+	return 0;
+}
+
+static int mvs_task_prep_ata(struct mvs_info *mvi,
+			     struct mvs_task_exec_info *tei)
+{
+	struct sas_task *task = tei->task;
+	struct domain_device *dev = task->dev;
+	struct mvs_cmd_hdr *hdr = tei->hdr;
+	struct asd_sas_port *sas_port = dev->port;
+	struct mvs_slot_info *slot;
+	struct scatterlist *sg;
+	struct mvs_prd *buf_prd;
+	struct mvs_port *port = tei->port;
+	u32 tag = tei->tag;
+	u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
+	void *buf_tmp;
+	u8 *buf_cmd, *buf_oaf;
+	dma_addr_t buf_tmp_dma;
+	u32 i, req_len, resp_len;
+	const u32 max_resp_len = SB_RFB_MAX;
+
+	if (mvs_assign_reg_set(mvi, port) == MVS_ID_NOT_MAPPED)
+		return -EBUSY;
+
+	slot = &mvi->slot_info[tag];
+	slot->tx = mvi->tx_prod;
+	mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | tag |
+					(TXQ_CMD_STP << TXQ_CMD_SHIFT) |
+					(sas_port->phy_mask << TXQ_PHY_SHIFT) |
+					(port->taskfileset << TXQ_SRS_SHIFT));
+
+	if (task->ata_task.use_ncq)
+		flags |= MCH_FPDMA;
+	if (dev->sata_dev.command_set == ATAPI_COMMAND_SET) {
+		if (task->ata_task.fis.command != ATA_CMD_ID_ATAPI)
+			flags |= MCH_ATAPI;
+	}
+
+	/* FIXME: fill in port multiplier number */
+
+	hdr->flags = cpu_to_le32(flags);
+
+	/* FIXME: the low-order 5 bits carry the TAG when NCQ is enabled */
+	if (task->ata_task.use_ncq && mvs_get_ncq_tag(task, &hdr->tags))
+		task->ata_task.fis.sector_count |= hdr->tags << 3;
+	else
+		hdr->tags = cpu_to_le32(tag);
+	hdr->data_len = cpu_to_le32(task->total_xfer_len);
+
+	/*
+	 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
+	 */
+
+	/* region 1: command table area (MVS_ATA_CMD_SZ bytes) ************** */
+	buf_cmd = buf_tmp = slot->buf;
+	buf_tmp_dma = slot->buf_dma;
+
+	hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);
+
+	buf_tmp += MVS_ATA_CMD_SZ;
+	buf_tmp_dma += MVS_ATA_CMD_SZ;
+#if _MV_DUMP
+	slot->cmd_size = MVS_ATA_CMD_SZ;
+#endif
+
+	/* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
+	/* used for STP.  unused for SATA? */
+	buf_oaf = buf_tmp;
+	hdr->open_frame = cpu_to_le64(buf_tmp_dma);
+
+	buf_tmp += MVS_OAF_SZ;
+	buf_tmp_dma += MVS_OAF_SZ;
+
+	/* region 3: PRD table ********************************************* */
+	buf_prd = buf_tmp;
+	if (tei->n_elem)
+		hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
+	else
+		hdr->prd_tbl = 0;
+
+	i = sizeof(struct mvs_prd) * tei->n_elem;
+	buf_tmp += i;
+	buf_tmp_dma += i;
+
+	/* region 4: status buffer (the larger the PRD, the smaller this buf) */
+	/* FIXME: probably unused, for SATA.  kept here just in case
+	 * we get a STP/SATA error information record
+	 */
+	slot->response = buf_tmp;
+	hdr->status_buf = cpu_to_le64(buf_tmp_dma);
+
+	req_len = sizeof(struct host_to_dev_fis);
+	resp_len = MVS_SLOT_BUF_SZ - MVS_ATA_CMD_SZ -
+	    sizeof(struct mvs_err_info) - i;
+
+	/* request, response lengths */
+	resp_len = min(resp_len, max_resp_len);
+	hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4));
+
+	task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */
+	/* fill in command FIS and ATAPI CDB */
+	memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis));
+	if (dev->sata_dev.command_set == ATAPI_COMMAND_SET)
+		memcpy(buf_cmd + STP_ATAPI_CMD,
+			task->ata_task.atapi_packet, 16);
+
+	/* generate open address frame hdr (first 12 bytes) */
+	buf_oaf[0] = (1 << 7) | (2 << 4) | 0x1;	/* initiator, STP, ftype 1h */
+	buf_oaf[1] = task->dev->linkrate & 0xf;
+	*(u16 *)(buf_oaf + 2) = cpu_to_be16(tag);
+	memcpy(buf_oaf + 4, task->dev->sas_addr, SAS_ADDR_SIZE);
+
+	/* fill in PRD (scatter/gather) table, if any */
+	for_each_sg(task->scatter, sg, tei->n_elem, i) {
+		buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
+		buf_prd->len = cpu_to_le32(sg_dma_len(sg));
+		buf_prd++;
+	}
+
+	return 0;
+}
+
+static int mvs_task_prep_ssp(struct mvs_info *mvi,
+			     struct mvs_task_exec_info *tei)
+{
+	struct sas_task *task = tei->task;
+	struct mvs_cmd_hdr *hdr = tei->hdr;
+	struct mvs_port *port = tei->port;
+	struct mvs_slot_info *slot;
+	struct scatterlist *sg;
+	struct mvs_prd *buf_prd;
+	struct ssp_frame_hdr *ssp_hdr;
+	void *buf_tmp;
+	u8 *buf_cmd, *buf_oaf, fburst = 0;
+	dma_addr_t buf_tmp_dma;
+	u32 flags;
+	u32 resp_len, req_len, i, tag = tei->tag;
+	const u32 max_resp_len = SB_RFB_MAX;
+	u8 phy_mask;
+
+	slot = &mvi->slot_info[tag];
+
+	phy_mask = (port->wide_port_phymap) ? port->wide_port_phymap :
+		task->dev->port->phy_mask;
+	slot->tx = mvi->tx_prod;
+	mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | tag |
+				(TXQ_CMD_SSP << TXQ_CMD_SHIFT) |
+				(phy_mask << TXQ_PHY_SHIFT));
+
+	flags = MCH_RETRY;
+	if (task->ssp_task.enable_first_burst) {
+		flags |= MCH_FBURST;
+		fburst = (1 << 7);
+	}
+	hdr->flags = cpu_to_le32(flags |
+				 (tei->n_elem << MCH_PRD_LEN_SHIFT) |
+				 (MCH_SSP_FR_CMD << MCH_SSP_FR_TYPE_SHIFT));
+
+	hdr->tags = cpu_to_le32(tag);
+	hdr->data_len = cpu_to_le32(task->total_xfer_len);
+
+	/*
+	 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
+	 */
+
+	/* region 1: command table area (MVS_SSP_CMD_SZ bytes) ************** */
+	buf_cmd = buf_tmp = slot->buf;
+	buf_tmp_dma = slot->buf_dma;
+
+	hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);
+
+	buf_tmp += MVS_SSP_CMD_SZ;
+	buf_tmp_dma += MVS_SSP_CMD_SZ;
+#if _MV_DUMP
+	slot->cmd_size = MVS_SSP_CMD_SZ;
+#endif
+
+	/* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
+	buf_oaf = buf_tmp;
+	hdr->open_frame = cpu_to_le64(buf_tmp_dma);
+
+	buf_tmp += MVS_OAF_SZ;
+	buf_tmp_dma += MVS_OAF_SZ;
+
+	/* region 3: PRD table ********************************************* */
+	buf_prd = buf_tmp;
+	if (tei->n_elem)
+		hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
+	else
+		hdr->prd_tbl = 0;
+
+	i = sizeof(struct mvs_prd) * tei->n_elem;
+	buf_tmp += i;
+	buf_tmp_dma += i;
+
+	/* region 4: status buffer (the larger the PRD, the smaller this buf) */
+	slot->response = buf_tmp;
+	hdr->status_buf = cpu_to_le64(buf_tmp_dma);
+
+	resp_len = MVS_SLOT_BUF_SZ - MVS_SSP_CMD_SZ - MVS_OAF_SZ -
+	    sizeof(struct mvs_err_info) - i;
+	resp_len = min(resp_len, max_resp_len);
+
+	req_len = sizeof(struct ssp_frame_hdr) + 28;
+
+	/* request, response lengths */
+	hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4));
+
+	/* generate open address frame hdr (first 12 bytes) */
+	buf_oaf[0] = (1 << 7) | (1 << 4) | 0x1;	/* initiator, SSP, ftype 1h */
+	buf_oaf[1] = task->dev->linkrate & 0xf;
+	*(u16 *)(buf_oaf + 2) = cpu_to_be16(tag);
+	memcpy(buf_oaf + 4, task->dev->sas_addr, SAS_ADDR_SIZE);
+
+	/* fill in SSP frame header (Command Table.SSP frame header) */
+	ssp_hdr = (struct ssp_frame_hdr *)buf_cmd;
+	ssp_hdr->frame_type = SSP_COMMAND;
+	memcpy(ssp_hdr->hashed_dest_addr, task->dev->hashed_sas_addr,
+	       HASHED_SAS_ADDR_SIZE);
+	memcpy(ssp_hdr->hashed_src_addr,
+	       task->dev->port->ha->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
+	ssp_hdr->tag = cpu_to_be16(tag);
+
+	/* fill in command frame IU */
+	buf_cmd += sizeof(*ssp_hdr);
+	memcpy(buf_cmd, &task->ssp_task.LUN, 8);
+	buf_cmd[9] = fburst | task->ssp_task.task_attr |
+			(task->ssp_task.task_prio << 3);
+	memcpy(buf_cmd + 12, &task->ssp_task.cdb, 16);
+
+	/* fill in PRD (scatter/gather) table, if any */
+	for_each_sg(task->scatter, sg, tei->n_elem, i) {
+		buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
+		buf_prd->len = cpu_to_le32(sg_dma_len(sg));
+		buf_prd++;
+	}
+
+	return 0;
+}
+
+static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags)
+{
+	struct domain_device *dev = task->dev;
+	struct mvs_info *mvi = dev->port->ha->lldd_ha;
+	struct pci_dev *pdev = mvi->pdev;
+	void __iomem *regs = mvi->regs;
+	struct mvs_task_exec_info tei;
+	struct sas_task *t = task;
+	struct mvs_slot_info *slot;
+	u32 tag = 0xdeadbeef, rc, n_elem = 0;
+	unsigned long flags;
+	u32 n = num, pass = 0;
+
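+	/* commands are prepared in a batch under mvi->lock; the hardware TX
+	 * producer index is written once, at out_done, after "pass" entries
+	 * have been queued
+	 */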
+	spin_lock_irqsave(&mvi->lock, flags);
+	do {
+		dev = t->dev;
+		tei.port = &mvi->port[dev->port->id];
+
+		if (!tei.port->port_attached) {
+			if (sas_protocol_ata(t->task_proto)) {
+				rc = SAS_PHY_DOWN;
+				goto out_done;
+			} else {
+				struct task_status_struct *ts = &t->task_status;
+				ts->resp = SAS_TASK_UNDELIVERED;
+				ts->stat = SAS_PHY_DOWN;
+				t->task_done(t);
+				if (n > 1)
+					t = list_entry(t->list.next,
+							struct sas_task, list);
+				continue;
+			}
+		}
+
+		if (!sas_protocol_ata(t->task_proto)) {
+			if (t->num_scatter) {
+				n_elem = pci_map_sg(mvi->pdev, t->scatter,
+						    t->num_scatter,
+						    t->data_dir);
+				if (!n_elem) {
+					rc = -ENOMEM;
+					goto err_out;
+				}
+			}
+		} else {
+			n_elem = t->num_scatter;
+		}
+
+		rc = mvs_tag_alloc(mvi, &tag);
+		if (rc)
+			goto err_out;
+
+		slot = &mvi->slot_info[tag];
+		t->lldd_task = NULL;
+		slot->n_elem = n_elem;
+		memset(slot->buf, 0, MVS_SLOT_BUF_SZ);
+		tei.task = t;
+		tei.hdr = &mvi->slot[tag];
+		tei.tag = tag;
+		tei.n_elem = n_elem;
+
+		switch (t->task_proto) {
+		case SAS_PROTOCOL_SMP:
+			rc = mvs_task_prep_smp(mvi, &tei);
+			break;
+		case SAS_PROTOCOL_SSP:
+			rc = mvs_task_prep_ssp(mvi, &tei);
+			break;
+		case SAS_PROTOCOL_SATA:
+		case SAS_PROTOCOL_STP:
+		case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
+			rc = mvs_task_prep_ata(mvi, &tei);
+			break;
+		default:
+			dev_printk(KERN_ERR, &pdev->dev,
+				"unknown sas_task proto: 0x%x\n",
+				t->task_proto);
+			rc = -EINVAL;
+			break;
+		}
+
+		if (rc)
+			goto err_out_tag;
+
+		slot->task = t;
+		slot->port = tei.port;
+		t->lldd_task = (void *) slot;
+		list_add_tail(&slot->list, &slot->port->list);
+		/* TODO: select normal or high priority */
+
+		spin_lock(&t->task_state_lock);
+		t->task_state_flags |= SAS_TASK_AT_INITIATOR;
+		spin_unlock(&t->task_state_lock);
+
+		mvs_hba_memory_dump(mvi, tag, t->task_proto);
+
+		++pass;
+		mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1);
+		if (n > 1)
+			t = list_entry(t->list.next, struct sas_task, list);
+	} while (--n);
+
+	rc = 0;
+	goto out_done;
+
+err_out_tag:
+	mvs_tag_free(mvi, tag);
+err_out:
+	dev_printk(KERN_ERR, &pdev->dev, "mvsas exec failed[%d]!\n", rc);
+	if (!sas_protocol_ata(t->task_proto))
+		if (n_elem)
+			pci_unmap_sg(mvi->pdev, t->scatter, n_elem,
+				     t->data_dir);
+out_done:
+	if (pass)
+		mw32(TX_PROD_IDX, (mvi->tx_prod - 1) & (MVS_CHIP_SLOT_SZ - 1));
+	spin_unlock_irqrestore(&mvi->lock, flags);
+	return rc;
+}
+
+static int mvs_task_abort(struct sas_task *task)
+{
+	int rc;
+	unsigned long flags;
+	struct mvs_info *mvi = task->dev->port->ha->lldd_ha;
+	struct pci_dev *pdev = mvi->pdev;
+	int tag;
+
+	spin_lock_irqsave(&task->task_state_lock, flags);
+	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
+		rc = TMF_RESP_FUNC_COMPLETE;
+		spin_unlock_irqrestore(&task->task_state_lock, flags);
+		goto out_done;
+	}
+	spin_unlock_irqrestore(&task->task_state_lock, flags);
+
+	switch (task->task_proto) {
+	case SAS_PROTOCOL_SMP:
+		dev_printk(KERN_DEBUG, &pdev->dev, "SMP Abort! \n");
+		break;
+	case SAS_PROTOCOL_SSP:
+		dev_printk(KERN_DEBUG, &pdev->dev, "SSP Abort! \n");
+		break;
+	case SAS_PROTOCOL_SATA:
+	case SAS_PROTOCOL_STP:
+	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:{
+		dev_printk(KERN_DEBUG, &pdev->dev, "STP Abort! \n");
+#if _MV_DUMP
+		dev_printk(KERN_DEBUG, &pdev->dev, "Dump D2H FIS: \n");
+		mvs_hexdump(sizeof(struct host_to_dev_fis),
+				(void *)&task->ata_task.fis, 0);
+		dev_printk(KERN_DEBUG, &pdev->dev, "Dump ATAPI Cmd : \n");
+		mvs_hexdump(16, task->ata_task.atapi_packet, 0);
+#endif
+		spin_lock_irqsave(&task->task_state_lock, flags);
+		if (task->task_state_flags & SAS_TASK_NEED_DEV_RESET) {
+			/* TODO */
+			;
+		}
+		spin_unlock_irqrestore(&task->task_state_lock, flags);
+		break;
+	}
+	default:
+		break;
+	}
+
+	if (mvs_find_tag(mvi, task, &tag)) {
+		spin_lock_irqsave(&mvi->lock, flags);
+		mvs_slot_task_free(mvi, task, &mvi->slot_info[tag], tag);
+		spin_unlock_irqrestore(&mvi->lock, flags);
+	}
+	if (!mvs_task_exec(task, 1, GFP_ATOMIC))
+		rc = TMF_RESP_FUNC_COMPLETE;
+	else
+		rc = TMF_RESP_FUNC_FAILED;
+out_done:
+	return rc;
+}
+
+static void mvs_free(struct mvs_info *mvi)
+{
+	int i;
+
+	if (!mvi)
+		return;
+
+	for (i = 0; i < MVS_SLOTS; i++) {
+		struct mvs_slot_info *slot = &mvi->slot_info[i];
+
+		if (slot->buf)
+			dma_free_coherent(&mvi->pdev->dev, MVS_SLOT_BUF_SZ,
+					  slot->buf, slot->buf_dma);
+	}
+
+	if (mvi->tx)
+		dma_free_coherent(&mvi->pdev->dev,
+				  sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ,
+				  mvi->tx, mvi->tx_dma);
+	if (mvi->rx_fis)
+		dma_free_coherent(&mvi->pdev->dev, MVS_RX_FISL_SZ,
+				  mvi->rx_fis, mvi->rx_fis_dma);
+	if (mvi->rx)
+		dma_free_coherent(&mvi->pdev->dev,
+				  sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1),
+				  mvi->rx, mvi->rx_dma);
+	if (mvi->slot)
+		dma_free_coherent(&mvi->pdev->dev,
+				  sizeof(*mvi->slot) * MVS_SLOTS,
+				  mvi->slot, mvi->slot_dma);
+#ifdef MVS_ENABLE_PERI
+	if (mvi->peri_regs)
+		iounmap(mvi->peri_regs);
+#endif
+	if (mvi->regs)
+		iounmap(mvi->regs);
+	if (mvi->shost)
+		scsi_host_put(mvi->shost);
+	kfree(mvi->sas.sas_port);
+	kfree(mvi->sas.sas_phy);
+	kfree(mvi);
+}
+
+/* FIXME: locking? */
+static int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
+			   void *funcdata)
+{
+	struct mvs_info *mvi = sas_phy->ha->lldd_ha;
+	int rc = 0, phy_id = sas_phy->id;
+	u32 tmp;
+
+	tmp = mvs_read_phy_ctl(mvi, phy_id);
+
+	switch (func) {
+	case PHY_FUNC_SET_LINK_RATE:{
+			struct sas_phy_linkrates *rates = funcdata;
+			u32 lrmin = 0, lrmax = 0;
+
+			lrmin = (rates->minimum_linkrate << 8);
+			lrmax = (rates->maximum_linkrate << 12);
+
+			if (lrmin) {
+				tmp &= ~(0xf << 8);
+				tmp |= lrmin;
+			}
+			if (lrmax) {
+				tmp &= ~(0xf << 12);
+				tmp |= lrmax;
+			}
+			mvs_write_phy_ctl(mvi, phy_id, tmp);
+			break;
+		}
+
+	case PHY_FUNC_HARD_RESET:
+		if (tmp & PHY_RST_HARD)
+			break;
+		mvs_write_phy_ctl(mvi, phy_id, tmp | PHY_RST_HARD);
+		break;
+
+	case PHY_FUNC_LINK_RESET:
+		mvs_write_phy_ctl(mvi, phy_id, tmp | PHY_RST);
+		break;
+
+	case PHY_FUNC_DISABLE:
+	case PHY_FUNC_RELEASE_SPINUP_HOLD:
+	default:
+		rc = -EOPNOTSUPP;
+	}
+
+	return rc;
+}
+
+static void __devinit mvs_phy_init(struct mvs_info *mvi, int phy_id)
+{
+	struct mvs_phy *phy = &mvi->phy[phy_id];
+	struct asd_sas_phy *sas_phy = &phy->sas_phy;
+
+	sas_phy->enabled = (phy_id < mvi->chip->n_phy) ? 1 : 0;
+	sas_phy->class = SAS;
+	sas_phy->iproto = SAS_PROTOCOL_ALL;
+	sas_phy->tproto = 0;
+	sas_phy->type = PHY_TYPE_PHYSICAL;
+	sas_phy->role = PHY_ROLE_INITIATOR;
+	sas_phy->oob_mode = OOB_NOT_CONNECTED;
+	sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
+
+	sas_phy->id = phy_id;
+	sas_phy->sas_addr = &mvi->sas_addr[0];
+	sas_phy->frame_rcvd = &phy->frame_rcvd[0];
+	sas_phy->ha = &mvi->sas;
+	sas_phy->lldd_phy = phy;
+}
+
+static struct mvs_info *__devinit mvs_alloc(struct pci_dev *pdev,
+					    const struct pci_device_id *ent)
+{
+	struct mvs_info *mvi;
+	unsigned long res_start, res_len, res_flag;
+	struct asd_sas_phy **arr_phy;
+	struct asd_sas_port **arr_port;
+	const struct mvs_chip_info *chip = &mvs_chips[ent->driver_data];
+	int i;
+
+	/*
+	 * alloc and init our per-HBA mvs_info struct
+	 */
+
+	mvi = kzalloc(sizeof(*mvi), GFP_KERNEL);
+	if (!mvi)
+		return NULL;
+
+	spin_lock_init(&mvi->lock);
+#ifdef MVS_USE_TASKLET
+	tasklet_init(&mvi->tasklet, mvs_tasklet, (unsigned long)mvi);
+#endif
+	mvi->pdev = pdev;
+	mvi->chip = chip;
+
+	if (pdev->device == 0x6440 && pdev->revision == 0)
+		mvi->flags |= MVF_PHY_PWR_FIX;
+
+	/*
+	 * alloc and init SCSI, SAS glue
+	 */
+
+	mvi->shost = scsi_host_alloc(&mvs_sht, sizeof(void *));
+	if (!mvi->shost)
+		goto err_out;
+
+	arr_phy = kcalloc(MVS_MAX_PHYS, sizeof(void *), GFP_KERNEL);
+	arr_port = kcalloc(MVS_MAX_PHYS, sizeof(void *), GFP_KERNEL);
+	if (!arr_phy || !arr_port)
+		goto err_out;
+
+	for (i = 0; i < MVS_MAX_PHYS; i++) {
+		mvs_phy_init(mvi, i);
+		arr_phy[i] = &mvi->phy[i].sas_phy;
+		arr_port[i] = &mvi->port[i].sas_port;
+		mvi->port[i].taskfileset = MVS_ID_NOT_MAPPED;
+		mvi->port[i].wide_port_phymap = 0;
+		mvi->port[i].port_attached = 0;
+		INIT_LIST_HEAD(&mvi->port[i].list);
+	}
+
+	SHOST_TO_SAS_HA(mvi->shost) = &mvi->sas;
+	mvi->shost->transportt = mvs_stt;
+	mvi->shost->max_id = 21;
+	mvi->shost->max_lun = ~0;
+	mvi->shost->max_channel = 0;
+	mvi->shost->max_cmd_len = 16;
+
+	mvi->sas.sas_ha_name = DRV_NAME;
+	mvi->sas.dev = &pdev->dev;
+	mvi->sas.lldd_module = THIS_MODULE;
+	mvi->sas.sas_addr = &mvi->sas_addr[0];
+	mvi->sas.sas_phy = arr_phy;
+	mvi->sas.sas_port = arr_port;
+	mvi->sas.num_phys = chip->n_phy;
+	mvi->sas.lldd_max_execute_num = 1;
+	mvi->sas.lldd_queue_size = MVS_QUEUE_SIZE;
+	mvi->shost->can_queue = MVS_CAN_QUEUE;
+	mvi->shost->cmd_per_lun = MVS_SLOTS / mvi->sas.num_phys;
+	mvi->sas.lldd_ha = mvi;
+	mvi->sas.core.shost = mvi->shost;
+
+	mvs_tag_init(mvi);
+
+	/*
+	 * ioremap main and peripheral registers
+	 */
+
+#ifdef MVS_ENABLE_PERI
+	res_start = pci_resource_start(pdev, 2);
+	res_len = pci_resource_len(pdev, 2);
+	if (!res_start || !res_len)
+		goto err_out;
+
+	mvi->peri_regs = ioremap_nocache(res_start, res_len);
+	if (!mvi->peri_regs)
+		goto err_out;
+#endif
+
+	res_start = pci_resource_start(pdev, 4);
+	res_len = pci_resource_len(pdev, 4);
+	if (!res_start || !res_len)
+		goto err_out;
+
+	res_flag = pci_resource_flags(pdev, 4);
+	if (res_flag & IORESOURCE_CACHEABLE)
+		mvi->regs = ioremap(res_start, res_len);
+	else
+		mvi->regs = ioremap_nocache(res_start, res_len);
+
+	if (!mvi->regs)
+		goto err_out;
+
+	/*
+	 * alloc and init our DMA areas
+	 */
+
+	mvi->tx = dma_alloc_coherent(&pdev->dev,
+				     sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ,
+				     &mvi->tx_dma, GFP_KERNEL);
+	if (!mvi->tx)
+		goto err_out;
+	memset(mvi->tx, 0, sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ);
+
+	mvi->rx_fis = dma_alloc_coherent(&pdev->dev, MVS_RX_FISL_SZ,
+					 &mvi->rx_fis_dma, GFP_KERNEL);
+	if (!mvi->rx_fis)
+		goto err_out;
+	memset(mvi->rx_fis, 0, MVS_RX_FISL_SZ);
+
+	mvi->rx = dma_alloc_coherent(&pdev->dev,
+				     sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1),
+				     &mvi->rx_dma, GFP_KERNEL);
+	if (!mvi->rx)
+		goto err_out;
+	memset(mvi->rx, 0, sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1));
+
+	mvi->rx[0] = cpu_to_le32(0xfff);
+	mvi->rx_cons = 0xfff;
+
+	mvi->slot = dma_alloc_coherent(&pdev->dev,
+				       sizeof(*mvi->slot) * MVS_SLOTS,
+				       &mvi->slot_dma, GFP_KERNEL);
+	if (!mvi->slot)
+		goto err_out;
+	memset(mvi->slot, 0, sizeof(*mvi->slot) * MVS_SLOTS);
+
+	for (i = 0; i < MVS_SLOTS; i++) {
+		struct mvs_slot_info *slot = &mvi->slot_info[i];
+
+		slot->buf = dma_alloc_coherent(&pdev->dev, MVS_SLOT_BUF_SZ,
+					       &slot->buf_dma, GFP_KERNEL);
+		if (!slot->buf)
+			goto err_out;
+		memset(slot->buf, 0, MVS_SLOT_BUF_SZ);
+	}
+
+	/* finally, read NVRAM to get our SAS address */
+	if (mvs_nvram_read(mvi, NVR_SAS_ADDR, &mvi->sas_addr, 8))
+		goto err_out;
+	return mvi;
+
+err_out:
+	mvs_free(mvi);
+	return NULL;
+}
+
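+/* Indirect register window: write the target address to the CMD_ADDR
+ * register, then read or write the value through CMD_DATA.
+ */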
+static u32 mvs_cr32(void __iomem *regs, u32 addr)
+{
+	mw32(CMD_ADDR, addr);
+	return mr32(CMD_DATA);
+}
+
+static void mvs_cw32(void __iomem *regs, u32 addr, u32 val)
+{
+	mw32(CMD_ADDR, addr);
+	mw32(CMD_DATA, val);
+}
+
+static u32 mvs_read_phy_ctl(struct mvs_info *mvi, u32 port)
+{
+	void __iomem *regs = mvi->regs;
+	return (port < 4) ? mr32(P0_SER_CTLSTAT + port * 4) :
+		mr32(P4_SER_CTLSTAT + (port - 4) * 4);
+}
+
+static void mvs_write_phy_ctl(struct mvs_info *mvi, u32 port, u32 val)
+{
+	void __iomem *regs = mvi->regs;
+	if (port < 4)
+		mw32(P0_SER_CTLSTAT + port * 4, val);
+	else
+		mw32(P4_SER_CTLSTAT + (port - 4) * 4, val);
+}
+
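+/*
+ * Per-port registers live in two banks: ports 0-3 relative to 'off',
+ * ports 4-7 relative to 'off2', with an 8-byte stride per port.
+ */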
+static u32 mvs_read_port(struct mvs_info *mvi, u32 off, u32 off2, u32 port)
+{
+	void __iomem *regs = mvi->regs + off;
+	void __iomem *regs2 = mvi->regs + off2;
+	return (port < 4) ? readl(regs + port * 8) :
+		readl(regs2 + (port - 4) * 8);
+}
+
+static void mvs_write_port(struct mvs_info *mvi, u32 off, u32 off2,
+				u32 port, u32 val)
+{
+	void __iomem *regs = mvi->regs + off;
+	void __iomem *regs2 = mvi->regs + off2;
+	if (port < 4)
+		writel(val, regs + port * 8);
+	else
+		writel(val, regs2 + (port - 4) * 8);
+}
+
+static u32 mvs_read_port_cfg_data(struct mvs_info *mvi, u32 port)
+{
+	return mvs_read_port(mvi, MVS_P0_CFG_DATA, MVS_P4_CFG_DATA, port);
+}
+
+static void mvs_write_port_cfg_data(struct mvs_info *mvi, u32 port, u32 val)
+{
+	mvs_write_port(mvi, MVS_P0_CFG_DATA, MVS_P4_CFG_DATA, port, val);
+}
+
+static void mvs_write_port_cfg_addr(struct mvs_info *mvi, u32 port, u32 addr)
+{
+	mvs_write_port(mvi, MVS_P0_CFG_ADDR, MVS_P4_CFG_ADDR, port, addr);
+}
+
+static u32 mvs_read_port_vsr_data(struct mvs_info *mvi, u32 port)
+{
+	return mvs_read_port(mvi, MVS_P0_VSR_DATA, MVS_P4_VSR_DATA, port);
+}
+
+static void mvs_write_port_vsr_data(struct mvs_info *mvi, u32 port, u32 val)
+{
+	mvs_write_port(mvi, MVS_P0_VSR_DATA, MVS_P4_VSR_DATA, port, val);
+}
+
+static void mvs_write_port_vsr_addr(struct mvs_info *mvi, u32 port, u32 addr)
+{
+	mvs_write_port(mvi, MVS_P0_VSR_ADDR, MVS_P4_VSR_ADDR, port, addr);
+}
+
+static u32 mvs_read_port_irq_stat(struct mvs_info *mvi, u32 port)
+{
+	return mvs_read_port(mvi, MVS_P0_INT_STAT, MVS_P4_INT_STAT, port);
+}
+
+static void mvs_write_port_irq_stat(struct mvs_info *mvi, u32 port, u32 val)
+{
+	mvs_write_port(mvi, MVS_P0_INT_STAT, MVS_P4_INT_STAT, port, val);
+}
+
+static u32 mvs_read_port_irq_mask(struct mvs_info *mvi, u32 port)
+{
+	return mvs_read_port(mvi, MVS_P0_INT_MASK, MVS_P4_INT_MASK, port);
+}
+
+static void mvs_write_port_irq_mask(struct mvs_info *mvi, u32 port, u32 val)
+{
+	mvs_write_port(mvi, MVS_P0_INT_MASK, MVS_P4_INT_MASK, port, val);
+}
+
+static void __devinit mvs_phy_hacks(struct mvs_info *mvi)
+{
+	void __iomem *regs = mvi->regs;
+	u32 tmp;
+
+	/* workaround for SATA R-ERR, to ignore phy glitch */
+	tmp = mvs_cr32(regs, CMD_PHY_TIMER);
+	tmp &= ~(1 << 9);
+	tmp |= (1 << 10);
+	mvs_cw32(regs, CMD_PHY_TIMER, tmp);
+
+	/* enable retry 127 times */
+	mvs_cw32(regs, CMD_SAS_CTL1, 0x7f7f);
+
+	/* extend open frame timeout to max */
+	tmp = mvs_cr32(regs, CMD_SAS_CTL0);
+	tmp &= ~0xffff;
+	tmp |= 0x3fff;
+	mvs_cw32(regs, CMD_SAS_CTL0, tmp);
+
+	/* workaround for WDTIMEOUT, set to 550 ms */
+	mvs_cw32(regs, CMD_WD_TIMER, 0x86470);
+
+	/* not to halt for different port op during wideport link change */
+	mvs_cw32(regs, CMD_APP_ERR_CONFIG, 0xffefbf7d);
+
+	/* workaround for the Seagate disk not-found OOB sequence: receive
+	 * COMINIT before sending out COMWAKE */
+	tmp = mvs_cr32(regs, CMD_PHY_MODE_21);
+	tmp &= 0x0000ffff;
+	tmp |= 0x00fa0000;
+	mvs_cw32(regs, CMD_PHY_MODE_21, tmp);
+
+	tmp = mvs_cr32(regs, CMD_PHY_TIMER);
+	tmp &= 0x1fffffff;
+	tmp |= (2U << 29);	/* 8 ms retry */
+	mvs_cw32(regs, CMD_PHY_TIMER, tmp);
+
+	/* TEST - for phy decoding error, adjust voltage levels */
+	mw32(P0_VSR_ADDR + 0, 0x8);
+	mw32(P0_VSR_DATA + 0, 0x2F0);
+
+	mw32(P0_VSR_ADDR + 8, 0x8);
+	mw32(P0_VSR_DATA + 8, 0x2F0);
+
+	mw32(P0_VSR_ADDR + 16, 0x8);
+	mw32(P0_VSR_DATA + 16, 0x2F0);
+
+	mw32(P0_VSR_ADDR + 24, 0x8);
+	mw32(P0_VSR_DATA + 24, 0x2F0);
+
+}
+
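+/* enable a phy's transmitter in the PCS register; parts with more
+ * than four phys keep these enables in a second bit field */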
+static void mvs_enable_xmt(struct mvs_info *mvi, int PhyId)
+{
+	void __iomem *regs = mvi->regs;
+	u32 tmp;
+
+	tmp = mr32(PCS);
+	if (mvi->chip->n_phy <= 4)
+		tmp |= 1 << (PhyId + PCS_EN_PORT_XMT_SHIFT);
+	else
+		tmp |= 1 << (PhyId + PCS_EN_PORT_XMT_SHIFT2);
+	mw32(PCS, tmp);
+}
+
+static void mvs_detect_porttype(struct mvs_info *mvi, int i)
+{
+	void __iomem *regs = mvi->regs;
+	u32 reg;
+	struct mvs_phy *phy = &mvi->phy[i];
+
+	/* TODO check & save device type */
+	reg = mr32(GBL_PORT_TYPE);
+
+	if (reg & MODE_SAS_SATA & (1 << i))
+		phy->phy_type |= PORT_TYPE_SAS;
+	else
+		phy->phy_type |= PORT_TYPE_SATA;
+}
+
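+/* read back the 16-byte SATA signature FIS latched in the per-port
+ * config registers */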
+static void *mvs_get_d2h_reg(struct mvs_info *mvi, int i, void *buf)
+{
+	u32 *s = (u32 *) buf;
+
+	if (!s)
+		return NULL;
+
+	mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG3);
+	s[3] = mvs_read_port_cfg_data(mvi, i);
+
+	mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG2);
+	s[2] = mvs_read_port_cfg_data(mvi, i);
+
+	mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG1);
+	s[1] = mvs_read_port_cfg_data(mvi, i);
+
+	mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG0);
+	s[0] = mvs_read_port_cfg_data(mvi, i);
+
+	return (void *)s;
+}
+
+static u32 mvs_is_sig_fis_received(u32 irq_status)
+{
+	return irq_status & PHYEV_SIG_FIS;
+}
+
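+/* propagate the wide-port membership map to each phy's WIDE_PORT register */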
+static void mvs_update_wideport(struct mvs_info *mvi, int i)
+{
+	struct mvs_phy *phy = &mvi->phy[i];
+	struct mvs_port *port = phy->port;
+	int j, no;
+
+	for_each_phy(port->wide_port_phymap, no, j, mvi->chip->n_phy)
+		if (no & 1) {
+			mvs_write_port_cfg_addr(mvi, no, PHYR_WIDE_PORT);
+			mvs_write_port_cfg_data(mvi, no,
+						port->wide_port_phymap);
+		} else {
+			mvs_write_port_cfg_addr(mvi, no, PHYR_WIDE_PORT);
+			mvs_write_port_cfg_data(mvi, no, 0);
+		}
+}
+
+static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i)
+{
+	u32 tmp;
+	struct mvs_phy *phy = &mvi->phy[i];
+	struct mvs_port *port = phy->port;
+
+	tmp = mvs_read_phy_ctl(mvi, i);
+
+	if ((tmp & PHY_READY_MASK) && !(phy->irq_status & PHYEV_POOF)) {
+		if (!port)
+			phy->phy_attached = 1;
+		return tmp;
+	}
+
+	if (port) {
+		if (phy->phy_type & PORT_TYPE_SAS) {
+			port->wide_port_phymap &= ~(1U << i);
+			if (!port->wide_port_phymap)
+				port->port_attached = 0;
+			mvs_update_wideport(mvi, i);
+		} else if (phy->phy_type & PORT_TYPE_SATA)
+			port->port_attached = 0;
+		mvs_free_reg_set(mvi, phy->port);
+		phy->port = NULL;
+		phy->phy_attached = 0;
+		phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);
+	}
+	return 0;
+}
+
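+/*
+ * Refresh a phy's view of the world: local and attached SAS addresses,
+ * negotiated link rate and protocol, optionally sampling (and later
+ * acking) the phy interrupt status when 'get_st' is set.
+ */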
+static void mvs_update_phyinfo(struct mvs_info *mvi, int i,
+					int get_st)
+{
+	struct mvs_phy *phy = &mvi->phy[i];
+	struct pci_dev *pdev = mvi->pdev;
+	u32 tmp;
+	u64 tmp64;
+
+	mvs_write_port_cfg_addr(mvi, i, PHYR_IDENTIFY);
+	phy->dev_info = mvs_read_port_cfg_data(mvi, i);
+
+	mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_HI);
+	phy->dev_sas_addr = (u64) mvs_read_port_cfg_data(mvi, i) << 32;
+
+	mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_LO);
+	phy->dev_sas_addr |= mvs_read_port_cfg_data(mvi, i);
+
+	if (get_st) {
+		phy->irq_status = mvs_read_port_irq_stat(mvi, i);
+		phy->phy_status = mvs_is_phy_ready(mvi, i);
+	}
+
+	if (phy->phy_status) {
+		u32 phy_st;
+		struct asd_sas_phy *sas_phy = mvi->sas.sas_phy[i];
+
+		mvs_write_port_cfg_addr(mvi, i, PHYR_PHY_STAT);
+		phy_st = mvs_read_port_cfg_data(mvi, i);
+
+		sas_phy->linkrate =
+			(phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >>
+				PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET;
+		phy->minimum_linkrate =
+			(phy->phy_status &
+				PHY_MIN_SPP_PHYS_LINK_RATE_MASK) >> 8;
+		phy->maximum_linkrate =
+			(phy->phy_status &
+				PHY_MAX_SPP_PHYS_LINK_RATE_MASK) >> 12;
+
+		if (phy->phy_type & PORT_TYPE_SAS) {
+			/* Update attached_sas_addr */
+			mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_HI);
+			phy->att_dev_sas_addr =
+				(u64) mvs_read_port_cfg_data(mvi, i) << 32;
+			mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_LO);
+			phy->att_dev_sas_addr |= mvs_read_port_cfg_data(mvi, i);
+			mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_DEV_INFO);
+			phy->att_dev_info = mvs_read_port_cfg_data(mvi, i);
+			phy->identify.device_type =
+			    phy->att_dev_info & PORT_DEV_TYPE_MASK;
+
+			if (phy->identify.device_type == SAS_END_DEV)
+				phy->identify.target_port_protocols =
+							SAS_PROTOCOL_SSP;
+			else if (phy->identify.device_type != NO_DEVICE)
+				phy->identify.target_port_protocols =
+							SAS_PROTOCOL_SMP;
+			if (phy_st & PHY_OOB_DTCTD)
+				sas_phy->oob_mode = SAS_OOB_MODE;
+			phy->frame_rcvd_size =
+			    sizeof(struct sas_identify_frame);
+		} else if (phy->phy_type & PORT_TYPE_SATA) {
+			phy->identify.target_port_protocols = SAS_PROTOCOL_STP;
+			if (mvs_is_sig_fis_received(phy->irq_status)) {
+				phy->att_dev_sas_addr = i;	/* temp */
+				if (phy_st & PHY_OOB_DTCTD)
+					sas_phy->oob_mode = SATA_OOB_MODE;
+				phy->frame_rcvd_size =
+				    sizeof(struct dev_to_host_fis);
+				mvs_get_d2h_reg(mvi, i,
+						(void *)sas_phy->frame_rcvd);
+			} else {
+				dev_printk(KERN_DEBUG, &pdev->dev,
+					"No sig fis\n");
+				phy->phy_type &= ~(PORT_TYPE_SATA);
+				goto out_done;
+			}
+		}
+		tmp64 = cpu_to_be64(phy->att_dev_sas_addr);
+		memcpy(sas_phy->attached_sas_addr, &tmp64, SAS_ADDR_SIZE);
+
+		dev_printk(KERN_DEBUG, &pdev->dev,
+			"phy[%d] Get Attached Address 0x%llX ,"
+			" SAS Address 0x%llX\n",
+			i,
+			(unsigned long long)phy->att_dev_sas_addr,
+			(unsigned long long)phy->dev_sas_addr);
+		dev_printk(KERN_DEBUG, &pdev->dev,
+			"Rate = %x , type = %d\n",
+			sas_phy->linkrate, phy->phy_type);
+
+		/* workaround for HW phy decoding error on 1.5g disk drive */
+		mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE6);
+		tmp = mvs_read_port_vsr_data(mvi, i);
+		if (((phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >>
+		     PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET) ==
+			SAS_LINK_RATE_1_5_GBPS)
+			tmp &= ~PHY_MODE6_LATECLK;
+		else
+			tmp |= PHY_MODE6_LATECLK;
+		mvs_write_port_vsr_data(mvi, i, tmp);
+
+	}
+out_done:
+	if (get_st)
+		mvs_write_port_irq_stat(mvi, i, phy->irq_status);
+}
+
+static void mvs_port_formed(struct asd_sas_phy *sas_phy)
+{
+	struct sas_ha_struct *sas_ha = sas_phy->ha;
+	struct mvs_info *mvi = sas_ha->lldd_ha;
+	struct asd_sas_port *sas_port = sas_phy->port;
+	struct mvs_phy *phy = sas_phy->lldd_phy;
+	struct mvs_port *port = &mvi->port[sas_port->id];
+	unsigned long flags;
+
+	spin_lock_irqsave(&mvi->lock, flags);
+	port->port_attached = 1;
+	phy->port = port;
+	port->taskfileset = MVS_ID_NOT_MAPPED;
+	if (phy->phy_type & PORT_TYPE_SAS) {
+		port->wide_port_phymap = sas_port->phy_mask;
+		mvs_update_wideport(mvi, sas_phy->id);
+	}
+	spin_unlock_irqrestore(&mvi->lock, flags);
+}
+
+static int mvs_I_T_nexus_reset(struct domain_device *dev)
+{
+	return TMF_RESP_FUNC_FAILED;
+}
+
+static int __devinit mvs_hw_init(struct mvs_info *mvi)
+{
+	void __iomem *regs = mvi->regs;
+	int i;
+	u32 tmp, cctl;
+
+	/* make sure interrupts are masked immediately (paranoia) */
+	mw32(GBL_CTL, 0);
+	tmp = mr32(GBL_CTL);
+
+	/* Reset Controller */
+	if (!(tmp & HBA_RST)) {
+		if (mvi->flags & MVF_PHY_PWR_FIX) {
+			pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp);
+			tmp &= ~PCTL_PWR_ON;
+			tmp |= PCTL_OFF;
+			pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp);
+
+			pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp);
+			tmp &= ~PCTL_PWR_ON;
+			tmp |= PCTL_OFF;
+			pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp);
+		}
+
+		/* global reset, incl. COMRESET/H_RESET_N (self-clearing) */
+		mw32_f(GBL_CTL, HBA_RST);
+	}
+
+	/* wait for reset to finish; timeout is just a guess */
+	i = 1000;
+	while (i-- > 0) {
+		msleep(10);
+
+		if (!(mr32(GBL_CTL) & HBA_RST))
+			break;
+	}
+	if (mr32(GBL_CTL) & HBA_RST) {
+		dev_printk(KERN_ERR, &mvi->pdev->dev, "HBA reset failed\n");
+		return -EBUSY;
+	}
+
+	/* Init Chip */
+	/* make sure RST is set; HBA_RST /should/ have done that for us */
+	cctl = mr32(CTL);
+	if (cctl & CCTL_RST)
+		cctl &= ~CCTL_RST;
+	else
+		mw32_f(CTL, cctl | CCTL_RST);
+
+	/* write to device control _AND_ device status register? - A.C. */
+	pci_read_config_dword(mvi->pdev, PCR_DEV_CTRL, &tmp);
+	tmp &= ~PRD_REQ_MASK;
+	tmp |= PRD_REQ_SIZE;
+	pci_write_config_dword(mvi->pdev, PCR_DEV_CTRL, tmp);
+
+	pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp);
+	tmp |= PCTL_PWR_ON;
+	tmp &= ~PCTL_OFF;
+	pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp);
+
+	pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp);
+	tmp |= PCTL_PWR_ON;
+	tmp &= ~PCTL_OFF;
+	pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp);
+
+	mw32_f(CTL, cctl);
+
+	/* reset control */
+	mw32(PCS, 0);		/*MVS_PCS */
+
+	mvs_phy_hacks(mvi);
+
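+	/*
+	 * Program the ring base addresses; the (x >> 16) >> 16 form yields
+	 * the upper 32 bits without a 64-bit shift, which would warn when
+	 * dma_addr_t is only 32 bits wide.
+	 */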
+	mw32(CMD_LIST_LO, mvi->slot_dma);
+	mw32(CMD_LIST_HI, (mvi->slot_dma >> 16) >> 16);
+
+	mw32(RX_FIS_LO, mvi->rx_fis_dma);
+	mw32(RX_FIS_HI, (mvi->rx_fis_dma >> 16) >> 16);
+
+	mw32(TX_CFG, MVS_CHIP_SLOT_SZ);
+	mw32(TX_LO, mvi->tx_dma);
+	mw32(TX_HI, (mvi->tx_dma >> 16) >> 16);
+
+	mw32(RX_CFG, MVS_RX_RING_SZ);
+	mw32(RX_LO, mvi->rx_dma);
+	mw32(RX_HI, (mvi->rx_dma >> 16) >> 16);
+
+	/* enable auto port detection */
+	mw32(GBL_PORT_TYPE, MODE_AUTO_DET_EN);
+	msleep(1100);
+	/* init and reset phys */
+	for (i = 0; i < mvi->chip->n_phy; i++) {
+		u32 lo = be32_to_cpu(*(u32 *)&mvi->sas_addr[4]);
+		u32 hi = be32_to_cpu(*(u32 *)&mvi->sas_addr[0]);
+
+		mvs_detect_porttype(mvi, i);
+
+		/* set phy local SAS address */
+		mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_LO);
+		mvs_write_port_cfg_data(mvi, i, lo);
+		mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_HI);
+		mvs_write_port_cfg_data(mvi, i, hi);
+
+		/* reset phy */
+		tmp = mvs_read_phy_ctl(mvi, i);
+		tmp |= PHY_RST;
+		mvs_write_phy_ctl(mvi, i, tmp);
+	}
+
+	msleep(100);
+
+	for (i = 0; i < mvi->chip->n_phy; i++) {
+		/* clear phy int status */
+		tmp = mvs_read_port_irq_stat(mvi, i);
+		tmp &= ~PHYEV_SIG_FIS;
+		mvs_write_port_irq_stat(mvi, i, tmp);
+
+		/* set phy int mask */
+		tmp = PHYEV_RDY_CH | PHYEV_BROAD_CH | PHYEV_UNASSOC_FIS |
+			PHYEV_ID_DONE | PHYEV_DEC_ERR;
+		mvs_write_port_irq_mask(mvi, i, tmp);
+
+		msleep(100);
+		mvs_update_phyinfo(mvi, i, 1);
+		mvs_enable_xmt(mvi, i);
+	}
+
+	/* FIXME: update wide port bitmaps */
+
+	/* little endian for open address and command table, etc. */
+	/* A.C.
+	 * it seems (from the spec) that turning on big-endian won't do us
+	 * any good on big-endian machines; needs further confirmation
+	 */
+	cctl = mr32(CTL);
+	cctl |= CCTL_ENDIAN_CMD;
+	cctl |= CCTL_ENDIAN_DATA;
+	cctl &= ~CCTL_ENDIAN_OPEN;
+	cctl |= CCTL_ENDIAN_RSP;
+	mw32_f(CTL, cctl);
+
+	/* reset CMD queue */
+	tmp = mr32(PCS);
+	tmp |= PCS_CMD_RST;
+	mw32(PCS, tmp);
+	/* Interrupt coalescing may cause missed HW interrupts in some
+	 * cases; besides, the max coalescing count is 0x1ff while our
+	 * max slot count is 0x200, which would truncate the count to 0.
+	 * So leave coalescing disabled.
+	 */
+	tmp = 0;
+	mw32(INT_COAL, tmp);
+
+	tmp = 0x100;
+	mw32(INT_COAL_TMOUT, tmp);
+
+	/* ladies and gentlemen, start your engines */
+	mw32(TX_CFG, 0);
+	mw32(TX_CFG, MVS_CHIP_SLOT_SZ | TX_EN);
+	mw32(RX_CFG, MVS_RX_RING_SZ | RX_EN);
+	/* enable CMD/CMPL_Q/RESP mode */
+	mw32(PCS, PCS_SATA_RETRY | PCS_FIS_RX_EN | PCS_CMD_EN);
+
+	/* enable completion queue interrupt */
+	tmp = (CINT_PORT_MASK | CINT_DONE | CINT_MEM | CINT_SRS);
+	mw32(INT_MASK, tmp);
+
+	/* Enable SRS interrupt */
+	mw32(INT_MASK_SRS, 0xFF);
+	return 0;
+}
+
+static void __devinit mvs_print_info(struct mvs_info *mvi)
+{
+	struct pci_dev *pdev = mvi->pdev;
+	static int printed_version;
+
+	if (!printed_version++)
+		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
+
+	dev_printk(KERN_INFO, &pdev->dev, "%u phys, addr %llx\n",
+		   mvi->chip->n_phy, SAS_ADDR(mvi->sas_addr));
+}
+
+static int __devinit mvs_pci_init(struct pci_dev *pdev,
+				  const struct pci_device_id *ent)
+{
+	int rc;
+	struct mvs_info *mvi;
+	irq_handler_t irq_handler = mvs_interrupt;
+
+	rc = pci_enable_device(pdev);
+	if (rc)
+		return rc;
+
+	pci_set_master(pdev);
+
+	rc = pci_request_regions(pdev, DRV_NAME);
+	if (rc)
+		goto err_out_disable;
+
+	rc = pci_go_64(pdev);
+	if (rc)
+		goto err_out_regions;
+
+	mvi = mvs_alloc(pdev, ent);
+	if (!mvi) {
+		rc = -ENOMEM;
+		goto err_out_regions;
+	}
+
+	rc = mvs_hw_init(mvi);
+	if (rc)
+		goto err_out_mvi;
+
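+	/* prefer MSI when the device supports it, otherwise keep the
+	 * legacy INTx handler set up above
+	 */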
+#ifndef MVS_DISABLE_MSI
+	if (!pci_enable_msi(pdev)) {
+		u32 tmp;
+		void __iomem *regs = mvi->regs;
+		mvi->flags |= MVF_MSI;
+		irq_handler = mvs_msi_interrupt;
+		tmp = mr32(PCS);
+		mw32(PCS, tmp | PCS_SELF_CLEAR);
+	}
+#endif
+
+	rc = request_irq(pdev->irq, irq_handler, IRQF_SHARED, DRV_NAME, mvi);
+	if (rc)
+		goto err_out_msi;
+
+	rc = scsi_add_host(mvi->shost, &pdev->dev);
+	if (rc)
+		goto err_out_irq;
+
+	rc = sas_register_ha(&mvi->sas);
+	if (rc)
+		goto err_out_shost;
+
+	pci_set_drvdata(pdev, mvi);
+
+	mvs_print_info(mvi);
+
+	mvs_hba_interrupt_enable(mvi);
+
+	scsi_scan_host(mvi->shost);
+
+	return 0;
+
+err_out_shost:
+	scsi_remove_host(mvi->shost);
+err_out_irq:
+	free_irq(pdev->irq, mvi);
+err_out_msi:
+	if (mvi->flags & MVF_MSI)
+		pci_disable_msi(pdev);
+err_out_mvi:
+	mvs_free(mvi);
+err_out_regions:
+	pci_release_regions(pdev);
+err_out_disable:
+	pci_disable_device(pdev);
+	return rc;
+}
+
+static void __devexit mvs_pci_remove(struct pci_dev *pdev)
+{
+	struct mvs_info *mvi = pci_get_drvdata(pdev);
+
+	pci_set_drvdata(pdev, NULL);
+
+	if (mvi) {
+		sas_unregister_ha(&mvi->sas);
+		mvs_hba_interrupt_disable(mvi);
+		sas_remove_host(mvi->shost);
+		scsi_remove_host(mvi->shost);
+
+		free_irq(pdev->irq, mvi);
+		if (mvi->flags & MVF_MSI)
+			pci_disable_msi(pdev);
+		mvs_free(mvi);
+		pci_release_regions(pdev);
+	}
+	pci_disable_device(pdev);
+}
+
+static struct sas_domain_function_template mvs_transport_ops = {
+	.lldd_execute_task	= mvs_task_exec,
+	.lldd_control_phy	= mvs_phy_control,
+	.lldd_abort_task	= mvs_task_abort,
+	.lldd_port_formed	= mvs_port_formed,
+	.lldd_I_T_nexus_reset	= mvs_I_T_nexus_reset,
+};
+
+static struct pci_device_id __devinitdata mvs_pci_table[] = {
+	{ PCI_VDEVICE(MARVELL, 0x6320), chip_6320 },
+	{ PCI_VDEVICE(MARVELL, 0x6340), chip_6440 },
+	{
+		.vendor 	= PCI_VENDOR_ID_MARVELL,
+		.device 	= 0x6440,
+		.subvendor	= PCI_ANY_ID,
+		.subdevice	= 0x6480,
+		.class		= 0,
+		.class_mask	= 0,
+		.driver_data	= chip_6480,
+	},
+	{ PCI_VDEVICE(MARVELL, 0x6440), chip_6440 },
+	{ PCI_VDEVICE(MARVELL, 0x6480), chip_6480 },
+
+	{ }	/* terminate list */
+};
+
+static struct pci_driver mvs_pci_driver = {
+	.name		= DRV_NAME,
+	.id_table	= mvs_pci_table,
+	.probe		= mvs_pci_init,
+	.remove		= __devexit_p(mvs_pci_remove),
+};
+
+static int __init mvs_init(void)
+{
+	int rc;
+
+	mvs_stt = sas_domain_attach_transport(&mvs_transport_ops);
+	if (!mvs_stt)
+		return -ENOMEM;
+
+	rc = pci_register_driver(&mvs_pci_driver);
+	if (rc)
+		goto err_out;
+
+	return 0;
+
+err_out:
+	sas_release_transport(mvs_stt);
+	return rc;
+}
+
+static void __exit mvs_exit(void)
+{
+	pci_unregister_driver(&mvs_pci_driver);
+	sas_release_transport(mvs_stt);
+}
+
+module_init(mvs_init);
+module_exit(mvs_exit);
+
+MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
+MODULE_DESCRIPTION("Marvell 88SE6440 SAS/SATA controller driver");
+MODULE_VERSION(DRV_VERSION);
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, mvs_pci_table);
-- 
cgit v1.2.3-70-g09d2
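
The reset handshake in mvs_hw_init() above is a bounded poll on a
self-clearing bit.  A minimal sketch of the same idiom, with reg_read()
standing in for the driver's mr32() accessor (HBA_RST is the driver's
own bit name):

	/* poll a self-clearing reset bit; ~10 s worst case */
	static int wait_for_reset_clear(void __iomem *regs)
	{
		int i;

		for (i = 0; i < 1000; i++) {
			if (!(reg_read(regs) & HBA_RST))
				return 0;	/* reset completed */
			msleep(10);
		}
		return -EBUSY;		/* still asserted, give up */
	}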


From a3ec723a949d65bf0349cdf60958036454927729 Mon Sep 17 00:00:00 2001
From: Eric Piel <eric.piel@tremplin-utc.net>
Date: Mon, 4 May 2009 12:43:02 +0200
Subject: [SCSI] Update wording of CONFIG_SCSI_MULTI_LUN help

I had to set CONFIG_SCSI_MULTI_LUN to y in order to get my SE W595
working when plugging it in as a mass storage device. Working out which
SCSI option would get a phone behaving correctly was convoluted, to say
the least. There are quite a few other reports of USB card readers
needing this option as well. This patch improves the help text to make
the purpose of the option more obvious.

Signed-off-by: Eric Piel <eric.piel@tremplin-utc.net>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
---
 drivers/scsi/Kconfig | 19 +++++++++----------
 1 file changed, 9 insertions(+), 10 deletions(-)

(limited to 'drivers/scsi/Kconfig')

diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 6e8106a70b3..759e1507e63 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -191,20 +191,19 @@ config SCSI_ENCLOSURE
 	  it has an enclosure device.  Selecting this option will just allow
 	  certain enclosure conditions to be reported and is not required.
 
-comment "Some SCSI devices (e.g. CD jukebox) support multiple LUNs"
-	depends on SCSI
-
 config SCSI_MULTI_LUN
 	bool "Probe all LUNs on each SCSI device"
 	depends on SCSI
 	help
-	  If you have a SCSI device that supports more than one LUN (Logical
-	  Unit Number), e.g. a CD jukebox, and only one LUN is detected, you
-	  can say Y here to force the SCSI driver to probe for multiple LUNs.
-	  A SCSI device with multiple LUNs acts logically like multiple SCSI
-	  devices. The vast majority of SCSI devices have only one LUN, and
-	  so most people can say N here. The max_luns boot/module parameter 
-	  allows to override this setting.
+	  Some devices support more than one LUN (Logical Unit Number) in order
+	  to allow access to several media, e.g. a CD jukebox, a USB card reader,
+	  or a mobile phone in mass storage mode. This option forces the kernel
+	  to probe for all LUNs by default. This setting can be overridden with
+	  the max_luns boot/module parameter. Note that this option does not
+	  affect devices conforming to SCSI-3 or higher, as they can explicitly
+	  report their number of LUNs. It is safe to say Y here unless you have
+	  one of those rare devices that reacts in an unexpected way when probed
+	  for multiple LUNs.
 
 config SCSI_CONSTANTS
 	bool "Verbose SCSI error reporting (kernel size +=12K)"
-- 
cgit v1.2.3-70-g09d2
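
The max_luns override mentioned in the new help text is the standard
scsi_mod parameter, so the probing depth can also be changed without a
rebuild; assuming a modular SCSI core, for example:

	modprobe scsi_mod max_luns=8

or scsi_mod.max_luns=8 on the kernel command line when SCSI is built in.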


From cf4e6363859d30f24f8cd3e8930dbff399cc3550 Mon Sep 17 00:00:00 2001
From: Michael Chan <mchan@broadcom.com>
Date: Mon, 8 Jun 2009 18:14:44 -0700
Subject: [SCSI] bnx2i: Add bnx2i iSCSI driver.

New iSCSI driver for Broadcom BNX2 devices.  The driver interfaces with
the CNIC driver to access the hardware.

Signed-off-by: Anil Veerabhadrappa <anilgv@broadcom.com>
Signed-off-by: Michael Chan <mchan@broadcom.com>
Signed-off-by: Mike Christie <michaelc@cs.wisc.edu>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
---
 drivers/scsi/Kconfig                      |    1 +
 drivers/scsi/Makefile                     |    1 +
 drivers/scsi/bnx2i/57xx_iscsi_constants.h |  155 ++
 drivers/scsi/bnx2i/57xx_iscsi_hsi.h       | 1509 ++++++++++++++++++
 drivers/scsi/bnx2i/Kconfig                |    7 +
 drivers/scsi/bnx2i/Makefile               |    3 +
 drivers/scsi/bnx2i/bnx2i.h                |  771 +++++++++
 drivers/scsi/bnx2i/bnx2i_hwi.c            | 2405 +++++++++++++++++++++++++++++
 drivers/scsi/bnx2i/bnx2i_init.c           |  438 ++++++
 drivers/scsi/bnx2i/bnx2i_iscsi.c          | 2064 +++++++++++++++++++++++++
 drivers/scsi/bnx2i/bnx2i_sysfs.c          |  142 ++
 11 files changed, 7496 insertions(+)
 create mode 100644 drivers/scsi/bnx2i/57xx_iscsi_constants.h
 create mode 100644 drivers/scsi/bnx2i/57xx_iscsi_hsi.h
 create mode 100644 drivers/scsi/bnx2i/Kconfig
 create mode 100644 drivers/scsi/bnx2i/Makefile
 create mode 100644 drivers/scsi/bnx2i/bnx2i.h
 create mode 100644 drivers/scsi/bnx2i/bnx2i_hwi.c
 create mode 100644 drivers/scsi/bnx2i/bnx2i_init.c
 create mode 100644 drivers/scsi/bnx2i/bnx2i_iscsi.c
 create mode 100644 drivers/scsi/bnx2i/bnx2i_sysfs.c

(limited to 'drivers/scsi/Kconfig')

diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 759e1507e63..6a19ed9a119 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -354,6 +354,7 @@ config ISCSI_TCP
 	 http://open-iscsi.org
 
 source "drivers/scsi/cxgb3i/Kconfig"
+source "drivers/scsi/bnx2i/Kconfig"
 
 config SGIWD93_SCSI
 	tristate "SGI WD93C93 SCSI Driver"
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 8795c309963..25429ea63d0 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -129,6 +129,7 @@ obj-$(CONFIG_SCSI_STEX)		+= stex.o
 obj-$(CONFIG_SCSI_MVSAS)	+= mvsas/
 obj-$(CONFIG_PS3_ROM)		+= ps3rom.o
 obj-$(CONFIG_SCSI_CXGB3_ISCSI)	+= libiscsi.o libiscsi_tcp.o cxgb3i/
+obj-$(CONFIG_SCSI_BNX2_ISCSI)	+= libiscsi.o bnx2i/
 
 obj-$(CONFIG_ARM)		+= arm/
 
diff --git a/drivers/scsi/bnx2i/57xx_iscsi_constants.h b/drivers/scsi/bnx2i/57xx_iscsi_constants.h
new file mode 100644
index 00000000000..2fceb19eb27
--- /dev/null
+++ b/drivers/scsi/bnx2i/57xx_iscsi_constants.h
@@ -0,0 +1,155 @@
+/* 57xx_iscsi_constants.h: Broadcom NetXtreme II iSCSI HSI
+ *
+ * Copyright (c) 2006 - 2009 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
+ */
+#ifndef __57XX_ISCSI_CONSTANTS_H_
+#define __57XX_ISCSI_CONSTANTS_H_
+
+/*
+ * This file defines HSI constants for the iSCSI flows.
+ */
+
+/* iSCSI request op codes */
+#define ISCSI_OPCODE_CLEANUP_REQUEST    (7)
+
+/* iSCSI response/messages op codes */
+#define ISCSI_OPCODE_CLEANUP_RESPONSE 		(0x27)
+#define ISCSI_OPCODE_NOPOUT_LOCAL_COMPLETION    (0)
+
+/* iSCSI task types */
+#define ISCSI_TASK_TYPE_READ    (0)
+#define ISCSI_TASK_TYPE_WRITE   (1)
+#define ISCSI_TASK_TYPE_MPATH   (2)
+
+/* initial CQ sequence numbers */
+#define ISCSI_INITIAL_SN    (1)
+
+/* KWQ (kernel work queue) layer codes */
+#define ISCSI_KWQE_LAYER_CODE   (6)
+
+/* KWQ (kernel work queue) request op codes */
+#define ISCSI_KWQE_OPCODE_OFFLOAD_CONN1 (0)
+#define ISCSI_KWQE_OPCODE_OFFLOAD_CONN2 (1)
+#define ISCSI_KWQE_OPCODE_UPDATE_CONN   (2)
+#define ISCSI_KWQE_OPCODE_DESTROY_CONN  (3)
+#define ISCSI_KWQE_OPCODE_INIT1         (4)
+#define ISCSI_KWQE_OPCODE_INIT2         (5)
+
+/* KCQ (kernel completion queue) response op codes */
+#define ISCSI_KCQE_OPCODE_OFFLOAD_CONN  (0x10)
+#define ISCSI_KCQE_OPCODE_UPDATE_CONN   (0x12)
+#define ISCSI_KCQE_OPCODE_DESTROY_CONN  (0x13)
+#define ISCSI_KCQE_OPCODE_INIT          (0x14)
+#define ISCSI_KCQE_OPCODE_FW_CLEAN_TASK	(0x15)
+#define ISCSI_KCQE_OPCODE_TCP_RESET     (0x16)
+#define ISCSI_KCQE_OPCODE_TCP_SYN       (0x17)
+#define ISCSI_KCQE_OPCODE_TCP_FIN       (0x18)
+#define ISCSI_KCQE_OPCODE_TCP_ERROR     (0x19)
+#define ISCSI_KCQE_OPCODE_CQ_EVENT_NOTIFICATION (0x20)
+#define ISCSI_KCQE_OPCODE_ISCSI_ERROR   (0x21)
+
+/* KCQ (kernel completion queue) completion status */
+#define ISCSI_KCQE_COMPLETION_STATUS_SUCCESS                            (0x0)
+#define ISCSI_KCQE_COMPLETION_STATUS_INVALID_OPCODE                     (0x1)
+#define ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE                  (0x2)
+#define ISCSI_KCQE_COMPLETION_STATUS_CTX_FREE_FAILURE                   (0x3)
+#define ISCSI_KCQE_COMPLETION_STATUS_NIC_ERROR                          (0x4)
+
+#define ISCSI_KCQE_COMPLETION_STATUS_HDR_DIG_ERR                        (0x5)
+#define ISCSI_KCQE_COMPLETION_STATUS_DATA_DIG_ERR                       (0x6)
+
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_UNEXPECTED_OPCODE     (0xa)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_OPCODE                (0xb)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_AHS_LEN               (0xc)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_ITT                   (0xd)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_STATSN                (0xe)
+
+/* Response */
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_EXP_DATASN            (0xf)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T              (0x10)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATA_SEG_LEN_IS_ZERO  (0x2c)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATA_SEG_LEN_TOO_BIG  (0x2d)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_0                 (0x11)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_1                 (0x12)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_2                 (0x13)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_3                 (0x14)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_4                 (0x15)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_5                 (0x16)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_6                 (0x17)
+
+/* Data-In */
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REMAIN_RCV_LEN        (0x18)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_MAX_RCV_PDU_LEN       (0x19)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_F_BIT_ZERO            (0x1a)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_NOT_RSRV          (0x1b)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATASN                (0x1c)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REMAIN_BURST_LEN      (0x1d)
+
+/* R2T */
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_BUFFER_OFF            (0x1f)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_LUN                   (0x20)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_R2TSN                 (0x21)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_0 (0x22)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_1 (0x23)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T_EXCEED       (0x24)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_IS_RSRV           (0x25)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_MAX_BURST_LEN         (0x26)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATA_SEG_LEN_NOT_ZERO (0x27)
+
+/* TMF */
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REJECT_PDU_LEN        (0x28)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_ASYNC_PDU_LEN         (0x29)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_NOPIN_PDU_LEN         (0x2a)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T_IN_CLEANUP   (0x2b)
+
+/* IP/TCP processing errors: */
+#define ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_IP_FRAGMENT               (0x40)
+#define ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_IP_OPTIONS                (0x41)
+#define ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_URGENT_FLAG               (0x42)
+#define ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_MAX_RTRANS                (0x43)
+
+/* iSCSI licensing errors */
+/* general iSCSI license not installed */
+#define ISCSI_KCQE_COMPLETION_STATUS_ISCSI_NOT_SUPPORTED                (0x50)
+/* additional LOM specific iSCSI license not installed */
+#define ISCSI_KCQE_COMPLETION_STATUS_LOM_ISCSI_NOT_ENABLED              (0x51)
+
+/* SQ/RQ/CQ DB structure sizes */
+#define ISCSI_SQ_DB_SIZE    (16)
+#define ISCSI_RQ_DB_SIZE    (16)
+#define ISCSI_CQ_DB_SIZE    (80)
+
+#define ISCSI_SQN_TO_NOTIFY_NOT_VALID                                   0xFFFF
+
+/* Page size codes (for flags field in connection offload request) */
+#define ISCSI_PAGE_SIZE_256     (0)
+#define ISCSI_PAGE_SIZE_512     (1)
+#define ISCSI_PAGE_SIZE_1K      (2)
+#define ISCSI_PAGE_SIZE_2K      (3)
+#define ISCSI_PAGE_SIZE_4K      (4)
+#define ISCSI_PAGE_SIZE_8K      (5)
+#define ISCSI_PAGE_SIZE_16K     (6)
+#define ISCSI_PAGE_SIZE_32K     (7)
+#define ISCSI_PAGE_SIZE_64K     (8)
+#define ISCSI_PAGE_SIZE_128K    (9)
+#define ISCSI_PAGE_SIZE_256K    (10)
+#define ISCSI_PAGE_SIZE_512K    (11)
+#define ISCSI_PAGE_SIZE_1M      (12)
+#define ISCSI_PAGE_SIZE_2M      (13)
+#define ISCSI_PAGE_SIZE_4M      (14)
+#define ISCSI_PAGE_SIZE_8M      (15)
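+/* i.e. page size code = log2(page size) - 8 */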
+
+/* iSCSI PDU related defines */
+#define ISCSI_HEADER_SIZE   (48)
+#define ISCSI_DIGEST_SHIFT  (2)
+#define ISCSI_DIGEST_SIZE   (4)
+
+#define B577XX_ISCSI_CONNECTION_TYPE    3
+
+#endif /* __57XX_ISCSI_CONSTANTS_H_ */
diff --git a/drivers/scsi/bnx2i/57xx_iscsi_hsi.h b/drivers/scsi/bnx2i/57xx_iscsi_hsi.h
new file mode 100644
index 00000000000..36af1afef9b
--- /dev/null
+++ b/drivers/scsi/bnx2i/57xx_iscsi_hsi.h
@@ -0,0 +1,1509 @@
+/* 57xx_iscsi_hsi.h: Broadcom NetXtreme II iSCSI HSI.
+ *
+ * Copyright (c) 2006 - 2009 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
+ */
+#ifndef __57XX_ISCSI_HSI_LINUX_LE__
+#define __57XX_ISCSI_HSI_LINUX_LE__
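+/*
+ * Each multi-byte field group below is declared twice, once per host
+ * endianness, so the in-memory layout always matches the little-endian
+ * layout the firmware expects.  Packed subfields are extracted as
+ * (value & FIELD) >> FIELD_SHIFT using the mask/shift pairs that
+ * follow each flags word.
+ */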
+
+/*
+ * iSCSI Async CQE
+ */
+struct bnx2i_async_msg {
+#if defined(__BIG_ENDIAN)
+	u8 op_code;
+	u8 reserved1;
+	u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved0;
+	u8 reserved1;
+	u8 op_code;
+#endif
+	u32 reserved2;
+	u32 exp_cmd_sn;
+	u32 max_cmd_sn;
+	u32 reserved3[2];
+#if defined(__BIG_ENDIAN)
+	u16 reserved5;
+	u8 err_code;
+	u8 reserved4;
+#elif defined(__LITTLE_ENDIAN)
+	u8 reserved4;
+	u8 err_code;
+	u16 reserved5;
+#endif
+	u32 reserved6;
+	u32 lun[2];
+#if defined(__BIG_ENDIAN)
+	u8 async_event;
+	u8 async_vcode;
+	u16 param1;
+#elif defined(__LITTLE_ENDIAN)
+	u16 param1;
+	u8 async_vcode;
+	u8 async_event;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 param2;
+	u16 param3;
+#elif defined(__LITTLE_ENDIAN)
+	u16 param3;
+	u16 param2;
+#endif
+	u32 reserved7[3];
+	u32 cq_req_sn;
+};
+
+
+/*
+ * iSCSI Buffer Descriptor (BD)
+ */
+struct iscsi_bd {
+	u32 buffer_addr_hi;
+	u32 buffer_addr_lo;
+#if defined(__BIG_ENDIAN)
+	u16 reserved0;
+	u16 buffer_length;
+#elif defined(__LITTLE_ENDIAN)
+	u16 buffer_length;
+	u16 reserved0;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 reserved3;
+	u16 flags;
+#define ISCSI_BD_RESERVED1 (0x3F<<0)
+#define ISCSI_BD_RESERVED1_SHIFT 0
+#define ISCSI_BD_LAST_IN_BD_CHAIN (0x1<<6)
+#define ISCSI_BD_LAST_IN_BD_CHAIN_SHIFT 6
+#define ISCSI_BD_FIRST_IN_BD_CHAIN (0x1<<7)
+#define ISCSI_BD_FIRST_IN_BD_CHAIN_SHIFT 7
+#define ISCSI_BD_RESERVED2 (0xFF<<8)
+#define ISCSI_BD_RESERVED2_SHIFT 8
+#elif defined(__LITTLE_ENDIAN)
+	u16 flags;
+#define ISCSI_BD_RESERVED1 (0x3F<<0)
+#define ISCSI_BD_RESERVED1_SHIFT 0
+#define ISCSI_BD_LAST_IN_BD_CHAIN (0x1<<6)
+#define ISCSI_BD_LAST_IN_BD_CHAIN_SHIFT 6
+#define ISCSI_BD_FIRST_IN_BD_CHAIN (0x1<<7)
+#define ISCSI_BD_FIRST_IN_BD_CHAIN_SHIFT 7
+#define ISCSI_BD_RESERVED2 (0xFF<<8)
+#define ISCSI_BD_RESERVED2_SHIFT 8
+	u16 reserved3;
+#endif
+};
+
+
+/*
+ * iSCSI Cleanup SQ WQE
+ */
+struct bnx2i_cleanup_request {
+#if defined(__BIG_ENDIAN)
+	u8 op_code;
+	u8 reserved1;
+	u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved0;
+	u8 reserved1;
+	u8 op_code;
+#endif
+	u32 reserved2[3];
+#if defined(__BIG_ENDIAN)
+	u16 reserved3;
+	u16 itt;
+#define ISCSI_CLEANUP_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_CLEANUP_REQUEST_INDEX_SHIFT 0
+#define ISCSI_CLEANUP_REQUEST_TYPE (0x3<<14)
+#define ISCSI_CLEANUP_REQUEST_TYPE_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+	u16 itt;
+#define ISCSI_CLEANUP_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_CLEANUP_REQUEST_INDEX_SHIFT 0
+#define ISCSI_CLEANUP_REQUEST_TYPE (0x3<<14)
+#define ISCSI_CLEANUP_REQUEST_TYPE_SHIFT 14
+	u16 reserved3;
+#endif
+	u32 reserved4[10];
+#if defined(__BIG_ENDIAN)
+	u8 cq_index;
+	u8 reserved6;
+	u16 reserved5;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved5;
+	u8 reserved6;
+	u8 cq_index;
+#endif
+};
+
+
+/*
+ * iSCSI Cleanup CQE
+ */
+struct bnx2i_cleanup_response {
+#if defined(__BIG_ENDIAN)
+	u8 op_code;
+	u8 status;
+	u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved0;
+	u8 status;
+	u8 op_code;
+#endif
+	u32 reserved1[3];
+	u32 reserved2[2];
+#if defined(__BIG_ENDIAN)
+	u16 reserved4;
+	u8 err_code;
+	u8 reserved3;
+#elif defined(__LITTLE_ENDIAN)
+	u8 reserved3;
+	u8 err_code;
+	u16 reserved4;
+#endif
+	u32 reserved5[7];
+#if defined(__BIG_ENDIAN)
+	u16 reserved6;
+	u16 itt;
+#define ISCSI_CLEANUP_RESPONSE_INDEX (0x3FFF<<0)
+#define ISCSI_CLEANUP_RESPONSE_INDEX_SHIFT 0
+#define ISCSI_CLEANUP_RESPONSE_TYPE (0x3<<14)
+#define ISCSI_CLEANUP_RESPONSE_TYPE_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+	u16 itt;
+#define ISCSI_CLEANUP_RESPONSE_INDEX (0x3FFF<<0)
+#define ISCSI_CLEANUP_RESPONSE_INDEX_SHIFT 0
+#define ISCSI_CLEANUP_RESPONSE_TYPE (0x3<<14)
+#define ISCSI_CLEANUP_RESPONSE_TYPE_SHIFT 14
+	u16 reserved6;
+#endif
+	u32 cq_req_sn;
+};
+
+
+/*
+ * SCSI read/write SQ WQE
+ */
+struct bnx2i_cmd_request {
+#if defined(__BIG_ENDIAN)
+	u8 op_code;
+	u8 op_attr;
+#define ISCSI_CMD_REQUEST_TASK_ATTR (0x7<<0)
+#define ISCSI_CMD_REQUEST_TASK_ATTR_SHIFT 0
+#define ISCSI_CMD_REQUEST_RESERVED1 (0x3<<3)
+#define ISCSI_CMD_REQUEST_RESERVED1_SHIFT 3
+#define ISCSI_CMD_REQUEST_WRITE (0x1<<5)
+#define ISCSI_CMD_REQUEST_WRITE_SHIFT 5
+#define ISCSI_CMD_REQUEST_READ (0x1<<6)
+#define ISCSI_CMD_REQUEST_READ_SHIFT 6
+#define ISCSI_CMD_REQUEST_FINAL (0x1<<7)
+#define ISCSI_CMD_REQUEST_FINAL_SHIFT 7
+	u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved0;
+	u8 op_attr;
+#define ISCSI_CMD_REQUEST_TASK_ATTR (0x7<<0)
+#define ISCSI_CMD_REQUEST_TASK_ATTR_SHIFT 0
+#define ISCSI_CMD_REQUEST_RESERVED1 (0x3<<3)
+#define ISCSI_CMD_REQUEST_RESERVED1_SHIFT 3
+#define ISCSI_CMD_REQUEST_WRITE (0x1<<5)
+#define ISCSI_CMD_REQUEST_WRITE_SHIFT 5
+#define ISCSI_CMD_REQUEST_READ (0x1<<6)
+#define ISCSI_CMD_REQUEST_READ_SHIFT 6
+#define ISCSI_CMD_REQUEST_FINAL (0x1<<7)
+#define ISCSI_CMD_REQUEST_FINAL_SHIFT 7
+	u8 op_code;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 ud_buffer_offset;
+	u16 sd_buffer_offset;
+#elif defined(__LITTLE_ENDIAN)
+	u16 sd_buffer_offset;
+	u16 ud_buffer_offset;
+#endif
+	u32 lun[2];
+#if defined(__BIG_ENDIAN)
+	u16 reserved2;
+	u16 itt;
+#define ISCSI_CMD_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_CMD_REQUEST_INDEX_SHIFT 0
+#define ISCSI_CMD_REQUEST_TYPE (0x3<<14)
+#define ISCSI_CMD_REQUEST_TYPE_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+	u16 itt;
+#define ISCSI_CMD_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_CMD_REQUEST_INDEX_SHIFT 0
+#define ISCSI_CMD_REQUEST_TYPE (0x3<<14)
+#define ISCSI_CMD_REQUEST_TYPE_SHIFT 14
+	u16 reserved2;
+#endif
+	u32 total_data_transfer_length;
+	u32 cmd_sn;
+	u32 reserved3;
+	u32 cdb[4];
+	u32 zero_fill;
+	u32 bd_list_addr_lo;
+	u32 bd_list_addr_hi;
+#if defined(__BIG_ENDIAN)
+	u8 cq_index;
+	u8 sd_start_bd_index;
+	u8 ud_start_bd_index;
+	u8 num_bds;
+#elif defined(__LITTLE_ENDIAN)
+	u8 num_bds;
+	u8 ud_start_bd_index;
+	u8 sd_start_bd_index;
+	u8 cq_index;
+#endif
+};
+
+
+/*
+ * task statistics for write response
+ */
+struct bnx2i_write_resp_task_stat {
+	u32 num_data_ins;
+};
+
+/*
+ * task statistics for read response
+ */
+struct bnx2i_read_resp_task_stat {
+#if defined(__BIG_ENDIAN)
+	u16 num_data_outs;
+	u16 num_r2ts;
+#elif defined(__LITTLE_ENDIAN)
+	u16 num_r2ts;
+	u16 num_data_outs;
+#endif
+};
+
+/*
+ * task statistics for iSCSI cmd response
+ */
+union bnx2i_cmd_resp_task_stat {
+	struct bnx2i_write_resp_task_stat write_stat;
+	struct bnx2i_read_resp_task_stat read_stat;
+};
+
+/*
+ * SCSI Command CQE
+ */
+struct bnx2i_cmd_response {
+#if defined(__BIG_ENDIAN)
+	u8 op_code;
+	u8 response_flags;
+#define ISCSI_CMD_RESPONSE_RESERVED0 (0x1<<0)
+#define ISCSI_CMD_RESPONSE_RESERVED0_SHIFT 0
+#define ISCSI_CMD_RESPONSE_RESIDUAL_UNDERFLOW (0x1<<1)
+#define ISCSI_CMD_RESPONSE_RESIDUAL_UNDERFLOW_SHIFT 1
+#define ISCSI_CMD_RESPONSE_RESIDUAL_OVERFLOW (0x1<<2)
+#define ISCSI_CMD_RESPONSE_RESIDUAL_OVERFLOW_SHIFT 2
+#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_UNDERFLOW (0x1<<3)
+#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_UNDERFLOW_SHIFT 3
+#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_OVERFLOW (0x1<<4)
+#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_OVERFLOW_SHIFT 4
+#define ISCSI_CMD_RESPONSE_RESERVED1 (0x7<<5)
+#define ISCSI_CMD_RESPONSE_RESERVED1_SHIFT 5
+	u8 response;
+	u8 status;
+#elif defined(__LITTLE_ENDIAN)
+	u8 status;
+	u8 response;
+	u8 response_flags;
+#define ISCSI_CMD_RESPONSE_RESERVED0 (0x1<<0)
+#define ISCSI_CMD_RESPONSE_RESERVED0_SHIFT 0
+#define ISCSI_CMD_RESPONSE_RESIDUAL_UNDERFLOW (0x1<<1)
+#define ISCSI_CMD_RESPONSE_RESIDUAL_UNDERFLOW_SHIFT 1
+#define ISCSI_CMD_RESPONSE_RESIDUAL_OVERFLOW (0x1<<2)
+#define ISCSI_CMD_RESPONSE_RESIDUAL_OVERFLOW_SHIFT 2
+#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_UNDERFLOW (0x1<<3)
+#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_UNDERFLOW_SHIFT 3
+#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_OVERFLOW (0x1<<4)
+#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_OVERFLOW_SHIFT 4
+#define ISCSI_CMD_RESPONSE_RESERVED1 (0x7<<5)
+#define ISCSI_CMD_RESPONSE_RESERVED1_SHIFT 5
+	u8 op_code;
+#endif
+	u32 data_length;
+	u32 exp_cmd_sn;
+	u32 max_cmd_sn;
+	u32 reserved2;
+	u32 residual_count;
+#if defined(__BIG_ENDIAN)
+	u16 reserved4;
+	u8 err_code;
+	u8 reserved3;
+#elif defined(__LITTLE_ENDIAN)
+	u8 reserved3;
+	u8 err_code;
+	u16 reserved4;
+#endif
+	u32 reserved5[5];
+	union bnx2i_cmd_resp_task_stat task_stat;
+	u32 reserved6;
+#if defined(__BIG_ENDIAN)
+	u16 reserved7;
+	u16 itt;
+#define ISCSI_CMD_RESPONSE_INDEX (0x3FFF<<0)
+#define ISCSI_CMD_RESPONSE_INDEX_SHIFT 0
+#define ISCSI_CMD_RESPONSE_TYPE (0x3<<14)
+#define ISCSI_CMD_RESPONSE_TYPE_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+	u16 itt;
+#define ISCSI_CMD_RESPONSE_INDEX (0x3FFF<<0)
+#define ISCSI_CMD_RESPONSE_INDEX_SHIFT 0
+#define ISCSI_CMD_RESPONSE_TYPE (0x3<<14)
+#define ISCSI_CMD_RESPONSE_TYPE_SHIFT 14
+	u16 reserved7;
+#endif
+	u32 cq_req_sn;
+};
+
+
+
+/*
+ * firmware middle-path request SQ WQE
+ */
+struct bnx2i_fw_mp_request {
+#if defined(__BIG_ENDIAN)
+	u8 op_code;
+	u8 op_attr;
+	u16 hdr_opaque1;
+#elif defined(__LITTLE_ENDIAN)
+	u16 hdr_opaque1;
+	u8 op_attr;
+	u8 op_code;
+#endif
+	u32 data_length;
+	u32 hdr_opaque2[2];
+#if defined(__BIG_ENDIAN)
+	u16 reserved0;
+	u16 itt;
+#define ISCSI_FW_MP_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_FW_MP_REQUEST_INDEX_SHIFT 0
+#define ISCSI_FW_MP_REQUEST_TYPE (0x3<<14)
+#define ISCSI_FW_MP_REQUEST_TYPE_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+	u16 itt;
+#define ISCSI_FW_MP_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_FW_MP_REQUEST_INDEX_SHIFT 0
+#define ISCSI_FW_MP_REQUEST_TYPE (0x3<<14)
+#define ISCSI_FW_MP_REQUEST_TYPE_SHIFT 14
+	u16 reserved0;
+#endif
+	u32 hdr_opaque3[4];
+	u32 resp_bd_list_addr_lo;
+	u32 resp_bd_list_addr_hi;
+	u32 resp_buffer;
+#define ISCSI_FW_MP_REQUEST_RESP_BUFFER_LENGTH (0xFFFFFF<<0)
+#define ISCSI_FW_MP_REQUEST_RESP_BUFFER_LENGTH_SHIFT 0
+#define ISCSI_FW_MP_REQUEST_NUM_RESP_BDS (0xFF<<24)
+#define ISCSI_FW_MP_REQUEST_NUM_RESP_BDS_SHIFT 24
+#if defined(__BIG_ENDIAN)
+	u16 reserved4;
+	u8 reserved3;
+	u8 flags;
+#define ISCSI_FW_MP_REQUEST_RESERVED1 (0x1<<0)
+#define ISCSI_FW_MP_REQUEST_RESERVED1_SHIFT 0
+#define ISCSI_FW_MP_REQUEST_LOCAL_COMPLETION (0x1<<1)
+#define ISCSI_FW_MP_REQUEST_LOCAL_COMPLETION_SHIFT 1
+#define ISCSI_FW_MP_REQUEST_UPDATE_EXP_STAT_SN (0x1<<2)
+#define ISCSI_FW_MP_REQUEST_UPDATE_EXP_STAT_SN_SHIFT 2
+#define ISCSI_FW_MP_REQUEST_RESERVED2 (0x1F<<3)
+#define ISCSI_FW_MP_REQUEST_RESERVED2_SHIFT 3
+#elif defined(__LITTLE_ENDIAN)
+	u8 flags;
+#define ISCSI_FW_MP_REQUEST_RESERVED1 (0x1<<0)
+#define ISCSI_FW_MP_REQUEST_RESERVED1_SHIFT 0
+#define ISCSI_FW_MP_REQUEST_LOCAL_COMPLETION (0x1<<1)
+#define ISCSI_FW_MP_REQUEST_LOCAL_COMPLETION_SHIFT 1
+#define ISCSI_FW_MP_REQUEST_UPDATE_EXP_STAT_SN (0x1<<2)
+#define ISCSI_FW_MP_REQUEST_UPDATE_EXP_STAT_SN_SHIFT 2
+#define ISCSI_FW_MP_REQUEST_RESERVED2 (0x1F<<3)
+#define ISCSI_FW_MP_REQUEST_RESERVED2_SHIFT 3
+	u8 reserved3;
+	u16 reserved4;
+#endif
+	u32 bd_list_addr_lo;
+	u32 bd_list_addr_hi;
+#if defined(__BIG_ENDIAN)
+	u8 cq_index;
+	u8 reserved6;
+	u8 reserved5;
+	u8 num_bds;
+#elif defined(__LITTLE_ENDIAN)
+	u8 num_bds;
+	u8 reserved5;
+	u8 reserved6;
+	u8 cq_index;
+#endif
+};
+
+
+/*
+ * firmware response - CQE: used only by firmware
+ */
+struct bnx2i_fw_response {
+	u32 hdr_dword1[2];
+	u32 hdr_exp_cmd_sn;
+	u32 hdr_max_cmd_sn;
+	u32 hdr_ttt;
+	u32 hdr_res_cnt;
+	u32 cqe_flags;
+#define ISCSI_FW_RESPONSE_RESERVED2 (0xFF<<0)
+#define ISCSI_FW_RESPONSE_RESERVED2_SHIFT 0
+#define ISCSI_FW_RESPONSE_ERR_CODE (0xFF<<8)
+#define ISCSI_FW_RESPONSE_ERR_CODE_SHIFT 8
+#define ISCSI_FW_RESPONSE_RESERVED3 (0xFFFF<<16)
+#define ISCSI_FW_RESPONSE_RESERVED3_SHIFT 16
+	u32 stat_sn;
+	u32 hdr_dword2[2];
+	u32 hdr_dword3[2];
+	u32 task_stat;
+	u32 reserved0;
+	u32 hdr_itt;
+	u32 cq_req_sn;
+};
+
+
+/*
+ * iSCSI KCQ CQE parameters
+ */
+union iscsi_kcqe_params {
+	u32 reserved0[4];
+};
+
+/*
+ * iSCSI KCQ CQE
+ */
+struct iscsi_kcqe {
+	u32 iscsi_conn_id;
+	u32 completion_status;
+	u32 iscsi_conn_context_id;
+	union iscsi_kcqe_params params;
+#if defined(__BIG_ENDIAN)
+	u8 flags;
+#define ISCSI_KCQE_RESERVED0 (0xF<<0)
+#define ISCSI_KCQE_RESERVED0_SHIFT 0
+#define ISCSI_KCQE_LAYER_CODE (0x7<<4)
+#define ISCSI_KCQE_LAYER_CODE_SHIFT 4
+#define ISCSI_KCQE_RESERVED1 (0x1<<7)
+#define ISCSI_KCQE_RESERVED1_SHIFT 7
+	u8 op_code;
+	u16 qe_self_seq;
+#elif defined(__LITTLE_ENDIAN)
+	u16 qe_self_seq;
+	u8 op_code;
+	u8 flags;
+#define ISCSI_KCQE_RESERVED0 (0xF<<0)
+#define ISCSI_KCQE_RESERVED0_SHIFT 0
+#define ISCSI_KCQE_LAYER_CODE (0x7<<4)
+#define ISCSI_KCQE_LAYER_CODE_SHIFT 4
+#define ISCSI_KCQE_RESERVED1 (0x1<<7)
+#define ISCSI_KCQE_RESERVED1_SHIFT 7
+#endif
+};
+
+
+
+/*
+ * iSCSI KWQE header
+ */
+struct iscsi_kwqe_header {
+#if defined(__BIG_ENDIAN)
+	u8 flags;
+#define ISCSI_KWQE_HEADER_RESERVED0 (0xF<<0)
+#define ISCSI_KWQE_HEADER_RESERVED0_SHIFT 0
+#define ISCSI_KWQE_HEADER_LAYER_CODE (0x7<<4)
+#define ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT 4
+#define ISCSI_KWQE_HEADER_RESERVED1 (0x1<<7)
+#define ISCSI_KWQE_HEADER_RESERVED1_SHIFT 7
+	u8 op_code;
+#elif defined(__LITTLE_ENDIAN)
+	u8 op_code;
+	u8 flags;
+#define ISCSI_KWQE_HEADER_RESERVED0 (0xF<<0)
+#define ISCSI_KWQE_HEADER_RESERVED0_SHIFT 0
+#define ISCSI_KWQE_HEADER_LAYER_CODE (0x7<<4)
+#define ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT 4
+#define ISCSI_KWQE_HEADER_RESERVED1 (0x1<<7)
+#define ISCSI_KWQE_HEADER_RESERVED1_SHIFT 7
+#endif
+};
+
+/*
+ * iSCSI firmware init request 1
+ */
+struct iscsi_kwqe_init1 {
+#if defined(__BIG_ENDIAN)
+	struct iscsi_kwqe_header hdr;
+	u8 reserved0;
+	u8 num_cqs;
+#elif defined(__LITTLE_ENDIAN)
+	u8 num_cqs;
+	u8 reserved0;
+	struct iscsi_kwqe_header hdr;
+#endif
+	u32 dummy_buffer_addr_lo;
+	u32 dummy_buffer_addr_hi;
+#if defined(__BIG_ENDIAN)
+	u16 num_ccells_per_conn;
+	u16 num_tasks_per_conn;
+#elif defined(__LITTLE_ENDIAN)
+	u16 num_tasks_per_conn;
+	u16 num_ccells_per_conn;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 sq_wqes_per_page;
+	u16 sq_num_wqes;
+#elif defined(__LITTLE_ENDIAN)
+	u16 sq_num_wqes;
+	u16 sq_wqes_per_page;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 cq_log_wqes_per_page;
+	u8 flags;
+#define ISCSI_KWQE_INIT1_PAGE_SIZE (0xF<<0)
+#define ISCSI_KWQE_INIT1_PAGE_SIZE_SHIFT 0
+#define ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE (0x1<<4)
+#define ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE_SHIFT 4
+#define ISCSI_KWQE_INIT1_KEEP_ALIVE_ENABLE (0x1<<5)
+#define ISCSI_KWQE_INIT1_KEEP_ALIVE_ENABLE_SHIFT 5
+#define ISCSI_KWQE_INIT1_RESERVED1 (0x3<<6)
+#define ISCSI_KWQE_INIT1_RESERVED1_SHIFT 6
+	u16 cq_num_wqes;
+#elif defined(__LITTLE_ENDIAN)
+	u16 cq_num_wqes;
+	u8 flags;
+#define ISCSI_KWQE_INIT1_PAGE_SIZE (0xF<<0)
+#define ISCSI_KWQE_INIT1_PAGE_SIZE_SHIFT 0
+#define ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE (0x1<<4)
+#define ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE_SHIFT 4
+#define ISCSI_KWQE_INIT1_KEEP_ALIVE_ENABLE (0x1<<5)
+#define ISCSI_KWQE_INIT1_KEEP_ALIVE_ENABLE_SHIFT 5
+#define ISCSI_KWQE_INIT1_RESERVED1 (0x3<<6)
+#define ISCSI_KWQE_INIT1_RESERVED1_SHIFT 6
+	u8 cq_log_wqes_per_page;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 cq_num_pages;
+	u16 sq_num_pages;
+#elif defined(__LITTLE_ENDIAN)
+	u16 sq_num_pages;
+	u16 cq_num_pages;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 rq_buffer_size;
+	u16 rq_num_wqes;
+#elif defined(__LITTLE_ENDIAN)
+	u16 rq_num_wqes;
+	u16 rq_buffer_size;
+#endif
+};
+
+/*
+ * iSCSI firmware init request 2
+ */
+struct iscsi_kwqe_init2 {
+#if defined(__BIG_ENDIAN)
+	struct iscsi_kwqe_header hdr;
+	u16 max_cq_sqn;
+#elif defined(__LITTLE_ENDIAN)
+	u16 max_cq_sqn;
+	struct iscsi_kwqe_header hdr;
+#endif
+	u32 error_bit_map[2];
+	u32 reserved1[5];
+};
+
+/*
+ * Initial iSCSI connection offload request 1
+ */
+struct iscsi_kwqe_conn_offload1 {
+#if defined(__BIG_ENDIAN)
+	struct iscsi_kwqe_header hdr;
+	u16 iscsi_conn_id;
+#elif defined(__LITTLE_ENDIAN)
+	u16 iscsi_conn_id;
+	struct iscsi_kwqe_header hdr;
+#endif
+	u32 sq_page_table_addr_lo;
+	u32 sq_page_table_addr_hi;
+	u32 cq_page_table_addr_lo;
+	u32 cq_page_table_addr_hi;
+	u32 reserved0[3];
+};
+
+/*
+ * iSCSI Page Table Entry (PTE)
+ */
+struct iscsi_pte {
+	u32 hi;
+	u32 lo;
+};
+
+/*
+ * Initial iSCSI connection offload request 2
+ */
+struct iscsi_kwqe_conn_offload2 {
+#if defined(__BIG_ENDIAN)
+	struct iscsi_kwqe_header hdr;
+	u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved0;
+	struct iscsi_kwqe_header hdr;
+#endif
+	u32 rq_page_table_addr_lo;
+	u32 rq_page_table_addr_hi;
+	struct iscsi_pte sq_first_pte;
+	struct iscsi_pte cq_first_pte;
+	u32 num_additional_wqes;
+};
+
+
+/*
+ * Initial iSCSI connection offload request 3
+ */
+struct iscsi_kwqe_conn_offload3 {
+#if defined(__BIG_ENDIAN)
+	struct iscsi_kwqe_header hdr;
+	u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved0;
+	struct iscsi_kwqe_header hdr;
+#endif
+	u32 reserved1;
+	struct iscsi_pte qp_first_pte[3];
+};
+
+
+/*
+ * iSCSI connection update request
+ */
+struct iscsi_kwqe_conn_update {
+#if defined(__BIG_ENDIAN)
+	struct iscsi_kwqe_header hdr;
+	u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved0;
+	struct iscsi_kwqe_header hdr;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 session_error_recovery_level;
+	u8 max_outstanding_r2ts;
+	u8 reserved2;
+	u8 conn_flags;
+#define ISCSI_KWQE_CONN_UPDATE_HEADER_DIGEST (0x1<<0)
+#define ISCSI_KWQE_CONN_UPDATE_HEADER_DIGEST_SHIFT 0
+#define ISCSI_KWQE_CONN_UPDATE_DATA_DIGEST (0x1<<1)
+#define ISCSI_KWQE_CONN_UPDATE_DATA_DIGEST_SHIFT 1
+#define ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T (0x1<<2)
+#define ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T_SHIFT 2
+#define ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA (0x1<<3)
+#define ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA_SHIFT 3
+#define ISCSI_KWQE_CONN_UPDATE_RESERVED1 (0xF<<4)
+#define ISCSI_KWQE_CONN_UPDATE_RESERVED1_SHIFT 4
+#elif defined(__LITTLE_ENDIAN)
+	u8 conn_flags;
+#define ISCSI_KWQE_CONN_UPDATE_HEADER_DIGEST (0x1<<0)
+#define ISCSI_KWQE_CONN_UPDATE_HEADER_DIGEST_SHIFT 0
+#define ISCSI_KWQE_CONN_UPDATE_DATA_DIGEST (0x1<<1)
+#define ISCSI_KWQE_CONN_UPDATE_DATA_DIGEST_SHIFT 1
+#define ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T (0x1<<2)
+#define ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T_SHIFT 2
+#define ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA (0x1<<3)
+#define ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA_SHIFT 3
+#define ISCSI_KWQE_CONN_UPDATE_RESERVED1 (0xF<<4)
+#define ISCSI_KWQE_CONN_UPDATE_RESERVED1_SHIFT 4
+	u8 reserved2;
+	u8 max_outstanding_r2ts;
+	u8 session_error_recovery_level;
+#endif
+	u32 context_id;
+	u32 max_send_pdu_length;
+	u32 max_recv_pdu_length;
+	u32 first_burst_length;
+	u32 max_burst_length;
+	u32 exp_stat_sn;
+};
+
+/*
+ * iSCSI destroy connection request
+ */
+struct iscsi_kwqe_conn_destroy {
+#if defined(__BIG_ENDIAN)
+	struct iscsi_kwqe_header hdr;
+	u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved0;
+	struct iscsi_kwqe_header hdr;
+#endif
+	u32 context_id;
+	u32 reserved1[6];
+};
+
+/*
+ * iSCSI KWQ WQE
+ */
+union iscsi_kwqe {
+	struct iscsi_kwqe_init1 init1;
+	struct iscsi_kwqe_init2 init2;
+	struct iscsi_kwqe_conn_offload1 conn_offload1;
+	struct iscsi_kwqe_conn_offload2 conn_offload2;
+	struct iscsi_kwqe_conn_update conn_update;
+	struct iscsi_kwqe_conn_destroy conn_destroy;
+};
+
+/*
+ * iSCSI Login SQ WQE
+ */
+struct bnx2i_login_request {
+#if defined(__BIG_ENDIAN)
+	u8 op_code;
+	u8 op_attr;
+#define ISCSI_LOGIN_REQUEST_NEXT_STAGE (0x3<<0)
+#define ISCSI_LOGIN_REQUEST_NEXT_STAGE_SHIFT 0
+#define ISCSI_LOGIN_REQUEST_CURRENT_STAGE (0x3<<2)
+#define ISCSI_LOGIN_REQUEST_CURRENT_STAGE_SHIFT 2
+#define ISCSI_LOGIN_REQUEST_RESERVED0 (0x3<<4)
+#define ISCSI_LOGIN_REQUEST_RESERVED0_SHIFT 4
+#define ISCSI_LOGIN_REQUEST_CONT (0x1<<6)
+#define ISCSI_LOGIN_REQUEST_CONT_SHIFT 6
+#define ISCSI_LOGIN_REQUEST_TRANSIT (0x1<<7)
+#define ISCSI_LOGIN_REQUEST_TRANSIT_SHIFT 7
+	u8 version_max;
+	u8 version_min;
+#elif defined(__LITTLE_ENDIAN)
+	u8 version_min;
+	u8 version_max;
+	u8 op_attr;
+#define ISCSI_LOGIN_REQUEST_NEXT_STAGE (0x3<<0)
+#define ISCSI_LOGIN_REQUEST_NEXT_STAGE_SHIFT 0
+#define ISCSI_LOGIN_REQUEST_CURRENT_STAGE (0x3<<2)
+#define ISCSI_LOGIN_REQUEST_CURRENT_STAGE_SHIFT 2
+#define ISCSI_LOGIN_REQUEST_RESERVED0 (0x3<<4)
+#define ISCSI_LOGIN_REQUEST_RESERVED0_SHIFT 4
+#define ISCSI_LOGIN_REQUEST_CONT (0x1<<6)
+#define ISCSI_LOGIN_REQUEST_CONT_SHIFT 6
+#define ISCSI_LOGIN_REQUEST_TRANSIT (0x1<<7)
+#define ISCSI_LOGIN_REQUEST_TRANSIT_SHIFT 7
+	u8 op_code;
+#endif
+	u32 data_length;
+	u32 isid_lo;
+#if defined(__BIG_ENDIAN)
+	u16 isid_hi;
+	u16 tsih;
+#elif defined(__LITTLE_ENDIAN)
+	u16 tsih;
+	u16 isid_hi;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 reserved2;
+	u16 itt;
+#define ISCSI_LOGIN_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_LOGIN_REQUEST_INDEX_SHIFT 0
+#define ISCSI_LOGIN_REQUEST_TYPE (0x3<<14)
+#define ISCSI_LOGIN_REQUEST_TYPE_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+	u16 itt;
+#define ISCSI_LOGIN_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_LOGIN_REQUEST_INDEX_SHIFT 0
+#define ISCSI_LOGIN_REQUEST_TYPE (0x3<<14)
+#define ISCSI_LOGIN_REQUEST_TYPE_SHIFT 14
+	u16 reserved2;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 cid;
+	u16 reserved3;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved3;
+	u16 cid;
+#endif
+	u32 cmd_sn;
+	u32 exp_stat_sn;
+	u32 reserved4;
+	u32 resp_bd_list_addr_lo;
+	u32 resp_bd_list_addr_hi;
+	u32 resp_buffer;
+#define ISCSI_LOGIN_REQUEST_RESP_BUFFER_LENGTH (0xFFFFFF<<0)
+#define ISCSI_LOGIN_REQUEST_RESP_BUFFER_LENGTH_SHIFT 0
+#define ISCSI_LOGIN_REQUEST_NUM_RESP_BDS (0xFF<<24)
+#define ISCSI_LOGIN_REQUEST_NUM_RESP_BDS_SHIFT 24
+#if defined(__BIG_ENDIAN)
+	u16 reserved8;
+	u8 reserved7;
+	u8 flags;
+#define ISCSI_LOGIN_REQUEST_RESERVED5 (0x3<<0)
+#define ISCSI_LOGIN_REQUEST_RESERVED5_SHIFT 0
+#define ISCSI_LOGIN_REQUEST_UPDATE_EXP_STAT_SN (0x1<<2)
+#define ISCSI_LOGIN_REQUEST_UPDATE_EXP_STAT_SN_SHIFT 2
+#define ISCSI_LOGIN_REQUEST_RESERVED6 (0x1F<<3)
+#define ISCSI_LOGIN_REQUEST_RESERVED6_SHIFT 3
+#elif defined(__LITTLE_ENDIAN)
+	u8 flags;
+#define ISCSI_LOGIN_REQUEST_RESERVED5 (0x3<<0)
+#define ISCSI_LOGIN_REQUEST_RESERVED5_SHIFT 0
+#define ISCSI_LOGIN_REQUEST_UPDATE_EXP_STAT_SN (0x1<<2)
+#define ISCSI_LOGIN_REQUEST_UPDATE_EXP_STAT_SN_SHIFT 2
+#define ISCSI_LOGIN_REQUEST_RESERVED6 (0x1F<<3)
+#define ISCSI_LOGIN_REQUEST_RESERVED6_SHIFT 3
+	u8 reserved7;
+	u16 reserved8;
+#endif
+	u32 bd_list_addr_lo;
+	u32 bd_list_addr_hi;
+#if defined(__BIG_ENDIAN)
+	u8 cq_index;
+	u8 reserved10;
+	u8 reserved9;
+	u8 num_bds;
+#elif defined(__LITTLE_ENDIAN)
+	u8 num_bds;
+	u8 reserved9;
+	u8 reserved10;
+	u8 cq_index;
+#endif
+};
+
+
+/*
+ * iSCSI Login CQE
+ */
+struct bnx2i_login_response {
+#if defined(__BIG_ENDIAN)
+	u8 op_code;
+	u8 response_flags;
+#define ISCSI_LOGIN_RESPONSE_NEXT_STAGE (0x3<<0)
+#define ISCSI_LOGIN_RESPONSE_NEXT_STAGE_SHIFT 0
+#define ISCSI_LOGIN_RESPONSE_CURRENT_STAGE (0x3<<2)
+#define ISCSI_LOGIN_RESPONSE_CURRENT_STAGE_SHIFT 2
+#define ISCSI_LOGIN_RESPONSE_RESERVED0 (0x3<<4)
+#define ISCSI_LOGIN_RESPONSE_RESERVED0_SHIFT 4
+#define ISCSI_LOGIN_RESPONSE_CONT (0x1<<6)
+#define ISCSI_LOGIN_RESPONSE_CONT_SHIFT 6
+#define ISCSI_LOGIN_RESPONSE_TRANSIT (0x1<<7)
+#define ISCSI_LOGIN_RESPONSE_TRANSIT_SHIFT 7
+	u8 version_max;
+	u8 version_active;
+#elif defined(__LITTLE_ENDIAN)
+	u8 version_active;
+	u8 version_max;
+	u8 response_flags;
+#define ISCSI_LOGIN_RESPONSE_NEXT_STAGE (0x3<<0)
+#define ISCSI_LOGIN_RESPONSE_NEXT_STAGE_SHIFT 0
+#define ISCSI_LOGIN_RESPONSE_CURRENT_STAGE (0x3<<2)
+#define ISCSI_LOGIN_RESPONSE_CURRENT_STAGE_SHIFT 2
+#define ISCSI_LOGIN_RESPONSE_RESERVED0 (0x3<<4)
+#define ISCSI_LOGIN_RESPONSE_RESERVED0_SHIFT 4
+#define ISCSI_LOGIN_RESPONSE_CONT (0x1<<6)
+#define ISCSI_LOGIN_RESPONSE_CONT_SHIFT 6
+#define ISCSI_LOGIN_RESPONSE_TRANSIT (0x1<<7)
+#define ISCSI_LOGIN_RESPONSE_TRANSIT_SHIFT 7
+	u8 op_code;
+#endif
+	u32 data_length;
+	u32 exp_cmd_sn;
+	u32 max_cmd_sn;
+	u32 reserved1[2];
+#if defined(__BIG_ENDIAN)
+	u16 reserved3;
+	u8 err_code;
+	u8 reserved2;
+#elif defined(__LITTLE_ENDIAN)
+	u8 reserved2;
+	u8 err_code;
+	u16 reserved3;
+#endif
+	u32 stat_sn;
+	u32 isid_lo;
+#if defined(__BIG_ENDIAN)
+	u16 isid_hi;
+	u16 tsih;
+#elif defined(__LITTLE_ENDIAN)
+	u16 tsih;
+	u16 isid_hi;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 status_class;
+	u8 status_detail;
+	u16 reserved4;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved4;
+	u8 status_detail;
+	u8 status_class;
+#endif
+	u32 reserved5[3];
+#if defined(__BIG_ENDIAN)
+	u16 reserved6;
+	u16 itt;
+#define ISCSI_LOGIN_RESPONSE_INDEX (0x3FFF<<0)
+#define ISCSI_LOGIN_RESPONSE_INDEX_SHIFT 0
+#define ISCSI_LOGIN_RESPONSE_TYPE (0x3<<14)
+#define ISCSI_LOGIN_RESPONSE_TYPE_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+	u16 itt;
+#define ISCSI_LOGIN_RESPONSE_INDEX (0x3FFF<<0)
+#define ISCSI_LOGIN_RESPONSE_INDEX_SHIFT 0
+#define ISCSI_LOGIN_RESPONSE_TYPE (0x3<<14)
+#define ISCSI_LOGIN_RESPONSE_TYPE_SHIFT 14
+	u16 reserved6;
+#endif
+	u32 cq_req_sn;
+};
+
+
+/*
+ * iSCSI Logout SQ WQE
+ */
+struct bnx2i_logout_request {
+#if defined(__BIG_ENDIAN)
+	u8 op_code;
+	u8 op_attr;
+#define ISCSI_LOGOUT_REQUEST_REASON (0x7F<<0)
+#define ISCSI_LOGOUT_REQUEST_REASON_SHIFT 0
+#define ISCSI_LOGOUT_REQUEST_ALWAYS_ONE (0x1<<7)
+#define ISCSI_LOGOUT_REQUEST_ALWAYS_ONE_SHIFT 7
+	u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved0;
+	u8 op_attr;
+#define ISCSI_LOGOUT_REQUEST_REASON (0x7F<<0)
+#define ISCSI_LOGOUT_REQUEST_REASON_SHIFT 0
+#define ISCSI_LOGOUT_REQUEST_ALWAYS_ONE (0x1<<7)
+#define ISCSI_LOGOUT_REQUEST_ALWAYS_ONE_SHIFT 7
+	u8 op_code;
+#endif
+	u32 data_length;
+	u32 reserved1[2];
+#if defined(__BIG_ENDIAN)
+	u16 reserved2;
+	u16 itt;
+#define ISCSI_LOGOUT_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_LOGOUT_REQUEST_INDEX_SHIFT 0
+#define ISCSI_LOGOUT_REQUEST_TYPE (0x3<<14)
+#define ISCSI_LOGOUT_REQUEST_TYPE_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+	u16 itt;
+#define ISCSI_LOGOUT_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_LOGOUT_REQUEST_INDEX_SHIFT 0
+#define ISCSI_LOGOUT_REQUEST_TYPE (0x3<<14)
+#define ISCSI_LOGOUT_REQUEST_TYPE_SHIFT 14
+	u16 reserved2;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 cid;
+	u16 reserved3;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved3;
+	u16 cid;
+#endif
+	u32 cmd_sn;
+	u32 reserved4[5];
+	u32 zero_fill;
+	u32 bd_list_addr_lo;
+	u32 bd_list_addr_hi;
+#if defined(__BIG_ENDIAN)
+	u8 cq_index;
+	u8 reserved6;
+	u8 reserved5;
+	u8 num_bds;
+#elif defined(__LITTLE_ENDIAN)
+	u8 num_bds;
+	u8 reserved5;
+	u8 reserved6;
+	u8 cq_index;
+#endif
+};
+
+
+/*
+ * iSCSI Logout CQE
+ */
+struct bnx2i_logout_response {
+#if defined(__BIG_ENDIAN)
+	u8 op_code;
+	u8 reserved1;
+	u8 response;
+	u8 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+	u8 reserved0;
+	u8 response;
+	u8 reserved1;
+	u8 op_code;
+#endif
+	u32 reserved2;
+	u32 exp_cmd_sn;
+	u32 max_cmd_sn;
+	u32 reserved3[2];
+#if defined(__BIG_ENDIAN)
+	u16 reserved5;
+	u8 err_code;
+	u8 reserved4;
+#elif defined(__LITTLE_ENDIAN)
+	u8 reserved4;
+	u8 err_code;
+	u16 reserved5;
+#endif
+	u32 reserved6[3];
+#if defined(__BIG_ENDIAN)
+	u16 time_to_wait;
+	u16 time_to_retain;
+#elif defined(__LITTLE_ENDIAN)
+	u16 time_to_retain;
+	u16 time_to_wait;
+#endif
+	u32 reserved7[3];
+#if defined(__BIG_ENDIAN)
+	u16 reserved8;
+	u16 itt;
+#define ISCSI_LOGOUT_RESPONSE_INDEX (0x3FFF<<0)
+#define ISCSI_LOGOUT_RESPONSE_INDEX_SHIFT 0
+#define ISCSI_LOGOUT_RESPONSE_TYPE (0x3<<14)
+#define ISCSI_LOGOUT_RESPONSE_TYPE_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+	u16 itt;
+#define ISCSI_LOGOUT_RESPONSE_INDEX (0x3FFF<<0)
+#define ISCSI_LOGOUT_RESPONSE_INDEX_SHIFT 0
+#define ISCSI_LOGOUT_RESPONSE_TYPE (0x3<<14)
+#define ISCSI_LOGOUT_RESPONSE_TYPE_SHIFT 14
+	u16 reserved8;
+#endif
+	u32 cq_req_sn;
+};
+
+
+/*
+ * iSCSI Nop-In CQE
+ */
+struct bnx2i_nop_in_msg {
+#if defined(__BIG_ENDIAN)
+	u8 op_code;
+	u8 reserved1;
+	u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved0;
+	u8 reserved1;
+	u8 op_code;
+#endif
+	u32 data_length;
+	u32 exp_cmd_sn;
+	u32 max_cmd_sn;
+	u32 ttt;
+	u32 reserved2;
+#if defined(__BIG_ENDIAN)
+	u16 reserved4;
+	u8 err_code;
+	u8 reserved3;
+#elif defined(__LITTLE_ENDIAN)
+	u8 reserved3;
+	u8 err_code;
+	u16 reserved4;
+#endif
+	u32 reserved5;
+	u32 lun[2];
+	u32 reserved6[4];
+#if defined(__BIG_ENDIAN)
+	u16 reserved7;
+	u16 itt;
+#define ISCSI_NOP_IN_MSG_INDEX (0x3FFF<<0)
+#define ISCSI_NOP_IN_MSG_INDEX_SHIFT 0
+#define ISCSI_NOP_IN_MSG_TYPE (0x3<<14)
+#define ISCSI_NOP_IN_MSG_TYPE_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+	u16 itt;
+#define ISCSI_NOP_IN_MSG_INDEX (0x3FFF<<0)
+#define ISCSI_NOP_IN_MSG_INDEX_SHIFT 0
+#define ISCSI_NOP_IN_MSG_TYPE (0x3<<14)
+#define ISCSI_NOP_IN_MSG_TYPE_SHIFT 14
+	u16 reserved7;
+#endif
+	u32 cq_req_sn;
+};
+
+
+/*
+ * iSCSI NOP-OUT SQ WQE
+ */
+struct bnx2i_nop_out_request {
+#if defined(__BIG_ENDIAN)
+	u8 op_code;
+	u8 op_attr;
+#define ISCSI_NOP_OUT_REQUEST_RESERVED1 (0x7F<<0)
+#define ISCSI_NOP_OUT_REQUEST_RESERVED1_SHIFT 0
+#define ISCSI_NOP_OUT_REQUEST_ALWAYS_ONE (0x1<<7)
+#define ISCSI_NOP_OUT_REQUEST_ALWAYS_ONE_SHIFT 7
+	u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved0;
+	u8 op_attr;
+#define ISCSI_NOP_OUT_REQUEST_RESERVED1 (0x7F<<0)
+#define ISCSI_NOP_OUT_REQUEST_RESERVED1_SHIFT 0
+#define ISCSI_NOP_OUT_REQUEST_ALWAYS_ONE (0x1<<7)
+#define ISCSI_NOP_OUT_REQUEST_ALWAYS_ONE_SHIFT 7
+	u8 op_code;
+#endif
+	u32 data_length;
+	u32 lun[2];
+#if defined(__BIG_ENDIAN)
+	u16 reserved2;
+	u16 itt;
+#define ISCSI_NOP_OUT_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_NOP_OUT_REQUEST_INDEX_SHIFT 0
+#define ISCSI_NOP_OUT_REQUEST_TYPE (0x3<<14)
+#define ISCSI_NOP_OUT_REQUEST_TYPE_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+	u16 itt;
+#define ISCSI_NOP_OUT_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_NOP_OUT_REQUEST_INDEX_SHIFT 0
+#define ISCSI_NOP_OUT_REQUEST_TYPE (0x3<<14)
+#define ISCSI_NOP_OUT_REQUEST_TYPE_SHIFT 14
+	u16 reserved2;
+#endif
+	u32 ttt;
+	u32 cmd_sn;
+	u32 reserved3[2];
+	u32 resp_bd_list_addr_lo;
+	u32 resp_bd_list_addr_hi;
+	u32 resp_buffer;
+#define ISCSI_NOP_OUT_REQUEST_RESP_BUFFER_LENGTH (0xFFFFFF<<0)
+#define ISCSI_NOP_OUT_REQUEST_RESP_BUFFER_LENGTH_SHIFT 0
+#define ISCSI_NOP_OUT_REQUEST_NUM_RESP_BDS (0xFF<<24)
+#define ISCSI_NOP_OUT_REQUEST_NUM_RESP_BDS_SHIFT 24
+#if defined(__BIG_ENDIAN)
+	u16 reserved7;
+	u8 reserved6;
+	u8 flags;
+#define ISCSI_NOP_OUT_REQUEST_RESERVED4 (0x1<<0)
+#define ISCSI_NOP_OUT_REQUEST_RESERVED4_SHIFT 0
+#define ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION (0x1<<1)
+#define ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION_SHIFT 1
+#define ISCSI_NOP_OUT_REQUEST_ZERO_FILL (0x3F<<2)
+#define ISCSI_NOP_OUT_REQUEST_ZERO_FILL_SHIFT 2
+#elif defined(__LITTLE_ENDIAN)
+	u8 flags;
+#define ISCSI_NOP_OUT_REQUEST_RESERVED4 (0x1<<0)
+#define ISCSI_NOP_OUT_REQUEST_RESERVED4_SHIFT 0
+#define ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION (0x1<<1)
+#define ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION_SHIFT 1
+#define ISCSI_NOP_OUT_REQUEST_ZERO_FILL (0x3F<<2)
+#define ISCSI_NOP_OUT_REQUEST_ZERO_FILL_SHIFT 2
+	u8 reserved6;
+	u16 reserved7;
+#endif
+	u32 bd_list_addr_lo;
+	u32 bd_list_addr_hi;
+#if defined(__BIG_ENDIAN)
+	u8 cq_index;
+	u8 reserved9;
+	u8 reserved8;
+	u8 num_bds;
+#elif defined(__LITTLE_ENDIAN)
+	u8 num_bds;
+	u8 reserved8;
+	u8 reserved9;
+	u8 cq_index;
+#endif
+};
+
+/*
+ * iSCSI Reject CQE
+ */
+struct bnx2i_reject_msg {
+#if defined(__BIG_ENDIAN)
+	u8 op_code;
+	u8 reserved1;
+	u8 reason;
+	u8 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+	u8 reserved0;
+	u8 reason;
+	u8 reserved1;
+	u8 op_code;
+#endif
+	u32 data_length;
+	u32 exp_cmd_sn;
+	u32 max_cmd_sn;
+	u32 reserved2[2];
+#if defined(__BIG_ENDIAN)
+	u16 reserved4;
+	u8 err_code;
+	u8 reserved3;
+#elif defined(__LITTLE_ENDIAN)
+	u8 reserved3;
+	u8 err_code;
+	u16 reserved4;
+#endif
+	u32 reserved5[8];
+	u32 cq_req_sn;
+};
+
+/*
+ * bnx2i iSCSI TMF SQ WQE
+ */
+struct bnx2i_tmf_request {
+#if defined(__BIG_ENDIAN)
+	u8 op_code;
+	u8 op_attr;
+#define ISCSI_TMF_REQUEST_FUNCTION (0x7F<<0)
+#define ISCSI_TMF_REQUEST_FUNCTION_SHIFT 0
+#define ISCSI_TMF_REQUEST_ALWAYS_ONE (0x1<<7)
+#define ISCSI_TMF_REQUEST_ALWAYS_ONE_SHIFT 7
+	u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved0;
+	u8 op_attr;
+#define ISCSI_TMF_REQUEST_FUNCTION (0x7F<<0)
+#define ISCSI_TMF_REQUEST_FUNCTION_SHIFT 0
+#define ISCSI_TMF_REQUEST_ALWAYS_ONE (0x1<<7)
+#define ISCSI_TMF_REQUEST_ALWAYS_ONE_SHIFT 7
+	u8 op_code;
+#endif
+	u32 data_length;
+	u32 lun[2];
+#if defined(__BIG_ENDIAN)
+	u16 reserved1;
+	u16 itt;
+#define ISCSI_TMF_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_TMF_REQUEST_INDEX_SHIFT 0
+#define ISCSI_TMF_REQUEST_TYPE (0x3<<14)
+#define ISCSI_TMF_REQUEST_TYPE_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+	u16 itt;
+#define ISCSI_TMF_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_TMF_REQUEST_INDEX_SHIFT 0
+#define ISCSI_TMF_REQUEST_TYPE (0x3<<14)
+#define ISCSI_TMF_REQUEST_TYPE_SHIFT 14
+	u16 reserved1;
+#endif
+	u32 ref_itt;
+	u32 cmd_sn;
+	u32 reserved2;
+	u32 ref_cmd_sn;
+	u32 reserved3[3];
+	u32 zero_fill;
+	u32 bd_list_addr_lo;
+	u32 bd_list_addr_hi;
+#if defined(__BIG_ENDIAN)
+	u8 cq_index;
+	u8 reserved5;
+	u8 reserved4;
+	u8 num_bds;
+#elif defined(__LITTLE_ENDIAN)
+	u8 num_bds;
+	u8 reserved4;
+	u8 reserved5;
+	u8 cq_index;
+#endif
+};
+
+/*
+ * iSCSI Text SQ WQE
+ */
+struct bnx2i_text_request {
+#if defined(__BIG_ENDIAN)
+	u8 op_code;
+	u8 op_attr;
+#define ISCSI_TEXT_REQUEST_RESERVED1 (0x3F<<0)
+#define ISCSI_TEXT_REQUEST_RESERVED1_SHIFT 0
+#define ISCSI_TEXT_REQUEST_CONT (0x1<<6)
+#define ISCSI_TEXT_REQUEST_CONT_SHIFT 6
+#define ISCSI_TEXT_REQUEST_FINAL (0x1<<7)
+#define ISCSI_TEXT_REQUEST_FINAL_SHIFT 7
+	u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved0;
+	u8 op_attr;
+#define ISCSI_TEXT_REQUEST_RESERVED1 (0x3F<<0)
+#define ISCSI_TEXT_REQUEST_RESERVED1_SHIFT 0
+#define ISCSI_TEXT_REQUEST_CONT (0x1<<6)
+#define ISCSI_TEXT_REQUEST_CONT_SHIFT 6
+#define ISCSI_TEXT_REQUEST_FINAL (0x1<<7)
+#define ISCSI_TEXT_REQUEST_FINAL_SHIFT 7
+	u8 op_code;
+#endif
+	u32 data_length;
+	u32 lun[2];
+#if defined(__BIG_ENDIAN)
+	u16 reserved3;
+	u16 itt;
+#define ISCSI_TEXT_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_TEXT_REQUEST_INDEX_SHIFT 0
+#define ISCSI_TEXT_REQUEST_TYPE (0x3<<14)
+#define ISCSI_TEXT_REQUEST_TYPE_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+	u16 itt;
+#define ISCSI_TEXT_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_TEXT_REQUEST_INDEX_SHIFT 0
+#define ISCSI_TEXT_REQUEST_TYPE (0x3<<14)
+#define ISCSI_TEXT_REQUEST_TYPE_SHIFT 14
+	u16 reserved3;
+#endif
+	u32 ttt;
+	u32 cmd_sn;
+	u32 reserved4[2];
+	u32 resp_bd_list_addr_lo;
+	u32 resp_bd_list_addr_hi;
+	u32 resp_buffer;
+#define ISCSI_TEXT_REQUEST_RESP_BUFFER_LENGTH (0xFFFFFF<<0)
+#define ISCSI_TEXT_REQUEST_RESP_BUFFER_LENGTH_SHIFT 0
+#define ISCSI_TEXT_REQUEST_NUM_RESP_BDS (0xFF<<24)
+#define ISCSI_TEXT_REQUEST_NUM_RESP_BDS_SHIFT 24
+	u32 zero_fill;
+	u32 bd_list_addr_lo;
+	u32 bd_list_addr_hi;
+#if defined(__BIG_ENDIAN)
+	u8 cq_index;
+	u8 reserved7;
+	u8 reserved6;
+	u8 num_bds;
+#elif defined(__LITTLE_ENDIAN)
+	u8 num_bds;
+	u8 reserved6;
+	u8 reserved7;
+	u8 cq_index;
+#endif
+};
+
+/*
+ * iSCSI SQ WQE
+ */
+union iscsi_request {
+	struct bnx2i_cmd_request cmd;
+	struct bnx2i_tmf_request tmf;
+	struct bnx2i_nop_out_request nop_out;
+	struct bnx2i_login_request login_req;
+	struct bnx2i_text_request text;
+	struct bnx2i_logout_request logout_req;
+	struct bnx2i_cleanup_request cleanup;
+};
+
+
+/*
+ * iSCSI TMF CQE
+ */
+struct bnx2i_tmf_response {
+#if defined(__BIG_ENDIAN)
+	u8 op_code;
+	u8 reserved1;
+	u8 response;
+	u8 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+	u8 reserved0;
+	u8 response;
+	u8 reserved1;
+	u8 op_code;
+#endif
+	u32 reserved2;
+	u32 exp_cmd_sn;
+	u32 max_cmd_sn;
+	u32 reserved3[2];
+#if defined(__BIG_ENDIAN)
+	u16 reserved5;
+	u8 err_code;
+	u8 reserved4;
+#elif defined(__LITTLE_ENDIAN)
+	u8 reserved4;
+	u8 err_code;
+	u16 reserved5;
+#endif
+	u32 reserved6[7];
+#if defined(__BIG_ENDIAN)
+	u16 reserved7;
+	u16 itt;
+#define ISCSI_TMF_RESPONSE_INDEX (0x3FFF<<0)
+#define ISCSI_TMF_RESPONSE_INDEX_SHIFT 0
+#define ISCSI_TMF_RESPONSE_TYPE (0x3<<14)
+#define ISCSI_TMF_RESPONSE_TYPE_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+	u16 itt;
+#define ISCSI_TMF_RESPONSE_INDEX (0x3FFF<<0)
+#define ISCSI_TMF_RESPONSE_INDEX_SHIFT 0
+#define ISCSI_TMF_RESPONSE_TYPE (0x3<<14)
+#define ISCSI_TMF_RESPONSE_TYPE_SHIFT 14
+	u16 reserved7;
+#endif
+	u32 cq_req_sn;
+};
+
+/*
+ * iSCSI Text CQE
+ */
+struct bnx2i_text_response {
+#if defined(__BIG_ENDIAN)
+	u8 op_code;
+	u8 response_flags;
+#define ISCSI_TEXT_RESPONSE_RESERVED1 (0x3F<<0)
+#define ISCSI_TEXT_RESPONSE_RESERVED1_SHIFT 0
+#define ISCSI_TEXT_RESPONSE_CONT (0x1<<6)
+#define ISCSI_TEXT_RESPONSE_CONT_SHIFT 6
+#define ISCSI_TEXT_RESPONSE_FINAL (0x1<<7)
+#define ISCSI_TEXT_RESPONSE_FINAL_SHIFT 7
+	u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved0;
+	u8 response_flags;
+#define ISCSI_TEXT_RESPONSE_RESERVED1 (0x3F<<0)
+#define ISCSI_TEXT_RESPONSE_RESERVED1_SHIFT 0
+#define ISCSI_TEXT_RESPONSE_CONT (0x1<<6)
+#define ISCSI_TEXT_RESPONSE_CONT_SHIFT 6
+#define ISCSI_TEXT_RESPONSE_FINAL (0x1<<7)
+#define ISCSI_TEXT_RESPONSE_FINAL_SHIFT 7
+	u8 op_code;
+#endif
+	u32 data_length;
+	u32 exp_cmd_sn;
+	u32 max_cmd_sn;
+	u32 ttt;
+	u32 reserved2;
+#if defined(__BIG_ENDIAN)
+	u16 reserved4;
+	u8 err_code;
+	u8 reserved3;
+#elif defined(__LITTLE_ENDIAN)
+	u8 reserved3;
+	u8 err_code;
+	u16 reserved4;
+#endif
+	u32 reserved5;
+	u32 lun[2];
+	u32 reserved6[4];
+#if defined(__BIG_ENDIAN)
+	u16 reserved7;
+	u16 itt;
+#define ISCSI_TEXT_RESPONSE_INDEX (0x3FFF<<0)
+#define ISCSI_TEXT_RESPONSE_INDEX_SHIFT 0
+#define ISCSI_TEXT_RESPONSE_TYPE (0x3<<14)
+#define ISCSI_TEXT_RESPONSE_TYPE_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+	u16 itt;
+#define ISCSI_TEXT_RESPONSE_INDEX (0x3FFF<<0)
+#define ISCSI_TEXT_RESPONSE_INDEX_SHIFT 0
+#define ISCSI_TEXT_RESPONSE_TYPE (0x3<<14)
+#define ISCSI_TEXT_RESPONSE_TYPE_SHIFT 14
+	u16 reserved7;
+#endif
+	u32 cq_req_sn;
+};
+
+/*
+ * iSCSI CQE
+ */
+union iscsi_response {
+	struct bnx2i_cmd_response cmd;
+	struct bnx2i_tmf_response tmf;
+	struct bnx2i_login_response login_resp;
+	struct bnx2i_text_response text;
+	struct bnx2i_logout_response logout_resp;
+	struct bnx2i_cleanup_response cleanup;
+	struct bnx2i_reject_msg reject;
+	struct bnx2i_async_msg async;
+	struct bnx2i_nop_in_msg nop_in;
+};
+
+#endif /* __57XX_ISCSI_HSI_LINUX_LE__ */
diff --git a/drivers/scsi/bnx2i/Kconfig b/drivers/scsi/bnx2i/Kconfig
new file mode 100644
index 00000000000..820d428ae83
--- /dev/null
+++ b/drivers/scsi/bnx2i/Kconfig
@@ -0,0 +1,7 @@
+config SCSI_BNX2_ISCSI
+	tristate "Broadcom NetXtreme II iSCSI support"
+	select SCSI_ISCSI_ATTRS
+	select CNIC
+	---help---
+	  This driver supports iSCSI offload for the Broadcom NetXtreme II
+	  devices.
diff --git a/drivers/scsi/bnx2i/Makefile b/drivers/scsi/bnx2i/Makefile
new file mode 100644
index 00000000000..b5802bd2e76
--- /dev/null
+++ b/drivers/scsi/bnx2i/Makefile
@@ -0,0 +1,3 @@
+bnx2i-y := bnx2i_init.o bnx2i_hwi.o bnx2i_iscsi.o bnx2i_sysfs.o
+
+obj-$(CONFIG_SCSI_BNX2_ISCSI) += bnx2i.o
diff --git a/drivers/scsi/bnx2i/bnx2i.h b/drivers/scsi/bnx2i/bnx2i.h
new file mode 100644
index 00000000000..d7576f28c6e
--- /dev/null
+++ b/drivers/scsi/bnx2i/bnx2i.h
@@ -0,0 +1,771 @@
+/* bnx2i.h: Broadcom NetXtreme II iSCSI driver.
+ *
+ * Copyright (c) 2006 - 2009 Broadcom Corporation
+ * Copyright (c) 2007, 2008 Red Hat, Inc.  All rights reserved.
+ * Copyright (c) 2007, 2008 Mike Christie
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
+ */
+
+#ifndef _BNX2I_H_
+#define _BNX2I_H_
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/in.h>
+#include <linux/kfifo.h>
+#include <linux/netdevice.h>
+#include <linux/completion.h>
+
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi.h>
+#include <scsi/iscsi_proto.h>
+#include <scsi/libiscsi.h>
+#include <scsi/scsi_transport_iscsi.h>
+
+#include "../../net/cnic_if.h"
+#include "57xx_iscsi_hsi.h"
+#include "57xx_iscsi_constants.h"
+
+#define BNX2_ISCSI_DRIVER_NAME		"bnx2i"
+
+#define BNX2I_MAX_ADAPTERS		8
+
+#define ISCSI_MAX_CONNS_PER_HBA		128
+#define ISCSI_MAX_SESS_PER_HBA		ISCSI_MAX_CONNS_PER_HBA
+#define ISCSI_MAX_CMDS_PER_SESS		128
+
+/* Total active commands across all connections supported by devices */
+#define ISCSI_MAX_CMDS_PER_HBA_5708	(28 * (ISCSI_MAX_CMDS_PER_SESS - 1))
+#define ISCSI_MAX_CMDS_PER_HBA_5709	(128 * (ISCSI_MAX_CMDS_PER_SESS - 1))
+#define ISCSI_MAX_CMDS_PER_HBA_57710	(256 * (ISCSI_MAX_CMDS_PER_SESS - 1))
+
+#define ISCSI_MAX_BDS_PER_CMD		32
+
+#define MAX_PAGES_PER_CTRL_STRUCT_POOL	8
+#define BNX2I_RESERVED_SLOW_PATH_CMD_SLOTS	4
+
+/* 5706/08 hardware has limit on maximum buffer size per BD it can handle */
+#define MAX_BD_LENGTH			65535
+#define BD_SPLIT_SIZE			32768
+
+/* min, max & default values for SQ/RQ/CQ size, configurable via modparam */
+#define BNX2I_SQ_WQES_MIN 		16
+#define BNX2I_570X_SQ_WQES_MAX 		128
+#define BNX2I_5770X_SQ_WQES_MAX 	512
+#define BNX2I_570X_SQ_WQES_DEFAULT 	128
+#define BNX2I_5770X_SQ_WQES_DEFAULT 	256
+
+#define BNX2I_570X_CQ_WQES_MAX 		128
+#define BNX2I_5770X_CQ_WQES_MAX 	512
+
+#define BNX2I_RQ_WQES_MIN 		16
+#define BNX2I_RQ_WQES_MAX 		32
+#define BNX2I_RQ_WQES_DEFAULT 		16
+
+/* CCELLs per conn */
+#define BNX2I_CCELLS_MIN		16
+#define BNX2I_CCELLS_MAX		96
+#define BNX2I_CCELLS_DEFAULT		64
+
+#define ITT_INVALID_SIGNATURE		0xFFFF
+
+#define ISCSI_CMD_CLEANUP_TIMEOUT	100
+
+#define BNX2I_CONN_CTX_BUF_SIZE		16384
+
+#define BNX2I_SQ_WQE_SIZE		64
+#define BNX2I_RQ_WQE_SIZE		256
+#define BNX2I_CQE_SIZE			64
+
+#define MB_KERNEL_CTX_SHIFT		8
+#define MB_KERNEL_CTX_SIZE		(1 << MB_KERNEL_CTX_SHIFT)
+
+#define CTX_SHIFT			7
+#define GET_CID_NUM(cid_addr)		((cid_addr) >> CTX_SHIFT)
+
+#define CTX_OFFSET 			0x10000
+#define MAX_CID_CNT			0x4000
+
+/* 5709 context registers */
+#define BNX2_MQ_CONFIG2			0x00003d00
+#define BNX2_MQ_CONFIG2_CONT_SZ		(0x7L<<4)
+#define BNX2_MQ_CONFIG2_FIRST_L4L5	(0x1fL<<8)
+
+/* 57710's BAR2 is mapped to doorbell registers */
+#define BNX2X_DOORBELL_PCI_BAR		2
+#define BNX2X_MAX_CQS			8
+
+#define CNIC_ARM_CQE			1
+#define CNIC_DISARM_CQE			0
+
+#define REG_RD(__hba, offset)				\
+		readl((__hba)->regview + (offset))
+#define REG_WR(__hba, offset, val)			\
+		writel(val, (__hba)->regview + (offset))
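+
+/*
+ * Example usage (illustrative): REG_WR(hba, BNX2_MQ_CONFIG2, val) posts
+ * 'val' to the adapter register mapped at hba->regview + BNX2_MQ_CONFIG2.
+ */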
+
+
+/**
+ * struct generic_pdu_resc - login pdu resource structure
+ *
+ * @req_buf:            driver buffer used to stage payload associated with
+ *                      the login request
+ * @req_dma_addr:       dma address for iscsi login request payload buffer
+ * @req_buf_size:       actual login request payload length
+ * @req_wr_ptr:         pointer into login request buffer where next data is
+ *                      to be written
+ * @resp_hdr:           iscsi header where iscsi login response header is to
+ *                      be recreated
+ * @resp_buf:           buffer to stage login response payload
+ * @resp_dma_addr:      login response payload buffer dma address
+ * @resp_buf_size:      login response payload length
+ * @resp_wr_ptr:        pointer into login response buffer where next data is
+ *                      to be written
+ * @req_bd_tbl:         iscsi login request payload BD table
+ * @req_bd_dma:         login request BD table dma address
+ * @resp_bd_tbl:        iscsi login response payload BD table
+ * @resp_bd_dma:        login response BD table dma address
+ *
+ * following structure defines buffer info for generic pdus such as iSCSI Login,
+ *	Logout and NOP
+ */
+struct generic_pdu_resc {
+	char *req_buf;
+	dma_addr_t req_dma_addr;
+	u32 req_buf_size;
+	char *req_wr_ptr;
+	struct iscsi_hdr resp_hdr;
+	char *resp_buf;
+	dma_addr_t resp_dma_addr;
+	u32 resp_buf_size;
+	char *resp_wr_ptr;
+	char *req_bd_tbl;
+	dma_addr_t req_bd_dma;
+	char *resp_bd_tbl;
+	dma_addr_t resp_bd_dma;
+};
+
+
+/**
+ * struct bd_resc_page - tracks DMA'able memory allocated for BD tables
+ *
+ * @link:               list head to link elements
+ * @max_ptrs:           maximum pointers that can be stored in this page
+ * @num_valid:          number of valid pointers in this page
+ * @page:               base address for page pointer array
+ *
+ * structure to track DMA'able memory allocated for command BD tables
+ */
+struct bd_resc_page {
+	struct list_head link;
+	u32 max_ptrs;
+	u32 num_valid;
+	void *page[1];
+};
+
+
+/**
+ * struct io_bdt - I/O buffer descriptor table
+ *
+ * @bd_tbl:             BD table's virtual address
+ * @bd_tbl_dma:         BD table's dma address
+ * @bd_valid:           num valid BD entries
+ *
+ * IO BD table
+ */
+struct io_bdt {
+	struct iscsi_bd *bd_tbl;
+	dma_addr_t bd_tbl_dma;
+	u16 bd_valid;
+};
+
+
+/**
+ * struct bnx2i_cmd - iscsi command structure
+ *
+ * @hdr:                iSCSI header for this command
+ * @conn:               bnx2i connection this command belongs to
+ * @scsi_cmd:           SCSI-ML task pointer corresponding to this iscsi cmd
+ * @sg:                 SG list
+ * @io_tbl:             buffer descriptor (BD) table
+ * @bd_tbl_dma:         buffer descriptor (BD) table's dma address
+ * @req:                pre-built SCSI-CMD request WQE, copied to the SQ when
+ *                      the command is posted
+ */
+struct bnx2i_cmd {
+	struct iscsi_hdr hdr;
+	struct bnx2i_conn *conn;
+	struct scsi_cmnd *scsi_cmd;
+	struct scatterlist *sg;
+	struct io_bdt io_tbl;
+	dma_addr_t bd_tbl_dma;
+	struct bnx2i_cmd_request req;
+};
+
+
+/**
+ * struct bnx2i_conn - iscsi connection structure
+ *
+ * @cls_conn:              pointer to iscsi cls conn
+ * @hba:                   adapter structure pointer
+ * @cmd_cleanup_cmpl:      completion signaled when outstanding commands have
+ *                         been cleaned up
+ * @is_bound:              flags whether the connection is bound to an endpoint
+ * @iscsi_conn_cid:        iscsi conn id
+ * @fw_cid:                firmware iscsi context id
+ * @ep:                    endpoint structure pointer
+ * @gen_pdu:               login/nopout/logout pdu resources
+ * @violation_notified:    bit mask used to track iscsi error/warning messages
+ *                         already printed out
+ *
+ * iSCSI connection structure
+ */
+struct bnx2i_conn {
+	struct iscsi_cls_conn *cls_conn;
+	struct bnx2i_hba *hba;
+	struct completion cmd_cleanup_cmpl;
+	int is_bound;
+
+	u32 iscsi_conn_cid;
+#define BNX2I_CID_RESERVED	0x5AFF
+	u32 fw_cid;
+
+	struct timer_list poll_timer;
+	/*
+	 * Queue Pair (QP) related structure elements.
+	 */
+	struct bnx2i_endpoint *ep;
+
+	/*
+	 * Buffer for login negotiation process
+	 */
+	struct generic_pdu_resc gen_pdu;
+	u64 violation_notified;
+};
+
+
+
+/**
+ * struct iscsi_cid_queue - Per adapter iscsi cid queue
+ *
+ * @cid_que_base:           queue base memory
+ * @cid_que:                queue memory pointer
+ * @cid_q_prod_idx:         producer index
+ * @cid_q_cons_idx:         consumer index
+ * @cid_q_max_idx:          max index, used to detect wrap around condition
+ * @cid_free_cnt:           queue size
+ * @conn_cid_tbl:           iscsi cid to conn structure mapping table
+ *
+ * Per adapter iSCSI CID Queue
+ */
+struct iscsi_cid_queue {
+	void *cid_que_base;
+	u32 *cid_que;
+	u32 cid_q_prod_idx;
+	u32 cid_q_cons_idx;
+	u32 cid_q_max_idx;
+	u32 cid_free_cnt;
+	struct bnx2i_conn **conn_cid_tbl;
+};
+
+/**
+ * struct bnx2i_hba - bnx2i adapter structure
+ *
+ * @link:                  list head to link elements
+ * @cnic:                  pointer to cnic device
+ * @pcidev:                pointer to pci dev
+ * @netdev:                pointer to netdev structure
+ * @regview:               mapped PCI register space
+ * @age:                   age, incremented by every recovery
+ * @cnic_dev_type:         cnic device type, 5706/5708/5709/57710
+ * @mail_queue_access:     mailbox queue access mode, applicable to 5709 only
+ * @reg_with_cnic:         indicates whether the device is registered with CNIC
+ * @adapter_state:         adapter state, UP, GOING_DOWN, LINK_DOWN
+ * @mtu_supported:         Ethernet MTU supported
+ * @shost:                 scsi host pointer
+ * @max_sqes:              SQ size
+ * @max_rqes:              RQ size
+ * @max_cqes:              CQ size
+ * @num_ccell:             number of command cells per connection
+ * @ofld_conns_active:     count of active offloaded connections
+ * @max_active_conns:      max offload connections supported by this device
+ * @cid_que:               iscsi cid queue
+ * @ep_rdwr_lock:          read / write lock to synchronize various ep lists
+ * @ep_ofld_list:          connection list for pending offload completion
+ * @ep_destroy_list:       connection list for pending destroy completion
+ * @mp_bd_tbl:             BD table to be used with middle path requests
+ * @mp_bd_dma:             DMA address of 'mp_bd_tbl' memory buffer
+ * @dummy_buffer:          Dummy buffer to be used with zero length scsicmd reqs
+ * @dummy_buf_dma:         DMA address of 'dummy_buffer' memory buffer
+ * @lock:                  lock to synchronize access to hba structure
+ * @pci_did:               PCI device ID
+ * @pci_vid:               PCI vendor ID
+ * @pci_sdid:              PCI subsystem device ID
+ * @pci_svid:              PCI subsystem vendor ID
+ * @pci_func:              PCI function number in system pci tree
+ * @pci_devno:             PCI device number in system pci tree
+ * @num_wqe_sent:          statistic counter, total wqe's sent
+ * @num_cqe_rcvd:          statistic counter, total cqe's received
+ * @num_intr_claimed:      statistic counter, total interrupts claimed
+ * @link_changed_count:    statistic counter, num of link change notifications
+ *                         received
+ * @ipaddr_changed_count:  statistic counter, num times IP address changed while
+ *                         at least one connection is offloaded
+ * @num_sess_opened:       statistic counter, total num sessions opened
+ * @num_conn_opened:       statistic counter, total num conns opened on this hba
+ * @ctx_ccell_tasks:       captures number of ccells and tasks supported by
+ *                         currently offloaded connection, used to decode
+ *                         context memory
+ *
+ * Adapter Data Structure
+ */
+struct bnx2i_hba {
+	struct list_head link;
+	struct cnic_dev *cnic;
+	struct pci_dev *pcidev;
+	struct net_device *netdev;
+	void __iomem *regview;
+
+	u32 age;
+	unsigned long cnic_dev_type;
+		#define BNX2I_NX2_DEV_5706		0x0
+		#define BNX2I_NX2_DEV_5708		0x1
+		#define BNX2I_NX2_DEV_5709		0x2
+		#define BNX2I_NX2_DEV_57710		0x3
+	u32 mail_queue_access;
+		#define BNX2I_MQ_KERNEL_MODE		0x0
+		#define BNX2I_MQ_KERNEL_BYPASS_MODE	0x1
+		#define BNX2I_MQ_BIN_MODE		0x2
+	unsigned long  reg_with_cnic;
+		#define BNX2I_CNIC_REGISTERED		1
+
+	unsigned long  adapter_state;
+		#define ADAPTER_STATE_UP		0
+		#define ADAPTER_STATE_GOING_DOWN	1
+		#define ADAPTER_STATE_LINK_DOWN		2
+		#define ADAPTER_STATE_INIT_FAILED	31
+	unsigned int mtu_supported;
+		#define BNX2I_MAX_MTU_SUPPORTED		1500
+
+	struct Scsi_Host *shost;
+
+	u32 max_sqes;
+	u32 max_rqes;
+	u32 max_cqes;
+	u32 num_ccell;
+
+	int ofld_conns_active;
+
+	int max_active_conns;
+	struct iscsi_cid_queue cid_que;
+
+	rwlock_t ep_rdwr_lock;
+	struct list_head ep_ofld_list;
+	struct list_head ep_destroy_list;
+
+	/*
+	 * BD table to be used with MP (Middle Path) requests.
+	 */
+	char *mp_bd_tbl;
+	dma_addr_t mp_bd_dma;
+	char *dummy_buffer;
+	dma_addr_t dummy_buf_dma;
+
+	spinlock_t lock;	/* protects hba structure access */
+	struct mutex net_dev_lock;/* sync net device access */
+
+	/*
+	 * PCI related info.
+	 */
+	u16 pci_did;
+	u16 pci_vid;
+	u16 pci_sdid;
+	u16 pci_svid;
+	u16 pci_func;
+	u16 pci_devno;
+
+	/*
+	 * Statistics useful during development and, at a later stage,
+	 * for score boarding.
+	 */
+	u32 num_wqe_sent;
+	u32 num_cqe_rcvd;
+	u32 num_intr_claimed;
+	u32 link_changed_count;
+	u32 ipaddr_changed_count;
+	u32 num_sess_opened;
+	u32 num_conn_opened;
+	unsigned int ctx_ccell_tasks;
+};
+
+
+/*******************************************************************************
+ * 	QP [ SQ / RQ / CQ ] info.
+ ******************************************************************************/
+
+/*
+ * SQ/RQ/CQ generic structure definition
+ */
+struct sqe {
+	u8 sqe_byte[BNX2I_SQ_WQE_SIZE];
+};
+
+struct rqe {
+	u8 rqe_byte[BNX2I_RQ_WQE_SIZE];
+};
+
+struct cqe {
+	u8 cqe_byte[BNX2I_CQE_SIZE];
+};
+
+
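+/*
+ * Byte offsets of the per-connection doorbell/event registers within the
+ * mapped context window; the registers are sub-word fields inside 32-bit
+ * words, which is why the offsets below differ with host endianness.
+ */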
+enum {
+#if defined(__LITTLE_ENDIAN)
+	CNIC_EVENT_COAL_INDEX	= 0x0,
+	CNIC_SEND_DOORBELL	= 0x4,
+	CNIC_EVENT_CQ_ARM	= 0x7,
+	CNIC_RECV_DOORBELL	= 0x8
+#elif defined(__BIG_ENDIAN)
+	CNIC_EVENT_COAL_INDEX	= 0x2,
+	CNIC_SEND_DOORBELL	= 0x6,
+	CNIC_EVENT_CQ_ARM	= 0x4,
+	CNIC_RECV_DOORBELL	= 0xa
+#endif
+};
+
+
+/*
+ * CQ DB
+ */
+struct bnx2x_iscsi_cq_pend_cmpl {
+	/* CQ producer, updated by Ustorm */
+	u16 ustrom_prod;
+	/* CQ pending completion counter */
+	u16 pend_cntr;
+};
+
+
+struct bnx2i_5771x_cq_db {
+	struct bnx2x_iscsi_cq_pend_cmpl qp_pend_cmpl[BNX2X_MAX_CQS];
+	/* CQ pending completion ITT array */
+	u16 itt[BNX2X_MAX_CQS];
+	/* Cstorm CQ sequence to notify array, updated by driver */
+	u16 sqn[BNX2X_MAX_CQS];
+	u32 reserved[4];	/* 16 byte alignment */
+};
+
+
+struct bnx2i_5771x_sq_rq_db {
+	u16 prod_idx;
+	u8 reserved0[14]; /* Pad structure size to 16 bytes */
+};
+
+
+struct bnx2i_5771x_dbell_hdr {
+	u8 header;
+	/* 1 for rx doorbell, 0 for tx doorbell */
+#define B577XX_DOORBELL_HDR_RX				(0x1<<0)
+#define B577XX_DOORBELL_HDR_RX_SHIFT			0
+	/* 0 for normal doorbell, 1 for advertise wnd doorbell */
+#define B577XX_DOORBELL_HDR_DB_TYPE			(0x1<<1)
+#define B577XX_DOORBELL_HDR_DB_TYPE_SHIFT		1
+	/* rdma tx only: DPM transaction size specifier (64/128/256/512B) */
+#define B577XX_DOORBELL_HDR_DPM_SIZE			(0x3<<2)
+#define B577XX_DOORBELL_HDR_DPM_SIZE_SHIFT		2
+	/* connection type */
+#define B577XX_DOORBELL_HDR_CONN_TYPE			(0xF<<4)
+#define B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT		4
+};
+
+struct bnx2i_5771x_dbell {
+	struct bnx2i_5771x_dbell_hdr dbell;
+	u8 pad[3];
+};
+
+/**
+ * struct qp_info - QP (shared queue region) attributes structure
+ *
+ * @ctx_base:           ioremapped pci register base to access doorbell register
+ *                      pertaining to this offloaded connection
+ * @sq_virt:            virtual address of send queue (SQ) region
+ * @sq_phys:            DMA address of SQ memory region
+ * @sq_mem_size:        SQ size
+ * @sq_prod_qe:         SQ producer entry pointer
+ * @sq_cons_qe:         SQ consumer entry pointer
+ * @sq_first_qe:        virtual address of first entry in SQ
+ * @sq_last_qe:         virtual address of last entry in SQ
+ * @sq_prod_idx:        SQ producer index
+ * @sq_cons_idx:        SQ consumer index
+ * @sqe_left:           number of SQ entries left
+ * @sq_pgtbl_virt:      page table describing buffers constituting SQ region
+ * @sq_pgtbl_phys:      dma address of 'sq_pgtbl_virt'
+ * @sq_pgtbl_size:      SQ page table size
+ * @cq_virt:            virtual address of completion queue (CQ) region
+ * @cq_phys:            DMA address of CQ memory region
+ * @cq_mem_size:        CQ size
+ * @cq_prod_qe:         CQ producer entry pointer
+ * @cq_cons_qe:         CQ consumer entry pointer
+ * @cq_first_qe:        virtual address of first entry in CQ
+ * @cq_last_qe:         virtual address of last entry in CQ
+ * @cq_prod_idx:        CQ producer index
+ * @cq_cons_idx:        CQ consumer index
+ * @cqe_left:           number of CQ entries left
+ * @cqe_size:           size of each CQ entry
+ * @cqe_exp_seq_sn:     next expected CQE sequence number
+ * @cq_pgtbl_virt:      page table describing buffers constituting CQ region
+ * @cq_pgtbl_phys:      dma address of 'cq_pgtbl_virt'
+ * @cq_pgtbl_size:      CQ page table size
+ * @rq_virt:            virtual address of receive queue (RQ) region
+ * @rq_phys:            DMA address of RQ memory region
+ * @rq_mem_size:        RQ size
+ * @rq_prod_qe:         RQ producer entry pointer
+ * @rq_cons_qe:         RQ consumer entry pointer
+ * @rq_first_qe:        virtual address of first entry in RQ
+ * @rq_last_qe:         virtual address of last entry in RQ
+ * @rq_prod_idx:        RQ producer index
+ * @rq_cons_idx:        RQ consumer index
+ * @rqe_left:           number of RQ entries left
+ * @rq_pgtbl_virt:      page table describing buffers constituting RQ region
+ * @rq_pgtbl_phys:      dma address of 'rq_pgtbl_virt'
+ * @rq_pgtbl_size:      RQ page table size
+ *
+ * queue pair (QP) is a per connection shared data structure which is used
+ *	to send work requests (SQ), receive completion notifications (CQ)
+ *	and receive asynchronous / scsi sense info (RQ). 'qp_info' structure
+ *	below holds queue memory, consumer/producer indexes and page table
+ *	information
+ */
+struct qp_info {
+	void __iomem *ctx_base;
+#define DPM_TRIGER_TYPE			0x40
+
+#define BNX2I_570x_QUE_DB_SIZE		0
+#define BNX2I_5771x_QUE_DB_SIZE		16
+	struct sqe *sq_virt;
+	dma_addr_t sq_phys;
+	u32 sq_mem_size;
+
+	struct sqe *sq_prod_qe;
+	struct sqe *sq_cons_qe;
+	struct sqe *sq_first_qe;
+	struct sqe *sq_last_qe;
+	u16 sq_prod_idx;
+	u16 sq_cons_idx;
+	u32 sqe_left;
+
+	void *sq_pgtbl_virt;
+	dma_addr_t sq_pgtbl_phys;
+	u32 sq_pgtbl_size;	/* set to PAGE_SIZE for 5708 & 5709 */
+
+	struct cqe *cq_virt;
+	dma_addr_t cq_phys;
+	u32 cq_mem_size;
+
+	struct cqe *cq_prod_qe;
+	struct cqe *cq_cons_qe;
+	struct cqe *cq_first_qe;
+	struct cqe *cq_last_qe;
+	u16 cq_prod_idx;
+	u16 cq_cons_idx;
+	u32 cqe_left;
+	u32 cqe_size;
+	u32 cqe_exp_seq_sn;
+
+	void *cq_pgtbl_virt;
+	dma_addr_t cq_pgtbl_phys;
+	u32 cq_pgtbl_size;	/* set to PAGE_SIZE for 5708 & 5709 */
+
+	struct rqe *rq_virt;
+	dma_addr_t rq_phys;
+	u32 rq_mem_size;
+
+	struct rqe *rq_prod_qe;
+	struct rqe *rq_cons_qe;
+	struct rqe *rq_first_qe;
+	struct rqe *rq_last_qe;
+	u16 rq_prod_idx;
+	u16 rq_cons_idx;
+	u32 rqe_left;
+
+	void *rq_pgtbl_virt;
+	dma_addr_t rq_pgtbl_phys;
+	u32 rq_pgtbl_size;	/* set to PAGE_SIZE for 5708 & 5709 */
+};
+
+
+
+/*
+ * CID handles
+ */
+struct ep_handles {
+	u32 fw_cid;
+	u32 drv_iscsi_cid;
+	u16 pg_cid;
+	u16 rsvd;
+};
+
+
+enum {
+	EP_STATE_IDLE                   = 0x0,
+	EP_STATE_PG_OFLD_START          = 0x1,
+	EP_STATE_PG_OFLD_COMPL          = 0x2,
+	EP_STATE_OFLD_START             = 0x4,
+	EP_STATE_OFLD_COMPL             = 0x8,
+	EP_STATE_CONNECT_START          = 0x10,
+	EP_STATE_CONNECT_COMPL          = 0x20,
+	EP_STATE_ULP_UPDATE_START       = 0x40,
+	EP_STATE_ULP_UPDATE_COMPL       = 0x80,
+	EP_STATE_DISCONN_START          = 0x100,
+	EP_STATE_DISCONN_COMPL          = 0x200,
+	EP_STATE_CLEANUP_START          = 0x400,
+	EP_STATE_CLEANUP_CMPL           = 0x800,
+	EP_STATE_TCP_FIN_RCVD           = 0x1000,
+	EP_STATE_TCP_RST_RCVD           = 0x2000,
+	EP_STATE_PG_OFLD_FAILED         = 0x1000000,
+	EP_STATE_ULP_UPDATE_FAILED      = 0x2000000,
+	EP_STATE_CLEANUP_FAILED         = 0x4000000,
+	EP_STATE_OFLD_FAILED            = 0x8000000,
+	EP_STATE_CONNECT_FAILED         = 0x10000000,
+	EP_STATE_DISCONN_TIMEDOUT       = 0x20000000,
+};
+
+/**
+ * struct bnx2i_endpoint - representation of tcp connection in NX2 world
+ *
+ * @link:               list head to link elements
+ * @hba:                adapter to which this connection belongs
+ * @conn:               iscsi connection this EP is linked to
+ * @cm_sk:              cnic sock struct
+ * @hba_age:            age to detect if 'iscsid' issues ep_disconnect()
+ *                      after HBA reset is completed by bnx2i/cnic/bnx2
+ *                      modules
+ * @state:              tracks offload connection state machine
+ * @timestamp:          time stamp (jiffies)
+ * @num_active_cmds:    number of commands currently outstanding on this
+ *                      endpoint
+ * @qp:                 QP information
+ * @ids:                contains chip allocated *context id* & driver assigned
+ *                      *iscsi cid*
+ * @ofld_timer:         offload timer to detect timeout
+ * @ofld_wait:          wait queue
+ *
+ * Endpoint Structure - equivalent of tcp socket structure
+ */
+struct bnx2i_endpoint {
+	struct list_head link;
+	struct bnx2i_hba *hba;
+	struct bnx2i_conn *conn;
+	struct cnic_sock *cm_sk;
+	u32 hba_age;
+	u32 state;
+	unsigned long timestamp;
+	int num_active_cmds;
+
+	struct qp_info qp;
+	struct ep_handles ids;
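+		/* convenience aliases for the 'ids' sub-fields */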
+		#define ep_iscsi_cid	ids.drv_iscsi_cid
+		#define ep_cid		ids.fw_cid
+		#define ep_pg_cid	ids.pg_cid
+	struct timer_list ofld_timer;
+	wait_queue_head_t ofld_wait;
+};
+
+
+
+/* Global variables */
+extern unsigned int error_mask1, error_mask2;
+extern u64 iscsi_error_mask;
+extern unsigned int en_tcp_dack;
+extern unsigned int event_coal_div;
+
+extern struct scsi_transport_template *bnx2i_scsi_xport_template;
+extern struct iscsi_transport bnx2i_iscsi_transport;
+extern struct cnic_ulp_ops bnx2i_cnic_cb;
+
+extern unsigned int sq_size;
+extern unsigned int rq_size;
+
+extern struct device_attribute *bnx2i_dev_attributes[];
+
+
+
+/*
+ * Function Prototypes
+ */
+extern void bnx2i_identify_device(struct bnx2i_hba *hba);
+extern void bnx2i_register_device(struct bnx2i_hba *hba);
+
+extern void bnx2i_ulp_init(struct cnic_dev *dev);
+extern void bnx2i_ulp_exit(struct cnic_dev *dev);
+extern void bnx2i_start(void *handle);
+extern void bnx2i_stop(void *handle);
+extern void bnx2i_reg_dev_all(void);
+extern void bnx2i_unreg_dev_all(void);
+extern struct bnx2i_hba *get_adapter_list_head(void);
+
+struct bnx2i_conn *bnx2i_get_conn_from_id(struct bnx2i_hba *hba,
+					  u16 iscsi_cid);
+
+int bnx2i_alloc_ep_pool(void);
+void bnx2i_release_ep_pool(void);
+struct bnx2i_endpoint *bnx2i_ep_ofld_list_next(struct bnx2i_hba *hba);
+struct bnx2i_endpoint *bnx2i_ep_destroy_list_next(struct bnx2i_hba *hba);
+
+struct bnx2i_hba *bnx2i_find_hba_for_cnic(struct cnic_dev *cnic);
+
+struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic);
+void bnx2i_free_hba(struct bnx2i_hba *hba);
+
+void bnx2i_get_rq_buf(struct bnx2i_conn *conn, char *ptr, int len);
+void bnx2i_put_rq_buf(struct bnx2i_conn *conn, int count);
+
+void bnx2i_iscsi_unmap_sg_list(struct bnx2i_cmd *cmd);
+
+void bnx2i_drop_session(struct iscsi_cls_session *session);
+
+extern int bnx2i_send_fw_iscsi_init_msg(struct bnx2i_hba *hba);
+extern int bnx2i_send_iscsi_login(struct bnx2i_conn *conn,
+				  struct iscsi_task *mtask);
+extern int bnx2i_send_iscsi_tmf(struct bnx2i_conn *conn,
+				  struct iscsi_task *mtask);
+extern int bnx2i_send_iscsi_scsicmd(struct bnx2i_conn *conn,
+				    struct bnx2i_cmd *cmnd);
+extern int bnx2i_send_iscsi_nopout(struct bnx2i_conn *conn,
+				   struct iscsi_task *mtask, u32 ttt,
+				   char *datap, int data_len, int unsol);
+extern int bnx2i_send_iscsi_logout(struct bnx2i_conn *conn,
+				   struct iscsi_task *mtask);
+extern void bnx2i_send_cmd_cleanup_req(struct bnx2i_hba *hba,
+				       struct bnx2i_cmd *cmd);
+extern void bnx2i_send_conn_ofld_req(struct bnx2i_hba *hba,
+				     struct bnx2i_endpoint *ep);
+extern void bnx2i_update_iscsi_conn(struct iscsi_conn *conn);
+extern void bnx2i_send_conn_destroy(struct bnx2i_hba *hba,
+				    struct bnx2i_endpoint *ep);
+
+extern int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba,
+			       struct bnx2i_endpoint *ep);
+extern void bnx2i_free_qp_resc(struct bnx2i_hba *hba,
+			       struct bnx2i_endpoint *ep);
+extern void bnx2i_ep_ofld_timer(unsigned long data);
+extern struct bnx2i_endpoint *bnx2i_find_ep_in_ofld_list(
+		struct bnx2i_hba *hba, u32 iscsi_cid);
+extern struct bnx2i_endpoint *bnx2i_find_ep_in_destroy_list(
+		struct bnx2i_hba *hba, u32 iscsi_cid);
+
+extern int bnx2i_map_ep_dbell_regs(struct bnx2i_endpoint *ep);
+extern void bnx2i_arm_cq_event_coalescing(struct bnx2i_endpoint *ep, u8 action);
+
+/* Debug related function prototypes */
+extern void bnx2i_print_pend_cmd_queue(struct bnx2i_conn *conn);
+extern void bnx2i_print_active_cmd_queue(struct bnx2i_conn *conn);
+extern void bnx2i_print_xmit_pdu_queue(struct bnx2i_conn *conn);
+extern void bnx2i_print_recv_state(struct bnx2i_conn *conn);
+
+#endif
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
new file mode 100644
index 00000000000..906cef5cda8
--- /dev/null
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -0,0 +1,2405 @@
+/* bnx2i_hwi.c: Broadcom NetXtreme II iSCSI driver.
+ *
+ * Copyright (c) 2006 - 2009 Broadcom Corporation
+ * Copyright (c) 2007, 2008 Red Hat, Inc.  All rights reserved.
+ * Copyright (c) 2007, 2008 Mike Christie
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
+ */
+
+#include <scsi/scsi_tcq.h>
+#include <scsi/libiscsi.h>
+#include "bnx2i.h"
+
+/**
+ * bnx2i_get_cid_num - get cid from ep
+ * @ep: 	endpoint pointer
+ *
+ * Maps the fw context id to the iscsi cid; the 57710 family uses the
+ *	context id as-is, while older devices encode the cid in its upper bits
+ */
+static u32 bnx2i_get_cid_num(struct bnx2i_endpoint *ep)
+{
+	u32 cid;
+
+	if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type))
+		cid = ep->ep_cid;
+	else
+		cid = GET_CID_NUM(ep->ep_cid);
+	return cid;
+}
+
+
+/**
+ * bnx2i_adjust_qp_size - Adjusts SQ/RQ/CQ size based on device type limits
+ * @hba: 		Adapter for which adjustments are to be made
+ *
+ * On 5706/5708/5709 the SQ and RQ sizes are first rounded down to a power
+ *	of two; on all device types each queue is then sized to span a whole
+ *	number of pages
+ */
+static void bnx2i_adjust_qp_size(struct bnx2i_hba *hba)
+{
+	u32 num_elements_per_pg;
+
+	if (test_bit(BNX2I_NX2_DEV_5706, &hba->cnic_dev_type) ||
+	    test_bit(BNX2I_NX2_DEV_5708, &hba->cnic_dev_type) ||
+	    test_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type)) {
+		if (!is_power_of_2(hba->max_sqes))
+			hba->max_sqes = rounddown_pow_of_two(hba->max_sqes);
+
+		if (!is_power_of_2(hba->max_rqes))
+			hba->max_rqes = rounddown_pow_of_two(hba->max_rqes);
+	}
+
+	/* Adjust each queue size if the user selection does not
+	 * yield an integral number of page buffers
+	 */
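+	/*
+	 * Note: (x + n - 1) & ~(n - 1) below rounds x up to a multiple of
+	 * n, which is valid here because n (queue elements per page) is a
+	 * power of two.
+	 */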
+	/* adjust SQ */
+	num_elements_per_pg = PAGE_SIZE / BNX2I_SQ_WQE_SIZE;
+	if (hba->max_sqes < num_elements_per_pg)
+		hba->max_sqes = num_elements_per_pg;
+	else if (hba->max_sqes % num_elements_per_pg)
+		hba->max_sqes = (hba->max_sqes + num_elements_per_pg - 1) &
+				 ~(num_elements_per_pg - 1);
+
+	/* adjust CQ */
+	num_elements_per_pg = PAGE_SIZE / BNX2I_CQE_SIZE;
+	if (hba->max_cqes < num_elements_per_pg)
+		hba->max_cqes = num_elements_per_pg;
+	else if (hba->max_cqes % num_elements_per_pg)
+		hba->max_cqes = (hba->max_cqes + num_elements_per_pg - 1) &
+				 ~(num_elements_per_pg - 1);
+
+	/* adjust RQ */
+	num_elements_per_pg = PAGE_SIZE / BNX2I_RQ_WQE_SIZE;
+	if (hba->max_rqes < num_elements_per_pg)
+		hba->max_rqes = num_elements_per_pg;
+	else if (hba->max_rqes % num_elements_per_pg)
+		hba->max_rqes = (hba->max_rqes + num_elements_per_pg - 1) &
+				 ~(num_elements_per_pg - 1);
+}
+
+
+/**
+ * bnx2i_get_link_state - get network interface link state
+ * @hba:	adapter instance pointer
+ *
+ * updates adapter structure flag based on netdev state
+ */
+static void bnx2i_get_link_state(struct bnx2i_hba *hba)
+{
+	if (test_bit(__LINK_STATE_NOCARRIER, &hba->netdev->state))
+		set_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state);
+	else
+		clear_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state);
+}
+
+
+/**
+ * bnx2i_iscsi_license_error - displays iscsi license related error message
+ * @hba:		adapter instance pointer
+ * @error_code:		error classification
+ *
+ * Puts out an error log when driver is unable to offload iscsi connection
+ *	due to license restrictions
+ */
+static void bnx2i_iscsi_license_error(struct bnx2i_hba *hba, u32 error_code)
+{
+	if (error_code == ISCSI_KCQE_COMPLETION_STATUS_ISCSI_NOT_SUPPORTED)
+		/* iSCSI offload not supported on this device */
+		printk(KERN_ERR "bnx2i: iSCSI not supported, dev=%s\n",
+				hba->netdev->name);
+	if (error_code == ISCSI_KCQE_COMPLETION_STATUS_LOM_ISCSI_NOT_ENABLED)
+		/* iSCSI offload not supported on this LOM device */
+		printk(KERN_ERR "bnx2i: LOM is not enable to "
+				"offload iSCSI connections, dev=%s\n",
+				hba->netdev->name);
+	set_bit(ADAPTER_STATE_INIT_FAILED, &hba->adapter_state);
+}
+
+
+/**
+ * bnx2i_arm_cq_event_coalescing - arms CQ to enable EQ notification
+ * @ep:		endpoint (transport identifier) structure
+ * @action:	action, ARM or DISARM. For now only ARM_CQE is used
+ *
+ * Arming the CQ enables the chip to generate a global EQ event in order to
+ *	interrupt the driver. An EQ event is generated when the CQ index is
+ *	hit, or when at least one CQE is outstanding and the on-chip timer
+ *	expires
+ */
+void bnx2i_arm_cq_event_coalescing(struct bnx2i_endpoint *ep, u8 action)
+{
+	struct bnx2i_5771x_cq_db *cq_db;
+	u16 cq_index;
+
+	if (!test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type))
+		return;
+
+	if (action == CNIC_ARM_CQE) {
+		cq_index = ep->qp.cqe_exp_seq_sn +
+			   ep->num_active_cmds / event_coal_div;
+		cq_index %= (ep->qp.cqe_size * 2 + 1);
+		if (!cq_index) {
+			cq_index = 1;
+			cq_db = (struct bnx2i_5771x_cq_db *)
+					ep->qp.cq_pgtbl_virt;
+			cq_db->sqn[0] = cq_index;
+		}
+	}
+}
+
+
+/**
+ * bnx2i_get_rq_buf - copy RQ buffer contents to driver buffer
+ * @bnx2i_conn:		iscsi connection on which the RQ event occurred
+ * @ptr:		driver buffer to which RQ buffer contents is to
+ *			be copied
+ * @len:		length of valid data inside RQ buf
+ *
+ * Copies RQ buffer contents from shared (DMA'able) memory region to
+ *	driver buffer. RQ is used to DMA unsolicited iscsi pdus and
+ *	scsi sense info
+ */
+void bnx2i_get_rq_buf(struct bnx2i_conn *bnx2i_conn, char *ptr, int len)
+{
+	if (!bnx2i_conn->ep->qp.rqe_left)
+		return;
+
+	bnx2i_conn->ep->qp.rqe_left--;
+	memcpy(ptr, (u8 *) bnx2i_conn->ep->qp.rq_cons_qe, len);
+	if (bnx2i_conn->ep->qp.rq_cons_qe == bnx2i_conn->ep->qp.rq_last_qe) {
+		bnx2i_conn->ep->qp.rq_cons_qe = bnx2i_conn->ep->qp.rq_first_qe;
+		bnx2i_conn->ep->qp.rq_cons_idx = 0;
+	} else {
+		bnx2i_conn->ep->qp.rq_cons_qe++;
+		bnx2i_conn->ep->qp.rq_cons_idx++;
+	}
+}
+
+
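+/**
+ * bnx2i_ring_577xx_doorbell - ring doorbell register on 5771x devices
+ * @conn:	iscsi connection
+ *
+ * Builds a 32-bit doorbell message carrying the iSCSI connection type and
+ *	writes it to the connection's mapped doorbell register
+ */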
+static void bnx2i_ring_577xx_doorbell(struct bnx2i_conn *conn)
+{
+	struct bnx2i_5771x_dbell dbell;
+	u32 msg;
+
+	memset(&dbell, 0, sizeof(dbell));
+	dbell.dbell.header = (B577XX_ISCSI_CONNECTION_TYPE <<
+			      B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT);
+	msg = *((u32 *)&dbell);
+	/* TODO : get doorbell register mapping */
+	writel(cpu_to_le32(msg), conn->ep->qp.ctx_base);
+}
+
+
+/**
+ * bnx2i_put_rq_buf - Replenish RQ buffers, ring on-chip doorbell if required
+ * @bnx2i_conn:	iscsi connection on which the event is to be posted
+ * @count:	number of RQ buffers being posted to the chip
+ *
+ * No need to ring hardware doorbell for 57710 family of devices
+ */
+void bnx2i_put_rq_buf(struct bnx2i_conn *bnx2i_conn, int count)
+{
+	struct bnx2i_5771x_sq_rq_db *rq_db;
+	u16 hi_bit = (bnx2i_conn->ep->qp.rq_prod_idx & 0x8000);
+	struct bnx2i_endpoint *ep = bnx2i_conn->ep;
+
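+	/*
+	 * rq_prod_idx rolls over at 15 bits; bit 15 is preserved across
+	 * updates and flipped whenever the index wraps past max_rqes,
+	 * presumably serving as a phase bit for wrap detection.
+	 */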
+	ep->qp.rqe_left += count;
+	ep->qp.rq_prod_idx &= 0x7FFF;
+	ep->qp.rq_prod_idx += count;
+
+	if (ep->qp.rq_prod_idx > bnx2i_conn->hba->max_rqes) {
+		ep->qp.rq_prod_idx %= bnx2i_conn->hba->max_rqes;
+		if (!hi_bit)
+			ep->qp.rq_prod_idx |= 0x8000;
+	} else
+		ep->qp.rq_prod_idx |= hi_bit;
+
+	if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) {
+		rq_db = (struct bnx2i_5771x_sq_rq_db *) ep->qp.rq_pgtbl_virt;
+		rq_db->prod_idx = ep->qp.rq_prod_idx;
+		/* no need to ring hardware doorbell for 57710 */
+	} else {
+		writew(ep->qp.rq_prod_idx,
+		       ep->qp.ctx_base + CNIC_RECV_DOORBELL);
+	}
+	mmiowb();
+}
+
+
+/**
+ * bnx2i_ring_sq_dbell - Ring SQ doorbell to wake-up the processing engine
+ * @bnx2i_conn: 	iscsi connection to which new SQ entries belong
+ * @count: 		number of SQ WQEs to post
+ *
+ * SQ DB is updated in host memory and TX Doorbell is rung for 57710 family
+ *	of devices. For 5706/5708/5709 new SQ WQE count is written into the
+ *	doorbell register
+ */
+static void bnx2i_ring_sq_dbell(struct bnx2i_conn *bnx2i_conn, int count)
+{
+	struct bnx2i_5771x_sq_rq_db *sq_db;
+	struct bnx2i_endpoint *ep = bnx2i_conn->ep;
+
+	ep->num_active_cmds++;
+	wmb();	/* flush SQ WQE memory before the doorbell is rung */
+	if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) {
+		sq_db = (struct bnx2i_5771x_sq_rq_db *) ep->qp.sq_pgtbl_virt;
+		sq_db->prod_idx = ep->qp.sq_prod_idx;
+		bnx2i_ring_577xx_doorbell(bnx2i_conn);
+	} else
+		writew(count, ep->qp.ctx_base + CNIC_SEND_DOORBELL);
+
+	mmiowb(); /* flush posted PCI writes */
+}
+
+
+/**
+ * bnx2i_ring_dbell_update_sq_params - update SQ driver parameters
+ * @bnx2i_conn:	iscsi connection to which new SQ entries belong
+ * @count:	number of SQ WQEs to post
+ *
+ * this routine will update SQ driver parameters and ring the doorbell
+ */
+static void bnx2i_ring_dbell_update_sq_params(struct bnx2i_conn *bnx2i_conn,
+					      int count)
+{
+	int tmp_cnt;
+
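+	/* advance the SQ producer entry pointer, wrapping past the last QE */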
+	if (count == 1) {
+		if (bnx2i_conn->ep->qp.sq_prod_qe ==
+		    bnx2i_conn->ep->qp.sq_last_qe)
+			bnx2i_conn->ep->qp.sq_prod_qe =
+						bnx2i_conn->ep->qp.sq_first_qe;
+		else
+			bnx2i_conn->ep->qp.sq_prod_qe++;
+	} else {
+		if ((bnx2i_conn->ep->qp.sq_prod_qe + count) <=
+		    bnx2i_conn->ep->qp.sq_last_qe)
+			bnx2i_conn->ep->qp.sq_prod_qe += count;
+		else {
+			tmp_cnt = bnx2i_conn->ep->qp.sq_last_qe -
+				bnx2i_conn->ep->qp.sq_prod_qe;
+			bnx2i_conn->ep->qp.sq_prod_qe =
+				&bnx2i_conn->ep->qp.sq_first_qe[count -
+								(tmp_cnt + 1)];
+		}
+	}
+	bnx2i_conn->ep->qp.sq_prod_idx += count;
+	/* Ring the doorbell */
+	bnx2i_ring_sq_dbell(bnx2i_conn, bnx2i_conn->ep->qp.sq_prod_idx);
+}
+
+
+/**
+ * bnx2i_send_iscsi_login - post iSCSI login request MP WQE to hardware
+ * @bnx2i_conn:	iscsi connection
+ * @task:	driver task structure which is requesting
+ *		a WQE to be sent to the chip for further processing
+ *
+ * prepare and post an iSCSI Login request WQE to CNIC firmware
+ */
+int bnx2i_send_iscsi_login(struct bnx2i_conn *bnx2i_conn,
+			   struct iscsi_task *task)
+{
+	struct bnx2i_cmd *bnx2i_cmd;
+	struct bnx2i_login_request *login_wqe;
+	struct iscsi_login *login_hdr;
+	u32 dword;
+
+	bnx2i_cmd = (struct bnx2i_cmd *)task->dd_data;
+	login_hdr = (struct iscsi_login *)task->hdr;
+	login_wqe = (struct bnx2i_login_request *)
+						bnx2i_conn->ep->qp.sq_prod_qe;
+
+	login_wqe->op_code = login_hdr->opcode;
+	login_wqe->op_attr = login_hdr->flags;
+	login_wqe->version_max = login_hdr->max_version;
+	login_wqe->version_min = login_hdr->min_version;
+	login_wqe->data_length = ntoh24(login_hdr->dlength);
+	login_wqe->isid_lo = *((u32 *) login_hdr->isid);
+	login_wqe->isid_hi = *((u16 *) login_hdr->isid + 2);
+	login_wqe->tsih = login_hdr->tsih;
+	login_wqe->itt = task->itt |
+		(ISCSI_TASK_TYPE_MPATH << ISCSI_LOGIN_REQUEST_TYPE_SHIFT);
+	login_wqe->cid = login_hdr->cid;
+
+	login_wqe->cmd_sn = be32_to_cpu(login_hdr->cmdsn);
+	login_wqe->exp_stat_sn = be32_to_cpu(login_hdr->exp_statsn);
+
+	login_wqe->resp_bd_list_addr_lo = (u32) bnx2i_conn->gen_pdu.resp_bd_dma;
+	login_wqe->resp_bd_list_addr_hi =
+		(u32) ((u64) bnx2i_conn->gen_pdu.resp_bd_dma >> 32);
+
+	dword = ((1 << ISCSI_LOGIN_REQUEST_NUM_RESP_BDS_SHIFT) |
+		 (bnx2i_conn->gen_pdu.resp_buf_size <<
+		  ISCSI_LOGIN_REQUEST_RESP_BUFFER_LENGTH_SHIFT));
+	login_wqe->resp_buffer = dword;
+	login_wqe->flags = 0;
+	login_wqe->bd_list_addr_lo = (u32) bnx2i_conn->gen_pdu.req_bd_dma;
+	login_wqe->bd_list_addr_hi =
+		(u32) ((u64) bnx2i_conn->gen_pdu.req_bd_dma >> 32);
+	login_wqe->num_bds = 1;
+	login_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */
+
+	bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1);
+	return 0;
+}
+
+/**
+ * bnx2i_send_iscsi_tmf - post iSCSI task management request MP WQE to hardware
+ * @bnx2i_conn:	iscsi connection
+ * @mtask:	driver command structure which is requesting
+ *		a WQE to be sent to the chip for further processing
+ *
+ * prepare and post an iSCSI task management request WQE to CNIC firmware
+ */
+int bnx2i_send_iscsi_tmf(struct bnx2i_conn *bnx2i_conn,
+			 struct iscsi_task *mtask)
+{
+	struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
+	struct iscsi_tm *tmfabort_hdr;
+	struct scsi_cmnd *ref_sc;
+	struct iscsi_task *ctask;
+	struct bnx2i_cmd *bnx2i_cmd;
+	struct bnx2i_tmf_request *tmfabort_wqe;
+	u32 dword;
+
+	bnx2i_cmd = (struct bnx2i_cmd *)mtask->dd_data;
+	tmfabort_hdr = (struct iscsi_tm *)mtask->hdr;
+	tmfabort_wqe = (struct bnx2i_tmf_request *)
+						bnx2i_conn->ep->qp.sq_prod_qe;
+
+	tmfabort_wqe->op_code = tmfabort_hdr->opcode;
+	tmfabort_wqe->op_attr =
+		ISCSI_TMF_REQUEST_ALWAYS_ONE | ISCSI_TM_FUNC_ABORT_TASK;
+	tmfabort_wqe->lun[0] = be32_to_cpu(tmfabort_hdr->lun[0]);
+	tmfabort_wqe->lun[1] = be32_to_cpu(tmfabort_hdr->lun[1]);
+
+	tmfabort_wqe->itt = (mtask->itt | (ISCSI_TASK_TYPE_MPATH << 14));
+	tmfabort_wqe->reserved2 = 0;
+	tmfabort_wqe->cmd_sn = be32_to_cpu(tmfabort_hdr->cmdsn);
+
+	ctask = iscsi_itt_to_task(conn, tmfabort_hdr->rtt);
+	if (!ctask || !ctask->sc)
+		/*
+		 * the iscsi layer must have completed the cmd while this
+		 * was starting up.
+		 */
+		return 0;
+	ref_sc = ctask->sc;
+
+	if (ref_sc->sc_data_direction == DMA_TO_DEVICE)
+		dword = (ISCSI_TASK_TYPE_WRITE << ISCSI_CMD_REQUEST_TYPE_SHIFT);
+	else
+		dword = (ISCSI_TASK_TYPE_READ << ISCSI_CMD_REQUEST_TYPE_SHIFT);
+	tmfabort_wqe->ref_itt = (dword | tmfabort_hdr->rtt);
+	tmfabort_wqe->ref_cmd_sn = be32_to_cpu(tmfabort_hdr->refcmdsn);
+
+	tmfabort_wqe->bd_list_addr_lo = (u32) bnx2i_conn->hba->mp_bd_dma;
+	tmfabort_wqe->bd_list_addr_hi = (u32)
+				((u64) bnx2i_conn->hba->mp_bd_dma >> 32);
+	tmfabort_wqe->num_bds = 1;
+	tmfabort_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */
+
+	bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1);
+	return 0;
+}
+
+/**
+ * bnx2i_send_iscsi_scsicmd - post iSCSI scsicmd request WQE to hardware
+ * @bnx2i_conn:	iscsi connection
+ * @cmd:	driver command structure which is requesting
+ *		a WQE to be sent to the chip for further processing
+ *
+ * prepare and post an iSCSI SCSI-CMD request WQE to CNIC firmware
+ */
+int bnx2i_send_iscsi_scsicmd(struct bnx2i_conn *bnx2i_conn,
+			     struct bnx2i_cmd *cmd)
+{
+	struct bnx2i_cmd_request *scsi_cmd_wqe;
+
+	scsi_cmd_wqe = (struct bnx2i_cmd_request *)
+						bnx2i_conn->ep->qp.sq_prod_qe;
+	memcpy(scsi_cmd_wqe, &cmd->req, sizeof(struct bnx2i_cmd_request));
+	scsi_cmd_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */
+
+	bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1);
+	return 0;
+}
+
+/**
+ * bnx2i_send_iscsi_nopout - post iSCSI NOPOUT request WQE to hardware
+ * @bnx2i_conn:		iscsi connection
+ * @task:		driver task structure which is requesting
+ *			a WQE to be sent to the chip for further processing
+ * @ttt:		TTT to be used when building pdu header
+ * @datap:		payload buffer pointer
+ * @data_len:		payload data length
+ * @unsol:		indicates whether nopout pdu is unsolicited or in
+ *			response to target's NOPIN w/ TTT != 0xFFFFFFFF
+ *
+ * prepare and post a nopout request WQE to CNIC firmware
+ */
+int bnx2i_send_iscsi_nopout(struct bnx2i_conn *bnx2i_conn,
+			    struct iscsi_task *task, u32 ttt,
+			    char *datap, int data_len, int unsol)
+{
+	struct bnx2i_endpoint *ep = bnx2i_conn->ep;
+	struct bnx2i_cmd *bnx2i_cmd;
+	struct bnx2i_nop_out_request *nopout_wqe;
+	struct iscsi_nopout *nopout_hdr;
+
+	bnx2i_cmd = (struct bnx2i_cmd *)task->dd_data;
+	nopout_hdr = (struct iscsi_nopout *)task->hdr;
+	nopout_wqe = (struct bnx2i_nop_out_request *)ep->qp.sq_prod_qe;
+	nopout_wqe->op_code = nopout_hdr->opcode;
+	nopout_wqe->op_attr = ISCSI_FLAG_CMD_FINAL;
+	memcpy(nopout_wqe->lun, nopout_hdr->lun, 8);
+
+	if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) {
+		u32 tmp = nopout_wqe->lun[0];
+		/* 57710 requires LUN field to be swapped; swap the copy in
+		 * the WQE, not the header it was already copied from */
+		nopout_wqe->lun[0] = nopout_wqe->lun[1];
+		nopout_wqe->lun[1] = tmp;
+	}
+
+	nopout_wqe->itt = ((u16)task->itt |
+			   (ISCSI_TASK_TYPE_MPATH <<
+			    ISCSI_TMF_REQUEST_TYPE_SHIFT));
+	nopout_wqe->ttt = ttt;
+	nopout_wqe->flags = 0;
+	if (!unsol)
+		nopout_wqe->flags = ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION;
+	else if (nopout_hdr->itt == RESERVED_ITT)
+		nopout_wqe->flags = ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION;
+
+	nopout_wqe->cmd_sn = be32_to_cpu(nopout_hdr->cmdsn);
+	nopout_wqe->data_length = data_len;
+	if (data_len) {
+		/* handle payload data, not required in first release */
+		printk(KERN_ALERT "NOPOUT: WARNING!! payload len != 0\n");
+	} else {
+		nopout_wqe->bd_list_addr_lo = (u32)
+					bnx2i_conn->hba->mp_bd_dma;
+		nopout_wqe->bd_list_addr_hi =
+			(u32) ((u64) bnx2i_conn->hba->mp_bd_dma >> 32);
+		nopout_wqe->num_bds = 1;
+	}
+	nopout_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */
+
+	bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1);
+	return 0;
+}
+
+
+/**
+ * bnx2i_send_iscsi_logout - post iSCSI logout request WQE to hardware
+ * @bnx2i_conn:	iscsi connection
+ * @task:	driver task structure which is requesting
+ *		a WQE to be sent to the chip for further processing
+ *
+ * prepare and post logout request WQE to CNIC firmware
+ */
+int bnx2i_send_iscsi_logout(struct bnx2i_conn *bnx2i_conn,
+			    struct iscsi_task *task)
+{
+	struct bnx2i_cmd *bnx2i_cmd;
+	struct bnx2i_logout_request *logout_wqe;
+	struct iscsi_logout *logout_hdr;
+
+	bnx2i_cmd = (struct bnx2i_cmd *)task->dd_data;
+	logout_hdr = (struct iscsi_logout *)task->hdr;
+
+	logout_wqe = (struct bnx2i_logout_request *)
+						bnx2i_conn->ep->qp.sq_prod_qe;
+	memset(logout_wqe, 0x00, sizeof(struct bnx2i_logout_request));
+
+	logout_wqe->op_code = logout_hdr->opcode;
+	logout_wqe->cmd_sn = be32_to_cpu(logout_hdr->cmdsn);
+	logout_wqe->op_attr =
+			logout_hdr->flags | ISCSI_LOGOUT_REQUEST_ALWAYS_ONE;
+	logout_wqe->itt = ((u16)task->itt |
+			   (ISCSI_TASK_TYPE_MPATH <<
+			    ISCSI_LOGOUT_REQUEST_TYPE_SHIFT));
+	logout_wqe->data_length = 0;
+	logout_wqe->cid = 0;
+
+	logout_wqe->bd_list_addr_lo = (u32) bnx2i_conn->hba->mp_bd_dma;
+	logout_wqe->bd_list_addr_hi = (u32)
+				((u64) bnx2i_conn->hba->mp_bd_dma >> 32);
+	logout_wqe->num_bds = 1;
+	logout_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */
+
+	bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1);
+	return 0;
+}
+
+
+/**
+ * bnx2i_update_iscsi_conn - updates iscsi connection parameters in the chip
+ * @conn:	iscsi connection which requires iscsi parameter update
+ *
+ * sends down iSCSI Conn Update request to move iSCSI conn to FFP
+ */
+void bnx2i_update_iscsi_conn(struct iscsi_conn *conn)
+{
+	struct bnx2i_conn *bnx2i_conn = conn->dd_data;
+	struct bnx2i_hba *hba = bnx2i_conn->hba;
+	struct kwqe *kwqe_arr[2];
+	struct iscsi_kwqe_conn_update *update_wqe;
+	struct iscsi_kwqe_conn_update conn_update_kwqe;
+
+	update_wqe = &conn_update_kwqe;
+
+	update_wqe->hdr.op_code = ISCSI_KWQE_OPCODE_UPDATE_CONN;
+	update_wqe->hdr.flags =
+		(ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
+
+	/* 5771x requires conn context id to be passed as is */
+	if (test_bit(BNX2I_NX2_DEV_57710, &bnx2i_conn->ep->hba->cnic_dev_type))
+		update_wqe->context_id = bnx2i_conn->ep->ep_cid;
+	else
+		update_wqe->context_id = (bnx2i_conn->ep->ep_cid >> 7);
+	update_wqe->conn_flags = 0;
+	if (conn->hdrdgst_en)
+		update_wqe->conn_flags |= ISCSI_KWQE_CONN_UPDATE_HEADER_DIGEST;
+	if (conn->datadgst_en)
+		update_wqe->conn_flags |= ISCSI_KWQE_CONN_UPDATE_DATA_DIGEST;
+	if (conn->session->initial_r2t_en)
+		update_wqe->conn_flags |= ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T;
+	if (conn->session->imm_data_en)
+		update_wqe->conn_flags |= ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA;
+
+	update_wqe->max_send_pdu_length = conn->max_xmit_dlength;
+	update_wqe->max_recv_pdu_length = conn->max_recv_dlength;
+	update_wqe->first_burst_length = conn->session->first_burst;
+	update_wqe->max_burst_length = conn->session->max_burst;
+	update_wqe->exp_stat_sn = conn->exp_statsn;
+	update_wqe->max_outstanding_r2ts = conn->session->max_r2t;
+	update_wqe->session_error_recovery_level = conn->session->erl;
+	iscsi_conn_printk(KERN_ALERT, conn,
+			  "bnx2i: conn update - MBL 0x%x FBL 0x%x"
+			  "MRDSL_I 0x%x MRDSL_T 0x%x \n",
+			  update_wqe->max_burst_length,
+			  update_wqe->first_burst_length,
+			  update_wqe->max_recv_pdu_length,
+			  update_wqe->max_send_pdu_length);
+
+	kwqe_arr[0] = (struct kwqe *) update_wqe;
+	if (hba->cnic && hba->cnic->submit_kwqes)
+		hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, 1);
+}
+
+
+/**
+ * bnx2i_ep_ofld_timer - connection offload/destroy request timeout handler
+ * @data:	endpoint (transport handle) structure pointer
+ *
+ * routine to handle connection offload/destroy request timeout
+ */
+void bnx2i_ep_ofld_timer(unsigned long data)
+{
+	struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) data;
+
+	if (ep->state == EP_STATE_OFLD_START) {
+		printk(KERN_ALERT "ofld_timer: CONN_OFLD timeout\n");
+		ep->state = EP_STATE_OFLD_FAILED;
+	} else if (ep->state == EP_STATE_DISCONN_START) {
+		printk(KERN_ALERT "ofld_timer: CONN_DISCON timeout\n");
+		ep->state = EP_STATE_DISCONN_TIMEDOUT;
+	} else if (ep->state == EP_STATE_CLEANUP_START) {
+		printk(KERN_ALERT "ofld_timer: CONN_CLEANUP timeout\n");
+		ep->state = EP_STATE_CLEANUP_FAILED;
+	}
+
+	wake_up_interruptible(&ep->ofld_wait);
+}
+
+
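+/**
+ * bnx2i_power_of2 - log2 helper
+ * @val:	value to be tested
+ *
+ * Returns log2(val) when val is a power of two, else 0. For example
+ *	(assuming 4K pages and 64-byte CQEs), PAGE_SIZE / BNX2I_CQE_SIZE
+ *	is 64 and bnx2i_power_of2(64) returns 6; a non-power-of-two input
+ *	such as 48 returns 0.
+ */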
+static int bnx2i_power_of2(u32 val)
+{
+	u32 power = 0;
+	if (val & (val - 1))
+		return power;
+	val--;
+	while (val) {
+		val = val >> 1;
+		power++;
+	}
+	return power;
+}
+
+
+/**
+ * bnx2i_send_cmd_cleanup_req - send iscsi cmd context clean-up request
+ * @hba:	adapter structure pointer
+ * @cmd:	driver command structure which is requesting
+ *		a WQE to be sent to the chip for further processing
+ *
+ * prepares and posts an iSCSI command cleanup request WQE
+ */
+void bnx2i_send_cmd_cleanup_req(struct bnx2i_hba *hba, struct bnx2i_cmd *cmd)
+{
+	struct bnx2i_cleanup_request *cmd_cleanup;
+
+	cmd_cleanup =
+		(struct bnx2i_cleanup_request *)cmd->conn->ep->qp.sq_prod_qe;
+	memset(cmd_cleanup, 0x00, sizeof(struct bnx2i_cleanup_request));
+
+	cmd_cleanup->op_code = ISCSI_OPCODE_CLEANUP_REQUEST;
+	cmd_cleanup->itt = cmd->req.itt;
+	cmd_cleanup->cq_index = 0; /* CQ# used for completion, 5771x only */
+
+	bnx2i_ring_dbell_update_sq_params(cmd->conn, 1);
+}
+
+
+/**
+ * bnx2i_send_conn_destroy - initiates iscsi connection teardown process
+ * @hba:	adapter structure pointer
+ * @ep:		endpoint (transport identifier) structure
+ *
+ * this routine prepares and posts a CONN_DESTROY KWQE to initiate the
+ * 	iscsi connection context clean-up process
+ */
+void bnx2i_send_conn_destroy(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
+{
+	struct kwqe *kwqe_arr[2];
+	struct iscsi_kwqe_conn_destroy conn_cleanup;
+
+	memset(&conn_cleanup, 0x00, sizeof(struct iscsi_kwqe_conn_destroy));
+
+	conn_cleanup.hdr.op_code = ISCSI_KWQE_OPCODE_DESTROY_CONN;
+	conn_cleanup.hdr.flags =
+		(ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
+	/* 5771x requires conn context id to be passed as is */
+	if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type))
+		conn_cleanup.context_id = ep->ep_cid;
+	else
+		conn_cleanup.context_id = (ep->ep_cid >> 7);
+
+	conn_cleanup.reserved0 = (u16)ep->ep_iscsi_cid;
+
+	kwqe_arr[0] = (struct kwqe *) &conn_cleanup;
+	if (hba->cnic && hba->cnic->submit_kwqes)
+		hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, 1);
+}
+
+
+/**
+ * bnx2i_570x_send_conn_ofld_req - initiates iscsi conn context setup process
+ * @hba: 		adapter structure pointer
+ * @ep: 		endpoint (transport identifier) structure
+ *
+ * 5706/5708/5709 specific - prepares and posts CONN_OFLD_REQ1/2 KWQE
+ */
+static void bnx2i_570x_send_conn_ofld_req(struct bnx2i_hba *hba,
+					  struct bnx2i_endpoint *ep)
+{
+	struct kwqe *kwqe_arr[2];
+	struct iscsi_kwqe_conn_offload1 ofld_req1;
+	struct iscsi_kwqe_conn_offload2 ofld_req2;
+	dma_addr_t dma_addr;
+	int num_kwqes = 2;
+	u32 *ptbl;
+
+	ofld_req1.hdr.op_code = ISCSI_KWQE_OPCODE_OFFLOAD_CONN1;
+	ofld_req1.hdr.flags =
+		(ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
+
+	ofld_req1.iscsi_conn_id = (u16) ep->ep_iscsi_cid;
+
+	dma_addr = ep->qp.sq_pgtbl_phys;
+	ofld_req1.sq_page_table_addr_lo = (u32) dma_addr;
+	ofld_req1.sq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32);
+
+	dma_addr = ep->qp.cq_pgtbl_phys;
+	ofld_req1.cq_page_table_addr_lo = (u32) dma_addr;
+	ofld_req1.cq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32);
+
+	ofld_req2.hdr.op_code = ISCSI_KWQE_OPCODE_OFFLOAD_CONN2;
+	ofld_req2.hdr.flags =
+		(ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
+
+	dma_addr = ep->qp.rq_pgtbl_phys;
+	ofld_req2.rq_page_table_addr_lo = (u32) dma_addr;
+	ofld_req2.rq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32);
+
+	ptbl = (u32 *) ep->qp.sq_pgtbl_virt;
+
+	ofld_req2.sq_first_pte.hi = *ptbl++;
+	ofld_req2.sq_first_pte.lo = *ptbl;
+
+	ptbl = (u32 *) ep->qp.cq_pgtbl_virt;
+	ofld_req2.cq_first_pte.hi = *ptbl++;
+	ofld_req2.cq_first_pte.lo = *ptbl;
+
+	kwqe_arr[0] = (struct kwqe *) &ofld_req1;
+	kwqe_arr[1] = (struct kwqe *) &ofld_req2;
+	ofld_req2.num_additional_wqes = 0;
+
+	if (hba->cnic && hba->cnic->submit_kwqes)
+		hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
+}
+
+
+/**
+ * bnx2i_5771x_send_conn_ofld_req - initiates iscsi connection context creation
+ * @hba: 		adapter structure pointer
+ * @ep: 		endpoint (transport identifier) structure
+ *
+ * 57710 specific - prepares and posts CONN_OFLD_REQ1/2 KWQE
+ */
+static void bnx2i_5771x_send_conn_ofld_req(struct bnx2i_hba *hba,
+					   struct bnx2i_endpoint *ep)
+{
+	struct kwqe *kwqe_arr[5];
+	struct iscsi_kwqe_conn_offload1 ofld_req1;
+	struct iscsi_kwqe_conn_offload2 ofld_req2;
+	struct iscsi_kwqe_conn_offload3 ofld_req3[1];
+	dma_addr_t dma_addr;
+	int num_kwqes = 2;
+	u32 *ptbl;
+
+	ofld_req1.hdr.op_code = ISCSI_KWQE_OPCODE_OFFLOAD_CONN1;
+	ofld_req1.hdr.flags =
+		(ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
+
+	ofld_req1.iscsi_conn_id = (u16) ep->ep_iscsi_cid;
+
+	dma_addr = ep->qp.sq_pgtbl_phys + ISCSI_SQ_DB_SIZE;
+	ofld_req1.sq_page_table_addr_lo = (u32) dma_addr;
+	ofld_req1.sq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32);
+
+	dma_addr = ep->qp.cq_pgtbl_phys + ISCSI_CQ_DB_SIZE;
+	ofld_req1.cq_page_table_addr_lo = (u32) dma_addr;
+	ofld_req1.cq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32);
+
+	ofld_req2.hdr.op_code = ISCSI_KWQE_OPCODE_OFFLOAD_CONN2;
+	ofld_req2.hdr.flags =
+		(ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
+
+	dma_addr = ep->qp.rq_pgtbl_phys + ISCSI_RQ_DB_SIZE;
+	ofld_req2.rq_page_table_addr_lo = (u32) dma_addr;
+	ofld_req2.rq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32);
+
+	ptbl = (u32 *)((u8 *)ep->qp.sq_pgtbl_virt + ISCSI_SQ_DB_SIZE);
+	ofld_req2.sq_first_pte.hi = *ptbl++;
+	ofld_req2.sq_first_pte.lo = *ptbl;
+
+	ptbl = (u32 *)((u8 *)ep->qp.cq_pgtbl_virt + ISCSI_CQ_DB_SIZE);
+	ofld_req2.cq_first_pte.hi = *ptbl++;
+	ofld_req2.cq_first_pte.lo = *ptbl;
+
+	kwqe_arr[0] = (struct kwqe *) &ofld_req1;
+	kwqe_arr[1] = (struct kwqe *) &ofld_req2;
+
+	ofld_req2.num_additional_wqes = 1;
+	memset(ofld_req3, 0x00, sizeof(ofld_req3[0]));
+	ptbl = (u32 *)((u8 *)ep->qp.rq_pgtbl_virt + ISCSI_RQ_DB_SIZE);
+	ofld_req3[0].qp_first_pte[0].hi = *ptbl++;
+	ofld_req3[0].qp_first_pte[0].lo = *ptbl;
+
+	kwqe_arr[2] = (struct kwqe *) ofld_req3;
+	/* needed if we decide to go with multiple KCQEs per conn */
+	num_kwqes += 1;
+
+	if (hba->cnic && hba->cnic->submit_kwqes)
+		hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
+}
+
+/**
+ * bnx2i_send_conn_ofld_req - initiates iscsi connection context setup process
+ *
+ * @hba: 		adapter structure pointer
+ * @ep: 		endpoint (transport identifier) structure
+ *
+ * this routine prepares and posts CONN_OFLD_REQ1/2 KWQE
+ */
+void bnx2i_send_conn_ofld_req(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
+{
+	if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type))
+		bnx2i_5771x_send_conn_ofld_req(hba, ep);
+	else
+		bnx2i_570x_send_conn_ofld_req(hba, ep);
+}
+
+
+/**
+ * setup_qp_page_tables - iscsi QP page table setup function
+ * @ep:		endpoint (transport identifier) structure
+ *
+ * Sets up page tables for SQ/RQ/CQ. 1G (5706/5708/5709) devices require
+ * 	64-bit addresses in big endian format, whereas 10G (57710) devices
+ * 	require page table entries in little endian format
+ */
+static void setup_qp_page_tables(struct bnx2i_endpoint *ep)
+{
+	int num_pages;
+	u32 *ptbl;
+	dma_addr_t page;
+	int cnic_dev_10g;
+
+	if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type))
+		cnic_dev_10g = 1;
+	else
+		cnic_dev_10g = 0;
+
+	/* SQ page table */
+	memset(ep->qp.sq_pgtbl_virt, 0, ep->qp.sq_pgtbl_size);
+	num_pages = ep->qp.sq_mem_size / PAGE_SIZE;
+	page = ep->qp.sq_phys;
+
+	if (cnic_dev_10g)
+		ptbl = (u32 *)((u8 *)ep->qp.sq_pgtbl_virt + ISCSI_SQ_DB_SIZE);
+	else
+		ptbl = (u32 *) ep->qp.sq_pgtbl_virt;
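+	/* Illustration, assuming a DMA address of 0x0000000812345000: the
+	 * 57710 path below stores the low word first (ptbl[0] = 0x12345000,
+	 * ptbl[1] = 0x00000008), while the 5706/5708/5709 path stores the
+	 * high word first (ptbl[0] = 0x00000008, ptbl[1] = 0x12345000).
+	 */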
+	while (num_pages--) {
+		if (cnic_dev_10g) {
+			/* PTE is written in little endian format for 57710 */
+			*ptbl = (u32) page;
+			ptbl++;
+			*ptbl = (u32) ((u64) page >> 32);
+			ptbl++;
+			page += PAGE_SIZE;
+		} else {
+			/* PTE is written in big endian format for
+			 * 5706/5708/5709 devices */
+			*ptbl = (u32) ((u64) page >> 32);
+			ptbl++;
+			*ptbl = (u32) page;
+			ptbl++;
+			page += PAGE_SIZE;
+		}
+	}
+
+	/* RQ page table */
+	memset(ep->qp.rq_pgtbl_virt, 0, ep->qp.rq_pgtbl_size);
+	num_pages = ep->qp.rq_mem_size / PAGE_SIZE;
+	page = ep->qp.rq_phys;
+
+	if (cnic_dev_10g)
+		ptbl = (u32 *)((u8 *)ep->qp.rq_pgtbl_virt + ISCSI_RQ_DB_SIZE);
+	else
+		ptbl = (u32 *) ep->qp.rq_pgtbl_virt;
+	while (num_pages--) {
+		if (cnic_dev_10g) {
+			/* PTE is written in little endian format for 57710 */
+			*ptbl = (u32) page;
+			ptbl++;
+			*ptbl = (u32) ((u64) page >> 32);
+			ptbl++;
+			page += PAGE_SIZE;
+		} else {
+			/* PTE is written in big endian format for
+			 * 5706/5708/5709 devices */
+			*ptbl = (u32) ((u64) page >> 32);
+			ptbl++;
+			*ptbl = (u32) page;
+			ptbl++;
+			page += PAGE_SIZE;
+		}
+	}
+
+	/* CQ page table */
+	memset(ep->qp.cq_pgtbl_virt, 0, ep->qp.cq_pgtbl_size);
+	num_pages = ep->qp.cq_mem_size / PAGE_SIZE;
+	page = ep->qp.cq_phys;
+
+	if (cnic_dev_10g)
+		ptbl = (u32 *)((u8 *)ep->qp.cq_pgtbl_virt + ISCSI_CQ_DB_SIZE);
+	else
+		ptbl = (u32 *) ep->qp.cq_pgtbl_virt;
+	while (num_pages--) {
+		if (cnic_dev_10g) {
+			/* PTE is written in little endian format for 57710 */
+			*ptbl = (u32) page;
+			ptbl++;
+			*ptbl = (u32) ((u64) page >> 32);
+			ptbl++;
+			page += PAGE_SIZE;
+		} else {
+			/* PTE is written in big endian format for
+			 * 5706/5708/5709 devices */
+			*ptbl = (u32) ((u64) page >> 32);
+			ptbl++;
+			*ptbl = (u32) page;
+			ptbl++;
+			page += PAGE_SIZE;
+		}
+	}
+}
+
+
+/**
+ * bnx2i_alloc_qp_resc - allocates required resources for QP.
+ * @hba:	adapter structure pointer
+ * @ep:		endpoint (transport identifier) structure
+ *
+ * Allocate QP (transport layer for iSCSI connection) resources, DMA'able
+ *	memory for SQ/RQ/CQ and page tables. EP structure elements such
+ *	as producer/consumer indexes/pointers, queue sizes and page table
+ *	contents are set up
+ */
+int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
+{
+	struct bnx2i_5771x_cq_db *cq_db;
+
+	ep->hba = hba;
+	ep->conn = NULL;
+	ep->ep_cid = ep->ep_iscsi_cid = ep->ep_pg_cid = 0;
+
+	/* Allocate page table memory for SQ which is page aligned */
+	ep->qp.sq_mem_size = hba->max_sqes * BNX2I_SQ_WQE_SIZE;
+	ep->qp.sq_mem_size =
+		(ep->qp.sq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+	ep->qp.sq_pgtbl_size =
+		(ep->qp.sq_mem_size / PAGE_SIZE) * sizeof(void *);
+	ep->qp.sq_pgtbl_size =
+		(ep->qp.sq_pgtbl_size + (PAGE_SIZE - 1)) & PAGE_MASK;
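+	/* Sizing sketch, assuming 4K pages and the 64-byte SQ WQE size
+	 * from bnx2i.h: max_sqes = 128 gives sq_mem_size = 8192 (two
+	 * pages), so the page table needs 2 * sizeof(void *) = 16 bytes,
+	 * rounded up to one full page. "(sz + PAGE_SIZE - 1) & PAGE_MASK"
+	 * rounds sz up to a page boundary since PAGE_MASK is
+	 * ~(PAGE_SIZE - 1).
+	 */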
+
+	ep->qp.sq_pgtbl_virt =
+		dma_alloc_coherent(&hba->pcidev->dev, ep->qp.sq_pgtbl_size,
+				   &ep->qp.sq_pgtbl_phys, GFP_KERNEL);
+	if (!ep->qp.sq_pgtbl_virt) {
+		printk(KERN_ALERT "bnx2i: unable to alloc SQ PT mem (%d)\n",
+				  ep->qp.sq_pgtbl_size);
+		goto mem_alloc_err;
+	}
+
+	/* Allocate memory area for actual SQ element */
+	ep->qp.sq_virt =
+		dma_alloc_coherent(&hba->pcidev->dev, ep->qp.sq_mem_size,
+				   &ep->qp.sq_phys, GFP_KERNEL);
+	if (!ep->qp.sq_virt) {
+		printk(KERN_ALERT "bnx2i: unable to alloc SQ BD memory %d\n",
+				  ep->qp.sq_mem_size);
+		goto mem_alloc_err;
+	}
+
+	memset(ep->qp.sq_virt, 0x00, ep->qp.sq_mem_size);
+	ep->qp.sq_first_qe = ep->qp.sq_virt;
+	ep->qp.sq_prod_qe = ep->qp.sq_first_qe;
+	ep->qp.sq_cons_qe = ep->qp.sq_first_qe;
+	ep->qp.sq_last_qe = &ep->qp.sq_first_qe[hba->max_sqes - 1];
+	ep->qp.sq_prod_idx = 0;
+	ep->qp.sq_cons_idx = 0;
+	ep->qp.sqe_left = hba->max_sqes;
+
+	/* Allocate page table memory for CQ which is page aligned */
+	ep->qp.cq_mem_size = hba->max_cqes * BNX2I_CQE_SIZE;
+	ep->qp.cq_mem_size =
+		(ep->qp.cq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+	ep->qp.cq_pgtbl_size =
+		(ep->qp.cq_mem_size / PAGE_SIZE) * sizeof(void *);
+	ep->qp.cq_pgtbl_size =
+		(ep->qp.cq_pgtbl_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+
+	ep->qp.cq_pgtbl_virt =
+		dma_alloc_coherent(&hba->pcidev->dev, ep->qp.cq_pgtbl_size,
+				   &ep->qp.cq_pgtbl_phys, GFP_KERNEL);
+	if (!ep->qp.cq_pgtbl_virt) {
+		printk(KERN_ALERT "bnx2i: unable to alloc CQ PT memory %d\n",
+				  ep->qp.cq_pgtbl_size);
+		goto mem_alloc_err;
+	}
+
+	/* Allocate memory area for actual CQ element */
+	ep->qp.cq_virt =
+		dma_alloc_coherent(&hba->pcidev->dev, ep->qp.cq_mem_size,
+				   &ep->qp.cq_phys, GFP_KERNEL);
+	if (!ep->qp.cq_virt) {
+		printk(KERN_ALERT "bnx2i: unable to alloc CQ BD memory %d\n",
+				  ep->qp.cq_mem_size);
+		goto mem_alloc_err;
+	}
+	memset(ep->qp.cq_virt, 0x00, ep->qp.cq_mem_size);
+
+	ep->qp.cq_first_qe = ep->qp.cq_virt;
+	ep->qp.cq_prod_qe = ep->qp.cq_first_qe;
+	ep->qp.cq_cons_qe = ep->qp.cq_first_qe;
+	ep->qp.cq_last_qe = &ep->qp.cq_first_qe[hba->max_cqes - 1];
+	ep->qp.cq_prod_idx = 0;
+	ep->qp.cq_cons_idx = 0;
+	ep->qp.cqe_left = hba->max_cqes;
+	ep->qp.cqe_exp_seq_sn = ISCSI_INITIAL_SN;
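+	/* note: cqe_size stores the CQ depth in entries (not bytes); it is
+	 * used for the cq_req_sn wrap check when processing new CQEs
+	 */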
+	ep->qp.cqe_size = hba->max_cqes;
+
+	/* Invalidate all EQ CQE index, req only for 57710 */
+	cq_db = (struct bnx2i_5771x_cq_db *) ep->qp.cq_pgtbl_virt;
+	memset(cq_db->sqn, 0xFF, sizeof(cq_db->sqn[0]) * BNX2X_MAX_CQS);
+
+	/* Allocate page table memory for RQ which is page aligned */
+	ep->qp.rq_mem_size = hba->max_rqes * BNX2I_RQ_WQE_SIZE;
+	ep->qp.rq_mem_size =
+		(ep->qp.rq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+	ep->qp.rq_pgtbl_size =
+		(ep->qp.rq_mem_size / PAGE_SIZE) * sizeof(void *);
+	ep->qp.rq_pgtbl_size =
+		(ep->qp.rq_pgtbl_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+
+	ep->qp.rq_pgtbl_virt =
+		dma_alloc_coherent(&hba->pcidev->dev, ep->qp.rq_pgtbl_size,
+				   &ep->qp.rq_pgtbl_phys, GFP_KERNEL);
+	if (!ep->qp.rq_pgtbl_virt) {
+		printk(KERN_ALERT "bnx2i: unable to alloc RQ PT mem %d\n",
+				  ep->qp.rq_pgtbl_size);
+		goto mem_alloc_err;
+	}
+
+	/* Allocate memory area for actual RQ element */
+	ep->qp.rq_virt =
+		dma_alloc_coherent(&hba->pcidev->dev, ep->qp.rq_mem_size,
+				   &ep->qp.rq_phys, GFP_KERNEL);
+	if (!ep->qp.rq_virt) {
+		printk(KERN_ALERT "bnx2i: unable to alloc RQ BD memory %d\n",
+				  ep->qp.rq_mem_size);
+		goto mem_alloc_err;
+	}
+
+	ep->qp.rq_first_qe = ep->qp.rq_virt;
+	ep->qp.rq_prod_qe = ep->qp.rq_first_qe;
+	ep->qp.rq_cons_qe = ep->qp.rq_first_qe;
+	ep->qp.rq_last_qe = &ep->qp.rq_first_qe[hba->max_rqes - 1];
+	ep->qp.rq_prod_idx = 0x8000;
+	ep->qp.rq_cons_idx = 0;
+	ep->qp.rqe_left = hba->max_rqes;
+
+	setup_qp_page_tables(ep);
+
+	return 0;
+
+mem_alloc_err:
+	bnx2i_free_qp_resc(hba, ep);
+	return -ENOMEM;
+}
+
+
+/**
+ * bnx2i_free_qp_resc - free memory resources held by QP
+ * @hba:	adapter structure pointer
+ * @ep:	endpoint (transport identifier) structure
+ *
+ * Free QP resources - SQ/RQ/CQ memory and page tables.
+ */
+void bnx2i_free_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
+{
+	if (ep->qp.ctx_base) {
+		iounmap(ep->qp.ctx_base);
+		ep->qp.ctx_base = NULL;
+	}
+	/* Free SQ mem */
+	if (ep->qp.sq_pgtbl_virt) {
+		dma_free_coherent(&hba->pcidev->dev, ep->qp.sq_pgtbl_size,
+				  ep->qp.sq_pgtbl_virt, ep->qp.sq_pgtbl_phys);
+		ep->qp.sq_pgtbl_virt = NULL;
+		ep->qp.sq_pgtbl_phys = 0;
+	}
+	if (ep->qp.sq_virt) {
+		dma_free_coherent(&hba->pcidev->dev, ep->qp.sq_mem_size,
+				  ep->qp.sq_virt, ep->qp.sq_phys);
+		ep->qp.sq_virt = NULL;
+		ep->qp.sq_phys = 0;
+	}
+
+	/* Free RQ mem */
+	if (ep->qp.rq_pgtbl_virt) {
+		dma_free_coherent(&hba->pcidev->dev, ep->qp.rq_pgtbl_size,
+				  ep->qp.rq_pgtbl_virt, ep->qp.rq_pgtbl_phys);
+		ep->qp.rq_pgtbl_virt = NULL;
+		ep->qp.rq_pgtbl_phys = 0;
+	}
+	if (ep->qp.rq_virt) {
+		dma_free_coherent(&hba->pcidev->dev, ep->qp.rq_mem_size,
+				  ep->qp.rq_virt, ep->qp.rq_phys);
+		ep->qp.rq_virt = NULL;
+		ep->qp.rq_phys = 0;
+	}
+
+	/* Free CQ mem */
+	if (ep->qp.cq_pgtbl_virt) {
+		dma_free_coherent(&hba->pcidev->dev, ep->qp.cq_pgtbl_size,
+				  ep->qp.cq_pgtbl_virt, ep->qp.cq_pgtbl_phys);
+		ep->qp.cq_pgtbl_virt = NULL;
+		ep->qp.cq_pgtbl_phys = 0;
+	}
+	if (ep->qp.cq_virt) {
+		dma_free_coherent(&hba->pcidev->dev, ep->qp.cq_mem_size,
+				  ep->qp.cq_virt, ep->qp.cq_phys);
+		ep->qp.cq_virt = NULL;
+		ep->qp.cq_phys = 0;
+	}
+}
+
+
+/**
+ * bnx2i_send_fw_iscsi_init_msg - initiates initial handshake with iscsi f/w
+ * @hba:	adapter structure pointer
+ *
+ * Sends down the iscsi_init KWQEs which initiate the initial handshake with
+ * 	the f/w. This results in iSCSI support validation and on-chip context
+ * 	manager initialization. Firmware completes this handshake with a CQE
+ * 	carrying the result of iscsi support validation. Parameters carried by
+ * 	the iscsi init request determine the number of offloaded connections
+ * 	and the tolerance level for iscsi protocol violations this hba/chip
+ * 	can support
+ */
+int bnx2i_send_fw_iscsi_init_msg(struct bnx2i_hba *hba)
+{
+	struct kwqe *kwqe_arr[3];
+	struct iscsi_kwqe_init1 iscsi_init;
+	struct iscsi_kwqe_init2 iscsi_init2;
+	int rc = 0;
+	u64 mask64;
+
+	bnx2i_adjust_qp_size(hba);
+
+	iscsi_init.flags =
+		ISCSI_PAGE_SIZE_4K << ISCSI_KWQE_INIT1_PAGE_SIZE_SHIFT;
+	if (en_tcp_dack)
+		iscsi_init.flags |= ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE;
+	iscsi_init.reserved0 = 0;
+	iscsi_init.num_cqs = 1;
+	iscsi_init.hdr.op_code = ISCSI_KWQE_OPCODE_INIT1;
+	iscsi_init.hdr.flags =
+		(ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
+
+	iscsi_init.dummy_buffer_addr_lo = (u32) hba->dummy_buf_dma;
+	iscsi_init.dummy_buffer_addr_hi =
+		(u32) ((u64) hba->dummy_buf_dma >> 32);
+
+	hba->ctx_ccell_tasks =
+			((hba->num_ccell & 0xFFFF) | (hba->max_sqes << 16));
+	iscsi_init.num_ccells_per_conn = hba->num_ccell;
+	iscsi_init.num_tasks_per_conn = hba->max_sqes;
+	iscsi_init.sq_wqes_per_page = PAGE_SIZE / BNX2I_SQ_WQE_SIZE;
+	iscsi_init.sq_num_wqes = hba->max_sqes;
+	iscsi_init.cq_log_wqes_per_page =
+		(u8) bnx2i_power_of2(PAGE_SIZE / BNX2I_CQE_SIZE);
+	iscsi_init.cq_num_wqes = hba->max_cqes;
+	iscsi_init.cq_num_pages = (hba->max_cqes * BNX2I_CQE_SIZE +
+				   (PAGE_SIZE - 1)) / PAGE_SIZE;
+	iscsi_init.sq_num_pages = (hba->max_sqes * BNX2I_SQ_WQE_SIZE +
+				   (PAGE_SIZE - 1)) / PAGE_SIZE;
+	iscsi_init.rq_buffer_size = BNX2I_RQ_WQE_SIZE;
+	iscsi_init.rq_num_wqes = hba->max_rqes;
+
+	iscsi_init2.hdr.op_code = ISCSI_KWQE_OPCODE_INIT2;
+	iscsi_init2.hdr.flags =
+		(ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
+	iscsi_init2.max_cq_sqn = hba->max_cqes * 2 + 1;
+	mask64 = 0x0ULL;
+	mask64 |= (
+		/* CISCO MDS */
+		(1ULL <<
+		  ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_NOT_RSRV) |
+		/* HP MSA1510i */
+		(1ULL <<
+		  ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_EXP_DATASN) |
+		/* EMC */
+		(1ULL << ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_LUN));
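+	/* bits in the error bit map are indexed by KCQE completion status;
+	 * a set bit demotes that protocol violation to a warning (see
+	 * bnx2i_process_iscsi_error), and the defaults above cover known
+	 * target quirks
+	 */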
+	if (error_mask1)
+		iscsi_init2.error_bit_map[0] = error_mask1;
+	else
+		iscsi_init2.error_bit_map[0] = (u32) mask64;
+
+	if (error_mask2)
+		iscsi_init2.error_bit_map[1] = error_mask2;
+	else
+		iscsi_init2.error_bit_map[1] = (u32) (mask64 >> 32);
+
+	iscsi_error_mask = mask64;
+
+	kwqe_arr[0] = (struct kwqe *) &iscsi_init;
+	kwqe_arr[1] = (struct kwqe *) &iscsi_init2;
+
+	if (hba->cnic && hba->cnic->submit_kwqes)
+		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, 2);
+	return rc;
+}
+
+
+/**
+ * bnx2i_process_scsi_cmd_resp - this function handles scsi cmd completion.
+ * @session:	iscsi session pointer
+ * @bnx2i_conn:	iscsi connection pointer
+ * @cqe:	pointer to newly DMA'ed CQE entry for processing
+ *
+ * process SCSI CMD Response CQE & complete the request to SCSI-ML
+ */
+static int bnx2i_process_scsi_cmd_resp(struct iscsi_session *session,
+				       struct bnx2i_conn *bnx2i_conn,
+				       struct cqe *cqe)
+{
+	struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
+	struct bnx2i_cmd_response *resp_cqe;
+	struct bnx2i_cmd *bnx2i_cmd;
+	struct iscsi_task *task;
+	struct iscsi_cmd_rsp *hdr;
+	u32 datalen = 0;
+
+	resp_cqe = (struct bnx2i_cmd_response *)cqe;
+	spin_lock(&session->lock);
+	task = iscsi_itt_to_task(conn,
+				 resp_cqe->itt & ISCSI_CMD_RESPONSE_INDEX);
+	if (!task)
+		goto fail;
+
+	bnx2i_cmd = task->dd_data;
+
+	if (bnx2i_cmd->req.op_attr & ISCSI_CMD_REQUEST_READ) {
+		conn->datain_pdus_cnt +=
+			resp_cqe->task_stat.read_stat.num_data_outs;
+		conn->rxdata_octets +=
+			bnx2i_cmd->req.total_data_transfer_length;
+	} else {
+		conn->dataout_pdus_cnt +=
+			resp_cqe->task_stat.read_stat.num_data_outs;
+		conn->r2t_pdus_cnt +=
+			resp_cqe->task_stat.read_stat.num_r2ts;
+		conn->txdata_octets +=
+			bnx2i_cmd->req.total_data_transfer_length;
+	}
+	bnx2i_iscsi_unmap_sg_list(bnx2i_cmd);
+
+	hdr = (struct iscsi_cmd_rsp *)task->hdr;
+	hdr->opcode = resp_cqe->op_code;
+	hdr->max_cmdsn = cpu_to_be32(resp_cqe->max_cmd_sn);
+	hdr->exp_cmdsn = cpu_to_be32(resp_cqe->exp_cmd_sn);
+	hdr->response = resp_cqe->response;
+	hdr->cmd_status = resp_cqe->status;
+	hdr->flags = resp_cqe->response_flags;
+	hdr->residual_count = cpu_to_be32(resp_cqe->residual_count);
+
+	if (resp_cqe->op_code == ISCSI_OP_SCSI_DATA_IN)
+		goto done;
+
+	if (resp_cqe->status == SAM_STAT_CHECK_CONDITION) {
+		datalen = resp_cqe->data_length;
+		if (datalen < 2)
+			goto done;
+
+		if (datalen > BNX2I_RQ_WQE_SIZE) {
+			iscsi_conn_printk(KERN_ERR, conn,
+					  "sense data len %d > RQ sz\n",
+					  datalen);
+			datalen = BNX2I_RQ_WQE_SIZE;
+		} else if (datalen > ISCSI_DEF_MAX_RECV_SEG_LEN) {
+			iscsi_conn_printk(KERN_ERR, conn,
+					  "sense data len %d > conn data\n",
+					  datalen);
+			datalen = ISCSI_DEF_MAX_RECV_SEG_LEN;
+		}
+
+		bnx2i_get_rq_buf(bnx2i_cmd->conn, conn->data, datalen);
+		bnx2i_put_rq_buf(bnx2i_cmd->conn, 1);
+	}
+
+done:
+	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr,
+			     conn->data, datalen);
+fail:
+	spin_unlock(&session->lock);
+	return 0;
+}
+
+
+/**
+ * bnx2i_process_login_resp - this function handles iscsi login response
+ * @session:		iscsi session pointer
+ * @bnx2i_conn:		iscsi connection pointer
+ * @cqe:		pointer to newly DMA'ed CQE entry for processing
+ *
+ * process Login Response CQE & complete it to open-iscsi user daemon
+ */
+static int bnx2i_process_login_resp(struct iscsi_session *session,
+				    struct bnx2i_conn *bnx2i_conn,
+				    struct cqe *cqe)
+{
+	struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
+	struct iscsi_task *task;
+	struct bnx2i_login_response *login;
+	struct iscsi_login_rsp *resp_hdr;
+	int pld_len;
+	int pad_len;
+
+	login = (struct bnx2i_login_response *) cqe;
+	spin_lock(&session->lock);
+	task = iscsi_itt_to_task(conn,
+				 login->itt & ISCSI_LOGIN_RESPONSE_INDEX);
+	if (!task)
+		goto done;
+
+	resp_hdr = (struct iscsi_login_rsp *) &bnx2i_conn->gen_pdu.resp_hdr;
+	memset(resp_hdr, 0, sizeof(struct iscsi_hdr));
+	resp_hdr->opcode = login->op_code;
+	resp_hdr->flags = login->response_flags;
+	resp_hdr->max_version = login->version_max;
+	resp_hdr->active_version = login->version_active;
+	resp_hdr->hlength = 0;
+
+	hton24(resp_hdr->dlength, login->data_length);
+	memcpy(resp_hdr->isid, &login->isid_lo, 6);
+	resp_hdr->tsih = cpu_to_be16(login->tsih);
+	resp_hdr->itt = task->hdr->itt;
+	resp_hdr->statsn = cpu_to_be32(login->stat_sn);
+	resp_hdr->exp_cmdsn = cpu_to_be32(login->exp_cmd_sn);
+	resp_hdr->max_cmdsn = cpu_to_be32(login->max_cmd_sn);
+	resp_hdr->status_class = login->status_class;
+	resp_hdr->status_detail = login->status_detail;
+	pld_len = login->data_length;
+	bnx2i_conn->gen_pdu.resp_wr_ptr =
+					bnx2i_conn->gen_pdu.resp_buf + pld_len;
+
+	pad_len = 0;
+	if (pld_len & 0x3)
+		pad_len = 4 - (pld_len % 4);
+
+	if (pad_len) {
+		int i = 0;
+		for (i = 0; i < pad_len; i++) {
+			bnx2i_conn->gen_pdu.resp_wr_ptr[0] = 0;
+			bnx2i_conn->gen_pdu.resp_wr_ptr++;
+		}
+	}
+
+	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr,
+		bnx2i_conn->gen_pdu.resp_buf,
+		bnx2i_conn->gen_pdu.resp_wr_ptr - bnx2i_conn->gen_pdu.resp_buf);
+done:
+	spin_unlock(&session->lock);
+	return 0;
+}
+
+/**
+ * bnx2i_process_tmf_resp - this function handles iscsi TMF response
+ * @session:		iscsi session pointer
+ * @bnx2i_conn:		iscsi connection pointer
+ * @cqe:		pointer to newly DMA'ed CQE entry for processing
+ *
+ * process iSCSI TMF Response CQE and wake up the driver eh thread.
+ */
+static int bnx2i_process_tmf_resp(struct iscsi_session *session,
+				  struct bnx2i_conn *bnx2i_conn,
+				  struct cqe *cqe)
+{
+	struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
+	struct iscsi_task *task;
+	struct bnx2i_tmf_response *tmf_cqe;
+	struct iscsi_tm_rsp *resp_hdr;
+
+	tmf_cqe = (struct bnx2i_tmf_response *)cqe;
+	spin_lock(&session->lock);
+	task = iscsi_itt_to_task(conn,
+				 tmf_cqe->itt & ISCSI_TMF_RESPONSE_INDEX);
+	if (!task)
+		goto done;
+
+	resp_hdr = (struct iscsi_tm_rsp *) &bnx2i_conn->gen_pdu.resp_hdr;
+	memset(resp_hdr, 0, sizeof(struct iscsi_hdr));
+	resp_hdr->opcode = tmf_cqe->op_code;
+	resp_hdr->max_cmdsn = cpu_to_be32(tmf_cqe->max_cmd_sn);
+	resp_hdr->exp_cmdsn = cpu_to_be32(tmf_cqe->exp_cmd_sn);
+	resp_hdr->itt = task->hdr->itt;
+	resp_hdr->response = tmf_cqe->response;
+
+	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr, NULL, 0);
+done:
+	spin_unlock(&session->lock);
+	return 0;
+}
+
+/**
+ * bnx2i_process_logout_resp - this function handles iscsi logout response
+ * @session:		iscsi session pointer
+ * @bnx2i_conn:		iscsi connection pointer
+ * @cqe:		pointer to newly DMA'ed CQE entry for processing
+ *
+ * process iSCSI Logout Response CQE & make function call to
+ * notify the user daemon.
+ */
+static int bnx2i_process_logout_resp(struct iscsi_session *session,
+				     struct bnx2i_conn *bnx2i_conn,
+				     struct cqe *cqe)
+{
+	struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
+	struct iscsi_task *task;
+	struct bnx2i_logout_response *logout;
+	struct iscsi_logout_rsp *resp_hdr;
+
+	logout = (struct bnx2i_logout_response *) cqe;
+	spin_lock(&session->lock);
+	task = iscsi_itt_to_task(conn,
+				 logout->itt & ISCSI_LOGOUT_RESPONSE_INDEX);
+	if (!task)
+		goto done;
+
+	resp_hdr = (struct iscsi_logout_rsp *) &bnx2i_conn->gen_pdu.resp_hdr;
+	memset(resp_hdr, 0, sizeof(struct iscsi_hdr));
+	resp_hdr->opcode = logout->op_code;
+	resp_hdr->flags = logout->response;
+	resp_hdr->hlength = 0;
+
+	resp_hdr->itt = task->hdr->itt;
+	resp_hdr->statsn = task->hdr->exp_statsn;
+	resp_hdr->exp_cmdsn = cpu_to_be32(logout->exp_cmd_sn);
+	resp_hdr->max_cmdsn = cpu_to_be32(logout->max_cmd_sn);
+
+	resp_hdr->t2wait = cpu_to_be32(logout->time_to_wait);
+	resp_hdr->t2retain = cpu_to_be32(logout->time_to_retain);
+
+	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr, NULL, 0);
+done:
+	spin_unlock(&session->lock);
+	return 0;
+}
+
+/**
+ * bnx2i_process_nopin_local_cmpl - this function handles iscsi nopin CQE
+ * @session:		iscsi session pointer
+ * @bnx2i_conn:		iscsi connection pointer
+ * @cqe:		pointer to newly DMA'ed CQE entry for processing
+ *
+ * process iSCSI NOPIN local completion CQE, frees ITT and command structures
+ */
+static void bnx2i_process_nopin_local_cmpl(struct iscsi_session *session,
+					   struct bnx2i_conn *bnx2i_conn,
+					   struct cqe *cqe)
+{
+	struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
+	struct bnx2i_nop_in_msg *nop_in;
+	struct iscsi_task *task;
+
+	nop_in = (struct bnx2i_nop_in_msg *)cqe;
+	spin_lock(&session->lock);
+	task = iscsi_itt_to_task(conn,
+				 nop_in->itt & ISCSI_NOP_IN_MSG_INDEX);
+	if (task)
+		iscsi_put_task(task);
+	spin_unlock(&session->lock);
+}
+
+/**
+ * bnx2i_unsol_pdu_adjust_rq - makes adjustments to RQ after unsol pdu is recvd
+ * @conn:	iscsi connection
+ *
+ * Firmware advances RQ producer index for every unsolicited PDU even if
+ *	payload data length is '0'. This function makes corresponding
+ *	adjustments on the driver side to match this f/w behavior
+ */
+static void bnx2i_unsol_pdu_adjust_rq(struct bnx2i_conn *bnx2i_conn)
+{
+	char dummy_rq_data[2];
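+	/* read and discard one byte; this advances the driver's RQ
+	 * consumer index to mirror the producer advance done by firmware
+	 * for a zero-payload unsolicited PDU
+	 */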
+	bnx2i_get_rq_buf(bnx2i_conn, dummy_rq_data, 1);
+	bnx2i_put_rq_buf(bnx2i_conn, 1);
+}
+
+
+/**
+ * bnx2i_process_nopin_mesg - this function handles iscsi nopin CQE
+ * @session:		iscsi session pointer
+ * @bnx2i_conn:		iscsi connection pointer
+ * @cqe:		pointer to newly DMA'ed CQE entry for processing
+ *
+ * process iSCSI target's proactive iSCSI NOPIN request
+ */
+static int bnx2i_process_nopin_mesg(struct iscsi_session *session,
+				     struct bnx2i_conn *bnx2i_conn,
+				     struct cqe *cqe)
+{
+	struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
+	struct iscsi_task *task;
+	struct bnx2i_nop_in_msg *nop_in;
+	struct iscsi_nopin *hdr;
+	u32 itt;
+	int tgt_async_nop = 0;
+
+	nop_in = (struct bnx2i_nop_in_msg *)cqe;
+	itt = nop_in->itt & ISCSI_NOP_IN_MSG_INDEX;
+
+	spin_lock(&session->lock);
+	hdr = (struct iscsi_nopin *)&bnx2i_conn->gen_pdu.resp_hdr;
+	memset(hdr, 0, sizeof(struct iscsi_hdr));
+	hdr->opcode = nop_in->op_code;
+	hdr->max_cmdsn = cpu_to_be32(nop_in->max_cmd_sn);
+	hdr->exp_cmdsn = cpu_to_be32(nop_in->exp_cmd_sn);
+	hdr->ttt = cpu_to_be32(nop_in->ttt);
+
+	if (itt == (u16) RESERVED_ITT) {
+		bnx2i_unsol_pdu_adjust_rq(bnx2i_conn);
+		hdr->itt = RESERVED_ITT;
+		tgt_async_nop = 1;
+		goto done;
+	}
+
+	/* this is a response to one of our nop-outs */
+	task = iscsi_itt_to_task(conn, itt);
+	if (task) {
+		hdr->flags = ISCSI_FLAG_CMD_FINAL;
+		hdr->itt = task->hdr->itt;
+		hdr->ttt = cpu_to_be32(nop_in->ttt);
+		memcpy(hdr->lun, nop_in->lun, 8);
+	}
+done:
+	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
+	spin_unlock(&session->lock);
+
+	return tgt_async_nop;
+}
+
+
+/**
+ * bnx2i_process_async_mesg - this function handles iscsi async message
+ * @session:		iscsi session pointer
+ * @bnx2i_conn:		iscsi connection pointer
+ * @cqe:		pointer to newly DMA'ed CQE entry for processing
+ *
+ * process iSCSI ASYNC Message
+ */
+static void bnx2i_process_async_mesg(struct iscsi_session *session,
+				     struct bnx2i_conn *bnx2i_conn,
+				     struct cqe *cqe)
+{
+	struct bnx2i_async_msg *async_cqe;
+	struct iscsi_async *resp_hdr;
+	u8 async_event;
+
+	bnx2i_unsol_pdu_adjust_rq(bnx2i_conn);
+
+	async_cqe = (struct bnx2i_async_msg *)cqe;
+	async_event = async_cqe->async_event;
+
+	if (async_event == ISCSI_ASYNC_MSG_SCSI_EVENT) {
+		iscsi_conn_printk(KERN_ALERT, bnx2i_conn->cls_conn->dd_data,
+				  "async: scsi events not supported\n");
+		return;
+	}
+
+	spin_lock(&session->lock);
+	resp_hdr = (struct iscsi_async *) &bnx2i_conn->gen_pdu.resp_hdr;
+	memset(resp_hdr, 0, sizeof(struct iscsi_hdr));
+	resp_hdr->opcode = async_cqe->op_code;
+	resp_hdr->flags = 0x80;
+
+	memcpy(resp_hdr->lun, async_cqe->lun, 8);
+	resp_hdr->exp_cmdsn = cpu_to_be32(async_cqe->exp_cmd_sn);
+	resp_hdr->max_cmdsn = cpu_to_be32(async_cqe->max_cmd_sn);
+
+	resp_hdr->async_event = async_cqe->async_event;
+	resp_hdr->async_vcode = async_cqe->async_vcode;
+
+	resp_hdr->param1 = cpu_to_be16(async_cqe->param1);
+	resp_hdr->param2 = cpu_to_be16(async_cqe->param2);
+	resp_hdr->param3 = cpu_to_be16(async_cqe->param3);
+
+	__iscsi_complete_pdu(bnx2i_conn->cls_conn->dd_data,
+			     (struct iscsi_hdr *)resp_hdr, NULL, 0);
+	spin_unlock(&session->lock);
+}
+
+
+/**
+ * bnx2i_process_reject_mesg - process iscsi reject pdu
+ * @session:		iscsi session pointer
+ * @bnx2i_conn:		iscsi connection pointer
+ * @cqe:		pointer to newly DMA'ed CQE entry for processing
+ *
+ * process iSCSI REJECT message
+ */
+static void bnx2i_process_reject_mesg(struct iscsi_session *session,
+				      struct bnx2i_conn *bnx2i_conn,
+				      struct cqe *cqe)
+{
+	struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
+	struct bnx2i_reject_msg *reject;
+	struct iscsi_reject *hdr;
+
+	reject = (struct bnx2i_reject_msg *) cqe;
+	if (reject->data_length) {
+		bnx2i_get_rq_buf(bnx2i_conn, conn->data, reject->data_length);
+		bnx2i_put_rq_buf(bnx2i_conn, 1);
+	} else
+		bnx2i_unsol_pdu_adjust_rq(bnx2i_conn);
+
+	spin_lock(&session->lock);
+	hdr = (struct iscsi_reject *) &bnx2i_conn->gen_pdu.resp_hdr;
+	memset(hdr, 0, sizeof(struct iscsi_hdr));
+	hdr->opcode = reject->op_code;
+	hdr->reason = reject->reason;
+	hton24(hdr->dlength, reject->data_length);
+	hdr->max_cmdsn = cpu_to_be32(reject->max_cmd_sn);
+	hdr->exp_cmdsn = cpu_to_be32(reject->exp_cmd_sn);
+	hdr->ffffffff = cpu_to_be32(RESERVED_ITT);
+	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, conn->data,
+			     reject->data_length);
+	spin_unlock(&session->lock);
+}
+
+/**
+ * bnx2i_process_cmd_cleanup_resp - process scsi command clean-up completion
+ * @session:		iscsi session pointer
+ * @bnx2i_conn:		iscsi connection pointer
+ * @cqe:		pointer to newly DMA'ed CQE entry for processing
+ *
+ * process command cleanup response CQE during conn shutdown or error recovery
+ */
+static void bnx2i_process_cmd_cleanup_resp(struct iscsi_session *session,
+					   struct bnx2i_conn *bnx2i_conn,
+					   struct cqe *cqe)
+{
+	struct bnx2i_cleanup_response *cmd_clean_rsp;
+	struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
+	struct iscsi_task *task;
+
+	cmd_clean_rsp = (struct bnx2i_cleanup_response *)cqe;
+	spin_lock(&session->lock);
+	task = iscsi_itt_to_task(conn,
+			cmd_clean_rsp->itt & ISCSI_CLEANUP_RESPONSE_INDEX);
+	if (!task)
+		printk(KERN_ALERT "bnx2i: cmd clean ITT %x not active\n",
+			cmd_clean_rsp->itt & ISCSI_CLEANUP_RESPONSE_INDEX);
+	spin_unlock(&session->lock);
+	complete(&bnx2i_conn->cmd_cleanup_cmpl);
+}
+
+
+/**
+ * bnx2i_process_new_cqes - process newly DMA'ed CQEs
+ * @bnx2i_conn:		iscsi connection
+ *
+ * this function is called by generic KCQ handler to process all pending CQEs
+ */
+static void bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn)
+{
+	struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
+	struct iscsi_session *session = conn->session;
+	struct qp_info *qp = &bnx2i_conn->ep->qp;
+	struct bnx2i_nop_in_msg *nopin;
+	int tgt_async_msg;
+
+	while (1) {
+		nopin = (struct bnx2i_nop_in_msg *) qp->cq_cons_qe;
+		if (nopin->cq_req_sn != qp->cqe_exp_seq_sn)
+			break;
+
+		if (unlikely(test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx)))
+			break;
+
+		tgt_async_msg = 0;
+
+		switch (nopin->op_code) {
+		case ISCSI_OP_SCSI_CMD_RSP:
+		case ISCSI_OP_SCSI_DATA_IN:
+			bnx2i_process_scsi_cmd_resp(session, bnx2i_conn,
+						    qp->cq_cons_qe);
+			break;
+		case ISCSI_OP_LOGIN_RSP:
+			bnx2i_process_login_resp(session, bnx2i_conn,
+						 qp->cq_cons_qe);
+			break;
+		case ISCSI_OP_SCSI_TMFUNC_RSP:
+			bnx2i_process_tmf_resp(session, bnx2i_conn,
+					       qp->cq_cons_qe);
+			break;
+		case ISCSI_OP_LOGOUT_RSP:
+			bnx2i_process_logout_resp(session, bnx2i_conn,
+						  qp->cq_cons_qe);
+			break;
+		case ISCSI_OP_NOOP_IN:
+			if (bnx2i_process_nopin_mesg(session, bnx2i_conn,
+						     qp->cq_cons_qe))
+				tgt_async_msg = 1;
+			break;
+		case ISCSI_OPCODE_NOPOUT_LOCAL_COMPLETION:
+			bnx2i_process_nopin_local_cmpl(session, bnx2i_conn,
+						       qp->cq_cons_qe);
+			break;
+		case ISCSI_OP_ASYNC_EVENT:
+			bnx2i_process_async_mesg(session, bnx2i_conn,
+						 qp->cq_cons_qe);
+			tgt_async_msg = 1;
+			break;
+		case ISCSI_OP_REJECT:
+			bnx2i_process_reject_mesg(session, bnx2i_conn,
+						  qp->cq_cons_qe);
+			break;
+		case ISCSI_OPCODE_CLEANUP_RESPONSE:
+			bnx2i_process_cmd_cleanup_resp(session, bnx2i_conn,
+						       qp->cq_cons_qe);
+			break;
+		default:
+			printk(KERN_ALERT "bnx2i: unknown opcode 0x%x\n",
+					  nopin->op_code);
+		}
+
+		if (!tgt_async_msg)
+			bnx2i_conn->ep->num_active_cmds--;
+
+		/* clear out in production version only, till beta keep opcode
+		 * field intact, will be helpful in debugging (context dump)
+		 * nopin->op_code = 0;
+		 */
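+		/* cq_req_sn runs from ISCSI_INITIAL_SN up through twice the
+		 * CQ depth before wrapping, so a stale CQE from a previous
+		 * pass over the ring never matches cqe_exp_seq_sn
+		 */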
+		qp->cqe_exp_seq_sn++;
+		if (qp->cqe_exp_seq_sn == (qp->cqe_size * 2 + 1))
+			qp->cqe_exp_seq_sn = ISCSI_INITIAL_SN;
+
+		if (qp->cq_cons_qe == qp->cq_last_qe) {
+			qp->cq_cons_qe = qp->cq_first_qe;
+			qp->cq_cons_idx = 0;
+		} else {
+			qp->cq_cons_qe++;
+			qp->cq_cons_idx++;
+		}
+	}
+	bnx2i_arm_cq_event_coalescing(bnx2i_conn->ep, CNIC_ARM_CQE);
+}
+
+/**
+ * bnx2i_fastpath_notification - process global event queue (KCQ)
+ * @hba:		adapter structure pointer
+ * @new_cqe_kcqe:	pointer to newly DMA'ed KCQE entry
+ *
+ * Fast path event notification handler, KCQ entry carries context id
+ *	of the connection that has 1 or more pending CQ entries
+ */
+static void bnx2i_fastpath_notification(struct bnx2i_hba *hba,
+					struct iscsi_kcqe *new_cqe_kcqe)
+{
+	struct bnx2i_conn *conn;
+	u32 iscsi_cid;
+
+	iscsi_cid = new_cqe_kcqe->iscsi_conn_id;
+	conn = bnx2i_get_conn_from_id(hba, iscsi_cid);
+
+	if (!conn) {
+		printk(KERN_ALERT "cid #%x not valid\n", iscsi_cid);
+		return;
+	}
+	if (!conn->ep) {
+		printk(KERN_ALERT "cid #%x - ep not bound\n", iscsi_cid);
+		return;
+	}
+
+	bnx2i_process_new_cqes(conn);
+}
+
+
+/**
+ * bnx2i_process_update_conn_cmpl - process iscsi conn update completion KCQE
+ * @hba:		adapter structure pointer
+ * @update_kcqe:	kcqe pointer
+ *
+ * CONN_UPDATE completion handler, this completes iSCSI connection FFP migration
+ */
+static void bnx2i_process_update_conn_cmpl(struct bnx2i_hba *hba,
+					   struct iscsi_kcqe *update_kcqe)
+{
+	struct bnx2i_conn *conn;
+	u32 iscsi_cid;
+
+	iscsi_cid = update_kcqe->iscsi_conn_id;
+	conn = bnx2i_get_conn_from_id(hba, iscsi_cid);
+
+	if (!conn) {
+		printk(KERN_ALERT "conn_update: cid %x not valid\n", iscsi_cid);
+		return;
+	}
+	if (!conn->ep) {
+		printk(KERN_ALERT "cid %x does not have ep bound\n", iscsi_cid);
+		return;
+	}
+
+	if (update_kcqe->completion_status) {
+		printk(KERN_ALERT "request failed cid %x\n", iscsi_cid);
+		conn->ep->state = EP_STATE_ULP_UPDATE_FAILED;
+	} else
+		conn->ep->state = EP_STATE_ULP_UPDATE_COMPL;
+
+	wake_up_interruptible(&conn->ep->ofld_wait);
+}
+
+
+/**
+ * bnx2i_recovery_que_add_conn - add connection to recovery queue
+ * @hba:		adapter structure pointer
+ * @bnx2i_conn:		iscsi connection
+ *
+ * Add connection to recovery queue and schedule adapter eh worker
+ */
+static void bnx2i_recovery_que_add_conn(struct bnx2i_hba *hba,
+					struct bnx2i_conn *bnx2i_conn)
+{
+	iscsi_conn_failure(bnx2i_conn->cls_conn->dd_data,
+			   ISCSI_ERR_CONN_FAILED);
+}
+
+
+/**
+ * bnx2i_process_tcp_error - process error notification on a given connection
+ *
+ * @hba: 		adapter structure pointer
+ * @tcp_err: 		tcp error kcqe pointer
+ *
+ * handles tcp level error notifications from FW.
+ */
+static void bnx2i_process_tcp_error(struct bnx2i_hba *hba,
+				    struct iscsi_kcqe *tcp_err)
+{
+	struct bnx2i_conn *bnx2i_conn;
+	u32 iscsi_cid;
+
+	iscsi_cid = tcp_err->iscsi_conn_id;
+	bnx2i_conn = bnx2i_get_conn_from_id(hba, iscsi_cid);
+
+	if (!bnx2i_conn) {
+		printk(KERN_ALERT "bnx2i - cid 0x%x not valid\n", iscsi_cid);
+		return;
+	}
+
+	printk(KERN_ALERT "bnx2i - cid 0x%x had TCP errors, error code 0x%x\n",
+			  iscsi_cid, tcp_err->completion_status);
+	bnx2i_recovery_que_add_conn(bnx2i_conn->hba, bnx2i_conn);
+}
+
+
+/**
+ * bnx2i_process_iscsi_error - process error notification on a given connection
+ * @hba:		adapter structure pointer
+ * @iscsi_err:		iscsi error kcqe pointer
+ *
+ * handles iscsi error notifications from the FW. Based on masks configured
+ *	during the initial handshake, firmware classifies iscsi protocol / TCP
+ *	RFC violations into either warning or error indications. If the
+ *	indication is of "Error" type, the driver will initiate session
+ *	recovery for that connection/session. For a "Warning" type indication,
+ *	the driver will put out a system log message (there will be only one
+ *	message for each type for the life of the session, to avoid
+ *	unnecessarily overloading the system)
+ */
+static void bnx2i_process_iscsi_error(struct bnx2i_hba *hba,
+				      struct iscsi_kcqe *iscsi_err)
+{
+	struct bnx2i_conn *bnx2i_conn;
+	u32 iscsi_cid;
+	char warn_notice[] = "iscsi_warning";
+	char error_notice[] = "iscsi_error";
+	char additional_notice[64];
+	char *message;
+	int need_recovery;
+	u64 err_mask64;
+
+	iscsi_cid = iscsi_err->iscsi_conn_id;
+	bnx2i_conn = bnx2i_get_conn_from_id(hba, iscsi_cid);
+	if (!bnx2i_conn) {
+		printk(KERN_ALERT "bnx2i - cid 0x%x not valid\n", iscsi_cid);
+		return;
+	}
+
+	err_mask64 = (0x1ULL << iscsi_err->completion_status);
+
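+	/* completion_status doubles as a bit index into the driver's
+	 * 64-bit error mask set up at init time; a set bit demotes that
+	 * violation to a one-time warning, a clear bit triggers session
+	 * recovery
+	 */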
+	if (err_mask64 & iscsi_error_mask) {
+		need_recovery = 0;
+		message = warn_notice;
+	} else {
+		need_recovery = 1;
+		message = error_notice;
+	}
+
+	switch (iscsi_err->completion_status) {
+	case ISCSI_KCQE_COMPLETION_STATUS_HDR_DIG_ERR:
+		strcpy(additional_notice, "hdr digest err");
+		break;
+	case ISCSI_KCQE_COMPLETION_STATUS_DATA_DIG_ERR:
+		strcpy(additional_notice, "data digest err");
+		break;
+	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_OPCODE:
+		strcpy(additional_notice, "wrong opcode rcvd");
+		break;
+	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_AHS_LEN:
+		strcpy(additional_notice, "AHS len > 0 rcvd");
+		break;
+	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_ITT:
+		strcpy(additional_notice, "invalid ITT rcvd");
+		break;
+	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_STATSN:
+		strcpy(additional_notice, "wrong StatSN rcvd");
+		break;
+	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_EXP_DATASN:
+		strcpy(additional_notice, "wrong DataSN rcvd");
+		break;
+	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T:
+		strcpy(additional_notice, "pend R2T violation");
+		break;
+	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_0:
+		strcpy(additional_notice, "ERL0, UO");
+		break;
+	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_1:
+		strcpy(additional_notice, "ERL0, U1");
+		break;
+	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_2:
+		strcpy(additional_notice, "ERL0, U2");
+		break;
+	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_3:
+		strcpy(additional_notice, "ERL0, U3");
+		break;
+	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_4:
+		strcpy(additional_notice, "ERL0, U4");
+		break;
+	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_5:
+		strcpy(additional_notice, "ERL0, U5");
+		break;
+	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_6:
+		strcpy(additional_notice, "ERL0, U6");
+		break;
+	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REMAIN_RCV_LEN:
+		strcpy(additional_notice, "invalid resi len");
+		break;
+	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_MAX_RCV_PDU_LEN:
+		strcpy(additional_notice, "MRDSL violation");
+		break;
+	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_F_BIT_ZERO:
+		strcpy(additional_notice, "F-bit not set");
+		break;
+	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_NOT_RSRV:
+		strcpy(additional_notice, "invalid TTT");
+		break;
+	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATASN:
+		strcpy(additional_notice, "invalid DataSN");
+		break;
+	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REMAIN_BURST_LEN:
+		strcpy(additional_notice, "burst len violation");
+		break;
+	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_BUFFER_OFF:
+		strcpy(additional_notice, "buf offset violation");
+		break;
+	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_LUN:
+		strcpy(additional_notice, "invalid LUN field");
+		break;
+	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_R2TSN:
+		strcpy(additional_notice, "invalid R2TSN field");
+		break;
+#define BNX2I_ERR_DESIRED_DATA_TRNS_LEN_0 	\
+	ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_0
+	case BNX2I_ERR_DESIRED_DATA_TRNS_LEN_0:
+		strcpy(additional_notice, "invalid cmd len1");
+		break;
+#define BNX2I_ERR_DESIRED_DATA_TRNS_LEN_1 	\
+	ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_1
+	case BNX2I_ERR_DESIRED_DATA_TRNS_LEN_1:
+		strcpy(additional_notice, "invalid cmd len2");
+		break;
+	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T_EXCEED:
+		strcpy(additional_notice,
+		       "pend r2t exceeds MaxOutstandingR2T value");
+		break;
+	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_IS_RSRV:
+		strcpy(additional_notice, "TTT is rsvd");
+		break;
+	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_MAX_BURST_LEN:
+		strcpy(additional_notice, "MBL violation");
+		break;
+#define BNX2I_ERR_DATA_SEG_LEN_NOT_ZERO 	\
+	ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATA_SEG_LEN_NOT_ZERO
+	case BNX2I_ERR_DATA_SEG_LEN_NOT_ZERO:
+		strcpy(additional_notice, "data seg len != 0");
+		break;
+	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REJECT_PDU_LEN:
+		strcpy(additional_notice, "reject pdu len error");
+		break;
+	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_ASYNC_PDU_LEN:
+		strcpy(additional_notice, "async pdu len error");
+		break;
+	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_NOPIN_PDU_LEN:
+		strcpy(additional_notice, "nopin pdu len error");
+		break;
+#define BNX2_ERR_PEND_R2T_IN_CLEANUP			\
+	ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T_IN_CLEANUP
+	case BNX2_ERR_PEND_R2T_IN_CLEANUP:
+		strcpy(additional_notice, "pend r2t in cleanup");
+		break;
+
+	case ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_IP_FRAGMENT:
+		strcpy(additional_notice, "IP fragments rcvd");
+		break;
+	case ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_IP_OPTIONS:
+		strcpy(additional_notice, "IP options error");
+		break;
+	case ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_URGENT_FLAG:
+		strcpy(additional_notice, "urgent flag error");
+		break;
+	default:
+		printk(KERN_ALERT "iscsi_err - unknown err %x\n",
+				  iscsi_err->completion_status);
+	}
+
+	if (need_recovery) {
+		iscsi_conn_printk(KERN_ALERT,
+				  bnx2i_conn->cls_conn->dd_data,
+				  "bnx2i: %s - %s\n",
+				  message, additional_notice);
+
+		iscsi_conn_printk(KERN_ALERT,
+				  bnx2i_conn->cls_conn->dd_data,
+				  "conn_err - hostno %d conn %p, "
+				  "iscsi_cid %x cid %x\n",
+				  bnx2i_conn->hba->shost->host_no,
+				  bnx2i_conn, bnx2i_conn->ep->ep_iscsi_cid,
+				  bnx2i_conn->ep->ep_cid);
+		bnx2i_recovery_que_add_conn(bnx2i_conn->hba, bnx2i_conn);
+	} else
+		if (!test_and_set_bit(iscsi_err->completion_status,
+				      (void *) &bnx2i_conn->violation_notified))
+			iscsi_conn_printk(KERN_ALERT,
+					  bnx2i_conn->cls_conn->dd_data,
+					  "bnx2i: %s - %s\n",
+					  message, additional_notice);
+}
+
+
+/**
+ * bnx2i_process_conn_destroy_cmpl - process iscsi conn destroy completion
+ * @hba:		adapter structure pointer
+ * @conn_destroy:	conn destroy kcqe pointer
+ *
+ * handles connection destroy completion request.
+ */
+static void bnx2i_process_conn_destroy_cmpl(struct bnx2i_hba *hba,
+					    struct iscsi_kcqe *conn_destroy)
+{
+	struct bnx2i_endpoint *ep;
+
+	ep = bnx2i_find_ep_in_destroy_list(hba, conn_destroy->iscsi_conn_id);
+	if (!ep) {
+		printk(KERN_ALERT "bnx2i_conn_destroy_cmpl: no pending "
+				  "offload request, unexpected complection\n");
+		return;
+	}
+
+	if (hba != ep->hba) {
+		printk(KERN_ALERT "conn destroy- error hba mis-match\n");
+		return;
+	}
+
+	if (conn_destroy->completion_status) {
+		printk(KERN_ALERT "conn_destroy_cmpl: op failed\n");
+		ep->state = EP_STATE_CLEANUP_FAILED;
+	} else
+		ep->state = EP_STATE_CLEANUP_CMPL;
+	wake_up_interruptible(&ep->ofld_wait);
+}
+
+
+/**
+ * bnx2i_process_ofld_cmpl - process initial iscsi conn offload completion
+ * @hba:		adapter structure pointer
+ * @ofld_kcqe:		conn offload kcqe pointer
+ *
+ * handles initial connection offload completion, ep_connect() thread is
+ *	woken-up to continue with LLP connect process
+ */
+static void bnx2i_process_ofld_cmpl(struct bnx2i_hba *hba,
+				    struct iscsi_kcqe *ofld_kcqe)
+{
+	u32 cid_addr;
+	struct bnx2i_endpoint *ep;
+	u32 cid_num;
+
+	ep = bnx2i_find_ep_in_ofld_list(hba, ofld_kcqe->iscsi_conn_id);
+	if (!ep) {
+		printk(KERN_ALERT "ofld_cmpl: no pend offload request\n");
+		return;
+	}
+
+	if (hba != ep->hba) {
+		printk(KERN_ALERT "ofld_cmpl: error hba mis-match\n");
+		return;
+	}
+
+	if (ofld_kcqe->completion_status) {
+		if (ofld_kcqe->completion_status ==
+		    ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE)
+			printk(KERN_ALERT "bnx2i: unable to allocate"
+					  " iSCSI context resources\n");
+		ep->state = EP_STATE_OFLD_FAILED;
+	} else {
+		ep->state = EP_STATE_OFLD_COMPL;
+		cid_addr = ofld_kcqe->iscsi_conn_context_id;
+		cid_num = bnx2i_get_cid_num(ep);
+		ep->ep_cid = cid_addr;
+		ep->qp.ctx_base = NULL;
+	}
+	wake_up_interruptible(&ep->ofld_wait);
+}
+
+/**
+ * bnx2i_indicate_kcqe - process pending KCQ entries
+ * @context:		adapter structure pointer
+ * @kcqe:		array of pending KCQE pointers
+ * @num_cqe:		number of KCQEs in the array
+ *
+ * Generic KCQ event handler/dispatcher
+ */
+static void bnx2i_indicate_kcqe(void *context, struct kcqe *kcqe[],
+				u32 num_cqe)
+{
+	struct bnx2i_hba *hba = context;
+	int i = 0;
+	struct iscsi_kcqe *ikcqe = NULL;
+
+	while (i < num_cqe) {
+		ikcqe = (struct iscsi_kcqe *) kcqe[i++];
+
+		if (ikcqe->op_code ==
+		    ISCSI_KCQE_OPCODE_CQ_EVENT_NOTIFICATION)
+			bnx2i_fastpath_notification(hba, ikcqe);
+		else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_OFFLOAD_CONN)
+			bnx2i_process_ofld_cmpl(hba, ikcqe);
+		else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_UPDATE_CONN)
+			bnx2i_process_update_conn_cmpl(hba, ikcqe);
+		else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_INIT) {
+			if (ikcqe->completion_status !=
+			    ISCSI_KCQE_COMPLETION_STATUS_SUCCESS)
+				bnx2i_iscsi_license_error(hba,
+						ikcqe->completion_status);
+			else {
+				set_bit(ADAPTER_STATE_UP, &hba->adapter_state);
+				bnx2i_get_link_state(hba);
+				printk(KERN_INFO "bnx2i [%.2x:%.2x.%.2x]: "
+						 "ISCSI_INIT passed\n",
+						 (u8)hba->pcidev->bus->number,
+						 hba->pci_devno,
+						 (u8)hba->pci_func);
+			}
+		} else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_DESTROY_CONN)
+			bnx2i_process_conn_destroy_cmpl(hba, ikcqe);
+		else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_ISCSI_ERROR)
+			bnx2i_process_iscsi_error(hba, ikcqe);
+		else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_TCP_ERROR)
+			bnx2i_process_tcp_error(hba, ikcqe);
+		else
+			printk(KERN_ALERT "bnx2i: unknown opcode 0x%x\n",
+					  ikcqe->op_code);
+	}
+}
+
+
+/**
+ * bnx2i_indicate_netevent - Generic netdev event handler
+ * @context:	adapter structure pointer
+ * @event:	event type
+ *
+ * Handles four netdev events, NETDEV_UP, NETDEV_DOWN,
+ *	NETDEV_GOING_DOWN and NETDEV_CHANGE
+ */
+static void bnx2i_indicate_netevent(void *context, unsigned long event)
+{
+	struct bnx2i_hba *hba = context;
+
+	switch (event) {
+	case NETDEV_UP:
+		if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state))
+			bnx2i_send_fw_iscsi_init_msg(hba);
+		break;
+	case NETDEV_DOWN:
+		clear_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state);
+		clear_bit(ADAPTER_STATE_UP, &hba->adapter_state);
+		break;
+	case NETDEV_GOING_DOWN:
+		set_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state);
+		iscsi_host_for_each_session(hba->shost,
+					    bnx2i_drop_session);
+		break;
+	case NETDEV_CHANGE:
+		bnx2i_get_link_state(hba);
+		break;
+	default:
+		;
+	}
+}
+
+
+/**
+ * bnx2i_cm_connect_cmpl - process iscsi conn establishment completion
+ * @cm_sk: 		cnic sock structure pointer
+ *
+ * function callback exported via bnx2i - cnic driver interface to
+ *	indicate completion of option-2 TCP connect request.
+ */
+static void bnx2i_cm_connect_cmpl(struct cnic_sock *cm_sk)
+{
+	struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context;
+
+	if (test_bit(ADAPTER_STATE_GOING_DOWN, &ep->hba->adapter_state))
+		ep->state = EP_STATE_CONNECT_FAILED;
+	else if (test_bit(SK_F_OFFLD_COMPLETE, &cm_sk->flags))
+		ep->state = EP_STATE_CONNECT_COMPL;
+	else
+		ep->state = EP_STATE_CONNECT_FAILED;
+
+	wake_up_interruptible(&ep->ofld_wait);
+}
+
+
+/**
+ * bnx2i_cm_close_cmpl - process tcp conn close completion
+ * @cm_sk:	cnic sock structure pointer
+ *
+ * function callback exported via bnx2i - cnic driver interface to
+ *	indicate completion of option-2 graceful TCP connect shutdown
+ */
+static void bnx2i_cm_close_cmpl(struct cnic_sock *cm_sk)
+{
+	struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context;
+
+	ep->state = EP_STATE_DISCONN_COMPL;
+	wake_up_interruptible(&ep->ofld_wait);
+}
+
+
+/**
+ * bnx2i_cm_abort_cmpl - process abortive tcp conn teardown completion
+ * @cm_sk:	cnic sock structure pointer
+ *
+ * function callback exported via bnx2i - cnic driver interface to
+ *	indicate completion of option-2 abortive TCP connect termination
+ */
+static void bnx2i_cm_abort_cmpl(struct cnic_sock *cm_sk)
+{
+	struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context;
+
+	ep->state = EP_STATE_DISCONN_COMPL;
+	wake_up_interruptible(&ep->ofld_wait);
+}
+
+
+/**
+ * bnx2i_cm_remote_close - process received TCP FIN
+ * @cm_sk:	cnic sock structure pointer
+ *
+ * function callback exported via bnx2i - cnic driver interface to indicate
+ *	async TCP events such as FIN
+ */
+static void bnx2i_cm_remote_close(struct cnic_sock *cm_sk)
+{
+	struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context;
+
+	ep->state = EP_STATE_TCP_FIN_RCVD;
+	if (ep->conn)
+		bnx2i_recovery_que_add_conn(ep->hba, ep->conn);
+}
+
+/**
+ * bnx2i_cm_remote_abort - process TCP RST and start conn cleanup
+ * @cm_sk:	cnic sock structure pointer
+ *
+ * function callback exported via bnx2i - cnic driver interface to
+ *	indicate async TCP events (RST) sent by the peer.
+ */
+static void bnx2i_cm_remote_abort(struct cnic_sock *cm_sk)
+{
+	struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context;
+
+	ep->state = EP_STATE_TCP_RST_RCVD;
+	if (ep->conn)
+		bnx2i_recovery_que_add_conn(ep->hba, ep->conn);
+}
+
+
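+/**
+ * bnx2i_send_nl_mesg - send private netlink message to the user daemon
+ * @dev:	cnic device pointer
+ * @msg_type:	message type
+ * @buf:	message payload buffer
+ * @buflen:	length of the message payload
+ */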
+static void bnx2i_send_nl_mesg(struct cnic_dev *dev, u32 msg_type,
+			       char *buf, u16 buflen)
+{
+	struct bnx2i_hba *hba;
+
+	hba = bnx2i_find_hba_for_cnic(dev);
+	if (!hba)
+		return;
+
+	if (iscsi_offload_mesg(hba->shost, &bnx2i_iscsi_transport,
+				   msg_type, buf, buflen))
+		printk(KERN_ALERT "bnx2i: private nl message send error\n");
+}
+
+
+/**
+ * bnx2i_cnic_cb - global template of bnx2i - cnic driver interface structure
+ *			carrying callback function pointers
+ *
+ */
+struct cnic_ulp_ops bnx2i_cnic_cb = {
+	.cnic_init = bnx2i_ulp_init,
+	.cnic_exit = bnx2i_ulp_exit,
+	.cnic_start = bnx2i_start,
+	.cnic_stop = bnx2i_stop,
+	.indicate_kcqes = bnx2i_indicate_kcqe,
+	.indicate_netevent = bnx2i_indicate_netevent,
+	.cm_connect_complete = bnx2i_cm_connect_cmpl,
+	.cm_close_complete = bnx2i_cm_close_cmpl,
+	.cm_abort_complete = bnx2i_cm_abort_cmpl,
+	.cm_remote_close = bnx2i_cm_remote_close,
+	.cm_remote_abort = bnx2i_cm_remote_abort,
+	.iscsi_nl_send_msg = bnx2i_send_nl_mesg,
+	.owner = THIS_MODULE
+};
+
+
+/**
+ * bnx2i_map_ep_dbell_regs - map connection doorbell registers
+ * @ep: bnx2i endpoint
+ *
+ * maps connection's SQ and RQ doorbell registers. 5706/5708/5709 devices host
+ *	these registers in BAR #0, whereas on 57710 they are accessed by
+ *	mapping BAR #1
+ */
+int bnx2i_map_ep_dbell_regs(struct bnx2i_endpoint *ep)
+{
+	u32 cid_num;
+	u32 reg_off;
+	u32 first_l4l5;
+	u32 ctx_sz;
+	u32 config2;
+	resource_size_t reg_base;
+
+	cid_num = bnx2i_get_cid_num(ep);
+
+	if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) {
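+		/* each 57710 connection owns a page-sized doorbell window in
+		 * the doorbell BAR; the register offset is the cid-indexed
+		 * page plus the DPM trigger type, and only 4 bytes need to
+		 * be mapped
+		 */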
+		reg_base = pci_resource_start(ep->hba->pcidev,
+					      BNX2X_DOORBELL_PCI_BAR);
+		reg_off = PAGE_SIZE * (cid_num & 0x1FFFF) + DPM_TRIGER_TYPE;
+		ep->qp.ctx_base = ioremap_nocache(reg_base + reg_off, 4);
+		if (!ep->qp.ctx_base)
+			return -ENOMEM;
+		goto arm_cq;
+	}
+
+	reg_base = ep->hba->netdev->base_addr;
+	if ((test_bit(BNX2I_NX2_DEV_5709, &ep->hba->cnic_dev_type)) &&
+	    (ep->hba->mail_queue_access == BNX2I_MQ_BIN_MODE)) {
+		config2 = REG_RD(ep->hba, BNX2_MQ_CONFIG2);
+		first_l4l5 = config2 & BNX2_MQ_CONFIG2_FIRST_L4L5;
+		ctx_sz = (config2 & BNX2_MQ_CONFIG2_CONT_SZ) >> 3;
+		if (ctx_sz)
+			reg_off = CTX_OFFSET + MAX_CID_CNT * MB_KERNEL_CTX_SIZE
+				  + PAGE_SIZE *
+				  (((cid_num - first_l4l5) / ctx_sz) + 256);
+		else
+			reg_off = CTX_OFFSET + (MB_KERNEL_CTX_SIZE * cid_num);
+	} else
+		/* 5709 device in normal mode and 5706/5708 devices */
+		reg_off = CTX_OFFSET + (MB_KERNEL_CTX_SIZE * cid_num);
+
+	ep->qp.ctx_base = ioremap_nocache(reg_base + reg_off,
+					  MB_KERNEL_CTX_SIZE);
+	if (!ep->qp.ctx_base)
+		return -ENOMEM;
+
+arm_cq:
+	bnx2i_arm_cq_event_coalescing(ep, CNIC_ARM_CQE);
+	return 0;
+}
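+
+/*
+ * Worked example for the 57710 path above (hypothetical cid for
+ * illustration): with 4K pages and cid_num = 17, reg_off =
+ * 17 * 4096 + DPM_TRIGER_TYPE = 0x11000 + DPM_TRIGER_TYPE, i.e. each
+ * connection's doorbell sits in its own page of BAR #1.
+ */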
diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c
new file mode 100644
index 00000000000..ae4b2d588fd
--- /dev/null
+++ b/drivers/scsi/bnx2i/bnx2i_init.c
@@ -0,0 +1,438 @@
+/* bnx2i.c: Broadcom NetXtreme II iSCSI driver.
+ *
+ * Copyright (c) 2006 - 2009 Broadcom Corporation
+ * Copyright (c) 2007, 2008 Red Hat, Inc.  All rights reserved.
+ * Copyright (c) 2007, 2008 Mike Christie
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
+ */
+
+#include "bnx2i.h"
+
+static struct list_head adapter_list = LIST_HEAD_INIT(adapter_list);
+static u32 adapter_count;
+static int bnx2i_reg_device;
+
+#define DRV_MODULE_NAME		"bnx2i"
+#define DRV_MODULE_VERSION	"2.0.1d"
+#define DRV_MODULE_RELDATE	"Mar 25, 2009"
+
+static char version[] __devinitdata =
+		"Broadcom NetXtreme II iSCSI Driver " DRV_MODULE_NAME \
+		" v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
+
+
+MODULE_AUTHOR("Anil Veerabhadrappa <anilgv@broadcom.com>");
+MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709 iSCSI Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_MODULE_VERSION);
+
+static DEFINE_RWLOCK(bnx2i_dev_lock);
+
+unsigned int event_coal_div = 1;
+module_param(event_coal_div, int, 0664);
+MODULE_PARM_DESC(event_coal_div, "Event Coalescing Divide Factor");
+
+unsigned int en_tcp_dack = 1;
+module_param(en_tcp_dack, int, 0664);
+MODULE_PARM_DESC(en_tcp_dack, "Enable TCP Delayed ACK");
+
+unsigned int error_mask1 = 0x00;
+module_param(error_mask1, int, 0664);
+MODULE_PARM_DESC(error_mask1, "Config FW iSCSI Error Mask #1");
+
+unsigned int error_mask2 = 0x00;
+module_param(error_mask2, int, 0664);
+MODULE_PARM_DESC(error_mask2, "Config FW iSCSI Error Mask #2");
+
+unsigned int sq_size;
+module_param(sq_size, int, 0664);
+MODULE_PARM_DESC(sq_size, "Configure SQ size");
+
+unsigned int rq_size = BNX2I_RQ_WQES_DEFAULT;
+module_param(rq_size, int, 0664);
+MODULE_PARM_DESC(rq_size, "Configure RQ size");
+
+u64 iscsi_error_mask = 0x00;
+
+static void bnx2i_unreg_one_device(struct bnx2i_hba *hba);
+
+
+/**
+ * bnx2i_identify_device - identifies NetXtreme II device type
+ * @hba: 		Adapter structure pointer
+ *
+ * This function identifies the NX2 device type and sets the appropriate
+ *	queue mailbox register access method; 5709 requires the driver to
+ *	access MBOX regs using *bin* mode
+ */
+void bnx2i_identify_device(struct bnx2i_hba *hba)
+{
+	hba->cnic_dev_type = 0;
+	if ((hba->pci_did == PCI_DEVICE_ID_NX2_5706) ||
+	    (hba->pci_did == PCI_DEVICE_ID_NX2_5706S))
+		set_bit(BNX2I_NX2_DEV_5706, &hba->cnic_dev_type);
+	else if ((hba->pci_did == PCI_DEVICE_ID_NX2_5708) ||
+	    (hba->pci_did == PCI_DEVICE_ID_NX2_5708S))
+		set_bit(BNX2I_NX2_DEV_5708, &hba->cnic_dev_type);
+	else if ((hba->pci_did == PCI_DEVICE_ID_NX2_5709) ||
+	    (hba->pci_did == PCI_DEVICE_ID_NX2_5709S)) {
+		set_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type);
+		hba->mail_queue_access = BNX2I_MQ_BIN_MODE;
+	} else if (hba->pci_did == PCI_DEVICE_ID_NX2_57710 ||
+		   hba->pci_did == PCI_DEVICE_ID_NX2_57711)
+		set_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type);
+}
+
+
+/**
+ * get_adapter_list_head - returns the first usable adapter from the list
+ */
+struct bnx2i_hba *get_adapter_list_head(void)
+{
+	struct bnx2i_hba *hba = NULL;
+	struct bnx2i_hba *tmp_hba;
+
+	if (!adapter_count)
+		goto hba_not_found;
+
+	read_lock(&bnx2i_dev_lock);
+	list_for_each_entry(tmp_hba, &adapter_list, link) {
+		if (tmp_hba->cnic && tmp_hba->cnic->cm_select_dev) {
+			hba = tmp_hba;
+			break;
+		}
+	}
+	read_unlock(&bnx2i_dev_lock);
+hba_not_found:
+	return hba;
+}
+
+
+/**
+ * bnx2i_find_hba_for_cnic - maps cnic device instance to bnx2i adapter instance
+ * @cnic:	pointer to cnic device instance
+ *
+ */
+struct bnx2i_hba *bnx2i_find_hba_for_cnic(struct cnic_dev *cnic)
+{
+	struct bnx2i_hba *hba, *temp;
+
+	read_lock(&bnx2i_dev_lock);
+	list_for_each_entry_safe(hba, temp, &adapter_list, link) {
+		if (hba->cnic == cnic) {
+			read_unlock(&bnx2i_dev_lock);
+			return hba;
+		}
+	}
+	read_unlock(&bnx2i_dev_lock);
+	return NULL;
+}
+
+
+/**
+ * bnx2i_start - cnic callback to initialize & start adapter instance
+ * @handle:	transparent handle pointing to adapter structure
+ *
+ * This function maps the adapter structure to the pcidev structure and
+ *	initiates the firmware handshake to enable/initialize on-chip iSCSI
+ *	components. This bnx2i - cnic interface API callback is issued after
+ *	the following two conditions are met:
+ *	  a) the underlying network interface is up (marked by event
+ *		'NETDEV_UP' from netdev)
+ *	  b) the bnx2i adapter instance is registered
+ */
+void bnx2i_start(void *handle)
+{
+#define BNX2I_INIT_POLL_TIME	(1000 / HZ)
+	struct bnx2i_hba *hba = handle;
+	int i = HZ;
+
+	bnx2i_send_fw_iscsi_init_msg(hba);
+	while (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state) && i--)
+		msleep(BNX2I_INIT_POLL_TIME);
+}
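+
+/*
+ * Timing note on the poll loop above: BNX2I_INIT_POLL_TIME is
+ * (1000 / HZ) msecs and the loop runs at most HZ iterations, so
+ * bnx2i_start() waits for roughly one second in total, e.g. 250 sleeps
+ * of 4 msecs each with HZ=250.
+ */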
+
+
+/**
+ * bnx2i_stop - cnic callback to shutdown adapter instance
+ * @handle:	transparent handle pointing to adapter structure
+ *
+ * The driver checks whether the adapter is already in shutdown mode and,
+ *	if not, starts the shutdown process
+ */
+void bnx2i_stop(void *handle)
+{
+	struct bnx2i_hba *hba = handle;
+
+	/* check if cleanup happened in GOING_DOWN context */
+	clear_bit(ADAPTER_STATE_UP, &hba->adapter_state);
+	if (!test_and_clear_bit(ADAPTER_STATE_GOING_DOWN,
+				&hba->adapter_state))
+		iscsi_host_for_each_session(hba->shost,
+					    bnx2i_drop_session);
+}
+
+/**
+ * bnx2i_register_device - register bnx2i adapter instance with the cnic driver
+ * @hba:	Adapter instance to register
+ *
+ * registers bnx2i adapter instance with the cnic driver while holding the
+ *	adapter structure lock
+ */
+void bnx2i_register_device(struct bnx2i_hba *hba)
+{
+	if (test_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state) ||
+	    test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
+		return;
+	}
+
+	hba->cnic->register_device(hba->cnic, CNIC_ULP_ISCSI, hba);
+
+	spin_lock(&hba->lock);
+	bnx2i_reg_device++;
+	spin_unlock(&hba->lock);
+
+	set_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
+}
+
+
+/**
+ * bnx2i_reg_dev_all - registers all adapter instances with the cnic driver
+ *
+ * registers all bnx2i adapter instances with the cnic driver while holding
+ *	the global resource lock
+ */
+void bnx2i_reg_dev_all(void)
+{
+	struct bnx2i_hba *hba, *temp;
+
+	read_lock(&bnx2i_dev_lock);
+	list_for_each_entry_safe(hba, temp, &adapter_list, link)
+		bnx2i_register_device(hba);
+	read_unlock(&bnx2i_dev_lock);
+}
+
+
+/**
+ * bnx2i_unreg_one_device - unregister adapter instance with the cnic driver
+ * @hba:	Adapter instance to unregister
+ *
+ * unregisters the bnx2i adapter instance from the cnic driver while
+ *	holding the adapter structure lock
+ */
+static void bnx2i_unreg_one_device(struct bnx2i_hba *hba)
+{
+	if (hba->ofld_conns_active ||
+	    !test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic) ||
+	    test_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state))
+		return;
+
+	hba->cnic->unregister_device(hba->cnic, CNIC_ULP_ISCSI);
+
+	spin_lock(&hba->lock);
+	bnx2i_reg_device--;
+	spin_unlock(&hba->lock);
+
+	/* ep_disconnect could come before NETDEV_DOWN, driver won't
+	 * see NETDEV_DOWN as it already unregistered itself.
+	 */
+	hba->adapter_state = 0;
+	clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
+}
+
+/**
+ * bnx2i_unreg_dev_all - unregisters all bnx2i instances with the cnic driver
+ *
+ * unregisters all bnx2i adapter instances with the cnic driver while holding
+ *	the global resource lock
+ */
+void bnx2i_unreg_dev_all(void)
+{
+	struct bnx2i_hba *hba, *temp;
+
+	read_lock(&bnx2i_dev_lock);
+	list_for_each_entry_safe(hba, temp, &adapter_list, link)
+		bnx2i_unreg_one_device(hba);
+	read_unlock(&bnx2i_dev_lock);
+}
+
+
+/**
+ * bnx2i_init_one - initialize an adapter instance and allocate memory resources
+ * @hba:	bnx2i adapter instance
+ * @cnic:	cnic device handle
+ *
+ * Global resource lock and host adapter lock are held during critical
+ *	sections below. This routine is called from the cnic_register_driver()
+ *	context and from the workhorse thread that does the majority of
+ *	device-specific initialization
+ */
+static int bnx2i_init_one(struct bnx2i_hba *hba, struct cnic_dev *cnic)
+{
+	int rc;
+
+	read_lock(&bnx2i_dev_lock);
+	if (bnx2i_reg_device &&
+	    !test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
+		rc = cnic->register_device(cnic, CNIC_ULP_ISCSI, hba);
+		if (rc)		/* duplicate registration */
+			printk(KERN_ERR "bnx2i- dev reg failed\n");
+
+		spin_lock(&hba->lock);
+		bnx2i_reg_device++;
+		hba->age++;
+		spin_unlock(&hba->lock);
+
+		set_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
+	}
+	read_unlock(&bnx2i_dev_lock);
+
+	write_lock(&bnx2i_dev_lock);
+	list_add_tail(&hba->link, &adapter_list);
+	adapter_count++;
+	write_unlock(&bnx2i_dev_lock);
+	return 0;
+}
+
+
+/**
+ * bnx2i_ulp_init - initialize an adapter instance
+ * @dev:	cnic device handle
+ *
+ * Called from cnic_register_driver() context to initialize all enumerated
+ *	cnic devices. This routine allocates the adapter structure and other
+ *	device-specific resources.
+ */
+void bnx2i_ulp_init(struct cnic_dev *dev)
+{
+	struct bnx2i_hba *hba;
+
+	/* Allocate a HBA structure for this device */
+	hba = bnx2i_alloc_hba(dev);
+	if (!hba) {
+		printk(KERN_ERR "bnx2i init: hba initialization failed\n");
+		return;
+	}
+
+	/* Get PCI related information and update hba struct members */
+	clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
+	if (bnx2i_init_one(hba, dev)) {
+		printk(KERN_ERR "bnx2i - hba %p init failed\n", hba);
+		bnx2i_free_hba(hba);
+	} else
+		hba->cnic = dev;
+}
+
+
+/**
+ * bnx2i_ulp_exit - shuts down adapter instance and frees all resources
+ * @dev:	cnic device handle
+ *
+ */
+void bnx2i_ulp_exit(struct cnic_dev *dev)
+{
+	struct bnx2i_hba *hba;
+
+	hba = bnx2i_find_hba_for_cnic(dev);
+	if (!hba) {
+		printk(KERN_INFO "bnx2i_ulp_exit: hba not "
+				 "found, dev 0x%p\n", dev);
+		return;
+	}
+	write_lock(&bnx2i_dev_lock);
+	list_del_init(&hba->link);
+	adapter_count--;
+
+	if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
+		hba->cnic->unregister_device(hba->cnic, CNIC_ULP_ISCSI);
+		clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
+
+		spin_lock(&hba->lock);
+		bnx2i_reg_device--;
+		spin_unlock(&hba->lock);
+	}
+	write_unlock(&bnx2i_dev_lock);
+
+	bnx2i_free_hba(hba);
+}
+
+
+/**
+ * bnx2i_mod_init - module init entry point
+ *
+ * Initializes driver-wide global data structures such as the endpoint
+ *	pool, the TCP port manager/queue and sysfs. Finally the driver
+ *	registers itself with the cnic module
+ */
+static int __init bnx2i_mod_init(void)
+{
+	int err;
+
+	printk(KERN_INFO "%s", version);
+
+	if (sq_size && !is_power_of_2(sq_size))
+		sq_size = roundup_pow_of_two(sq_size);
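+	/*
+	 * e.g. loading the module with sq_size=100 rounds the SQ up to the
+	 * next power of two, 128 entries; the added sq_size check avoids
+	 * calling roundup_pow_of_two(0) for the default value, which is
+	 * undefined.
+	 */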
+
+	bnx2i_scsi_xport_template =
+			iscsi_register_transport(&bnx2i_iscsi_transport);
+	if (!bnx2i_scsi_xport_template) {
+		printk(KERN_ERR "Could not register bnx2i transport.\n");
+		err = -ENOMEM;
+		goto out;
+	}
+
+	err = cnic_register_driver(CNIC_ULP_ISCSI, &bnx2i_cnic_cb);
+	if (err) {
+		printk(KERN_ERR "Could not register bnx2i cnic driver.\n");
+		goto unreg_xport;
+	}
+
+	return 0;
+
+unreg_xport:
+	iscsi_unregister_transport(&bnx2i_iscsi_transport);
+out:
+	return err;
+}
+
+
+/**
+ * bnx2i_mod_exit - module cleanup/exit entry point
+ *
+ * Global resource lock and host adapter lock are held during critical
+ *	sections in this function. The driver walks the adapter list, cleans
+ *	up each instance, unregisters the iscsi transport name and finally
+ *	unregisters itself with the cnic module
+ */
+static void __exit bnx2i_mod_exit(void)
+{
+	struct bnx2i_hba *hba;
+
+	write_lock(&bnx2i_dev_lock);
+	while (!list_empty(&adapter_list)) {
+		hba = list_entry(adapter_list.next, struct bnx2i_hba, link);
+		list_del(&hba->link);
+		adapter_count--;
+
+		if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
+			hba->cnic->unregister_device(hba->cnic, CNIC_ULP_ISCSI);
+			clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
+			bnx2i_reg_device--;
+		}
+
+		write_unlock(&bnx2i_dev_lock);
+		bnx2i_free_hba(hba);
+		write_lock(&bnx2i_dev_lock);
+	}
+	write_unlock(&bnx2i_dev_lock);
+
+	iscsi_unregister_transport(&bnx2i_iscsi_transport);
+	cnic_unregister_driver(CNIC_ULP_ISCSI);
+}
+
+module_init(bnx2i_mod_init);
+module_exit(bnx2i_mod_exit);
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
new file mode 100644
index 00000000000..f7412196f2f
--- /dev/null
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -0,0 +1,2064 @@
+/*
+ * bnx2i_iscsi.c: Broadcom NetXtreme II iSCSI driver.
+ *
+ * Copyright (c) 2006 - 2009 Broadcom Corporation
+ * Copyright (c) 2007, 2008 Red Hat, Inc.  All rights reserved.
+ * Copyright (c) 2007, 2008 Mike Christie
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
+ */
+
+#include <scsi/scsi_tcq.h>
+#include <scsi/libiscsi.h>
+#include "bnx2i.h"
+
+struct scsi_transport_template *bnx2i_scsi_xport_template;
+struct iscsi_transport bnx2i_iscsi_transport;
+static struct scsi_host_template bnx2i_host_template;
+
+/*
+ * Global endpoint resource info
+ */
+static DEFINE_SPINLOCK(bnx2i_resc_lock); /* protects global resources */
+
+
+static int bnx2i_adapter_ready(struct bnx2i_hba *hba)
+{
+	int retval = 0;
+
+	if (!hba || !test_bit(ADAPTER_STATE_UP, &hba->adapter_state) ||
+	    test_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state) ||
+	    test_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state))
+		retval = -EPERM;
+	return retval;
+}
+
+/**
+ * bnx2i_get_write_cmd_bd_idx - identifies various BD bookmarks
+ * @cmd:		iscsi cmd struct pointer
+ * @buf_off:		absolute buffer offset
+ * @start_bd_off:	u32 pointer to return the offset within the BD
+ *			indicated by 'start_bd_idx' on which 'buf_off' falls
+ * @start_bd_idx:	index of the BD on which 'buf_off' falls
+ *
+ * identifies & marks various bd info for scsi command's imm data,
+ * unsolicited data and the first solicited data seq.
+ */
+static void bnx2i_get_write_cmd_bd_idx(struct bnx2i_cmd *cmd, u32 buf_off,
+				       u32 *start_bd_off, u32 *start_bd_idx)
+{
+	struct iscsi_bd *bd_tbl = cmd->io_tbl.bd_tbl;
+	u32 cur_offset = 0;
+	u32 cur_bd_idx = 0;
+
+	if (buf_off) {
+		while (buf_off >= (cur_offset + bd_tbl->buffer_length)) {
+			cur_offset += bd_tbl->buffer_length;
+			cur_bd_idx++;
+			bd_tbl++;
+		}
+	}
+
+	*start_bd_off = buf_off - cur_offset;
+	*start_bd_idx = cur_bd_idx;
+}
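+
+/*
+ * Worked example (hypothetical BD lengths): for a BD table of
+ * {4096, 4096, 2048} bytes and buf_off = 5000, the walk above stops at
+ * BD #1 and returns *start_bd_idx = 1, *start_bd_off = 5000 - 4096 = 904.
+ */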
+
+/**
+ * bnx2i_setup_write_cmd_bd_info - sets up various write command BD info
+ * @task:	transport layer's cmd struct pointer
+ *
+ * identifies & marks various bd info for scsi command's immediate data,
+ * unsolicited data and first solicited data seq which includes BD start
+ * index & BD buf off. This function takes into account iscsi parameters
+ * such as whether immediate data and unsolicited data are supported on
+ * this connection.
+ */
+static void bnx2i_setup_write_cmd_bd_info(struct iscsi_task *task)
+{
+	struct bnx2i_cmd *cmd = task->dd_data;
+	u32 start_bd_offset;
+	u32 start_bd_idx;
+	u32 buffer_offset = 0;
+	u32 cmd_len = cmd->req.total_data_transfer_length;
+
+	/* if ImmediateData is turned off & InitialR2T is turned on,
+	 * there will be no immediate or unsolicited data, just return.
+	 */
+	if (!iscsi_task_has_unsol_data(task) && !task->imm_count)
+		return;
+
+	/* Immediate data */
+	buffer_offset += task->imm_count;
+	if (task->imm_count == cmd_len)
+		return;
+
+	if (iscsi_task_has_unsol_data(task)) {
+		bnx2i_get_write_cmd_bd_idx(cmd, buffer_offset,
+					   &start_bd_offset, &start_bd_idx);
+		cmd->req.ud_buffer_offset = start_bd_offset;
+		cmd->req.ud_start_bd_index = start_bd_idx;
+		buffer_offset += task->unsol_r2t.data_length;
+	}
+
+	if (buffer_offset != cmd_len) {
+		bnx2i_get_write_cmd_bd_idx(cmd, buffer_offset,
+					   &start_bd_offset, &start_bd_idx);
+		if ((start_bd_offset > task->conn->session->first_burst) ||
+		    (start_bd_idx > scsi_sg_count(cmd->scsi_cmd))) {
+			int i = 0;
+
+			iscsi_conn_printk(KERN_ALERT, task->conn,
+					  "bnx2i- error, buf offset 0x%x "
+					  "bd_valid %d use_sg %d\n",
+					  buffer_offset, cmd->io_tbl.bd_valid,
+					  scsi_sg_count(cmd->scsi_cmd));
+			for (i = 0; i < cmd->io_tbl.bd_valid; i++)
+				iscsi_conn_printk(KERN_ALERT, task->conn,
+						  "bnx2i err, bd[%d]: len %x\n",
+						  i, cmd->io_tbl.bd_tbl[i].\
+						  buffer_length);
+		}
+		cmd->req.sd_buffer_offset = start_bd_offset;
+		cmd->req.sd_start_bd_index = start_bd_idx;
+	}
+}
+
+
+
+/**
+ * bnx2i_map_scsi_sg - maps IO buffer and prepares the BD table
+ * @hba:	adapter instance
+ * @cmd:	iscsi cmd struct pointer
+ *
+ * map SG list
+ */
+static int bnx2i_map_scsi_sg(struct bnx2i_hba *hba, struct bnx2i_cmd *cmd)
+{
+	struct scsi_cmnd *sc = cmd->scsi_cmd;
+	struct iscsi_bd *bd = cmd->io_tbl.bd_tbl;
+	struct scatterlist *sg;
+	int byte_count = 0;
+	int bd_count = 0;
+	int sg_count;
+	int sg_len;
+	u64 addr;
+	int i;
+
+	BUG_ON(scsi_sg_count(sc) > ISCSI_MAX_BDS_PER_CMD);
+
+	sg_count = scsi_dma_map(sc);
+
+	scsi_for_each_sg(sc, sg, sg_count, i) {
+		sg_len = sg_dma_len(sg);
+		addr = (u64) sg_dma_address(sg);
+		bd[bd_count].buffer_addr_lo = addr & 0xffffffff;
+		bd[bd_count].buffer_addr_hi = addr >> 32;
+		bd[bd_count].buffer_length = sg_len;
+		bd[bd_count].flags = 0;
+		if (bd_count == 0)
+			bd[bd_count].flags = ISCSI_BD_FIRST_IN_BD_CHAIN;
+
+		byte_count += sg_len;
+		bd_count++;
+	}
+
+	if (bd_count)
+		bd[bd_count - 1].flags |= ISCSI_BD_LAST_IN_BD_CHAIN;
+
+	BUG_ON(byte_count != scsi_bufflen(sc));
+	return bd_count;
+}
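+
+/*
+ * Note on the 64-bit DMA address split above: a hypothetical
+ * sg_dma_address() of 0x123456789 is programmed as
+ * buffer_addr_lo = 0x23456789 and buffer_addr_hi = 0x1.
+ */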
+
+/**
+ * bnx2i_iscsi_map_sg_list - maps SG list
+ * @cmd:	iscsi cmd struct pointer
+ *
+ * creates BD list table for the command
+ */
+static void bnx2i_iscsi_map_sg_list(struct bnx2i_cmd *cmd)
+{
+	int bd_count;
+
+	bd_count  = bnx2i_map_scsi_sg(cmd->conn->hba, cmd);
+	if (!bd_count) {
+		struct iscsi_bd *bd = cmd->io_tbl.bd_tbl;
+
+		bd[0].buffer_addr_lo = bd[0].buffer_addr_hi = 0;
+		bd[0].buffer_length = bd[0].flags = 0;
+	}
+	cmd->io_tbl.bd_valid = bd_count;
+}
+
+
+/**
+ * bnx2i_iscsi_unmap_sg_list - unmaps SG list
+ * @cmd:	iscsi cmd struct pointer
+ *
+ * unmap IO buffers and invalidate the BD table
+ */
+void bnx2i_iscsi_unmap_sg_list(struct bnx2i_cmd *cmd)
+{
+	struct scsi_cmnd *sc = cmd->scsi_cmd;
+
+	if (cmd->io_tbl.bd_valid && sc) {
+		scsi_dma_unmap(sc);
+		cmd->io_tbl.bd_valid = 0;
+	}
+}
+
+static void bnx2i_setup_cmd_wqe_template(struct bnx2i_cmd *cmd)
+{
+	memset(&cmd->req, 0x00, sizeof(cmd->req));
+	cmd->req.op_code = 0xFF;
+	cmd->req.bd_list_addr_lo = (u32) cmd->io_tbl.bd_tbl_dma;
+	cmd->req.bd_list_addr_hi =
+		(u32) ((u64) cmd->io_tbl.bd_tbl_dma >> 32);
+}
+
+
+/**
+ * bnx2i_bind_conn_to_iscsi_cid - bind conn structure to 'iscsi_cid'
+ * @hba:	pointer to adapter instance
+ * @bnx2i_conn:	pointer to iscsi connection
+ * @iscsi_cid:	iscsi context ID, range 0 - (MAX_CONN - 1)
+ *
+ * update iscsi cid table entry with connection pointer. This enables
+ *	driver to quickly get hold of connection structure pointer in
+ *	completion/interrupt thread using iscsi context ID
+ */
+static int bnx2i_bind_conn_to_iscsi_cid(struct bnx2i_hba *hba,
+					struct bnx2i_conn *bnx2i_conn,
+					u32 iscsi_cid)
+{
+	if (hba && hba->cid_que.conn_cid_tbl[iscsi_cid]) {
+		iscsi_conn_printk(KERN_ALERT, bnx2i_conn->cls_conn->dd_data,
+				 "conn bind - entry #%d not free\n", iscsi_cid);
+		return -EBUSY;
+	}
+
+	hba->cid_que.conn_cid_tbl[iscsi_cid] = bnx2i_conn;
+	return 0;
+}
+
+
+/**
+ * bnx2i_get_conn_from_id - maps an iscsi cid to corresponding conn ptr
+ * @hba:	pointer to adapter instance
+ * @iscsi_cid:	iscsi context ID, range 0 - (MAX_CONN - 1)
+ */
+struct bnx2i_conn *bnx2i_get_conn_from_id(struct bnx2i_hba *hba,
+					  u16 iscsi_cid)
+{
+	if (!hba->cid_que.conn_cid_tbl) {
+		printk(KERN_ERR "bnx2i: ERROR - missing conn<->cid table\n");
+		return NULL;
+
+	} else if (iscsi_cid >= hba->max_active_conns) {
+		printk(KERN_ERR "bnx2i: wrong cid #%d\n", iscsi_cid);
+		return NULL;
+	}
+	return hba->cid_que.conn_cid_tbl[iscsi_cid];
+}
+
+
+/**
+ * bnx2i_alloc_iscsi_cid - allocates an iscsi_cid from the free pool
+ * @hba:	pointer to adapter instance
+ */
+static u32 bnx2i_alloc_iscsi_cid(struct bnx2i_hba *hba)
+{
+	int idx;
+
+	if (!hba->cid_que.cid_free_cnt)
+		return -1;
+
+	idx = hba->cid_que.cid_q_cons_idx;
+	hba->cid_que.cid_q_cons_idx++;
+	if (hba->cid_que.cid_q_cons_idx == hba->cid_que.cid_q_max_idx)
+		hba->cid_que.cid_q_cons_idx = 0;
+
+	hba->cid_que.cid_free_cnt--;
+	return hba->cid_que.cid_que[idx];
+}
+
+
+/**
+ * bnx2i_free_iscsi_cid - returns an iscsi_cid to the free pool
+ * @hba: 		pointer to adapter instance
+ * @iscsi_cid:		iscsi context ID to free
+ */
+static void bnx2i_free_iscsi_cid(struct bnx2i_hba *hba, u16 iscsi_cid)
+{
+	int idx;
+
+	if (iscsi_cid == (u16) -1)
+		return;
+
+	hba->cid_que.cid_free_cnt++;
+
+	idx = hba->cid_que.cid_q_prod_idx;
+	hba->cid_que.cid_que[idx] = iscsi_cid;
+	hba->cid_que.conn_cid_tbl[iscsi_cid] = NULL;
+	hba->cid_que.cid_q_prod_idx++;
+	if (hba->cid_que.cid_q_prod_idx == hba->cid_que.cid_q_max_idx)
+		hba->cid_que.cid_q_prod_idx = 0;
+}
+
+
+/**
+ * bnx2i_setup_free_cid_que - sets up free iscsi cid queue
+ * @hba:	pointer to adapter instance
+ *
+ * allocates memory for the iscsi cid queue & 'cid - conn ptr' mapping table,
+ * 	and initializes table attributes
+ */
+static int bnx2i_setup_free_cid_que(struct bnx2i_hba *hba)
+{
+	int mem_size;
+	int i;
+
+	mem_size = hba->max_active_conns * sizeof(u32);
+	mem_size = (mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+
+	hba->cid_que.cid_que_base = kmalloc(mem_size, GFP_KERNEL);
+	if (!hba->cid_que.cid_que_base)
+		return -ENOMEM;
+
+	mem_size = hba->max_active_conns * sizeof(struct bnx2i_conn *);
+	mem_size = (mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+	hba->cid_que.conn_cid_tbl = kmalloc(mem_size, GFP_KERNEL);
+	if (!hba->cid_que.conn_cid_tbl) {
+		kfree(hba->cid_que.cid_que_base);
+		hba->cid_que.cid_que_base = NULL;
+		return -ENOMEM;
+	}
+
+	hba->cid_que.cid_que = (u32 *)hba->cid_que.cid_que_base;
+	hba->cid_que.cid_q_prod_idx = 0;
+	hba->cid_que.cid_q_cons_idx = 0;
+	hba->cid_que.cid_q_max_idx = hba->max_active_conns;
+	hba->cid_que.cid_free_cnt = hba->max_active_conns;
+
+	for (i = 0; i < hba->max_active_conns; i++) {
+		hba->cid_que.cid_que[i] = i;
+		hba->cid_que.conn_cid_tbl[i] = NULL;
+	}
+	return 0;
+}
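+
+/*
+ * Example of the circular free-cid queue set up above (hypothetical
+ * size 4): the queue starts as {0, 1, 2, 3}; two allocations advance
+ * cid_q_cons_idx to 2 and leave cid_free_cnt = 2, and freeing cid 0
+ * writes it back at cid_q_prod_idx, both indices wrapping at
+ * cid_q_max_idx.
+ */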
+
+
+/**
+ * bnx2i_release_free_cid_que - releases 'iscsi_cid' queue resources
+ * @hba:	pointer to adapter instance
+ */
+static void bnx2i_release_free_cid_que(struct bnx2i_hba *hba)
+{
+	kfree(hba->cid_que.cid_que_base);
+	hba->cid_que.cid_que_base = NULL;
+
+	kfree(hba->cid_que.conn_cid_tbl);
+	hba->cid_que.conn_cid_tbl = NULL;
+}
+
+
+/**
+ * bnx2i_alloc_ep - allocates ep structure from global pool
+ * @hba:	pointer to adapter instance
+ *
+ * routine allocates a free endpoint structure from global pool and
+ *	a tcp port to be used for this connection.  Global resource lock,
+ *	'bnx2i_resc_lock' is held while accessing shared global data structures
+ */
+static struct iscsi_endpoint *bnx2i_alloc_ep(struct bnx2i_hba *hba)
+{
+	struct iscsi_endpoint *ep;
+	struct bnx2i_endpoint *bnx2i_ep;
+
+	ep = iscsi_create_endpoint(sizeof(*bnx2i_ep));
+	if (!ep) {
+		printk(KERN_ERR "bnx2i: Could not allocate ep\n");
+		return NULL;
+	}
+
+	bnx2i_ep = ep->dd_data;
+	INIT_LIST_HEAD(&bnx2i_ep->link);
+	bnx2i_ep->state = EP_STATE_IDLE;
+	bnx2i_ep->hba = hba;
+	bnx2i_ep->hba_age = hba->age;
+	hba->ofld_conns_active++;
+	init_waitqueue_head(&bnx2i_ep->ofld_wait);
+	return ep;
+}
+
+
+/**
+ * bnx2i_free_ep - free endpoint
+ * @ep:		pointer to iscsi endpoint structure
+ */
+static void bnx2i_free_ep(struct iscsi_endpoint *ep)
+{
+	struct bnx2i_endpoint *bnx2i_ep = ep->dd_data;
+	unsigned long flags;
+
+	spin_lock_irqsave(&bnx2i_resc_lock, flags);
+	bnx2i_ep->state = EP_STATE_IDLE;
+	bnx2i_ep->hba->ofld_conns_active--;
+
+	bnx2i_free_iscsi_cid(bnx2i_ep->hba, bnx2i_ep->ep_iscsi_cid);
+	if (bnx2i_ep->conn) {
+		bnx2i_ep->conn->ep = NULL;
+		bnx2i_ep->conn = NULL;
+	}
+
+	bnx2i_ep->hba = NULL;
+	spin_unlock_irqrestore(&bnx2i_resc_lock, flags);
+	iscsi_destroy_endpoint(ep);
+}
+
+
+/**
+ * bnx2i_alloc_bdt - allocates buffer descriptor (BD) table for the command
+ * @hba:	adapter instance pointer
+ * @session:	iscsi session pointer
+ * @cmd:	iscsi command structure
+ */
+static int bnx2i_alloc_bdt(struct bnx2i_hba *hba, struct iscsi_session *session,
+			   struct bnx2i_cmd *cmd)
+{
+	struct io_bdt *io = &cmd->io_tbl;
+	struct iscsi_bd *bd;
+
+	io->bd_tbl = dma_alloc_coherent(&hba->pcidev->dev,
+					ISCSI_MAX_BDS_PER_CMD * sizeof(*bd),
+					&io->bd_tbl_dma, GFP_KERNEL);
+	if (!io->bd_tbl) {
+		iscsi_session_printk(KERN_ERR, session, "Could not "
+				     "allocate bdt.\n");
+		return -ENOMEM;
+	}
+	io->bd_valid = 0;
+	return 0;
+}
+
+/**
+ * bnx2i_destroy_cmd_pool - destroys iscsi command pool and release BD table
+ * @hba:	adapter instance pointer
+ * @session:	iscsi session pointer
+ */
+static void bnx2i_destroy_cmd_pool(struct bnx2i_hba *hba,
+				   struct iscsi_session *session)
+{
+	int i;
+
+	for (i = 0; i < session->cmds_max; i++) {
+		struct iscsi_task *task = session->cmds[i];
+		struct bnx2i_cmd *cmd = task->dd_data;
+
+		if (cmd->io_tbl.bd_tbl)
+			dma_free_coherent(&hba->pcidev->dev,
+					  ISCSI_MAX_BDS_PER_CMD *
+					  sizeof(struct iscsi_bd),
+					  cmd->io_tbl.bd_tbl,
+					  cmd->io_tbl.bd_tbl_dma);
+	}
+}
+
+
+/**
+ * bnx2i_setup_cmd_pool - sets up iscsi command pool for the session
+ * @hba:	adapter instance pointer
+ * @session:	iscsi session pointer
+ */
+static int bnx2i_setup_cmd_pool(struct bnx2i_hba *hba,
+				struct iscsi_session *session)
+{
+	int i;
+
+	for (i = 0; i < session->cmds_max; i++) {
+		struct iscsi_task *task = session->cmds[i];
+		struct bnx2i_cmd *cmd = task->dd_data;
+
+		task->hdr = &cmd->hdr;
+		task->hdr_max = sizeof(struct iscsi_hdr);
+
+		if (bnx2i_alloc_bdt(hba, session, cmd))
+			goto free_bdts;
+	}
+
+	return 0;
+
+free_bdts:
+	bnx2i_destroy_cmd_pool(hba, session);
+	return -ENOMEM;
+}
+
+
+/**
+ * bnx2i_setup_mp_bdt - allocate BD table resources
+ * @hba:	pointer to adapter structure
+ *
+ * Allocate memory for dummy buffer and associated BD
+ * table to be used by middle path (MP) requests
+ */
+static int bnx2i_setup_mp_bdt(struct bnx2i_hba *hba)
+{
+	int rc = 0;
+	struct iscsi_bd *mp_bdt;
+	u64 addr;
+
+	hba->mp_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
+					    &hba->mp_bd_dma, GFP_KERNEL);
+	if (!hba->mp_bd_tbl) {
+		printk(KERN_ERR "unable to allocate Middle Path BDT\n");
+		rc = -1;
+		goto out;
+	}
+
+	hba->dummy_buffer = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
+					       &hba->dummy_buf_dma, GFP_KERNEL);
+	if (!hba->dummy_buffer) {
+		printk(KERN_ERR "unable to alloc Middle Path Dummy Buffer\n");
+		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+				  hba->mp_bd_tbl, hba->mp_bd_dma);
+		hba->mp_bd_tbl = NULL;
+		rc = -1;
+		goto out;
+	}
+
+	mp_bdt = (struct iscsi_bd *) hba->mp_bd_tbl;
+	addr = (unsigned long) hba->dummy_buf_dma;
+	mp_bdt->buffer_addr_lo = addr & 0xffffffff;
+	mp_bdt->buffer_addr_hi = addr >> 32;
+	mp_bdt->buffer_length = PAGE_SIZE;
+	mp_bdt->flags = ISCSI_BD_LAST_IN_BD_CHAIN |
+			ISCSI_BD_FIRST_IN_BD_CHAIN;
+out:
+	return rc;
+}
+
+
+/**
+ * bnx2i_free_mp_bdt - releases middle path (MP) resources
+ * @hba:	pointer to adapter instance
+ *
+ * frees the MP dummy buffer and the associated BD table
+ */
+static void bnx2i_free_mp_bdt(struct bnx2i_hba *hba)
+{
+	if (hba->mp_bd_tbl) {
+		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+				  hba->mp_bd_tbl, hba->mp_bd_dma);
+		hba->mp_bd_tbl = NULL;
+	}
+	if (hba->dummy_buffer) {
+		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+				  hba->dummy_buffer, hba->dummy_buf_dma);
+		hba->dummy_buffer = NULL;
+	}
+}
+
+/**
+ * bnx2i_drop_session - notifies iscsid of connection error.
+ * @cls_session:	iscsi cls session pointer
+ *
+ * This notifies iscsid that there is an error, so it can initiate
+ * recovery.
+ *
+ * This relies on the caller using the iscsi class iterator so the object
+ * is refcounted and does not disappear from under us.
+ */
+void bnx2i_drop_session(struct iscsi_cls_session *cls_session)
+{
+	iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_CONN_FAILED);
+}
+
+/**
+ * bnx2i_ep_destroy_list_add - add an entry to EP destroy list
+ * @hba:	pointer to adapter instance
+ * @ep:		pointer to endpoint (transport identifier) structure
+ *
+ * EP destroy queue manager
+ */
+static int bnx2i_ep_destroy_list_add(struct bnx2i_hba *hba,
+				     struct bnx2i_endpoint *ep)
+{
+	write_lock_bh(&hba->ep_rdwr_lock);
+	list_add_tail(&ep->link, &hba->ep_destroy_list);
+	write_unlock_bh(&hba->ep_rdwr_lock);
+	return 0;
+}
+
+/**
+ * bnx2i_ep_destroy_list_del - removes an entry from the EP destroy list
+ *
+ * @hba: 		pointer to adapter instance
+ * @ep: 		pointer to endpoint (transport identifier) structure
+ *
+ * EP destroy queue manager
+ */
+static int bnx2i_ep_destroy_list_del(struct bnx2i_hba *hba,
+				     struct bnx2i_endpoint *ep)
+{
+	write_lock_bh(&hba->ep_rdwr_lock);
+	list_del_init(&ep->link);
+	write_unlock_bh(&hba->ep_rdwr_lock);
+
+	return 0;
+}
+
+/**
+ * bnx2i_ep_ofld_list_add - add an entry to ep offload pending list
+ * @hba:	pointer to adapter instance
+ * @ep:		pointer to endpoint (transport identifier) structure
+ *
+ * pending conn offload completion queue manager
+ */
+static int bnx2i_ep_ofld_list_add(struct bnx2i_hba *hba,
+				  struct bnx2i_endpoint *ep)
+{
+	write_lock_bh(&hba->ep_rdwr_lock);
+	list_add_tail(&ep->link, &hba->ep_ofld_list);
+	write_unlock_bh(&hba->ep_rdwr_lock);
+	return 0;
+}
+
+/**
+ * bnx2i_ep_ofld_list_del - removes an entry from the ep offload pending list
+ * @hba: 		pointer to adapter instance
+ * @ep: 		pointer to endpoint (transport identifier) structure
+ *
+ * pending conn offload completion queue manager
+ */
+static int bnx2i_ep_ofld_list_del(struct bnx2i_hba *hba,
+				  struct bnx2i_endpoint *ep)
+{
+	write_lock_bh(&hba->ep_rdwr_lock);
+	list_del_init(&ep->link);
+	write_unlock_bh(&hba->ep_rdwr_lock);
+	return 0;
+}
+
+
+/**
+ * bnx2i_find_ep_in_ofld_list - find iscsi_cid in pending list of endpoints
+ *
+ * @hba: 		pointer to adapter instance
+ * @iscsi_cid:		iscsi context ID to find
+ *
+ */
+struct bnx2i_endpoint *
+bnx2i_find_ep_in_ofld_list(struct bnx2i_hba *hba, u32 iscsi_cid)
+{
+	struct list_head *list;
+	struct list_head *tmp;
+	struct bnx2i_endpoint *ep = NULL;
+
+	read_lock_bh(&hba->ep_rdwr_lock);
+	list_for_each_safe(list, tmp, &hba->ep_ofld_list) {
+		ep = (struct bnx2i_endpoint *)list;
+
+		if (ep->ep_iscsi_cid == iscsi_cid)
+			break;
+		ep = NULL;
+	}
+	read_unlock_bh(&hba->ep_rdwr_lock);
+
+	if (!ep)
+		printk(KERN_ERR "l5 cid %d not found\n", iscsi_cid);
+	return ep;
+}
+
+
+/**
+ * bnx2i_find_ep_in_destroy_list - find iscsi_cid in destroy list
+ * @hba: 		pointer to adapter instance
+ * @iscsi_cid:		iscsi context ID to find
+ *
+ */
+struct bnx2i_endpoint *
+bnx2i_find_ep_in_destroy_list(struct bnx2i_hba *hba, u32 iscsi_cid)
+{
+	struct list_head *list;
+	struct list_head *tmp;
+	struct bnx2i_endpoint *ep = NULL;
+
+	read_lock_bh(&hba->ep_rdwr_lock);
+	list_for_each_safe(list, tmp, &hba->ep_destroy_list) {
+		ep = (struct bnx2i_endpoint *)list;
+
+		if (ep->ep_iscsi_cid == iscsi_cid)
+			break;
+		ep = NULL;
+	}
+	read_unlock_bh(&hba->ep_rdwr_lock);
+
+	if (!ep)
+		printk(KERN_ERR "l5 cid %d not found\n", iscsi_cid);
+
+	return ep;
+}
+
+/**
+ * bnx2i_setup_host_queue_size - assigns shost->can_queue param
+ * @hba:	pointer to adapter instance
+ * @shost:	scsi host pointer
+ *
+ * Initializes 'can_queue' parameter based on how many outstanding commands
+ * 	the device can handle. Each device type (5708/5709/57710) has
+ *	different capabilities
+ */
+static void bnx2i_setup_host_queue_size(struct bnx2i_hba *hba,
+					struct Scsi_Host *shost)
+{
+	if (test_bit(BNX2I_NX2_DEV_5708, &hba->cnic_dev_type))
+		shost->can_queue = ISCSI_MAX_CMDS_PER_HBA_5708;
+	else if (test_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type))
+		shost->can_queue = ISCSI_MAX_CMDS_PER_HBA_5709;
+	else if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type))
+		shost->can_queue = ISCSI_MAX_CMDS_PER_HBA_57710;
+	else
+		shost->can_queue = ISCSI_MAX_CMDS_PER_HBA_5708;
+}
+
+
+/**
+ * bnx2i_alloc_hba - allocate and init adapter instance
+ * @cnic:	cnic device pointer
+ *
+ * allocate & initialize adapter structure and call other
+ *	support routines to do per adapter initialization
+ */
+struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic)
+{
+	struct Scsi_Host *shost;
+	struct bnx2i_hba *hba;
+
+	shost = iscsi_host_alloc(&bnx2i_host_template, sizeof(*hba), 0);
+	if (!shost)
+		return NULL;
+	shost->dma_boundary = cnic->pcidev->dma_mask;
+	shost->transportt = bnx2i_scsi_xport_template;
+	shost->max_id = ISCSI_MAX_CONNS_PER_HBA;
+	shost->max_channel = 0;
+	shost->max_lun = 512;
+	shost->max_cmd_len = 16;
+
+	hba = iscsi_host_priv(shost);
+	hba->shost = shost;
+	hba->netdev = cnic->netdev;
+	/* Get PCI related information and update hba struct members */
+	hba->pcidev = cnic->pcidev;
+	pci_dev_get(hba->pcidev);
+	hba->pci_did = hba->pcidev->device;
+	hba->pci_vid = hba->pcidev->vendor;
+	hba->pci_sdid = hba->pcidev->subsystem_device;
+	hba->pci_svid = hba->pcidev->subsystem_vendor;
+	hba->pci_func = PCI_FUNC(hba->pcidev->devfn);
+	hba->pci_devno = PCI_SLOT(hba->pcidev->devfn);
+	bnx2i_identify_device(hba);
+	bnx2i_setup_host_queue_size(hba, shost);
+
+	if (test_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type)) {
+		hba->regview = ioremap_nocache(hba->netdev->base_addr,
+					       BNX2_MQ_CONFIG2);
+		if (!hba->regview)
+			goto ioreg_map_err;
+	} else if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) {
+		hba->regview = ioremap_nocache(hba->netdev->base_addr, 4096);
+		if (!hba->regview)
+			goto ioreg_map_err;
+	}
+
+	if (bnx2i_setup_mp_bdt(hba))
+		goto mp_bdt_mem_err;
+
+	INIT_LIST_HEAD(&hba->ep_ofld_list);
+	INIT_LIST_HEAD(&hba->ep_destroy_list);
+	rwlock_init(&hba->ep_rdwr_lock);
+
+	hba->mtu_supported = BNX2I_MAX_MTU_SUPPORTED;
+
+	/* different values for 5708/5709/57710 */
+	hba->max_active_conns = ISCSI_MAX_CONNS_PER_HBA;
+
+	if (bnx2i_setup_free_cid_que(hba))
+		goto cid_que_err;
+
+	/* SQ/RQ/CQ size can be changed via sysfs interface */
+	if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) {
+		if (sq_size && sq_size <= BNX2I_5770X_SQ_WQES_MAX)
+			hba->max_sqes = sq_size;
+		else
+			hba->max_sqes = BNX2I_5770X_SQ_WQES_DEFAULT;
+	} else {	/* 5706/5708/5709 */
+		if (sq_size && sq_size <= BNX2I_570X_SQ_WQES_MAX)
+			hba->max_sqes = sq_size;
+		else
+			hba->max_sqes = BNX2I_570X_SQ_WQES_DEFAULT;
+	}
+
+	hba->max_rqes = rq_size;
+	hba->max_cqes = hba->max_sqes + rq_size;
+	if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) {
+		if (hba->max_cqes > BNX2I_5770X_CQ_WQES_MAX)
+			hba->max_cqes = BNX2I_5770X_CQ_WQES_MAX;
+	} else if (hba->max_cqes > BNX2I_570X_CQ_WQES_MAX)
+		hba->max_cqes = BNX2I_570X_CQ_WQES_MAX;
+
+	hba->num_ccell = hba->max_sqes / 2;
+
+	spin_lock_init(&hba->lock);
+	mutex_init(&hba->net_dev_lock);
+
+	if (iscsi_host_add(shost, &hba->pcidev->dev))
+		goto free_dump_mem;
+	return hba;
+
+free_dump_mem:
+	bnx2i_release_free_cid_que(hba);
+cid_que_err:
+	bnx2i_free_mp_bdt(hba);
+mp_bdt_mem_err:
+	if (hba->regview) {
+		iounmap(hba->regview);
+		hba->regview = NULL;
+	}
+ioreg_map_err:
+	pci_dev_put(hba->pcidev);
+	scsi_host_put(shost);
+	return NULL;
+}
+
+/**
+ * bnx2i_free_hba - releases hba structure and resources held by the adapter
+ * @hba:	pointer to adapter instance
+ *
+ * free adapter structure and call various cleanup routines.
+ */
+void bnx2i_free_hba(struct bnx2i_hba *hba)
+{
+	struct Scsi_Host *shost = hba->shost;
+
+	iscsi_host_remove(shost);
+	INIT_LIST_HEAD(&hba->ep_ofld_list);
+	INIT_LIST_HEAD(&hba->ep_destroy_list);
+	pci_dev_put(hba->pcidev);
+
+	if (hba->regview) {
+		iounmap(hba->regview);
+		hba->regview = NULL;
+	}
+	bnx2i_free_mp_bdt(hba);
+	bnx2i_release_free_cid_que(hba);
+	iscsi_host_free(shost);
+}
+
+/**
+ * bnx2i_conn_free_login_resources - free DMA resources used for login process
+ * @hba:		pointer to adapter instance
+ * @bnx2i_conn:		iscsi connection pointer
+ *
+ * Login related resources, mostly BDT & payload DMA memory is freed
+ */
+static void bnx2i_conn_free_login_resources(struct bnx2i_hba *hba,
+					    struct bnx2i_conn *bnx2i_conn)
+{
+	if (bnx2i_conn->gen_pdu.resp_bd_tbl) {
+		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+				  bnx2i_conn->gen_pdu.resp_bd_tbl,
+				  bnx2i_conn->gen_pdu.resp_bd_dma);
+		bnx2i_conn->gen_pdu.resp_bd_tbl = NULL;
+	}
+
+	if (bnx2i_conn->gen_pdu.req_bd_tbl) {
+		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+				  bnx2i_conn->gen_pdu.req_bd_tbl,
+				  bnx2i_conn->gen_pdu.req_bd_dma);
+		bnx2i_conn->gen_pdu.req_bd_tbl = NULL;
+	}
+
+	if (bnx2i_conn->gen_pdu.resp_buf) {
+		dma_free_coherent(&hba->pcidev->dev,
+				  ISCSI_DEF_MAX_RECV_SEG_LEN,
+				  bnx2i_conn->gen_pdu.resp_buf,
+				  bnx2i_conn->gen_pdu.resp_dma_addr);
+		bnx2i_conn->gen_pdu.resp_buf = NULL;
+	}
+
+	if (bnx2i_conn->gen_pdu.req_buf) {
+		dma_free_coherent(&hba->pcidev->dev,
+				  ISCSI_DEF_MAX_RECV_SEG_LEN,
+				  bnx2i_conn->gen_pdu.req_buf,
+				  bnx2i_conn->gen_pdu.req_dma_addr);
+		bnx2i_conn->gen_pdu.req_buf = NULL;
+	}
+}
+
+/**
+ * bnx2i_conn_alloc_login_resources - alloc DMA resources for login/nop.
+ * @hba:		pointer to adapter instance
+ * @bnx2i_conn:		iscsi connection pointer
+ *
+ * Mgmt task DMA resources are allocated in this routine.
+ */
+static int bnx2i_conn_alloc_login_resources(struct bnx2i_hba *hba,
+					    struct bnx2i_conn *bnx2i_conn)
+{
+	/* Allocate memory for login request/response buffers */
+	bnx2i_conn->gen_pdu.req_buf =
+		dma_alloc_coherent(&hba->pcidev->dev,
+				   ISCSI_DEF_MAX_RECV_SEG_LEN,
+				   &bnx2i_conn->gen_pdu.req_dma_addr,
+				   GFP_KERNEL);
+	if (bnx2i_conn->gen_pdu.req_buf == NULL)
+		goto login_req_buf_failure;
+
+	bnx2i_conn->gen_pdu.req_buf_size = 0;
+	bnx2i_conn->gen_pdu.req_wr_ptr = bnx2i_conn->gen_pdu.req_buf;
+
+	bnx2i_conn->gen_pdu.resp_buf =
+		dma_alloc_coherent(&hba->pcidev->dev,
+				   ISCSI_DEF_MAX_RECV_SEG_LEN,
+				   &bnx2i_conn->gen_pdu.resp_dma_addr,
+				   GFP_KERNEL);
+	if (bnx2i_conn->gen_pdu.resp_buf == NULL)
+		goto login_resp_buf_failure;
+
+	bnx2i_conn->gen_pdu.resp_buf_size = ISCSI_DEF_MAX_RECV_SEG_LEN;
+	bnx2i_conn->gen_pdu.resp_wr_ptr = bnx2i_conn->gen_pdu.resp_buf;
+
+	bnx2i_conn->gen_pdu.req_bd_tbl =
+		dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
+				   &bnx2i_conn->gen_pdu.req_bd_dma, GFP_KERNEL);
+	if (bnx2i_conn->gen_pdu.req_bd_tbl == NULL)
+		goto login_req_bd_tbl_failure;
+
+	bnx2i_conn->gen_pdu.resp_bd_tbl =
+		dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
+				   &bnx2i_conn->gen_pdu.resp_bd_dma,
+				   GFP_KERNEL);
+	if (bnx2i_conn->gen_pdu.resp_bd_tbl == NULL)
+		goto login_resp_bd_tbl_failure;
+
+	return 0;
+
+login_resp_bd_tbl_failure:
+	dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+			  bnx2i_conn->gen_pdu.req_bd_tbl,
+			  bnx2i_conn->gen_pdu.req_bd_dma);
+	bnx2i_conn->gen_pdu.req_bd_tbl = NULL;
+
+login_req_bd_tbl_failure:
+	dma_free_coherent(&hba->pcidev->dev, ISCSI_DEF_MAX_RECV_SEG_LEN,
+			  bnx2i_conn->gen_pdu.resp_buf,
+			  bnx2i_conn->gen_pdu.resp_dma_addr);
+	bnx2i_conn->gen_pdu.resp_buf = NULL;
+login_resp_buf_failure:
+	dma_free_coherent(&hba->pcidev->dev, ISCSI_DEF_MAX_RECV_SEG_LEN,
+			  bnx2i_conn->gen_pdu.req_buf,
+			  bnx2i_conn->gen_pdu.req_dma_addr);
+	bnx2i_conn->gen_pdu.req_buf = NULL;
+login_req_buf_failure:
+	iscsi_conn_printk(KERN_ERR, bnx2i_conn->cls_conn->dd_data,
+			  "login resource alloc failed!!\n");
+	return -ENOMEM;
+}
+
+
+/**
+ * bnx2i_iscsi_prep_generic_pdu_bd - prepares BD table.
+ * @bnx2i_conn:		iscsi connection pointer
+ *
+ * Prepares the BD table entries, before shipping requests to cnic,
+ *	for PDUs prepared by the 'iscsid' daemon
+ */
+static void bnx2i_iscsi_prep_generic_pdu_bd(struct bnx2i_conn *bnx2i_conn)
+{
+	struct iscsi_bd *bd_tbl;
+
+	bd_tbl = (struct iscsi_bd *) bnx2i_conn->gen_pdu.req_bd_tbl;
+
+	bd_tbl->buffer_addr_hi =
+		(u32) ((u64) bnx2i_conn->gen_pdu.req_dma_addr >> 32);
+	bd_tbl->buffer_addr_lo = (u32) bnx2i_conn->gen_pdu.req_dma_addr;
+	bd_tbl->buffer_length = bnx2i_conn->gen_pdu.req_wr_ptr -
+				bnx2i_conn->gen_pdu.req_buf;
+	bd_tbl->reserved0 = 0;
+	bd_tbl->flags = ISCSI_BD_LAST_IN_BD_CHAIN |
+			ISCSI_BD_FIRST_IN_BD_CHAIN;
+
+	bd_tbl = (struct iscsi_bd  *) bnx2i_conn->gen_pdu.resp_bd_tbl;
+	bd_tbl->buffer_addr_hi = (u64) bnx2i_conn->gen_pdu.resp_dma_addr >> 32;
+	bd_tbl->buffer_addr_lo = (u32) bnx2i_conn->gen_pdu.resp_dma_addr;
+	bd_tbl->buffer_length = ISCSI_DEF_MAX_RECV_SEG_LEN;
+	bd_tbl->reserved0 = 0;
+	bd_tbl->flags = ISCSI_BD_LAST_IN_BD_CHAIN |
+			ISCSI_BD_FIRST_IN_BD_CHAIN;
+}
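+
+/*
+ * Sizing note for the request BD above: buffer_length is derived from
+ * the write pointer, so after e.g. a 48-byte login PDU payload
+ * (hypothetical size) has been copied into req_buf,
+ * req_wr_ptr - req_buf = 48 becomes the single BD's length.
+ */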
+
+
+/**
+ * bnx2i_iscsi_send_generic_request - called to send mgmt tasks.
+ * @task:	transport layer task pointer
+ *
+ * called to transmit PDUs prepared by the 'iscsid' daemon. iSCSI login,
+ *	Nop-out and Logout requests flow through this path.
+ */
+static int bnx2i_iscsi_send_generic_request(struct iscsi_task *task)
+{
+	struct bnx2i_cmd *cmd = task->dd_data;
+	struct bnx2i_conn *bnx2i_conn = cmd->conn;
+	int rc = 0;
+	char *buf;
+	int data_len;
+
+	bnx2i_iscsi_prep_generic_pdu_bd(bnx2i_conn);
+	switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
+	case ISCSI_OP_LOGIN:
+		bnx2i_send_iscsi_login(bnx2i_conn, task);
+		break;
+	case ISCSI_OP_NOOP_OUT:
+		data_len = bnx2i_conn->gen_pdu.req_buf_size;
+		buf = bnx2i_conn->gen_pdu.req_buf;
+		if (data_len)
+			rc = bnx2i_send_iscsi_nopout(bnx2i_conn, task,
+						     RESERVED_ITT,
+						     buf, data_len, 1);
+		else
+			rc = bnx2i_send_iscsi_nopout(bnx2i_conn, task,
+						     RESERVED_ITT,
+						     NULL, 0, 1);
+		break;
+	case ISCSI_OP_LOGOUT:
+		rc = bnx2i_send_iscsi_logout(bnx2i_conn, task);
+		break;
+	case ISCSI_OP_SCSI_TMFUNC:
+		rc = bnx2i_send_iscsi_tmf(bnx2i_conn, task);
+		break;
+	default:
+		iscsi_conn_printk(KERN_ALERT, bnx2i_conn->cls_conn->dd_data,
+				  "send_gen: unsupported op 0x%x\n",
+				  task->hdr->opcode);
+	}
+	return rc;
+}
+
+
+/**********************************************************************
+ *		SCSI-ML Interface
+ **********************************************************************/
+
+/**
+ * bnx2i_cpy_scsi_cdb - copies LUN & CDB fields in required format to sq wqe
+ * @sc:		SCSI-ML command pointer
+ * @cmd:	iscsi cmd pointer
+ */
+static void bnx2i_cpy_scsi_cdb(struct scsi_cmnd *sc, struct bnx2i_cmd *cmd)
+{
+	u32 dword;
+	int lpcnt;
+	u8 *srcp;
+	u32 *dstp;
+	u32 scsi_lun[2];
+
+	int_to_scsilun(sc->device->lun, (struct scsi_lun *) scsi_lun);
+	cmd->req.lun[0] = be32_to_cpu(scsi_lun[0]);
+	cmd->req.lun[1] = be32_to_cpu(scsi_lun[1]);
+
+	lpcnt = cmd->scsi_cmd->cmd_len / sizeof(dword);
+	srcp = (u8 *) sc->cmnd;
+	dstp = (u32 *) cmd->req.cdb;
+	while (lpcnt--) {
+		memcpy(&dword, (const void *) srcp, 4);
+		*dstp = cpu_to_be32(dword);
+		srcp += 4;
+		dstp++;
+	}
+	if (sc->cmd_len & 0x3) {
+		dword = (u32) srcp[0] | ((u32) srcp[1] << 8);
+		*dstp = cpu_to_be32(dword);
+	}
+}
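+
+/*
+ * Worked example: a 10-byte CDB is copied by the loop above as two
+ * big-endian dwords (lpcnt = 10 / 4 = 2), and the trailing
+ * 'cmd_len & 0x3' branch packs the remaining two bytes into a third
+ * dword.
+ */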
+
+static void bnx2i_cleanup_task(struct iscsi_task *task)
+{
+	struct iscsi_conn *conn = task->conn;
+	struct bnx2i_conn *bnx2i_conn = conn->dd_data;
+	struct bnx2i_hba *hba = bnx2i_conn->hba;
+
+	/*
+	 * mgmt task or cmd was never sent to us to transmit.
+	 */
+	if (!task->sc || task->state == ISCSI_TASK_PENDING)
+		return;
+	/*
+	 * need to clean-up task context to claim dma buffers
+	 */
+	if (task->state == ISCSI_TASK_ABRT_TMF) {
+		bnx2i_send_cmd_cleanup_req(hba, task->dd_data);
+
+		spin_unlock_bh(&conn->session->lock);
+		wait_for_completion_timeout(&bnx2i_conn->cmd_cleanup_cmpl,
+				msecs_to_jiffies(ISCSI_CMD_CLEANUP_TIMEOUT));
+		spin_lock_bh(&conn->session->lock);
+	}
+	bnx2i_iscsi_unmap_sg_list(task->dd_data);
+}
+
+/**
+ * bnx2i_mtask_xmit - transmit mtask to chip for further processing
+ * @conn:	transport layer conn structure pointer
+ * @task:	transport layer command structure pointer
+ */
+static int
+bnx2i_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task)
+{
+	struct bnx2i_conn *bnx2i_conn = conn->dd_data;
+	struct bnx2i_cmd *cmd = task->dd_data;
+
+	memset(bnx2i_conn->gen_pdu.req_buf, 0, ISCSI_DEF_MAX_RECV_SEG_LEN);
+
+	bnx2i_setup_cmd_wqe_template(cmd);
+	bnx2i_conn->gen_pdu.req_buf_size = task->data_count;
+	if (task->data_count) {
+		memcpy(bnx2i_conn->gen_pdu.req_buf, task->data,
+		       task->data_count);
+		bnx2i_conn->gen_pdu.req_wr_ptr =
+			bnx2i_conn->gen_pdu.req_buf + task->data_count;
+	}
+	cmd->conn = conn->dd_data;
+	cmd->scsi_cmd = NULL;
+	return bnx2i_iscsi_send_generic_request(task);
+}
+
+/**
+ * bnx2i_task_xmit - transmit iscsi command to chip for further processing
+ * @task:	transport layer command structure pointer
+ *
+ * maps SG buffers and send request to chip/firmware in the form of SQ WQE
+ */
+static int bnx2i_task_xmit(struct iscsi_task *task)
+{
+	struct iscsi_conn *conn = task->conn;
+	struct iscsi_session *session = conn->session;
+	struct Scsi_Host *shost = iscsi_session_to_shost(session->cls_session);
+	struct bnx2i_hba *hba = iscsi_host_priv(shost);
+	struct bnx2i_conn *bnx2i_conn = conn->dd_data;
+	struct scsi_cmnd *sc = task->sc;
+	struct bnx2i_cmd *cmd = task->dd_data;
+	struct iscsi_cmd *hdr = (struct iscsi_cmd *) task->hdr;
+
+	if (test_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state))
+		return -ENOTCONN;
+
+	if (!bnx2i_conn->is_bound)
+		return -ENOTCONN;
+
+	/*
+	 * If there is no scsi_cmnd this must be a mgmt task
+	 */
+	if (!sc)
+		return bnx2i_mtask_xmit(conn, task);
+
+	bnx2i_setup_cmd_wqe_template(cmd);
+	cmd->req.op_code = ISCSI_OP_SCSI_CMD;
+	cmd->conn = bnx2i_conn;
+	cmd->scsi_cmd = sc;
+	cmd->req.total_data_transfer_length = scsi_bufflen(sc);
+	cmd->req.cmd_sn = be32_to_cpu(hdr->cmdsn);
+
+	bnx2i_iscsi_map_sg_list(cmd);
+	bnx2i_cpy_scsi_cdb(sc, cmd);
+
+	cmd->req.op_attr = ISCSI_ATTR_SIMPLE;
+	if (sc->sc_data_direction == DMA_TO_DEVICE) {
+		cmd->req.op_attr |= ISCSI_CMD_REQUEST_WRITE;
+		cmd->req.itt = task->itt |
+			(ISCSI_TASK_TYPE_WRITE << ISCSI_CMD_REQUEST_TYPE_SHIFT);
+		bnx2i_setup_write_cmd_bd_info(task);
+	} else {
+		if (scsi_bufflen(sc))
+			cmd->req.op_attr |= ISCSI_CMD_REQUEST_READ;
+		cmd->req.itt = task->itt |
+			(ISCSI_TASK_TYPE_READ << ISCSI_CMD_REQUEST_TYPE_SHIFT);
+	}
+
+	cmd->req.num_bds = cmd->io_tbl.bd_valid;
+	if (!cmd->io_tbl.bd_valid) {
+		cmd->req.bd_list_addr_lo = (u32) hba->mp_bd_dma;
+		cmd->req.bd_list_addr_hi = (u32) ((u64) hba->mp_bd_dma >> 32);
+		cmd->req.num_bds = 1;
+	}
+
+	bnx2i_send_iscsi_scsicmd(bnx2i_conn, cmd);
+	return 0;
+}
+
+/**
+ * bnx2i_session_create - create a new iscsi session
+ * @ep:			iscsi endpoint to be used for this session
+ * @cmds_max:		max commands supported
+ * @qdepth:		scsi queue depth to support
+ * @initial_cmdsn:	initial iscsi CMDSN to be used for this session
+ *
+ * Creates a new iSCSI session instance on given device.
+ */
+static struct iscsi_cls_session *
+bnx2i_session_create(struct iscsi_endpoint *ep,
+		     uint16_t cmds_max, uint16_t qdepth,
+		     uint32_t initial_cmdsn)
+{
+	struct Scsi_Host *shost;
+	struct iscsi_cls_session *cls_session;
+	struct bnx2i_hba *hba;
+	struct bnx2i_endpoint *bnx2i_ep;
+
+	if (!ep) {
+		printk(KERN_ERR "bnx2i: missing ep.\n");
+		return NULL;
+	}
+
+	bnx2i_ep = ep->dd_data;
+	shost = bnx2i_ep->hba->shost;
+	hba = iscsi_host_priv(shost);
+	if (bnx2i_adapter_ready(hba))
+		return NULL;
+
+	/*
+	 * user can override hw limit as long as it is within
+	 * the min/max.
+	 */
+	if (cmds_max > hba->max_sqes)
+		cmds_max = hba->max_sqes;
+	else if (cmds_max < BNX2I_SQ_WQES_MIN)
+		cmds_max = BNX2I_SQ_WQES_MIN;
+
+	cls_session = iscsi_session_setup(&bnx2i_iscsi_transport, shost,
+					  cmds_max, sizeof(struct bnx2i_cmd),
+					  initial_cmdsn, ISCSI_MAX_TARGET);
+	if (!cls_session)
+		return NULL;
+
+	if (bnx2i_setup_cmd_pool(hba, cls_session->dd_data))
+		goto session_teardown;
+	return cls_session;
+
+session_teardown:
+	iscsi_session_teardown(cls_session);
+	return NULL;
+}
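+
+/*
+ * Example of the cmds_max clamping above (hypothetical limits): a
+ * userspace request for 1024 commands on an adapter with max_sqes = 128
+ * is clamped down to 128, while a request below BNX2I_SQ_WQES_MIN is
+ * raised to that floor.
+ */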
+
+
+/**
+ * bnx2i_session_destroy - destroys iscsi session
+ * @cls_session:	pointer to iscsi cls session
+ *
+ * Destroys previously created iSCSI session instance and releases
+ *	all resources held by it
+ */
+static void bnx2i_session_destroy(struct iscsi_cls_session *cls_session)
+{
+	struct iscsi_session *session = cls_session->dd_data;
+	struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+	struct bnx2i_hba *hba = iscsi_host_priv(shost);
+
+	bnx2i_destroy_cmd_pool(hba, session);
+	iscsi_session_teardown(cls_session);
+}
+
+
+/**
+ * bnx2i_conn_create - create iscsi connection instance
+ * @cls_session:	pointer to iscsi cls session
+ * @cid:		iscsi cid as per rfc (not NX2's CID terminology)
+ *
+ * Creates a new iSCSI connection instance for a given session
+ */
+static struct iscsi_cls_conn *
+bnx2i_conn_create(struct iscsi_cls_session *cls_session, uint32_t cid)
+{
+	struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+	struct bnx2i_hba *hba = iscsi_host_priv(shost);
+	struct bnx2i_conn *bnx2i_conn;
+	struct iscsi_cls_conn *cls_conn;
+	struct iscsi_conn *conn;
+
+	cls_conn = iscsi_conn_setup(cls_session, sizeof(*bnx2i_conn),
+				    cid);
+	if (!cls_conn)
+		return NULL;
+	conn = cls_conn->dd_data;
+
+	bnx2i_conn = conn->dd_data;
+	bnx2i_conn->cls_conn = cls_conn;
+	bnx2i_conn->hba = hba;
+	/* 'ep' ptr will be assigned in bind() call */
+	bnx2i_conn->ep = NULL;
+	init_completion(&bnx2i_conn->cmd_cleanup_cmpl);
+
+	if (bnx2i_conn_alloc_login_resources(hba, bnx2i_conn)) {
+		iscsi_conn_printk(KERN_ALERT, conn,
+				  "conn_new: login resc alloc failed!!\n");
+		goto free_conn;
+	}
+
+	return cls_conn;
+
+free_conn:
+	iscsi_conn_teardown(cls_conn);
+	return NULL;
+}
+
+/**
+ * bnx2i_conn_bind - binds iscsi sess, conn and ep objects together
+ * @cls_session:	pointer to iscsi cls session
+ * @cls_conn:		pointer to iscsi cls conn
+ * @transport_fd:	64-bit EP handle
+ * @is_leading:		leading connection on this session?
+ *
+ * Binds together the iSCSI session instance, the iSCSI connection instance
+ *	and the TCP connection. This routine returns an error code if the
+ *	TCP connection does not belong to the device the iSCSI sess/conn
+ *	is bound to
+ */
+static int bnx2i_conn_bind(struct iscsi_cls_session *cls_session,
+			   struct iscsi_cls_conn *cls_conn,
+			   uint64_t transport_fd, int is_leading)
+{
+	struct iscsi_conn *conn = cls_conn->dd_data;
+	struct bnx2i_conn *bnx2i_conn = conn->dd_data;
+	struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+	struct bnx2i_hba *hba = iscsi_host_priv(shost);
+	struct bnx2i_endpoint *bnx2i_ep;
+	struct iscsi_endpoint *ep;
+	int ret_code;
+
+	ep = iscsi_lookup_endpoint(transport_fd);
+	if (!ep)
+		return -EINVAL;
+
+	bnx2i_ep = ep->dd_data;
+	if ((bnx2i_ep->state == EP_STATE_TCP_FIN_RCVD) ||
+	    (bnx2i_ep->state == EP_STATE_TCP_RST_RCVD))
+		/* Peer disconnected via FIN or RST */
+		return -EINVAL;
+
+	if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
+		return -EINVAL;
+
+	if (bnx2i_ep->hba != hba) {
+		/* Error - TCP connection does not belong to this device
+		 */
+		iscsi_conn_printk(KERN_ALERT, cls_conn->dd_data,
+				  "conn bind, ep=0x%p (%s) does not",
+				  bnx2i_ep, bnx2i_ep->hba->netdev->name);
+		iscsi_conn_printk(KERN_ALERT, cls_conn->dd_data,
+				  "belong to hba (%s)\n",
+				  hba->netdev->name);
+		return -EEXIST;
+	}
+
+	bnx2i_ep->conn = bnx2i_conn;
+	bnx2i_conn->ep = bnx2i_ep;
+	bnx2i_conn->iscsi_conn_cid = bnx2i_ep->ep_iscsi_cid;
+	bnx2i_conn->fw_cid = bnx2i_ep->ep_cid;
+	bnx2i_conn->is_bound = 1;
+
+	ret_code = bnx2i_bind_conn_to_iscsi_cid(hba, bnx2i_conn,
+						bnx2i_ep->ep_iscsi_cid);
+
+	/* 5706/5708/5709 FW takes RQ as full when initiated, but for 57710
+	 * driver needs to explicitly replenish RQ index during setup.
+	 */
+	if (test_bit(BNX2I_NX2_DEV_57710, &bnx2i_ep->hba->cnic_dev_type))
+		bnx2i_put_rq_buf(bnx2i_conn, 0);
+
+	bnx2i_arm_cq_event_coalescing(bnx2i_conn->ep, CNIC_ARM_CQE);
+	return ret_code;
+}
+
+
+/**
+ * bnx2i_conn_destroy - destroy iscsi connection instance & release resources
+ * @cls_conn:	pointer to iscsi cls conn
+ *
+ * Destroy an iSCSI connection instance and release memory resources held by
+ *	this connection
+ */
+static void bnx2i_conn_destroy(struct iscsi_cls_conn *cls_conn)
+{
+	struct iscsi_conn *conn = cls_conn->dd_data;
+	struct bnx2i_conn *bnx2i_conn = conn->dd_data;
+	struct Scsi_Host *shost;
+	struct bnx2i_hba *hba;
+
+	shost = iscsi_session_to_shost(iscsi_conn_to_session(cls_conn));
+	hba = iscsi_host_priv(shost);
+
+	bnx2i_conn_free_login_resources(hba, bnx2i_conn);
+	iscsi_conn_teardown(cls_conn);
+}
+
+
+/**
+ * bnx2i_conn_get_param - return iscsi connection parameter to caller
+ * @cls_conn:	pointer to iscsi cls conn
+ * @param:	parameter type identifier
+ * @buf: 	buffer pointer
+ *
+ * returns iSCSI connection parameters
+ */
+static int bnx2i_conn_get_param(struct iscsi_cls_conn *cls_conn,
+				enum iscsi_param param, char *buf)
+{
+	struct iscsi_conn *conn = cls_conn->dd_data;
+	struct bnx2i_conn *bnx2i_conn = conn->dd_data;
+	int len = 0;
+
+	switch (param) {
+	case ISCSI_PARAM_CONN_PORT:
+		if (bnx2i_conn->ep)
+			len = sprintf(buf, "%hu\n",
+				      bnx2i_conn->ep->cm_sk->dst_port);
+		break;
+	case ISCSI_PARAM_CONN_ADDRESS:
+		if (bnx2i_conn->ep)
+			len = sprintf(buf, NIPQUAD_FMT "\n",
+				      NIPQUAD(bnx2i_conn->ep->cm_sk->dst_ip));
+		break;
+	default:
+		return iscsi_conn_get_param(cls_conn, param, buf);
+	}
+
+	return len;
+}
+
+/**
+ * bnx2i_host_get_param - returns host (adapter) related parameters
+ * @shost:	scsi host pointer
+ * @param:	parameter type identifier
+ * @buf:	buffer pointer
+ */
+static int bnx2i_host_get_param(struct Scsi_Host *shost,
+				enum iscsi_host_param param, char *buf)
+{
+	struct bnx2i_hba *hba = iscsi_host_priv(shost);
+	int len = 0;
+
+	switch (param) {
+	case ISCSI_HOST_PARAM_HWADDRESS:
+		len = sysfs_format_mac(buf, hba->cnic->mac_addr, 6);
+		break;
+	case ISCSI_HOST_PARAM_NETDEV_NAME:
+		len = sprintf(buf, "%s\n", hba->netdev->name);
+		break;
+	default:
+		return iscsi_host_get_param(shost, param, buf);
+	}
+	return len;
+}
+
+/**
+ * bnx2i_conn_start - completes iscsi connection migration to FFP
+ * @cls_conn:	pointer to iscsi cls conn
+ *
+ * last call in FFP migration to handover iscsi conn to the driver
+ */
+static int bnx2i_conn_start(struct iscsi_cls_conn *cls_conn)
+{
+	struct iscsi_conn *conn = cls_conn->dd_data;
+	struct bnx2i_conn *bnx2i_conn = conn->dd_data;
+
+	bnx2i_conn->ep->state = EP_STATE_ULP_UPDATE_START;
+	bnx2i_update_iscsi_conn(conn);
+
+	/*
+	 * this should normally not sleep for a long time so it should
+	 * not disrupt the caller.
+	 */
+	bnx2i_conn->ep->ofld_timer.expires = 1 * HZ + jiffies;
+	bnx2i_conn->ep->ofld_timer.function = bnx2i_ep_ofld_timer;
+	bnx2i_conn->ep->ofld_timer.data = (unsigned long) bnx2i_conn->ep;
+	add_timer(&bnx2i_conn->ep->ofld_timer);
+	/* update iSCSI context for this conn, wait for CNIC to complete */
+	wait_event_interruptible(bnx2i_conn->ep->ofld_wait,
+			bnx2i_conn->ep->state != EP_STATE_ULP_UPDATE_START);
+
+	if (signal_pending(current))
+		flush_signals(current);
+	del_timer_sync(&bnx2i_conn->ep->ofld_timer);
+
+	iscsi_conn_start(cls_conn);
+	return 0;
+}
+
+
+/**
+ * bnx2i_conn_get_stats - returns iSCSI stats
+ * @cls_conn:	pointer to iscsi cls conn
+ * @stats:	pointer to iscsi statistic struct
+ */
+static void bnx2i_conn_get_stats(struct iscsi_cls_conn *cls_conn,
+				 struct iscsi_stats *stats)
+{
+	struct iscsi_conn *conn = cls_conn->dd_data;
+
+	stats->txdata_octets = conn->txdata_octets;
+	stats->rxdata_octets = conn->rxdata_octets;
+	stats->scsicmd_pdus = conn->scsicmd_pdus_cnt;
+	stats->dataout_pdus = conn->dataout_pdus_cnt;
+	stats->scsirsp_pdus = conn->scsirsp_pdus_cnt;
+	stats->datain_pdus = conn->datain_pdus_cnt;
+	stats->r2t_pdus = conn->r2t_pdus_cnt;
+	stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt;
+	stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt;
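+	/* one driver-specific counter is exported; custom_length tells
+	 * the iSCSI transport class how many custom[] slots are valid */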
+	stats->digest_err = 0;
+	stats->timeout_err = 0;
+	strcpy(stats->custom[0].desc, "eh_abort_cnt");
+	stats->custom[0].value = conn->eh_abort_cnt;
+	stats->custom_length = 1;
+}
+
+
+/**
+ * bnx2i_check_route - checks if the target IP route belongs to one of the NX2 devices
+ * @dst_addr:	target IP address
+ *
+ * check if the route resolves to a BNX2 device
+ */
+static struct bnx2i_hba *bnx2i_check_route(struct sockaddr *dst_addr)
+{
+	struct sockaddr_in *desti = (struct sockaddr_in *) dst_addr;
+	struct bnx2i_hba *hba;
+	struct cnic_dev *cnic = NULL;
+
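+	/* ensure every NX2 device is registered with the CNIC before
+	 * asking cm_select_dev() to pick one for this destination */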
+	bnx2i_reg_dev_all();
+
+	hba = get_adapter_list_head();
+	if (hba && hba->cnic)
+		cnic = hba->cnic->cm_select_dev(desti, CNIC_ULP_ISCSI);
+	if (!cnic) {
+		printk(KERN_ALERT "bnx2i: no route,"
+		       "can't connect using cnic\n");
+		goto no_nx2_route;
+	}
+	hba = bnx2i_find_hba_for_cnic(cnic);
+	if (!hba)
+		goto no_nx2_route;
+
+	if (bnx2i_adapter_ready(hba)) {
+		printk(KERN_ALERT "bnx2i: check route, hba not found\n");
+		goto no_nx2_route;
+	}
+	if (hba->netdev->mtu > hba->mtu_supported) {
+		printk(KERN_ALERT "bnx2i: %s network i/f mtu is set to %d\n",
+				  hba->netdev->name, hba->netdev->mtu);
+		printk(KERN_ALERT "bnx2i: iSCSI HBA can support mtu of %d\n",
+				  hba->mtu_supported);
+		goto no_nx2_route;
+	}
+	return hba;
+no_nx2_route:
+	return NULL;
+}
+
+
+/**
+ * bnx2i_tear_down_conn - tear down iscsi/tcp connection and free resources
+ * @hba:	pointer to adapter instance
+ * @ep:		endpoint (transport identifier) structure
+ *
+ * destroys the cm_sock structure and the on-chip iscsi context
+ */
+static int bnx2i_tear_down_conn(struct bnx2i_hba *hba,
+				 struct bnx2i_endpoint *ep)
+{
+	if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic))
+		hba->cnic->cm_destroy(ep->cm_sk);
+
+	if (test_bit(ADAPTER_STATE_GOING_DOWN, &ep->hba->adapter_state))
+		ep->state = EP_STATE_DISCONN_COMPL;
+
+	if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type) &&
+	    ep->state == EP_STATE_DISCONN_TIMEDOUT) {
+		printk(KERN_ALERT "bnx2i - ERROR - please submit GRC Dump,"
+				  " NW/PCIe trace, driver msgs to developers"
+				  " for analysis\n");
+		return 1;
+	}
+
+	ep->state = EP_STATE_CLEANUP_START;
+	init_timer(&ep->ofld_timer);
+	ep->ofld_timer.expires = 10 * HZ + jiffies;
+	ep->ofld_timer.function = bnx2i_ep_ofld_timer;
+	ep->ofld_timer.data = (unsigned long) ep;
+	add_timer(&ep->ofld_timer);
+
+	bnx2i_ep_destroy_list_add(hba, ep);
+
+	/* destroy iSCSI context, wait for it to complete */
+	bnx2i_send_conn_destroy(hba, ep);
+	wait_event_interruptible(ep->ofld_wait,
+				 (ep->state != EP_STATE_CLEANUP_START));
+
+	if (signal_pending(current))
+		flush_signals(current);
+	del_timer_sync(&ep->ofld_timer);
+
+	bnx2i_ep_destroy_list_del(hba, ep);
+
+	if (ep->state != EP_STATE_CLEANUP_CMPL)
+		/* should never happen */
+		printk(KERN_ALERT "bnx2i - conn destroy failed\n");
+
+	return 0;
+}
+
+
+/**
+ * bnx2i_ep_connect - establish TCP connection to target portal
+ * @shost:		scsi host
+ * @dst_addr:		target IP address
+ * @non_blocking:	blocking or non-blocking call
+ *
+ * this routine initiates the TCP/IP connection by invoking the Option-2 i/f
+ *	with l5_core and the CNIC. This is a multi-step process of resolving
+ *	the route to the target, creating an iscsi connection context,
+ *	handshaking with the CNIC module to create/initialize the socket
+ *	struct and finally sending down the option-2 request to complete the
+ *	TCP 3-way handshake
+ */
+static struct iscsi_endpoint *bnx2i_ep_connect(struct Scsi_Host *shost,
+					       struct sockaddr *dst_addr,
+					       int non_blocking)
+{
+	u32 iscsi_cid = BNX2I_CID_RESERVED;
+	struct sockaddr_in *desti = (struct sockaddr_in *) dst_addr;
+	struct sockaddr_in6 *desti6;
+	struct bnx2i_endpoint *bnx2i_ep;
+	struct bnx2i_hba *hba;
+	struct cnic_dev *cnic;
+	struct cnic_sockaddr saddr;
+	struct iscsi_endpoint *ep;
+	int rc = 0;
+
+	if (shost)
+		/* driver is given scsi host to work with */
+		hba = iscsi_host_priv(shost);
+	else
+		/*
+		 * check if the given destination can be reached through
+		 * an iscsi capable NetXtreme2 device
+		 */
+		hba = bnx2i_check_route(dst_addr);
+	if (!hba) {
+		rc = -ENOMEM;
+		goto check_busy;
+	}
+
+	cnic = hba->cnic;
+	ep = bnx2i_alloc_ep(hba);
+	if (!ep) {
+		rc = -ENOMEM;
+		goto check_busy;
+	}
+	bnx2i_ep = ep->dd_data;
+
+	mutex_lock(&hba->net_dev_lock);
+	if (bnx2i_adapter_ready(hba)) {
+		rc = -EPERM;
+		goto net_if_down;
+	}
+
+	bnx2i_ep->state = EP_STATE_IDLE;
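+	/* (u16) -1 marks the iscsi cid as unassigned until a real cid
+	 * is allocated below */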
+	bnx2i_ep->ep_iscsi_cid = (u16) -1;
+	bnx2i_ep->num_active_cmds = 0;
+	iscsi_cid = bnx2i_alloc_iscsi_cid(hba);
+	if (iscsi_cid == -1) {
+		printk(KERN_ALERT "alloc_ep: unable to allocate iscsi cid\n");
+		rc = -ENOMEM;
+		goto iscsi_cid_err;
+	}
+	bnx2i_ep->hba_age = hba->age;
+
+	rc = bnx2i_alloc_qp_resc(hba, bnx2i_ep);
+	if (rc != 0) {
+		printk(KERN_ALERT "bnx2i: ep_conn, alloc QP resc error\n");
+		rc = -ENOMEM;
+		goto qp_resc_err;
+	}
+
+	bnx2i_ep->ep_iscsi_cid = (u16)iscsi_cid;
+	bnx2i_ep->state = EP_STATE_OFLD_START;
+	bnx2i_ep_ofld_list_add(hba, bnx2i_ep);
+
+	init_timer(&bnx2i_ep->ofld_timer);
+	bnx2i_ep->ofld_timer.expires = 2 * HZ + jiffies;
+	bnx2i_ep->ofld_timer.function = bnx2i_ep_ofld_timer;
+	bnx2i_ep->ofld_timer.data = (unsigned long) bnx2i_ep;
+	add_timer(&bnx2i_ep->ofld_timer);
+
+	bnx2i_send_conn_ofld_req(hba, bnx2i_ep);
+
+	/* Wait for CNIC hardware to setup conn context and return 'cid' */
+	wait_event_interruptible(bnx2i_ep->ofld_wait,
+				 bnx2i_ep->state != EP_STATE_OFLD_START);
+
+	if (signal_pending(current))
+		flush_signals(current);
+	del_timer_sync(&bnx2i_ep->ofld_timer);
+
+	bnx2i_ep_ofld_list_del(hba, bnx2i_ep);
+
+	if (bnx2i_ep->state != EP_STATE_OFLD_COMPL) {
+		rc = -ENOSPC;
+		goto conn_failed;
+	}
+
+	rc = cnic->cm_create(cnic, CNIC_ULP_ISCSI, bnx2i_ep->ep_cid,
+			     iscsi_cid, &bnx2i_ep->cm_sk, bnx2i_ep);
+	if (rc) {
+		rc = -EINVAL;
+		goto conn_failed;
+	}
+
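+	/* 256KB socket buffers for the offloaded connection; the TCP
+	 * timestamp option is not used */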
+	bnx2i_ep->cm_sk->rcv_buf = 256 * 1024;
+	bnx2i_ep->cm_sk->snd_buf = 256 * 1024;
+	clear_bit(SK_TCP_TIMESTAMP, &bnx2i_ep->cm_sk->tcp_flags);
+
+	memset(&saddr, 0, sizeof(saddr));
+	if (dst_addr->sa_family == AF_INET) {
+		desti = (struct sockaddr_in *) dst_addr;
+		saddr.remote.v4 = *desti;
+		saddr.local.v4.sin_family = desti->sin_family;
+	} else if (dst_addr->sa_family == AF_INET6) {
+		desti6 = (struct sockaddr_in6 *) dst_addr;
+		saddr.remote.v6 = *desti6;
+		saddr.local.v6.sin6_family = desti6->sin6_family;
+	}
+
+	bnx2i_ep->timestamp = jiffies;
+	bnx2i_ep->state = EP_STATE_CONNECT_START;
+	if (!test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
+		rc = -EINVAL;
+		goto conn_failed;
+	} else
+		rc = cnic->cm_connect(bnx2i_ep->cm_sk, &saddr);
+
+	if (rc)
+		goto release_ep;
+
+	if (bnx2i_map_ep_dbell_regs(bnx2i_ep))
+		goto release_ep;
+	mutex_unlock(&hba->net_dev_lock);
+	return ep;
+
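+/*
+ * error unwinding: each label below releases progressively less of the
+ * state set up above; fall-through between the labels is intentional
+ */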
+release_ep:
+	if (bnx2i_tear_down_conn(hba, bnx2i_ep)) {
+		mutex_unlock(&hba->net_dev_lock);
+		return ERR_PTR(rc);
+	}
+conn_failed:
+net_if_down:
+iscsi_cid_err:
+	bnx2i_free_qp_resc(hba, bnx2i_ep);
+qp_resc_err:
+	bnx2i_free_ep(ep);
+	mutex_unlock(&hba->net_dev_lock);
+check_busy:
+	bnx2i_unreg_dev_all();
+	return ERR_PTR(rc);
+}
+
+
+/**
+ * bnx2i_ep_poll - polls for TCP connection establishment
+ * @ep:			TCP connection (endpoint) handle
+ * @timeout_ms:		timeout value in milliseconds
+ *
+ * polls for the TCP connect request to complete; returns 1 once the
+ *	connection is established and a negative value on timeout or failure
+ */
+static int bnx2i_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
+{
+	struct bnx2i_endpoint *bnx2i_ep;
+	int rc = 0;
+
+	bnx2i_ep = ep->dd_data;
+	if ((bnx2i_ep->state == EP_STATE_IDLE) ||
+	    (bnx2i_ep->state == EP_STATE_CONNECT_FAILED) ||
+	    (bnx2i_ep->state == EP_STATE_OFLD_FAILED))
+		return -1;
+	if (bnx2i_ep->state == EP_STATE_CONNECT_COMPL)
+		return 1;
+
+	rc = wait_event_interruptible_timeout(bnx2i_ep->ofld_wait,
+					      ((bnx2i_ep->state ==
+						EP_STATE_OFLD_FAILED) ||
+					       (bnx2i_ep->state ==
+						EP_STATE_CONNECT_FAILED) ||
+					       (bnx2i_ep->state ==
+						EP_STATE_CONNECT_COMPL)),
+					      msecs_to_jiffies(timeout_ms));
+	if (!rc || (bnx2i_ep->state == EP_STATE_OFLD_FAILED))
+		return -1;	/* timeout or offload failure */
+	if (rc > 0)
+		return 1;	/* connection established */
+	return rc;		/* interrupted by a signal */
+}
+
+
+/**
+ * bnx2i_ep_tcp_conn_active - check if the underlying TCP connection is active
+ * @bnx2i_ep:	endpoint pointer
+ *
+ * checks the endpoint state to decide whether the TCP connection is active
+ */
+static int bnx2i_ep_tcp_conn_active(struct bnx2i_endpoint *bnx2i_ep)
+{
+	int ret;
+	int cnic_dev_10g = 0;
+
+	if (test_bit(BNX2I_NX2_DEV_57710, &bnx2i_ep->hba->cnic_dev_type))
+		cnic_dev_10g = 1;
+
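+	/* a failed connect on a 10G device is still reported as active
+	 * (EP_STATE_CONNECT_FAILED case below) so that the disconnect
+	 * path issues cm_abort() before the context is destroyed */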
+	switch (bnx2i_ep->state) {
+	case EP_STATE_CONNECT_START:
+	case EP_STATE_CLEANUP_FAILED:
+	case EP_STATE_OFLD_FAILED:
+	case EP_STATE_DISCONN_TIMEDOUT:
+		ret = 0;
+		break;
+	case EP_STATE_CONNECT_COMPL:
+	case EP_STATE_ULP_UPDATE_START:
+	case EP_STATE_ULP_UPDATE_COMPL:
+	case EP_STATE_TCP_FIN_RCVD:
+	case EP_STATE_ULP_UPDATE_FAILED:
+		ret = 1;
+		break;
+	case EP_STATE_TCP_RST_RCVD:
+		ret = 0;
+		break;
+	case EP_STATE_CONNECT_FAILED:
+		if (cnic_dev_10g)
+			ret = 1;
+		else
+			ret = 0;
+		break;
+	default:
+		ret = 0;
+	}
+
+	return ret;
+}
+
+
+/**
+ * bnx2i_ep_disconnect - executes TCP connection teardown process
+ * @ep:		TCP connection (endpoint) handle
+ *
+ * executes the TCP connection teardown process
+ */
+static void bnx2i_ep_disconnect(struct iscsi_endpoint *ep)
+{
+	struct bnx2i_endpoint *bnx2i_ep;
+	struct bnx2i_conn *bnx2i_conn = NULL;
+	struct iscsi_session *session = NULL;
+	struct iscsi_conn *conn;
+	struct cnic_dev *cnic;
+	struct bnx2i_hba *hba;
+
+	bnx2i_ep = ep->dd_data;
+
+	/* driver should not attempt connection cleanup until TCP_CONNECT
+	 * completes either successfully or fails. The connect timeout is
+	 * 9 secs, so wait up to 12 secs for it to complete.
+	 */
+	while ((bnx2i_ep->state == EP_STATE_CONNECT_START) &&
+		!time_after(jiffies, bnx2i_ep->timestamp + (12 * HZ)))
+		msleep(250);
+
+	if (bnx2i_ep->conn) {
+		bnx2i_conn = bnx2i_ep->conn;
+		conn = bnx2i_conn->cls_conn->dd_data;
+		session = conn->session;
+
+		spin_lock_bh(&session->lock);
+		bnx2i_conn->is_bound = 0;
+		spin_unlock_bh(&session->lock);
+	}
+
+	hba = bnx2i_ep->hba;
+	if (bnx2i_ep->state == EP_STATE_IDLE)
+		goto return_bnx2i_ep;
+	cnic = hba->cnic;
+
+	mutex_lock(&hba->net_dev_lock);
+
+	if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state))
+		goto free_resc;
+	if (bnx2i_ep->hba_age != hba->age)
+		goto free_resc;
+
+	if (!bnx2i_ep_tcp_conn_active(bnx2i_ep))
+		goto destroy_conn;
+
+	bnx2i_ep->state = EP_STATE_DISCONN_START;
+
+	init_timer(&bnx2i_ep->ofld_timer);
+	bnx2i_ep->ofld_timer.expires = 10 * HZ + jiffies;
+	bnx2i_ep->ofld_timer.function = bnx2i_ep_ofld_timer;
+	bnx2i_ep->ofld_timer.data = (unsigned long) bnx2i_ep;
+	add_timer(&bnx2i_ep->ofld_timer);
+
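+	/* close gracefully if the session is logging out, otherwise
+	 * abort the offloaded connection */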
+	if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
+		int close = 0;
+
+		if (session) {
+			spin_lock_bh(&session->lock);
+			if (session->state == ISCSI_STATE_LOGGING_OUT)
+				close = 1;
+			spin_unlock_bh(&session->lock);
+		}
+		if (close)
+			cnic->cm_close(bnx2i_ep->cm_sk);
+		else
+			cnic->cm_abort(bnx2i_ep->cm_sk);
+	} else
+		goto free_resc;
+
+	/* wait for option-2 conn teardown */
+	wait_event_interruptible(bnx2i_ep->ofld_wait,
+				 bnx2i_ep->state != EP_STATE_DISCONN_START);
+
+	if (signal_pending(current))
+		flush_signals(current);
+	del_timer_sync(&bnx2i_ep->ofld_timer);
+
+destroy_conn:
+	if (bnx2i_tear_down_conn(hba, bnx2i_ep)) {
+		mutex_unlock(&hba->net_dev_lock);
+		return;
+	}
+free_resc:
+	mutex_unlock(&hba->net_dev_lock);
+	bnx2i_free_qp_resc(hba, bnx2i_ep);
+return_bnx2i_ep:
+	if (bnx2i_conn)
+		bnx2i_conn->ep = NULL;
+
+	bnx2i_free_ep(ep);
+
+	if (!hba->ofld_conns_active)
+		bnx2i_unreg_dev_all();
+}
+
+
+/**
+ * bnx2i_nl_set_path - ISCSI_UEVENT_PATH_UPDATE user message handler
+ * @shost:	scsi host pointer
+ * @params:	pointer to buffer containing the iscsi path message
+ *
+ */
+static int bnx2i_nl_set_path(struct Scsi_Host *shost, struct iscsi_path *params)
+{
+	struct bnx2i_hba *hba = iscsi_host_priv(shost);
+	char *buf = (char *) params;
+	u16 len = sizeof(*params);
+
+	/* handled by cnic driver */
+	hba->cnic->iscsi_nl_msg_recv(hba->cnic, ISCSI_UEVENT_PATH_UPDATE, buf,
+				     len);
+
+	return 0;
+}
+
+
+/*
+ * 'scsi_host_template' and 'iscsi_transport' structure templates used
+ * while registering with the SCSI host and iSCSI transport modules.
+ */
+static struct scsi_host_template bnx2i_host_template = {
+	.module			= THIS_MODULE,
+	.name			= "Broadcom Offload iSCSI Initiator",
+	.proc_name		= "bnx2i",
+	.queuecommand		= iscsi_queuecommand,
+	.eh_abort_handler	= iscsi_eh_abort,
+	.eh_device_reset_handler = iscsi_eh_device_reset,
+	.eh_target_reset_handler = iscsi_eh_target_reset,
+	.can_queue		= 1024,
+	.max_sectors		= 127,
+	.cmd_per_lun		= 32,
+	.this_id		= -1,
+	.use_clustering		= ENABLE_CLUSTERING,
+	.sg_tablesize		= ISCSI_MAX_BDS_PER_CMD,
+	.shost_attrs		= bnx2i_dev_attributes,
+};
+
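+/*
+ * iSCSI transport entry points: the data path is offloaded to the chip
+ * (CAP_DATA_PATH_OFFLOAD) while generic session and connection management
+ * is delegated to the libiscsi helpers referenced below.
+ */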
+struct iscsi_transport bnx2i_iscsi_transport = {
+	.owner			= THIS_MODULE,
+	.name			= "bnx2i",
+	.caps			= CAP_RECOVERY_L0 | CAP_HDRDGST |
+				  CAP_MULTI_R2T | CAP_DATADGST |
+				  CAP_DATA_PATH_OFFLOAD,
+	.param_mask		= ISCSI_MAX_RECV_DLENGTH |
+				  ISCSI_MAX_XMIT_DLENGTH |
+				  ISCSI_HDRDGST_EN |
+				  ISCSI_DATADGST_EN |
+				  ISCSI_INITIAL_R2T_EN |
+				  ISCSI_MAX_R2T |
+				  ISCSI_IMM_DATA_EN |
+				  ISCSI_FIRST_BURST |
+				  ISCSI_MAX_BURST |
+				  ISCSI_PDU_INORDER_EN |
+				  ISCSI_DATASEQ_INORDER_EN |
+				  ISCSI_ERL |
+				  ISCSI_CONN_PORT |
+				  ISCSI_CONN_ADDRESS |
+				  ISCSI_EXP_STATSN |
+				  ISCSI_PERSISTENT_PORT |
+				  ISCSI_PERSISTENT_ADDRESS |
+				  ISCSI_TARGET_NAME | ISCSI_TPGT |
+				  ISCSI_USERNAME | ISCSI_PASSWORD |
+				  ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
+				  ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
+				  ISCSI_LU_RESET_TMO |
+				  ISCSI_PING_TMO | ISCSI_RECV_TMO |
+				  ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
+	.host_param_mask	= ISCSI_HOST_HWADDRESS | ISCSI_HOST_NETDEV_NAME,
+	.create_session		= bnx2i_session_create,
+	.destroy_session	= bnx2i_session_destroy,
+	.create_conn		= bnx2i_conn_create,
+	.bind_conn		= bnx2i_conn_bind,
+	.destroy_conn		= bnx2i_conn_destroy,
+	.set_param		= iscsi_set_param,
+	.get_conn_param		= bnx2i_conn_get_param,
+	.get_session_param	= iscsi_session_get_param,
+	.get_host_param		= bnx2i_host_get_param,
+	.start_conn		= bnx2i_conn_start,
+	.stop_conn		= iscsi_conn_stop,
+	.send_pdu		= iscsi_conn_send_pdu,
+	.xmit_task		= bnx2i_task_xmit,
+	.get_stats		= bnx2i_conn_get_stats,
+	/* TCP connect - disconnect - option-2 interface calls */
+	.ep_connect		= bnx2i_ep_connect,
+	.ep_poll		= bnx2i_ep_poll,
+	.ep_disconnect		= bnx2i_ep_disconnect,
+	.set_path		= bnx2i_nl_set_path,
+	/* Error recovery timeout call */
+	.session_recovery_timedout = iscsi_session_recovery_timedout,
+	.cleanup_task		= bnx2i_cleanup_task,
+};
diff --git a/drivers/scsi/bnx2i/bnx2i_sysfs.c b/drivers/scsi/bnx2i/bnx2i_sysfs.c
new file mode 100644
index 00000000000..96426b751eb
--- /dev/null
+++ b/drivers/scsi/bnx2i/bnx2i_sysfs.c
@@ -0,0 +1,142 @@
+/* bnx2i_sysfs.c: Broadcom NetXtreme II iSCSI driver.
+ *
+ * Copyright (c) 2004 - 2009 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
+ */
+
+#include "bnx2i.h"
+
+/**
+ * bnx2i_dev_to_hba - maps dev pointer to adapter struct
+ * @dev:	device pointer
+ *
+ * Map device to hba structure
+ */
+static inline struct bnx2i_hba *bnx2i_dev_to_hba(struct device *dev)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	return iscsi_host_priv(shost);
+}
+
+
+/**
+ * bnx2i_show_sq_info - returns the currently configured send queue (SQ) size
+ * @dev:	device pointer
+ * @buf:	buffer to return current SQ size parameter
+ *
+ * Returns the current SQ size parameter; this parameter determines the
+ * number of outstanding iSCSI commands supported on a connection
+ */
+static ssize_t bnx2i_show_sq_info(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	struct bnx2i_hba *hba = bnx2i_dev_to_hba(dev);
+
+	return sprintf(buf, "0x%x\n", hba->max_sqes);
+}
+
+
+/**
+ * bnx2i_set_sq_info - update send queue (SQ) size parameter
+ * @dev:	device pointer
+ * @buf:	buffer containing the new SQ size
+ * @count:	size of the input buffer
+ *
+ * Interface for the user to change the send queue (SQ) size allocated for
+ * each connection. The value must be within SQ limits and a power of 2; the
+ * latter is required because of how libiscsi preallocates tasks.
+ */
+static ssize_t bnx2i_set_sq_info(struct device *dev,
+				 struct device_attribute *attr,
+				 const char *buf, size_t count)
+{
+	struct bnx2i_hba *hba = bnx2i_dev_to_hba(dev);
+	u32 val;
+	int max_sq_size;
+
+	if (hba->ofld_conns_active)
+		goto skip_config;
+
+	if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type))
+		max_sq_size = BNX2I_5770X_SQ_WQES_MAX;
+	else
+		max_sq_size = BNX2I_570X_SQ_WQES_MAX;
+
+	if (sscanf(buf, " 0x%x ", &val) > 0) {
+		if ((val >= BNX2I_SQ_WQES_MIN) && (val <= max_sq_size) &&
+		    (is_power_of_2(val)))
+			hba->max_sqes = val;
+	}
+
+	return count;
+
+skip_config:
+	printk(KERN_ERR "bnx2i: device busy, cannot change SQ size\n");
+	return -EBUSY;
+}
+
+
+/**
+ * bnx2i_show_ccell_info - returns command cell (HQ) size
+ * @dev:	device pointer
+ * @buf:	buffer to return the current CCELL (HQ) size parameter
+ *
+ * returns per-connection TCP history queue size parameter
+ */
+static ssize_t bnx2i_show_ccell_info(struct device *dev,
+				     struct device_attribute *attr, char *buf)
+{
+	struct bnx2i_hba *hba = bnx2i_dev_to_hba(dev);
+
+	return sprintf(buf, "0x%x\n", hba->num_ccell);
+}
+
+
+/**
+ * bnx2i_set_ccell_info - set command cell (HQ) size
+ * @dev:	device pointer
+ * @buf:	buffer containing the new CCELL size
+ * @count:	size of the input buffer
+ *
+ * updates per-connection TCP history queue size parameter
+ */
+static ssize_t bnx2i_set_ccell_info(struct device *dev,
+				    struct device_attribute *attr,
+				    const char *buf, size_t count)
+{
+	u32 val;
+	struct bnx2i_hba *hba = bnx2i_dev_to_hba(dev);
+
+	if (hba->ofld_conns_active)
+		goto skip_config;
+
+	if (sscanf(buf, " 0x%x ", &val) > 0) {
+		if ((val >= BNX2I_CCELLS_MIN) &&
+		    (val <= BNX2I_CCELLS_MAX)) {
+			hba->num_ccell = val;
+		}
+	}
+
+	return count;
+
+skip_config:
+	printk(KERN_ERR "bnx2i: device busy, cannot change CCELL size\n");
+	return -EBUSY;
+}
+
+
+static DEVICE_ATTR(sq_size, S_IRUGO | S_IWUSR,
+		   bnx2i_show_sq_info, bnx2i_set_sq_info);
+static DEVICE_ATTR(num_ccell, S_IRUGO | S_IWUSR,
+		   bnx2i_show_ccell_info, bnx2i_set_ccell_info);
+
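+/*
+ * These attributes are exported through bnx2i_host_template.shost_attrs and
+ * appear under the scsi_host class directory, e.g. (host number and value
+ * are illustrative):
+ *
+ *	cat /sys/class/scsi_host/host7/sq_size
+ *	echo 0x200 > /sys/class/scsi_host/host7/sq_size
+ *
+ * Writes are rejected while offloaded connections are active.
+ */
+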
+struct device_attribute *bnx2i_dev_attributes[] = {
+	&dev_attr_sq_size,
+	&dev_attr_num_ccell,
+	NULL
+};
-- 
cgit v1.2.3-70-g09d2