Diffstat (limited to 'drivers/scsi/lpfc')
-rw-r--r--  drivers/scsi/lpfc/lpfc.h             43
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c       312
-rw-r--r--  drivers/scsi/lpfc/lpfc_bsg.c       2111
-rw-r--r--  drivers/scsi/lpfc/lpfc_bsg.h         87
-rw-r--r--  drivers/scsi/lpfc/lpfc_crtn.h        10
-rw-r--r--  drivers/scsi/lpfc/lpfc_ct.c           2
-rw-r--r--  drivers/scsi/lpfc/lpfc_debugfs.c     10
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c         28
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c     54
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw.h           8
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw4.h        501
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c       545
-rw-r--r--  drivers/scsi/lpfc/lpfc_mbox.c       166
-rw-r--r--  drivers/scsi/lpfc/lpfc_mem.c         13
-rw-r--r--  drivers/scsi/lpfc/lpfc_nportdisc.c   28
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c        56
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c       1659
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.h          1
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli4.h        33
-rw-r--r--  drivers/scsi/lpfc/lpfc_vport.c        2
20 files changed, 4905 insertions, 764 deletions
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 02d53d89534..8ec2c86a49d 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -41,6 +41,7 @@ struct lpfc_sli2_slim;
downloads using bsg */
#define LPFC_DEFAULT_PROT_SG_SEG_CNT 4096 /* sg protection elements count */
#define LPFC_MAX_SG_SEG_CNT 4096 /* sg element count per scsi cmnd */
+#define LPFC_MAX_SGE_SIZE 0x80000000 /* Maximum data allowed in a SGE */
#define LPFC_MAX_PROT_SG_SEG_CNT 4096 /* prot sg element count per scsi cmd*/
#define LPFC_IOCB_LIST_CNT 2250 /* list of IOCBs for fast-path usage. */
#define LPFC_Q_RAMP_UP_INTERVAL 120 /* lun q_depth ramp up interval */
@@ -486,6 +487,42 @@ struct unsol_rcv_ct_ctx {
(1 << LPFC_USER_LINK_SPEED_AUTO))
#define LPFC_LINK_SPEED_STRING "0, 1, 2, 4, 8, 10, 16"
+enum nemb_type {
+ nemb_mse = 1,
+ nemb_hbd
+};
+
+enum mbox_type {
+ mbox_rd = 1,
+ mbox_wr
+};
+
+enum dma_type {
+ dma_mbox = 1,
+ dma_ebuf
+};
+
+enum sta_type {
+ sta_pre_addr = 1,
+ sta_pos_addr
+};
+
+struct lpfc_mbox_ext_buf_ctx {
+ uint32_t state;
+#define LPFC_BSG_MBOX_IDLE 0
+#define LPFC_BSG_MBOX_HOST 1
+#define LPFC_BSG_MBOX_PORT 2
+#define LPFC_BSG_MBOX_DONE 3
+#define LPFC_BSG_MBOX_ABTS 4
+ enum nemb_type nembType;
+ enum mbox_type mboxType;
+ uint32_t numBuf;
+ uint32_t mbxTag;
+ uint32_t seqNum;
+ struct lpfc_dmabuf *mbx_dmabuf;
+ struct list_head ext_dmabuf_list;
+};
+
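The new lpfc_mbox_ext_buf_ctx above embeds a list_head, which explains why the session-reset path later in this patch pairs memset() with INIT_LIST_HEAD(). A minimal sketch of that rule (names are illustrative, not from the patch): zeroing a structure that embeds a list head leaves the list pointers NULL, so the head must be re-initialized before reuse.

#include <linux/list.h>
#include <linux/string.h>
#include <linux/types.h>

struct demo_ctx {
	uint32_t state;			/* e.g. LPFC_BSG_MBOX_IDLE */
	struct list_head buf_list;	/* embedded list head */
};

static void demo_ctx_reset(struct demo_ctx *ctx)
{
	memset(ctx, 0, sizeof(*ctx));	/* clears state and list pointers */
	INIT_LIST_HEAD(&ctx->buf_list);	/* make the list head valid again */
}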
struct lpfc_hba {
/* SCSI interface function jump table entries */
int (*lpfc_new_scsi_buf)
@@ -589,6 +626,7 @@ struct lpfc_hba {
MAILBOX_t *mbox;
uint32_t *mbox_ext;
+ struct lpfc_mbox_ext_buf_ctx mbox_ext_buf_ctx;
uint32_t ha_copy;
struct _PCB *pcb;
struct _IOCB *IOCBs;
@@ -659,6 +697,7 @@ struct lpfc_hba {
uint32_t cfg_hostmem_hgp;
uint32_t cfg_log_verbose;
uint32_t cfg_aer_support;
+ uint32_t cfg_sriov_nr_virtfn;
uint32_t cfg_iocb_cnt;
uint32_t cfg_suppress_link_up;
#define LPFC_INITIALIZE_LINK 0 /* do normal init_link mbox */
@@ -706,7 +745,6 @@ struct lpfc_hba {
uint32_t *hbq_get; /* Host mem address of HBQ get ptrs */
int brd_no; /* FC board number */
-
char SerialNumber[32]; /* adapter Serial Number */
char OptionROMVersion[32]; /* adapter BIOS / Fcode version */
char ModelDesc[256]; /* Model Description */
@@ -778,6 +816,9 @@ struct lpfc_hba {
uint16_t vpi_base;
uint16_t vfi_base;
unsigned long *vpi_bmask; /* vpi allocation table */
+ uint16_t *vpi_ids;
+ uint16_t vpi_count;
+ struct list_head lpfc_vpi_blk_list;
/* Data structure used by fabric iocb scheduler */
struct list_head fabric_iocb_list;
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 8dcbf8fff67..135a53baa73 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -755,6 +755,73 @@ lpfc_issue_reset(struct device *dev, struct device_attribute *attr,
}
/**
+ * lpfc_sli4_pdev_reg_request - Request physical dev to perform a register access
+ * @phba: lpfc_hba pointer.
+ * @opcode: register access opcode: LPFC_FW_DUMP, LPFC_FW_RESET or LPFC_DV_RESET.
+ *
+ * Description:
+ * Request SLI4 interface type-2 device to perform a physical register set
+ * access.
+ *
+ * Returns:
+ * zero for success
+ **/
+static ssize_t
+lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)
+{
+ struct completion online_compl;
+ uint32_t reg_val;
+ int status = 0;
+ int rc;
+
+ if (!phba->cfg_enable_hba_reset)
+ return -EIO;
+
+ if ((phba->sli_rev < LPFC_SLI_REV4) ||
+ (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+ LPFC_SLI_INTF_IF_TYPE_2))
+ return -EPERM;
+
+ status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
+
+ if (status != 0)
+ return status;
+
+ /* wait for the device to be quiesced before firmware reset */
+ msleep(100);
+
+ reg_val = readl(phba->sli4_hba.conf_regs_memmap_p +
+ LPFC_CTL_PDEV_CTL_OFFSET);
+
+ if (opcode == LPFC_FW_DUMP)
+ reg_val |= LPFC_FW_DUMP_REQUEST;
+ else if (opcode == LPFC_FW_RESET)
+ reg_val |= LPFC_CTL_PDEV_CTL_FRST;
+ else if (opcode == LPFC_DV_RESET)
+ reg_val |= LPFC_CTL_PDEV_CTL_DRST;
+
+ writel(reg_val, phba->sli4_hba.conf_regs_memmap_p +
+ LPFC_CTL_PDEV_CTL_OFFSET);
+ /* flush */
+ readl(phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);
+
+ /* delay driver action following IF_TYPE_2 reset */
+ msleep(100);
+
+ init_completion(&online_compl);
+ rc = lpfc_workq_post_event(phba, &status, &online_compl,
+ LPFC_EVT_ONLINE);
+ if (rc == 0)
+ return -ENOMEM;
+
+ wait_for_completion(&online_compl);
+
+ if (status != 0)
+ return -EIO;
+
+ return 0;
+}
+
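The function above uses a common pattern for memory-mapped control registers: read-modify-write, followed by a read-back so the posted PCI write is flushed to the device before delaying. A minimal sketch of that pattern, with illustrative names:

#include <linux/io.h>
#include <linux/delay.h>

static void demo_request_reset(void __iomem *ctl_reg, u32 reset_bit)
{
	u32 val;

	val = readl(ctl_reg);		/* current control bits */
	val |= reset_bit;		/* request the reset */
	writel(val, ctl_reg);
	readl(ctl_reg);			/* flush the posted write */
	msleep(100);			/* give the device time to act */
}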
+/**
* lpfc_nport_evt_cnt_show - Return the number of nport events
* @dev: class device that is converted into a Scsi_host.
* @attr: device attribute, not used.
@@ -848,6 +915,12 @@ lpfc_board_mode_store(struct device *dev, struct device_attribute *attr,
return -EINVAL;
else
status = lpfc_do_offline(phba, LPFC_EVT_KILL);
+ else if (strncmp(buf, "dump", sizeof("dump") - 1) == 0)
+ status = lpfc_sli4_pdev_reg_request(phba, LPFC_FW_DUMP);
+ else if (strncmp(buf, "fw_reset", sizeof("fw_reset") - 1) == 0)
+ status = lpfc_sli4_pdev_reg_request(phba, LPFC_FW_RESET);
+ else if (strncmp(buf, "dv_reset", sizeof("dv_reset") - 1) == 0)
+ status = lpfc_sli4_pdev_reg_request(phba, LPFC_DV_RESET);
else
return -EINVAL;
@@ -1322,6 +1395,102 @@ lpfc_dss_show(struct device *dev, struct device_attribute *attr,
}
/**
+ * lpfc_sriov_hw_max_virtfn_show - Return maximum number of virtual functions
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the formatted support level.
+ *
+ * Description:
+ * Returns the maximum number of virtual functions a physical function can
+ * support; 0 will be returned if called on a virtual function.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_sriov_hw_max_virtfn_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ struct pci_dev *pdev = phba->pcidev;
+ union lpfc_sli4_cfg_shdr *shdr;
+ uint32_t shdr_status, shdr_add_status;
+ LPFC_MBOXQ_t *mboxq;
+ struct lpfc_mbx_get_prof_cfg *get_prof_cfg;
+ struct lpfc_rsrc_desc_pcie *desc;
+ uint32_t max_nr_virtfn;
+ uint32_t desc_count;
+ int length, rc, i;
+
+ if ((phba->sli_rev < LPFC_SLI_REV4) ||
+ (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+ LPFC_SLI_INTF_IF_TYPE_2))
+ return -EPERM;
+
+ if (!pdev->is_physfn)
+ return snprintf(buf, PAGE_SIZE, "%d\n", 0);
+
+ mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mboxq)
+ return -ENOMEM;
+
+	/* get the maximum number of virtfn supported by the physfn */
+ length = (sizeof(struct lpfc_mbx_get_prof_cfg) -
+ sizeof(struct lpfc_sli4_cfg_mhdr));
+ lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
+ LPFC_MBOX_OPCODE_GET_PROFILE_CONFIG,
+ length, LPFC_SLI4_MBX_EMBED);
+ shdr = (union lpfc_sli4_cfg_shdr *)
+ &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
+ bf_set(lpfc_mbox_hdr_pf_num, &shdr->request,
+ phba->sli4_hba.iov.pf_number + 1);
+
+ get_prof_cfg = &mboxq->u.mqe.un.get_prof_cfg;
+ bf_set(lpfc_mbx_get_prof_cfg_prof_tp, &get_prof_cfg->u.request,
+ LPFC_CFG_TYPE_CURRENT_ACTIVE);
+
+ rc = lpfc_sli_issue_mbox_wait(phba, mboxq,
+ lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG));
+
+ if (rc != MBX_TIMEOUT) {
+ /* check return status */
+ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
+ &shdr->response);
+ if (shdr_status || shdr_add_status || rc)
+ goto error_out;
+
+ } else
+ goto error_out;
+
+ desc_count = get_prof_cfg->u.response.prof_cfg.rsrc_desc_count;
+
+ for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) {
+ desc = (struct lpfc_rsrc_desc_pcie *)
+ &get_prof_cfg->u.response.prof_cfg.desc[i];
+ if (LPFC_RSRC_DESC_TYPE_PCIE ==
+ bf_get(lpfc_rsrc_desc_pcie_type, desc)) {
+ max_nr_virtfn = bf_get(lpfc_rsrc_desc_pcie_nr_virtfn,
+ desc);
+ break;
+ }
+ }
+
+ if (i < LPFC_RSRC_DESC_MAX_NUM) {
+ if (rc != MBX_TIMEOUT)
+ mempool_free(mboxq, phba->mbox_mem_pool);
+ return snprintf(buf, PAGE_SIZE, "%d\n", max_nr_virtfn);
+ }
+
+error_out:
+ if (rc != MBX_TIMEOUT)
+ mempool_free(mboxq, phba->mbox_mem_pool);
+ return -EIO;
+}
+
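Note the mailbox ownership rule the function above follows: the caller frees the mempool-allocated mailbox only when the command did not time out; on MBX_TIMEOUT the SLI layer keeps ownership and frees the mailbox when the late completion arrives. A hedged sketch of that rule (demo_issue() stands in for lpfc_sli_issue_mbox_wait(), DEMO_MBX_TIMEOUT for lpfc's MBX_TIMEOUT):

#include <linux/errno.h>
#include <linux/mempool.h>

#define DEMO_MBX_TIMEOUT 2	/* placeholder for lpfc's MBX_TIMEOUT */

static int demo_mbox_roundtrip(mempool_t *pool, int (*demo_issue)(void *))
{
	void *mbox = mempool_alloc(pool, GFP_KERNEL);
	int rc;

	if (!mbox)
		return -ENOMEM;
	rc = demo_issue(mbox);
	if (rc != DEMO_MBX_TIMEOUT)	/* on timeout the SLI layer frees it */
		mempool_free(mbox, pool);
	return rc;
}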
+/**
* lpfc_param_show - Return a cfg attribute value in decimal
*
* Description:
@@ -1762,6 +1931,8 @@ static DEVICE_ATTR(lpfc_temp_sensor, S_IRUGO, lpfc_temp_sensor_show, NULL);
static DEVICE_ATTR(lpfc_fips_level, S_IRUGO, lpfc_fips_level_show, NULL);
static DEVICE_ATTR(lpfc_fips_rev, S_IRUGO, lpfc_fips_rev_show, NULL);
static DEVICE_ATTR(lpfc_dss, S_IRUGO, lpfc_dss_show, NULL);
+static DEVICE_ATTR(lpfc_sriov_hw_max_virtfn, S_IRUGO,
+ lpfc_sriov_hw_max_virtfn_show, NULL);
static char *lpfc_soft_wwn_key = "C99G71SL8032A";
@@ -3014,7 +3185,7 @@ static DEVICE_ATTR(lpfc_link_speed, S_IRUGO | S_IWUSR,
*
* @dev: class device that is converted into a Scsi_host.
* @attr: device attribute, not used.
- * @buf: containing the string "selective".
+ * @buf: containing enable or disable aer flag.
* @count: unused variable.
*
* Description:
@@ -3098,7 +3269,7 @@ lpfc_param_show(aer_support)
/**
* lpfc_aer_support_init - Set the initial adapters aer support flag
* @phba: lpfc_hba pointer.
- * @val: link speed value.
+ * @val: enable aer or disable aer flag.
*
* Description:
* If val is in a valid range [0,1], then set the adapter's initial
@@ -3137,7 +3308,7 @@ static DEVICE_ATTR(lpfc_aer_support, S_IRUGO | S_IWUSR,
* lpfc_aer_cleanup_state - Clean up aer state to the aer enabled device
* @dev: class device that is converted into a Scsi_host.
* @attr: device attribute, not used.
- * @buf: containing the string "selective".
+ * @buf: containing flag 1 for aer cleanup state.
* @count: unused variable.
*
* Description:
@@ -3180,6 +3351,136 @@ lpfc_aer_cleanup_state(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR(lpfc_aer_state_cleanup, S_IWUSR, NULL,
lpfc_aer_cleanup_state);
+/**
+ * lpfc_sriov_nr_virtfn_store - Enable the adapter for sr-iov virtual functions
+ *
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: string containing the number of vfs to be enabled.
+ * @count: unused variable.
+ *
+ * Description:
+ * When this api is called through user sysfs, the driver shall
+ * try to enable or disable SR-IOV virtual functions according to the
+ * following:
+ *
+ * If no virtual functions have been enabled on the physical function,
+ * the driver shall invoke the pci enable virtual function api trying
+ * to enable the virtual functions. If the nr_vfn provided is greater
+ * than the maximum supported, the maximum virtual function number will
+ * be used for invoking the api; otherwise, the nr_vfn provided shall
+ * be used for invoking the api. If the api call returned success, the
+ * actual number of virtual functions enabled will be set to the driver
+ * cfg_sriov_nr_virtfn; otherwise, -EPERM shall be returned and driver
+ * cfg_sriov_nr_virtfn remains zero.
+ *
+ * If a non-zero number of virtual functions have already been enabled
+ * on the physical function, as reflected by the driver's
+ * cfg_sriov_nr_virtfn, -EEXIST will be returned and the driver does
+ * nothing;
+ *
+ * If the nr_vfn provided is zero and non-zero virtual functions have
+ * been enabled, as indicated by the driver's cfg_sriov_nr_virtfn, the
+ * disabling virtual function api shall be invoked to disable all the
+ * virtual functions and the driver's cfg_sriov_nr_virtfn shall be set
+ * to zero. Otherwise, if no virtual functions have been enabled, do
+ * nothing.
+ *
+ * Returns:
+ * length of the buf on success if val is in range and the intended mode
+ * is supported.
+ * -EINVAL if val is out of range or the intended mode is not supported.
+ **/
+static ssize_t
+lpfc_sriov_nr_virtfn_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ struct pci_dev *pdev = phba->pcidev;
+ int val = 0, rc = -EINVAL;
+
+ /* Sanity check on user data */
+ if (!isdigit(buf[0]))
+ return -EINVAL;
+ if (sscanf(buf, "%i", &val) != 1)
+ return -EINVAL;
+ if (val < 0)
+ return -EINVAL;
+
+ /* Request disabling virtual functions */
+ if (val == 0) {
+ if (phba->cfg_sriov_nr_virtfn > 0) {
+ pci_disable_sriov(pdev);
+ phba->cfg_sriov_nr_virtfn = 0;
+ }
+ return strlen(buf);
+ }
+
+ /* Request enabling virtual functions */
+ if (phba->cfg_sriov_nr_virtfn > 0) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3018 There are %d virtual functions "
+ "enabled on physical function.\n",
+ phba->cfg_sriov_nr_virtfn);
+ return -EEXIST;
+ }
+
+ if (val <= LPFC_MAX_VFN_PER_PFN)
+ phba->cfg_sriov_nr_virtfn = val;
+ else {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3019 Enabling %d virtual functions is not "
+ "allowed.\n", val);
+ return -EINVAL;
+ }
+
+ rc = lpfc_sli_probe_sriov_nr_virtfn(phba, phba->cfg_sriov_nr_virtfn);
+ if (rc) {
+ phba->cfg_sriov_nr_virtfn = 0;
+ rc = -EPERM;
+ } else
+ rc = strlen(buf);
+
+ return rc;
+}
+
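The store routine's input handling above follows the usual sysfs pattern: validate the leading character, parse with sscanf(), range-check, and report the whole buffer as consumed on success. A minimal sketch with illustrative names:

#include <linux/ctype.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>

static ssize_t demo_store_uint(const char *buf, int max, int *out)
{
	int val;

	if (!isdigit(buf[0]))
		return -EINVAL;
	if (sscanf(buf, "%i", &val) != 1)
		return -EINVAL;
	if (val < 0 || val > max)
		return -EINVAL;
	*out = val;
	return strlen(buf);	/* tell sysfs the write was consumed */
}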
+static int lpfc_sriov_nr_virtfn = LPFC_DEF_VFN_PER_PFN;
+module_param(lpfc_sriov_nr_virtfn, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(lpfc_sriov_nr_virtfn, "Enable PCIe device SR-IOV virtual fn");
+lpfc_param_show(sriov_nr_virtfn)
+
+/**
+ * lpfc_sriov_nr_virtfn_init - Set the initial sr-iov virtual function enable
+ * @phba: lpfc_hba pointer.
+ * @val: number of sr-iov virtual functions to be enabled.
+ *
+ * Description:
+ * If val is in a valid range [0,255], then set the adapter's initial
+ * cfg_sriov_nr_virtfn field. If it's greater than the maximum, the maximum
+ * number shall be used instead. It will be up to the driver's probe_one
+ * routine to determine whether the device's SR-IOV is supported or not.
+ *
+ * Returns:
+ * zero if val saved.
+ * -EINVAL val out of range
+ **/
+static int
+lpfc_sriov_nr_virtfn_init(struct lpfc_hba *phba, int val)
+{
+ if (val >= 0 && val <= LPFC_MAX_VFN_PER_PFN) {
+ phba->cfg_sriov_nr_virtfn = val;
+ return 0;
+ }
+
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3017 Enabling %d virtual functions is not "
+ "allowed.\n", val);
+ return -EINVAL;
+}
+static DEVICE_ATTR(lpfc_sriov_nr_virtfn, S_IRUGO | S_IWUSR,
+ lpfc_sriov_nr_virtfn_show, lpfc_sriov_nr_virtfn_store);
+
/*
# lpfc_fcp_class: Determines FC class to use for the FCP protocol.
# Value range is [2,3]. Default value is 3.
@@ -3497,6 +3798,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_lpfc_prot_sg_seg_cnt,
&dev_attr_lpfc_aer_support,
&dev_attr_lpfc_aer_state_cleanup,
+ &dev_attr_lpfc_sriov_nr_virtfn,
&dev_attr_lpfc_suppress_link_up,
&dev_attr_lpfc_iocb_cnt,
&dev_attr_iocb_hw,
@@ -3505,6 +3807,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_lpfc_fips_level,
&dev_attr_lpfc_fips_rev,
&dev_attr_lpfc_dss,
+ &dev_attr_lpfc_sriov_hw_max_virtfn,
NULL,
};
@@ -3961,7 +4264,7 @@ static struct bin_attribute sysfs_mbox_attr = {
.name = "mbox",
.mode = S_IRUSR | S_IWUSR,
},
- .size = MAILBOX_CMD_SIZE,
+ .size = MAILBOX_SYSFS_MAX,
.read = sysfs_mbox_read,
.write = sysfs_mbox_write,
};
@@ -4705,6 +5008,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth);
lpfc_hba_log_verbose_init(phba, lpfc_log_verbose);
lpfc_aer_support_init(phba, lpfc_aer_support);
+ lpfc_sriov_nr_virtfn_init(phba, lpfc_sriov_nr_virtfn);
lpfc_suppress_link_up_init(phba, lpfc_suppress_link_up);
lpfc_iocb_cnt_init(phba, lpfc_iocb_cnt);
phba->cfg_enable_dss = 1;
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 853e5042f39..7fb0ba4cbfa 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -23,6 +23,7 @@
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/delay.h>
+#include <linux/list.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
@@ -79,8 +80,7 @@ struct lpfc_bsg_iocb {
struct lpfc_bsg_mbox {
LPFC_MBOXQ_t *pmboxq;
MAILBOX_t *mb;
- struct lpfc_dmabuf *rxbmp; /* for BIU diags */
- struct lpfc_dmabufext *dmp; /* for BIU diags */
+ struct lpfc_dmabuf *dmabuffers; /* for BIU diags */
uint8_t *ext; /* extended mailbox data */
uint32_t mbOffset; /* from app */
uint32_t inExtWLen; /* from app */
@@ -332,6 +332,8 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
cmd->ulpLe = 1;
cmd->ulpClass = CLASS3;
cmd->ulpContext = ndlp->nlp_rpi;
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ cmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
cmd->ulpOwner = OWN_CHIP;
cmdiocbq->vport = phba->pport;
cmdiocbq->context3 = bmp;
@@ -1336,6 +1338,10 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
}
icmd->un.ulpWord[3] = ndlp->nlp_rpi;
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ icmd->ulpContext =
+ phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
+
/* The exchange is done, mark the entry as invalid */
phba->ct_ctx[tag].flags &= ~UNSOL_VALID;
} else
@@ -1463,11 +1469,91 @@ send_mgmt_rsp_exit:
}
/**
- * lpfc_bsg_diag_mode - process a LPFC_BSG_VENDOR_DIAG_MODE bsg vendor command
+ * lpfc_bsg_diag_mode_enter - process preparing into device diag loopback mode
+ * @phba: Pointer to HBA context object.
* @job: LPFC_BSG_VENDOR_DIAG_MODE
*
- * This function is responsible for placing a port into diagnostic loopback
- * mode in order to perform a diagnostic loopback test.
+ * This function is responsible for preparing the driver for diag loopback
+ * on the device.
+ */
+static int
+lpfc_bsg_diag_mode_enter(struct lpfc_hba *phba, struct fc_bsg_job *job)
+{
+ struct lpfc_vport **vports;
+ struct Scsi_Host *shost;
+ struct lpfc_sli *psli;
+ struct lpfc_sli_ring *pring;
+ int i = 0;
+
+ psli = &phba->sli;
+ if (!psli)
+ return -ENODEV;
+
+ pring = &psli->ring[LPFC_FCP_RING];
+ if (!pring)
+ return -ENODEV;
+
+ if ((phba->link_state == LPFC_HBA_ERROR) ||
+ (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
+ (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
+ return -EACCES;
+
+ vports = lpfc_create_vport_work_array(phba);
+ if (vports) {
+ for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
+ shost = lpfc_shost_from_vport(vports[i]);
+ scsi_block_requests(shost);
+ }
+ lpfc_destroy_vport_work_array(phba, vports);
+ } else {
+ shost = lpfc_shost_from_vport(phba->pport);
+ scsi_block_requests(shost);
+ }
+
+ while (pring->txcmplq_cnt) {
+ if (i++ > 500) /* wait up to 5 seconds */
+ break;
+ msleep(10);
+ }
+ return 0;
+}
+
+/**
+ * lpfc_bsg_diag_mode_exit - exit process from device diag loopback mode
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is responsible for the driver exit processing from diag
+ * loopback mode on the device.
+ */
+static void
+lpfc_bsg_diag_mode_exit(struct lpfc_hba *phba)
+{
+ struct Scsi_Host *shost;
+ struct lpfc_vport **vports;
+ int i;
+
+ vports = lpfc_create_vport_work_array(phba);
+ if (vports) {
+ for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
+ shost = lpfc_shost_from_vport(vports[i]);
+ scsi_unblock_requests(shost);
+ }
+ lpfc_destroy_vport_work_array(phba, vports);
+ } else {
+ shost = lpfc_shost_from_vport(phba->pport);
+ scsi_unblock_requests(shost);
+ }
+ return;
+}
+
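The enter/exit helpers above bracket the diagnostic window with scsi_block_requests()/scsi_unblock_requests() on every vport's Scsi_Host. A hedged sketch of that pairing for a single host (demo_pending() is a hypothetical stand-in for the txcmplq_cnt check):

#include <scsi/scsi_host.h>
#include <linux/delay.h>

static void demo_quiesce(struct Scsi_Host *shost, int (*demo_pending)(void))
{
	int i = 0;

	scsi_block_requests(shost);	/* stop new requests from the midlayer */
	while (demo_pending() && i++ < 500)
		msleep(10);		/* wait up to ~5s for in-flight I/O */
	/* ... perform the diagnostic action here ... */
	scsi_unblock_requests(shost);	/* resume normal operation */
}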
+/**
+ * lpfc_sli3_bsg_diag_loopback_mode - process an sli3 bsg vendor command
+ * @phba: Pointer to HBA context object.
+ * @job: LPFC_BSG_VENDOR_DIAG_MODE
+ *
+ * This function is responsible for placing an sli3 port into diagnostic
+ * loopback mode in order to perform a diagnostic loopback test.
* All new scsi requests are blocked, a small delay is used to allow the
* scsi requests to complete then the link is brought down. If the link
* is placed in loopback mode then scsi requests are again allowed
@@ -1475,17 +1561,11 @@ send_mgmt_rsp_exit:
* All of this is done in-line.
*/
static int
-lpfc_bsg_diag_mode(struct fc_bsg_job *job)
+lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
{
- struct Scsi_Host *shost = job->shost;
- struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
- struct lpfc_hba *phba = vport->phba;
struct diag_mode_set *loopback_mode;
- struct lpfc_sli *psli = &phba->sli;
- struct lpfc_sli_ring *pring = &psli->ring[LPFC_FCP_RING];
uint32_t link_flags;
uint32_t timeout;
- struct lpfc_vport **vports;
LPFC_MBOXQ_t *pmboxq;
int mbxstatus;
int i = 0;
@@ -1494,53 +1574,33 @@ lpfc_bsg_diag_mode(struct fc_bsg_job *job)
/* no data to return just the return code */
job->reply->reply_payload_rcv_len = 0;
- if (job->request_len <
- sizeof(struct fc_bsg_request) + sizeof(struct diag_mode_set)) {
+ if (job->request_len < sizeof(struct fc_bsg_request) +
+ sizeof(struct diag_mode_set)) {
lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
- "2738 Received DIAG MODE request below minimum "
- "size\n");
+ "2738 Received DIAG MODE request size:%d "
+ "below the minimum size:%d\n",
+ job->request_len,
+ (int)(sizeof(struct fc_bsg_request) +
+ sizeof(struct diag_mode_set)));
rc = -EINVAL;
goto job_error;
}
+ rc = lpfc_bsg_diag_mode_enter(phba, job);
+ if (rc)
+ goto job_error;
+
+ /* bring the link to diagnostic mode */
loopback_mode = (struct diag_mode_set *)
job->request->rqst_data.h_vendor.vendor_cmd;
link_flags = loopback_mode->type;
timeout = loopback_mode->timeout * 100;
- if ((phba->link_state == LPFC_HBA_ERROR) ||
- (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
- (!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
- rc = -EACCES;
- goto job_error;
- }
-
pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!pmboxq) {
rc = -ENOMEM;
- goto job_error;
- }
-
- vports = lpfc_create_vport_work_array(phba);
- if (vports) {
- for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
- shost = lpfc_shost_from_vport(vports[i]);
- scsi_block_requests(shost);
- }
-
- lpfc_destroy_vport_work_array(phba, vports);
- } else {
- shost = lpfc_shost_from_vport(phba->pport);
- scsi_block_requests(shost);
+ goto loopback_mode_exit;
}
-
- while (pring->txcmplq_cnt) {
- if (i++ > 500) /* wait up to 5 seconds */
- break;
-
- msleep(10);
- }
-
memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK;
pmboxq->u.mb.mbxOwner = OWN_HOST;
@@ -1594,22 +1654,186 @@ lpfc_bsg_diag_mode(struct fc_bsg_job *job)
rc = -ENODEV;
loopback_mode_exit:
- vports = lpfc_create_vport_work_array(phba);
- if (vports) {
- for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
- shost = lpfc_shost_from_vport(vports[i]);
- scsi_unblock_requests(shost);
+ lpfc_bsg_diag_mode_exit(phba);
+
+ /*
+ * Let SLI layer release mboxq if mbox command completed after timeout.
+ */
+ if (mbxstatus != MBX_TIMEOUT)
+ mempool_free(pmboxq, phba->mbox_mem_pool);
+
+job_error:
+ /* make error code available to userspace */
+ job->reply->result = rc;
+ /* complete the job back to userspace if no error */
+ if (rc == 0)
+ job->job_done(job);
+ return rc;
+}
+
+/**
+ * lpfc_sli4_bsg_set_link_diag_state - set sli4 link diag state
+ * @phba: Pointer to HBA context object.
+ * @diag: Flag to set link to diag or normal operation state.
+ *
+ * This function is responsible for issuing a sli4 mailbox command for setting
+ * link to either diag state or normal operation state.
+ */
+static int
+lpfc_sli4_bsg_set_link_diag_state(struct lpfc_hba *phba, uint32_t diag)
+{
+ LPFC_MBOXQ_t *pmboxq;
+ struct lpfc_mbx_set_link_diag_state *link_diag_state;
+ uint32_t req_len, alloc_len;
+ int mbxstatus = MBX_SUCCESS, rc;
+
+ pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!pmboxq)
+ return -ENOMEM;
+
+ req_len = (sizeof(struct lpfc_mbx_set_link_diag_state) -
+ sizeof(struct lpfc_sli4_cfg_mhdr));
+ alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
+ LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE,
+ req_len, LPFC_SLI4_MBX_EMBED);
+ if (alloc_len != req_len) {
+ rc = -ENOMEM;
+ goto link_diag_state_set_out;
+ }
+ link_diag_state = &pmboxq->u.mqe.un.link_diag_state;
+ bf_set(lpfc_mbx_set_diag_state_link_num, &link_diag_state->u.req,
+ phba->sli4_hba.link_state.number);
+ bf_set(lpfc_mbx_set_diag_state_link_type, &link_diag_state->u.req,
+ phba->sli4_hba.link_state.type);
+ if (diag)
+ bf_set(lpfc_mbx_set_diag_state_diag,
+ &link_diag_state->u.req, 1);
+ else
+ bf_set(lpfc_mbx_set_diag_state_diag,
+ &link_diag_state->u.req, 0);
+
+ mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
+
+ if ((mbxstatus == MBX_SUCCESS) && (pmboxq->u.mb.mbxStatus == 0))
+ rc = 0;
+ else
+ rc = -ENODEV;
+
+link_diag_state_set_out:
+ if (pmboxq && (mbxstatus != MBX_TIMEOUT))
+ mempool_free(pmboxq, phba->mbox_mem_pool);
+
+ return rc;
+}
+
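The mailbox setup above relies on lpfc's bf_set()/bf_get() accessors, which address each named bit-field through a word, shift and mask triple. A minimal sketch of that convention (the field and struct below are illustrative, not lpfc's):

#include <linux/types.h>

#define demo_diag_SHIFT		0
#define demo_diag_MASK		0x00000001
#define demo_diag_WORD		word0

#define demo_bf_set(name, ptr, val) \
	((ptr)->name##_WORD = (((val) & name##_MASK) << name##_SHIFT) | \
	 ((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT)))

#define demo_bf_get(name, ptr) \
	(((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)

struct demo_req {
	uint32_t word0;
};

/* usage: demo_bf_set(demo_diag, &req, 1); val = demo_bf_get(demo_diag, &req); */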
+/**
+ * lpfc_sli4_bsg_diag_loopback_mode - process an sli4 bsg vendor command
+ * @phba: Pointer to HBA context object.
+ * @job: LPFC_BSG_VENDOR_DIAG_MODE
+ *
+ * This function is responsible for placing an sli4 port into diagnostic
+ * loopback mode in order to perform a diagnostic loopback test.
+ */
+static int
+lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
+{
+ struct diag_mode_set *loopback_mode;
+ uint32_t link_flags, timeout, req_len, alloc_len;
+ struct lpfc_mbx_set_link_diag_loopback *link_diag_loopback;
+ LPFC_MBOXQ_t *pmboxq = NULL;
+ int mbxstatus, i, rc = 0;
+
+ /* no data to return just the return code */
+ job->reply->reply_payload_rcv_len = 0;
+
+ if (job->request_len < sizeof(struct fc_bsg_request) +
+ sizeof(struct diag_mode_set)) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+ "3011 Received DIAG MODE request size:%d "
+ "below the minimum size:%d\n",
+ job->request_len,
+ (int)(sizeof(struct fc_bsg_request) +
+ sizeof(struct diag_mode_set)));
+ rc = -EINVAL;
+ goto job_error;
+ }
+
+ rc = lpfc_bsg_diag_mode_enter(phba, job);
+ if (rc)
+ goto job_error;
+
+ /* bring the link to diagnostic mode */
+ loopback_mode = (struct diag_mode_set *)
+ job->request->rqst_data.h_vendor.vendor_cmd;
+ link_flags = loopback_mode->type;
+ timeout = loopback_mode->timeout * 100;
+
+ rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1);
+ if (rc)
+ goto loopback_mode_exit;
+
+ /* wait for link down before proceeding */
+ i = 0;
+ while (phba->link_state != LPFC_LINK_DOWN) {
+ if (i++ > timeout) {
+ rc = -ETIMEDOUT;
+ goto loopback_mode_exit;
+ }
+ msleep(10);
+ }
+ /* set up loopback mode */
+ pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!pmboxq) {
+ rc = -ENOMEM;
+ goto loopback_mode_exit;
+ }
+ req_len = (sizeof(struct lpfc_mbx_set_link_diag_loopback) -
+ sizeof(struct lpfc_sli4_cfg_mhdr));
+ alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
+ LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_LOOPBACK,
+ req_len, LPFC_SLI4_MBX_EMBED);
+ if (alloc_len != req_len) {
+ rc = -ENOMEM;
+ goto loopback_mode_exit;
+ }
+ link_diag_loopback = &pmboxq->u.mqe.un.link_diag_loopback;
+ bf_set(lpfc_mbx_set_diag_state_link_num,
+ &link_diag_loopback->u.req, phba->sli4_hba.link_state.number);
+ bf_set(lpfc_mbx_set_diag_state_link_type,
+ &link_diag_loopback->u.req, phba->sli4_hba.link_state.type);
+ if (link_flags == INTERNAL_LOOP_BACK)
+ bf_set(lpfc_mbx_set_diag_lpbk_type,
+ &link_diag_loopback->u.req,
+ LPFC_DIAG_LOOPBACK_TYPE_INTERNAL);
+ else
+ bf_set(lpfc_mbx_set_diag_lpbk_type,
+ &link_diag_loopback->u.req,
+ LPFC_DIAG_LOOPBACK_TYPE_EXTERNAL);
+
+ mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
+ if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus))
+ rc = -ENODEV;
+ else {
+ phba->link_flag |= LS_LOOPBACK_MODE;
+ /* wait for the link attention interrupt */
+ msleep(100);
+ i = 0;
+ while (phba->link_state != LPFC_HBA_READY) {
+ if (i++ > timeout) {
+ rc = -ETIMEDOUT;
+ break;
+ }
+ msleep(10);
}
- lpfc_destroy_vport_work_array(phba, vports);
- } else {
- shost = lpfc_shost_from_vport(phba->pport);
- scsi_unblock_requests(shost);
}
+loopback_mode_exit:
+ lpfc_bsg_diag_mode_exit(phba);
+
/*
* Let SLI layer release mboxq if mbox command completed after timeout.
*/
- if (mbxstatus != MBX_TIMEOUT)
+ if (pmboxq && (mbxstatus != MBX_TIMEOUT))
mempool_free(pmboxq, phba->mbox_mem_pool);
job_error:
@@ -1622,6 +1846,234 @@ job_error:
}
/**
+ * lpfc_bsg_diag_loopback_mode - bsg vendor command for diag loopback mode
+ * @job: LPFC_BSG_VENDOR_DIAG_MODE
+ *
+ * This function is responsible for checking and dispatching the bsg diag
+ * command from the user to the proper driver action routine.
+ */
+static int
+lpfc_bsg_diag_loopback_mode(struct fc_bsg_job *job)
+{
+ struct Scsi_Host *shost;
+ struct lpfc_vport *vport;
+ struct lpfc_hba *phba;
+ int rc;
+
+ shost = job->shost;
+ if (!shost)
+ return -ENODEV;
+ vport = (struct lpfc_vport *)job->shost->hostdata;
+ if (!vport)
+ return -ENODEV;
+ phba = vport->phba;
+ if (!phba)
+ return -ENODEV;
+
+ if (phba->sli_rev < LPFC_SLI_REV4)
+ rc = lpfc_sli3_bsg_diag_loopback_mode(phba, job);
+ else if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
+ LPFC_SLI_INTF_IF_TYPE_2)
+ rc = lpfc_sli4_bsg_diag_loopback_mode(phba, job);
+ else
+ rc = -ENODEV;
+
+ return rc;
+}
+
+/**
+ * lpfc_sli4_bsg_diag_mode_end - sli4 bsg vendor command for ending diag mode
+ * @job: LPFC_BSG_VENDOR_DIAG_MODE_END
+ *
+ * This function is responsible for checking and dispatching the bsg diag
+ * mode end command from the user to the proper driver action routine.
+ */
+static int
+lpfc_sli4_bsg_diag_mode_end(struct fc_bsg_job *job)
+{
+ struct Scsi_Host *shost;
+ struct lpfc_vport *vport;
+ struct lpfc_hba *phba;
+ int rc;
+
+ shost = job->shost;
+ if (!shost)
+ return -ENODEV;
+ vport = (struct lpfc_vport *)job->shost->hostdata;
+ if (!vport)
+ return -ENODEV;
+ phba = vport->phba;
+ if (!phba)
+ return -ENODEV;
+
+ if (phba->sli_rev < LPFC_SLI_REV4)
+ return -ENODEV;
+ if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+ LPFC_SLI_INTF_IF_TYPE_2)
+ return -ENODEV;
+
+ rc = lpfc_sli4_bsg_set_link_diag_state(phba, 0);
+
+ if (!rc)
+ rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
+
+ return rc;
+}
+
+/**
+ * lpfc_sli4_bsg_link_diag_test - sli4 bsg vendor command for diag link test
+ * @job: LPFC_BSG_VENDOR_DIAG_LINK_TEST
+ *
+ * This function performs the SLI4 diag link test request from the user
+ * application.
+ */
+static int
+lpfc_sli4_bsg_link_diag_test(struct fc_bsg_job *job)
+{
+ struct Scsi_Host *shost;
+ struct lpfc_vport *vport;
+ struct lpfc_hba *phba;
+ LPFC_MBOXQ_t *pmboxq;
+ struct sli4_link_diag *link_diag_test_cmd;
+ uint32_t req_len, alloc_len;
+ uint32_t timeout;
+ struct lpfc_mbx_run_link_diag_test *run_link_diag_test;
+ union lpfc_sli4_cfg_shdr *shdr;
+ uint32_t shdr_status, shdr_add_status;
+ struct diag_status *diag_status_reply;
+ int mbxstatus, rc = 0;
+
+ shost = job->shost;
+ if (!shost) {
+ rc = -ENODEV;
+ goto job_error;
+ }
+ vport = (struct lpfc_vport *)job->shost->hostdata;
+ if (!vport) {
+ rc = -ENODEV;
+ goto job_error;
+ }
+ phba = vport->phba;
+ if (!phba) {
+ rc = -ENODEV;
+ goto job_error;
+ }
+
+ if (phba->sli_rev < LPFC_SLI_REV4) {
+ rc = -ENODEV;
+ goto job_error;
+ }
+ if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+ LPFC_SLI_INTF_IF_TYPE_2) {
+ rc = -ENODEV;
+ goto job_error;
+ }
+
+ if (job->request_len < sizeof(struct fc_bsg_request) +
+ sizeof(struct sli4_link_diag)) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+ "3013 Received LINK DIAG TEST request "
+ " size:%d below the minimum size:%d\n",
+ job->request_len,
+ (int)(sizeof(struct fc_bsg_request) +
+ sizeof(struct sli4_link_diag)));
+ rc = -EINVAL;
+ goto job_error;
+ }
+
+ rc = lpfc_bsg_diag_mode_enter(phba, job);
+ if (rc)
+ goto job_error;
+
+ link_diag_test_cmd = (struct sli4_link_diag *)
+ job->request->rqst_data.h_vendor.vendor_cmd;
+ timeout = link_diag_test_cmd->timeout * 100;
+
+ rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1);
+
+ if (rc)
+ goto job_error;
+
+ pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!pmboxq) {
+ rc = -ENOMEM;
+ goto link_diag_test_exit;
+ }
+
+ req_len = (sizeof(struct lpfc_mbx_set_link_diag_state) -
+ sizeof(struct lpfc_sli4_cfg_mhdr));
+ alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
+ LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE,
+ req_len, LPFC_SLI4_MBX_EMBED);
+ if (alloc_len != req_len) {
+ rc = -ENOMEM;
+ goto link_diag_test_exit;
+ }
+ run_link_diag_test = &pmboxq->u.mqe.un.link_diag_test;
+ bf_set(lpfc_mbx_run_diag_test_link_num, &run_link_diag_test->u.req,
+ phba->sli4_hba.link_state.number);
+ bf_set(lpfc_mbx_run_diag_test_link_type, &run_link_diag_test->u.req,
+ phba->sli4_hba.link_state.type);
+ bf_set(lpfc_mbx_run_diag_test_test_id, &run_link_diag_test->u.req,
+ link_diag_test_cmd->test_id);
+ bf_set(lpfc_mbx_run_diag_test_loops, &run_link_diag_test->u.req,
+ link_diag_test_cmd->loops);
+ bf_set(lpfc_mbx_run_diag_test_test_ver, &run_link_diag_test->u.req,
+ link_diag_test_cmd->test_version);
+ bf_set(lpfc_mbx_run_diag_test_err_act, &run_link_diag_test->u.req,
+ link_diag_test_cmd->error_action);
+
+ mbxstatus = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
+
+ shdr = (union lpfc_sli4_cfg_shdr *)
+ &pmboxq->u.mqe.un.sli4_config.header.cfg_shdr;
+ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+ if (shdr_status || shdr_add_status || mbxstatus) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "3010 Run link diag test mailbox failed with "
+ "mbx_status x%x status x%x, add_status x%x\n",
+ mbxstatus, shdr_status, shdr_add_status);
+ }
+
+ diag_status_reply = (struct diag_status *)
+ job->reply->reply_data.vendor_reply.vendor_rsp;
+
+ if (job->reply_len <
+ sizeof(struct fc_bsg_request) + sizeof(struct diag_status)) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+ "3012 Received Run link diag test reply "
+ "below minimum size (%d): reply_len:%d\n",
+ (int)(sizeof(struct fc_bsg_request) +
+ sizeof(struct diag_status)),
+ job->reply_len);
+ rc = -EINVAL;
+ goto job_error;
+ }
+
+ diag_status_reply->mbox_status = mbxstatus;
+ diag_status_reply->shdr_status = shdr_status;
+ diag_status_reply->shdr_add_status = shdr_add_status;
+
+link_diag_test_exit:
+ rc = lpfc_sli4_bsg_set_link_diag_state(phba, 0);
+
+ if (pmboxq)
+ mempool_free(pmboxq, phba->mbox_mem_pool);
+
+ lpfc_bsg_diag_mode_exit(phba);
+
+job_error:
+ /* make error code available to userspace */
+ job->reply->result = rc;
+ /* complete the job back to userspace if no error */
+ if (rc == 0)
+ job->job_done(job);
+ return rc;
+}
+
+/**
* lpfcdiag_loop_self_reg - obtains a remote port login id
* @phba: Pointer to HBA context object
* @rpi: Pointer to a remote port login id
@@ -1851,6 +2303,86 @@ err_get_xri_exit:
}
/**
+ * lpfc_bsg_dma_page_alloc - allocate a bsg mbox page sized dma buffers
+ * @phba: Pointer to HBA context object
+ *
+ * This function allocates BSG_MBOX_SIZE (4KB) page size dma buffer and.
+ * retruns the pointer to the buffer.
+ **/
+static struct lpfc_dmabuf *
+lpfc_bsg_dma_page_alloc(struct lpfc_hba *phba)
+{
+ struct lpfc_dmabuf *dmabuf;
+ struct pci_dev *pcidev = phba->pcidev;
+
+ /* allocate dma buffer struct */
+ dmabuf = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+ if (!dmabuf)
+ return NULL;
+
+ INIT_LIST_HEAD(&dmabuf->list);
+
+ /* now, allocate dma buffer */
+ dmabuf->virt = dma_alloc_coherent(&pcidev->dev, BSG_MBOX_SIZE,
+ &(dmabuf->phys), GFP_KERNEL);
+
+ if (!dmabuf->virt) {
+ kfree(dmabuf);
+ return NULL;
+ }
+ memset((uint8_t *)dmabuf->virt, 0, BSG_MBOX_SIZE);
+
+ return dmabuf;
+}
+
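A hedged sketch of the coherent DMA pattern used above: allocate a buffer the CPU and device share, keep both the kernel virtual address and the bus address, and later free it with the same size/handle pair. As in the function above, the buffer is zeroed explicitly rather than assuming the allocator returns zeroed memory (names below are illustrative):

#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/string.h>

struct demo_dmabuf {
	void *virt;		/* CPU address */
	dma_addr_t phys;	/* bus address handed to the device */
};

static struct demo_dmabuf *demo_dma_alloc(struct device *dev, size_t size)
{
	struct demo_dmabuf *buf = kzalloc(sizeof(*buf), GFP_KERNEL);

	if (!buf)
		return NULL;
	buf->virt = dma_alloc_coherent(dev, size, &buf->phys, GFP_KERNEL);
	if (!buf->virt) {
		kfree(buf);
		return NULL;
	}
	memset(buf->virt, 0, size);	/* don't assume zeroed memory */
	return buf;
}

static void demo_dma_free(struct device *dev, struct demo_dmabuf *buf,
			  size_t size)
{
	if (!buf)
		return;
	if (buf->virt)
		dma_free_coherent(dev, size, buf->virt, buf->phys);
	kfree(buf);
}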
+/**
+ * lpfc_bsg_dma_page_free - free a bsg mbox page sized dma buffer
+ * @phba: Pointer to HBA context object.
+ * @dmabuf: Pointer to the bsg mbox page sized dma buffer descriptor.
+ *
+ * This routine simply frees a dma buffer and its associated buffer
+ * descriptor referred to by @dmabuf.
+ **/
+static void
+lpfc_bsg_dma_page_free(struct lpfc_hba *phba, struct lpfc_dmabuf *dmabuf)
+{
+ struct pci_dev *pcidev = phba->pcidev;
+
+ if (!dmabuf)
+ return;
+
+ if (dmabuf->virt)
+ dma_free_coherent(&pcidev->dev, BSG_MBOX_SIZE,
+ dmabuf->virt, dmabuf->phys);
+ kfree(dmabuf);
+ return;
+}
+
+/**
+ * lpfc_bsg_dma_page_list_free - free a list of bsg mbox page sized dma buffers
+ * @phba: Pointer to HBA context object.
+ * @dmabuf_list: Pointer to a list of bsg mbox page sized dma buffer descs.
+ *
+ * This routine simply frees all dma buffers and their associated buffer
+ * descriptors referred to by @dmabuf_list.
+ **/
+static void
+lpfc_bsg_dma_page_list_free(struct lpfc_hba *phba,
+ struct list_head *dmabuf_list)
+{
+ struct lpfc_dmabuf *dmabuf, *next_dmabuf;
+
+ if (list_empty(dmabuf_list))
+ return;
+
+ list_for_each_entry_safe(dmabuf, next_dmabuf, dmabuf_list, list) {
+ list_del_init(&dmabuf->list);
+ lpfc_bsg_dma_page_free(phba, dmabuf);
+ }
+ return;
+}
+
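The list teardown above uses the _safe iterator so entries can be unlinked and freed during the walk; the plain list_for_each_entry() would dereference freed memory when advancing. A minimal sketch of the idiom:

#include <linux/list.h>
#include <linux/slab.h>

struct demo_node {
	struct list_head list;
};

static void demo_free_all(struct list_head *head)
{
	struct demo_node *node, *next;

	list_for_each_entry_safe(node, next, head, list) {
		list_del_init(&node->list);	/* unlink before freeing */
		kfree(node);
	}
}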
+/**
* diag_cmd_data_alloc - fills in a bde struct with dma buffers
* @phba: Pointer to HBA context object
* @bpl: Pointer to 64 bit bde structure
@@ -2067,7 +2599,7 @@ err_post_rxbufs_exit:
}
/**
- * lpfc_bsg_diag_test - with a port in loopback issues a Ct cmd to itself
+ * lpfc_bsg_diag_loopback_run - run loopback on a port by issuing ct cmd to itself
* @job: LPFC_BSG_VENDOR_DIAG_TEST fc_bsg_job
*
* This function receives a user data buffer to be transmitted and received on
@@ -2086,7 +2618,7 @@ err_post_rxbufs_exit:
* of loopback mode.
**/
static int
-lpfc_bsg_diag_test(struct fc_bsg_job *job)
+lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
{
struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
struct lpfc_hba *phba = vport->phba;
@@ -2411,7 +2943,7 @@ job_error:
}
/**
- * lpfc_bsg_wake_mbox_wait - lpfc_bsg_issue_mbox mbox completion handler
+ * lpfc_bsg_issue_mbox_cmpl - lpfc_bsg_issue_mbox mbox completion handler
* @phba: Pointer to HBA context object.
* @pmboxq: Pointer to mailbox command.
*
@@ -2422,15 +2954,13 @@ job_error:
* of the mailbox.
**/
void
-lpfc_bsg_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
+lpfc_bsg_issue_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
struct bsg_job_data *dd_data;
struct fc_bsg_job *job;
- struct lpfc_mbx_nembed_cmd *nembed_sge;
uint32_t size;
unsigned long flags;
- uint8_t *to;
- uint8_t *from;
+ uint8_t *pmb, *pmb_buf;
spin_lock_irqsave(&phba->ct_ev_lock, flags);
dd_data = pmboxq->context1;
@@ -2440,62 +2970,21 @@ lpfc_bsg_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
return;
}
- /* build the outgoing buffer to do an sg copy
- * the format is the response mailbox followed by any extended
- * mailbox data
+ /*
+	 * The outgoing buffer is readily referenced from the dma buffer;
+	 * we just need to get the header part from the mailboxq structure.
*/
- from = (uint8_t *)&pmboxq->u.mb;
- to = (uint8_t *)dd_data->context_un.mbox.mb;
- memcpy(to, from, sizeof(MAILBOX_t));
- if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS) {
- /* copy the extended data if any, count is in words */
- if (dd_data->context_un.mbox.outExtWLen) {
- from = (uint8_t *)dd_data->context_un.mbox.ext;
- to += sizeof(MAILBOX_t);
- size = dd_data->context_un.mbox.outExtWLen *
- sizeof(uint32_t);
- memcpy(to, from, size);
- } else if (pmboxq->u.mb.mbxCommand == MBX_RUN_BIU_DIAG64) {
- from = (uint8_t *)dd_data->context_un.mbox.
- dmp->dma.virt;
- to += sizeof(MAILBOX_t);
- size = dd_data->context_un.mbox.dmp->size;
- memcpy(to, from, size);
- } else if ((phba->sli_rev == LPFC_SLI_REV4) &&
- (pmboxq->u.mb.mbxCommand == MBX_DUMP_MEMORY)) {
- from = (uint8_t *)dd_data->context_un.mbox.dmp->dma.
- virt;
- to += sizeof(MAILBOX_t);
- size = pmboxq->u.mb.un.varWords[5];
- memcpy(to, from, size);
- } else if ((phba->sli_rev == LPFC_SLI_REV4) &&
- (pmboxq->u.mb.mbxCommand == MBX_SLI4_CONFIG)) {
- nembed_sge = (struct lpfc_mbx_nembed_cmd *)
- &pmboxq->u.mb.un.varWords[0];
-
- from = (uint8_t *)dd_data->context_un.mbox.dmp->dma.
- virt;
- to += sizeof(MAILBOX_t);
- size = nembed_sge->sge[0].length;
- memcpy(to, from, size);
- } else if (pmboxq->u.mb.mbxCommand == MBX_READ_EVENT_LOG) {
- from = (uint8_t *)dd_data->context_un.
- mbox.dmp->dma.virt;
- to += sizeof(MAILBOX_t);
- size = dd_data->context_un.mbox.dmp->size;
- memcpy(to, from, size);
- }
- }
+ pmb = (uint8_t *)&pmboxq->u.mb;
+ pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb;
+ memcpy(pmb_buf, pmb, sizeof(MAILBOX_t));
- from = (uint8_t *)dd_data->context_un.mbox.mb;
job = dd_data->context_un.mbox.set_job;
if (job) {
size = job->reply_payload.payload_len;
job->reply->reply_payload_rcv_len =
sg_copy_from_buffer(job->reply_payload.sg_list,
- job->reply_payload.sg_cnt,
- from, size);
- job->reply->result = 0;
+ job->reply_payload.sg_cnt,
+ pmb_buf, size);
/* need to hold the lock until we set job->dd_data to NULL
* to hold off the timeout handler returning to the mid-layer
* while we are still processing the job.
@@ -2503,28 +2992,19 @@ lpfc_bsg_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
job->dd_data = NULL;
dd_data->context_un.mbox.set_job = NULL;
spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
- job->job_done(job);
} else {
dd_data->context_un.mbox.set_job = NULL;
spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
}
- kfree(dd_data->context_un.mbox.mb);
mempool_free(dd_data->context_un.mbox.pmboxq, phba->mbox_mem_pool);
- kfree(dd_data->context_un.mbox.ext);
- if (dd_data->context_un.mbox.dmp) {
- dma_free_coherent(&phba->pcidev->dev,
- dd_data->context_un.mbox.dmp->size,
- dd_data->context_un.mbox.dmp->dma.virt,
- dd_data->context_un.mbox.dmp->dma.phys);
- kfree(dd_data->context_un.mbox.dmp);
- }
- if (dd_data->context_un.mbox.rxbmp) {
- lpfc_mbuf_free(phba, dd_data->context_un.mbox.rxbmp->virt,
- dd_data->context_un.mbox.rxbmp->phys);
- kfree(dd_data->context_un.mbox.rxbmp);
- }
+ lpfc_bsg_dma_page_free(phba, dd_data->context_un.mbox.dmabuffers);
kfree(dd_data);
+
+ if (job) {
+ job->reply->result = 0;
+ job->job_done(job);
+ }
return;
}
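The completion handler above returns mailbox data to user space with sg_copy_from_buffer(), which scatters a linear kernel buffer into the bsg job's reply scatter-gather list. A minimal sketch of that call:

#include <linux/scatterlist.h>

static size_t demo_fill_reply(struct scatterlist *sgl, unsigned int nents,
			      void *linear, size_t len)
{
	/* returns the number of bytes actually copied into the sg list */
	return sg_copy_from_buffer(sgl, nents, linear, len);
}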
@@ -2619,6 +3099,1006 @@ static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba,
}
/**
+ * lpfc_bsg_mbox_ext_session_reset - clean up context of multi-buffer mbox session
+ * @phba: Pointer to HBA context object.
+ *
+ * This routine cleans up and resets the BSG handling of a multi-buffer mbox
+ * command session.
+ **/
+static void
+lpfc_bsg_mbox_ext_session_reset(struct lpfc_hba *phba)
+{
+ if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_IDLE)
+ return;
+
+ /* free all memory, including dma buffers */
+ lpfc_bsg_dma_page_list_free(phba,
+ &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
+ lpfc_bsg_dma_page_free(phba, phba->mbox_ext_buf_ctx.mbx_dmabuf);
+ /* multi-buffer write mailbox command pass-through complete */
+ memset((char *)&phba->mbox_ext_buf_ctx, 0,
+ sizeof(struct lpfc_mbox_ext_buf_ctx));
+ INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
+
+ return;
+}
+
+/**
+ * lpfc_bsg_issue_mbox_ext_handle_job - job handler for multi-buffer mbox cmpl
+ * @phba: Pointer to HBA context object.
+ * @pmboxq: Pointer to mailbox command.
+ *
+ * This routine handles the BSG job for mailbox command completions with
+ * multiple external buffers.
+ **/
+static struct fc_bsg_job *
+lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
+{
+ struct bsg_job_data *dd_data;
+ struct fc_bsg_job *job;
+ uint8_t *pmb, *pmb_buf;
+ unsigned long flags;
+ uint32_t size;
+ int rc = 0;
+
+ spin_lock_irqsave(&phba->ct_ev_lock, flags);
+ dd_data = pmboxq->context1;
+ /* has the job already timed out? */
+ if (!dd_data) {
+ spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+ job = NULL;
+ goto job_done_out;
+ }
+
+ /*
+	 * The outgoing buffer is readily referenced from the dma buffer;
+	 * we just need to get the header part from the mailboxq structure.
+ */
+ pmb = (uint8_t *)&pmboxq->u.mb;
+ pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb;
+ memcpy(pmb_buf, pmb, sizeof(MAILBOX_t));
+
+ job = dd_data->context_un.mbox.set_job;
+ if (job) {
+ size = job->reply_payload.payload_len;
+ job->reply->reply_payload_rcv_len =
+ sg_copy_from_buffer(job->reply_payload.sg_list,
+ job->reply_payload.sg_cnt,
+ pmb_buf, size);
+		/* result for success */
+ job->reply->result = 0;
+ job->dd_data = NULL;
+		/* need to hold the lock until we set job->dd_data to NULL
+		 * to hold off the timeout handler from the midlayer taking
+		 * any action.
+ */
+ spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2937 SLI_CONFIG ext-buffer maibox command "
+ "(x%x/x%x) complete bsg job done, bsize:%d\n",
+ phba->mbox_ext_buf_ctx.nembType,
+ phba->mbox_ext_buf_ctx.mboxType, size);
+ } else
+ spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+
+job_done_out:
+ if (!job)
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "2938 SLI_CONFIG ext-buffer maibox "
+ "command (x%x/x%x) failure, rc:x%x\n",
+ phba->mbox_ext_buf_ctx.nembType,
+ phba->mbox_ext_buf_ctx.mboxType, rc);
+ /* state change */
+ phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_DONE;
+ kfree(dd_data);
+
+ return job;
+}
+
+/**
+ * lpfc_bsg_issue_read_mbox_ext_cmpl - compl handler for multi-buffer read mbox
+ * @phba: Pointer to HBA context object.
+ * @pmboxq: Pointer to mailbox command.
+ *
+ * This is completion handler function for mailbox read commands with multiple
+ * external buffers.
+ **/
+static void
+lpfc_bsg_issue_read_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
+{
+ struct fc_bsg_job *job;
+
+ /* handle the BSG job with mailbox command */
+ if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_ABTS)
+ pmboxq->u.mb.mbxStatus = MBXERR_ERROR;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2939 SLI_CONFIG ext-buffer rd maibox command "
+ "complete, ctxState:x%x, mbxStatus:x%x\n",
+ phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus);
+
+ job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);
+
+ if (pmboxq->u.mb.mbxStatus || phba->mbox_ext_buf_ctx.numBuf == 1)
+ lpfc_bsg_mbox_ext_session_reset(phba);
+
+ /* free base driver mailbox structure memory */
+ mempool_free(pmboxq, phba->mbox_mem_pool);
+
+ /* complete the bsg job if we have it */
+ if (job)
+ job->job_done(job);
+
+ return;
+}
+
+/**
+ * lpfc_bsg_issue_write_mbox_ext_cmpl - cmpl handler for multi-buffer write mbox
+ * @phba: Pointer to HBA context object.
+ * @pmboxq: Pointer to mailbox command.
+ *
+ * This is completion handler function for mailbox write commands with multiple
+ * external buffers.
+ **/
+static void
+lpfc_bsg_issue_write_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
+{
+ struct fc_bsg_job *job;
+
+ /* handle the BSG job with the mailbox command */
+ if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_ABTS)
+ pmboxq->u.mb.mbxStatus = MBXERR_ERROR;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2940 SLI_CONFIG ext-buffer wr maibox command "
+ "complete, ctxState:x%x, mbxStatus:x%x\n",
+ phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus);
+
+ job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);
+
+ /* free all memory, including dma buffers */
+ mempool_free(pmboxq, phba->mbox_mem_pool);
+ lpfc_bsg_mbox_ext_session_reset(phba);
+
+ /* complete the bsg job if we have it */
+ if (job)
+ job->job_done(job);
+
+ return;
+}
+
+static void
+lpfc_bsg_sli_cfg_dma_desc_setup(struct lpfc_hba *phba, enum nemb_type nemb_tp,
+ uint32_t index, struct lpfc_dmabuf *mbx_dmabuf,
+ struct lpfc_dmabuf *ext_dmabuf)
+{
+ struct lpfc_sli_config_mbox *sli_cfg_mbx;
+
+ /* pointer to the start of mailbox command */
+ sli_cfg_mbx = (struct lpfc_sli_config_mbox *)mbx_dmabuf->virt;
+
+ if (nemb_tp == nemb_mse) {
+ if (index == 0) {
+ sli_cfg_mbx->un.sli_config_emb0_subsys.
+ mse[index].pa_hi =
+ putPaddrHigh(mbx_dmabuf->phys +
+ sizeof(MAILBOX_t));
+ sli_cfg_mbx->un.sli_config_emb0_subsys.
+ mse[index].pa_lo =
+ putPaddrLow(mbx_dmabuf->phys +
+ sizeof(MAILBOX_t));
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2943 SLI_CONFIG(mse)[%d], "
+ "bufLen:%d, addrHi:x%x, addrLo:x%x\n",
+ index,
+ sli_cfg_mbx->un.sli_config_emb0_subsys.
+ mse[index].buf_len,
+ sli_cfg_mbx->un.sli_config_emb0_subsys.
+ mse[index].pa_hi,
+ sli_cfg_mbx->un.sli_config_emb0_subsys.
+ mse[index].pa_lo);
+ } else {
+ sli_cfg_mbx->un.sli_config_emb0_subsys.
+ mse[index].pa_hi =
+ putPaddrHigh(ext_dmabuf->phys);
+ sli_cfg_mbx->un.sli_config_emb0_subsys.
+ mse[index].pa_lo =
+ putPaddrLow(ext_dmabuf->phys);
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2944 SLI_CONFIG(mse)[%d], "
+ "bufLen:%d, addrHi:x%x, addrLo:x%x\n",
+ index,
+ sli_cfg_mbx->un.sli_config_emb0_subsys.
+ mse[index].buf_len,
+ sli_cfg_mbx->un.sli_config_emb0_subsys.
+ mse[index].pa_hi,
+ sli_cfg_mbx->un.sli_config_emb0_subsys.
+ mse[index].pa_lo);
+ }
+ } else {
+ if (index == 0) {
+ sli_cfg_mbx->un.sli_config_emb1_subsys.
+ hbd[index].pa_hi =
+ putPaddrHigh(mbx_dmabuf->phys +
+ sizeof(MAILBOX_t));
+ sli_cfg_mbx->un.sli_config_emb1_subsys.
+ hbd[index].pa_lo =
+ putPaddrLow(mbx_dmabuf->phys +
+ sizeof(MAILBOX_t));
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "3007 SLI_CONFIG(hbd)[%d], "
+ "bufLen:%d, addrHi:x%x, addrLo:x%x\n",
+ index,
+ bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
+ &sli_cfg_mbx->un.
+ sli_config_emb1_subsys.hbd[index]),
+ sli_cfg_mbx->un.sli_config_emb1_subsys.
+ hbd[index].pa_hi,
+ sli_cfg_mbx->un.sli_config_emb1_subsys.
+ hbd[index].pa_lo);
+
+ } else {
+ sli_cfg_mbx->un.sli_config_emb1_subsys.
+ hbd[index].pa_hi =
+ putPaddrHigh(ext_dmabuf->phys);
+ sli_cfg_mbx->un.sli_config_emb1_subsys.
+ hbd[index].pa_lo =
+ putPaddrLow(ext_dmabuf->phys);
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "3008 SLI_CONFIG(hbd)[%d], "
+ "bufLen:%d, addrHi:x%x, addrLo:x%x\n",
+ index,
+ bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
+ &sli_cfg_mbx->un.
+ sli_config_emb1_subsys.hbd[index]),
+ sli_cfg_mbx->un.sli_config_emb1_subsys.
+ hbd[index].pa_hi,
+ sli_cfg_mbx->un.sli_config_emb1_subsys.
+ hbd[index].pa_lo);
+ }
+ }
+ return;
+}
+
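The descriptor setup above splits each 64-bit DMA address into the 32-bit high/low halves the SLI_CONFIG descriptors expect, via lpfc's putPaddrHigh()/putPaddrLow(). A minimal sketch of the same split using the generic kernel helpers:

#include <linux/kernel.h>
#include <linux/types.h>

static void demo_fill_desc(u32 *pa_hi, u32 *pa_lo, dma_addr_t addr)
{
	*pa_hi = upper_32_bits(addr);	/* bits 63:32 */
	*pa_lo = lower_32_bits(addr);	/* bits 31:0 */
}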
+/**
+ * lpfc_bsg_sli_cfg_read_cmd_ext - sli_config non-embedded mailbox cmd read
+ * @phba: Pointer to HBA context object.
+ * @job: Pointer to the fc_bsg_job object.
+ * @nemb_tp: Enumerate of non-embedded mailbox command type.
+ * @dmabuf: Pointer to a DMA buffer descriptor.
+ *
+ * This routine performs SLI_CONFIG (0x9B) read mailbox command operation with
+ * non-embedded external buffers.
+ **/
+static int
+lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
+ enum nemb_type nemb_tp,
+ struct lpfc_dmabuf *dmabuf)
+{
+ struct lpfc_sli_config_mbox *sli_cfg_mbx;
+ struct dfc_mbox_req *mbox_req;
+ struct lpfc_dmabuf *curr_dmabuf, *next_dmabuf;
+ uint32_t ext_buf_cnt, ext_buf_index;
+ struct lpfc_dmabuf *ext_dmabuf = NULL;
+ struct bsg_job_data *dd_data = NULL;
+ LPFC_MBOXQ_t *pmboxq = NULL;
+ MAILBOX_t *pmb;
+ uint8_t *pmbx;
+ int rc, i;
+
+ mbox_req =
+ (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
+
+ /* pointer to the start of mailbox command */
+ sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
+
+ if (nemb_tp == nemb_mse) {
+ ext_buf_cnt = bsg_bf_get(lpfc_mbox_hdr_mse_cnt,
+ &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr);
+ if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_MSE) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "2945 Handled SLI_CONFIG(mse) rd, "
+ "ext_buf_cnt(%d) out of range(%d)\n",
+ ext_buf_cnt,
+ LPFC_MBX_SLI_CONFIG_MAX_MSE);
+ rc = -ERANGE;
+ goto job_error;
+ }
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2941 Handled SLI_CONFIG(mse) rd, "
+ "ext_buf_cnt:%d\n", ext_buf_cnt);
+ } else {
+ /* sanity check on interface type for support */
+ if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+ LPFC_SLI_INTF_IF_TYPE_2) {
+ rc = -ENODEV;
+ goto job_error;
+ }
+ /* nemb_tp == nemb_hbd */
+ ext_buf_cnt = sli_cfg_mbx->un.sli_config_emb1_subsys.hbd_count;
+ if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_HBD) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "2946 Handled SLI_CONFIG(hbd) rd, "
+ "ext_buf_cnt(%d) out of range(%d)\n",
+ ext_buf_cnt,
+ LPFC_MBX_SLI_CONFIG_MAX_HBD);
+ rc = -ERANGE;
+ goto job_error;
+ }
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2942 Handled SLI_CONFIG(hbd) rd, "
+ "ext_buf_cnt:%d\n", ext_buf_cnt);
+ }
+
+	/* reject non-embedded mailbox command with no external buffer */
+ if (ext_buf_cnt == 0) {
+ rc = -EPERM;
+ goto job_error;
+ } else if (ext_buf_cnt > 1) {
+ /* additional external read buffers */
+ for (i = 1; i < ext_buf_cnt; i++) {
+ ext_dmabuf = lpfc_bsg_dma_page_alloc(phba);
+ if (!ext_dmabuf) {
+ rc = -ENOMEM;
+ goto job_error;
+ }
+ list_add_tail(&ext_dmabuf->list,
+ &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
+ }
+ }
+
+ /* bsg tracking structure */
+ dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
+ if (!dd_data) {
+ rc = -ENOMEM;
+ goto job_error;
+ }
+
+ /* mailbox command structure for base driver */
+ pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!pmboxq) {
+ rc = -ENOMEM;
+ goto job_error;
+ }
+ memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
+
+ /* for the first external buffer */
+ lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, 0, dmabuf, dmabuf);
+
+ /* for the rest of external buffer descriptors if any */
+ if (ext_buf_cnt > 1) {
+ ext_buf_index = 1;
+ list_for_each_entry_safe(curr_dmabuf, next_dmabuf,
+ &phba->mbox_ext_buf_ctx.ext_dmabuf_list, list) {
+ lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp,
+ ext_buf_index, dmabuf,
+ curr_dmabuf);
+ ext_buf_index++;
+ }
+ }
+
+ /* construct base driver mbox command */
+ pmb = &pmboxq->u.mb;
+ pmbx = (uint8_t *)dmabuf->virt;
+ memcpy(pmb, pmbx, sizeof(*pmb));
+ pmb->mbxOwner = OWN_HOST;
+ pmboxq->vport = phba->pport;
+
+ /* multi-buffer handling context */
+ phba->mbox_ext_buf_ctx.nembType = nemb_tp;
+ phba->mbox_ext_buf_ctx.mboxType = mbox_rd;
+ phba->mbox_ext_buf_ctx.numBuf = ext_buf_cnt;
+ phba->mbox_ext_buf_ctx.mbxTag = mbox_req->extMboxTag;
+ phba->mbox_ext_buf_ctx.seqNum = mbox_req->extSeqNum;
+ phba->mbox_ext_buf_ctx.mbx_dmabuf = dmabuf;
+
+ /* callback for multi-buffer read mailbox command */
+ pmboxq->mbox_cmpl = lpfc_bsg_issue_read_mbox_ext_cmpl;
+
+ /* context fields to callback function */
+ pmboxq->context1 = dd_data;
+ dd_data->type = TYPE_MBOX;
+ dd_data->context_un.mbox.pmboxq = pmboxq;
+ dd_data->context_un.mbox.mb = (MAILBOX_t *)pmbx;
+ dd_data->context_un.mbox.set_job = job;
+ job->dd_data = dd_data;
+
+ /* state change */
+ phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
+
+ rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
+ if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2947 Issued SLI_CONFIG ext-buffer "
+ "maibox command, rc:x%x\n", rc);
+ return 1;
+ }
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "2948 Failed to issue SLI_CONFIG ext-buffer "
+ "maibox command, rc:x%x\n", rc);
+ rc = -EPIPE;
+
+job_error:
+ if (pmboxq)
+ mempool_free(pmboxq, phba->mbox_mem_pool);
+ lpfc_bsg_dma_page_list_free(phba,
+ &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
+ kfree(dd_data);
+ phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_IDLE;
+ return rc;
+}
+
+/**
+ * lpfc_bsg_sli_cfg_write_cmd_ext - sli_config non-embedded mailbox cmd write
+ * @phba: Pointer to HBA context object.
+ * @job: Pointer to the fc_bsg_job object.
+ * @nemb_tp: Enumerate of non-embedded mailbox command type.
+ * @dmabuf: Pointer to a DMA buffer descriptor.
+ *
+ * This routine performs SLI_CONFIG (0x9B) write mailbox command operation with
+ * non-embedded external buffers.
+ **/
+static int
+lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
+ enum nemb_type nemb_tp,
+ struct lpfc_dmabuf *dmabuf)
+{
+ struct dfc_mbox_req *mbox_req;
+ struct lpfc_sli_config_mbox *sli_cfg_mbx;
+ uint32_t ext_buf_cnt;
+ struct bsg_job_data *dd_data = NULL;
+ LPFC_MBOXQ_t *pmboxq = NULL;
+ MAILBOX_t *pmb;
+ uint8_t *mbx;
+ int rc = 0, i;
+
+ mbox_req =
+ (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
+
+ /* pointer to the start of mailbox command */
+ sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
+
+ if (nemb_tp == nemb_mse) {
+ ext_buf_cnt = bsg_bf_get(lpfc_mbox_hdr_mse_cnt,
+ &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr);
+ if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_MSE) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "2953 Handled SLI_CONFIG(mse) wr, "
+ "ext_buf_cnt(%d) out of range(%d)\n",
+ ext_buf_cnt,
+ LPFC_MBX_SLI_CONFIG_MAX_MSE);
+ return -ERANGE;
+ }
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2949 Handled SLI_CONFIG(mse) wr, "
+ "ext_buf_cnt:%d\n", ext_buf_cnt);
+ } else {
+ /* sanity check on interface type for support */
+ if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+ LPFC_SLI_INTF_IF_TYPE_2)
+ return -ENODEV;
+ /* nemb_tp == nemb_hbd */
+ ext_buf_cnt = sli_cfg_mbx->un.sli_config_emb1_subsys.hbd_count;
+ if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_HBD) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "2954 Handled SLI_CONFIG(hbd) wr, "
+ "ext_buf_cnt(%d) out of range(%d)\n",
+ ext_buf_cnt,
+ LPFC_MBX_SLI_CONFIG_MAX_HBD);
+ return -ERANGE;
+ }
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2950 Handled SLI_CONFIG(hbd) wr, "
+ "ext_buf_cnt:%d\n", ext_buf_cnt);
+ }
+
+ if (ext_buf_cnt == 0)
+ return -EPERM;
+
+ /* for the first external buffer */
+ lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, 0, dmabuf, dmabuf);
+
+ /* log the lengths of any additional external buffers */
+ for (i = 1; i < ext_buf_cnt; i++) {
+ if (nemb_tp == nemb_mse)
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2951 SLI_CONFIG(mse), buf[%d]-length:%d\n",
+ i, sli_cfg_mbx->un.sli_config_emb0_subsys.
+ mse[i].buf_len);
+ else
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2952 SLI_CONFIG(hbd), buf[%d]-length:%d\n",
+ i, bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
+ &sli_cfg_mbx->un.sli_config_emb1_subsys.
+ hbd[i]));
+ }
+
+ /* multi-buffer handling context */
+ phba->mbox_ext_buf_ctx.nembType = nemb_tp;
+ phba->mbox_ext_buf_ctx.mboxType = mbox_wr;
+ phba->mbox_ext_buf_ctx.numBuf = ext_buf_cnt;
+ phba->mbox_ext_buf_ctx.mbxTag = mbox_req->extMboxTag;
+ phba->mbox_ext_buf_ctx.seqNum = mbox_req->extSeqNum;
+ phba->mbox_ext_buf_ctx.mbx_dmabuf = dmabuf;
+
+ if (ext_buf_cnt == 1) {
+ /* bsg tracking structure */
+ dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
+ if (!dd_data) {
+ rc = -ENOMEM;
+ goto job_error;
+ }
+
+ /* mailbox command structure for base driver */
+ pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!pmboxq) {
+ rc = -ENOMEM;
+ goto job_error;
+ }
+ memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
+ pmb = &pmboxq->u.mb;
+ mbx = (uint8_t *)dmabuf->virt;
+ memcpy(pmb, mbx, sizeof(*pmb));
+ pmb->mbxOwner = OWN_HOST;
+ pmboxq->vport = phba->pport;
+
+ /* callback for multi-buffer write mailbox command */
+ pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl;
+
+ /* context fields to callback function */
+ pmboxq->context1 = dd_data;
+ dd_data->type = TYPE_MBOX;
+ dd_data->context_un.mbox.pmboxq = pmboxq;
+ dd_data->context_un.mbox.mb = (MAILBOX_t *)mbx;
+ dd_data->context_un.mbox.set_job = job;
+ job->dd_data = dd_data;
+
+ /* state change */
+ phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
+
+ rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
+ if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2955 Issued SLI_CONFIG ext-buffer "
+ "maibox command, rc:x%x\n", rc);
+ return 1;
+ }
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "2956 Failed to issue SLI_CONFIG ext-buffer "
+ "maibox command, rc:x%x\n", rc);
+ rc = -EPIPE;
+ goto job_error;
+ }
+
+ /* wait for additional external buffers */
+ job->reply->result = 0;
+ job->job_done(job);
+ return SLI_CONFIG_HANDLED;
+
+job_error:
+ if (pmboxq)
+ mempool_free(pmboxq, phba->mbox_mem_pool);
+ kfree(dd_data);
+
+ return rc;
+}
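/*
 * Illustrative sketch, not part of this patch: the write path above issues
 * the mailbox immediately only when a single external buffer is attached;
 * otherwise it completes the BSG job and waits for the remaining buffers
 * to arrive in follow-up requests.  Names below are hypothetical.
 */
#include <stdio.h>

enum wr_step { WR_ISSUE_NOW, WR_WAIT_FOR_EBUFS };

static enum wr_step sli_cfg_wr_step(unsigned int ext_buf_cnt)
{
	return ext_buf_cnt == 1 ? WR_ISSUE_NOW : WR_WAIT_FOR_EBUFS;
}

int main(void)
{
	for (unsigned int n = 1; n <= 3; n++)
		printf("%u ext buffer(s): %s\n", n,
		       sli_cfg_wr_step(n) == WR_ISSUE_NOW ?
		       "issue mailbox now" :
		       "complete job, wait for more ebufs");
	return 0;
}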
+
+/**
+ * lpfc_bsg_handle_sli_cfg_mbox - handle sli-cfg mailbox cmd with ext buffer
+ * @phba: Pointer to HBA context object.
+ * @job: Pointer to the job object.
+ * @dmabuf: Pointer to a DMA buffer descriptor.
+ *
+ * This routine handles SLI_CONFIG (0x9B) mailbox command with non-embedded
+ * external buffers, including both 0x9B with non-embedded MSEs and 0x9B
+ * with embedded subsystem 0x1 and opcodes with external HBDs.
+ **/
+static int
+lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
+ struct lpfc_dmabuf *dmabuf)
+{
+ struct lpfc_sli_config_mbox *sli_cfg_mbx;
+ uint32_t subsys;
+ uint32_t opcode;
+ int rc = SLI_CONFIG_NOT_HANDLED;
+
+ /* state change */
+ phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_HOST;
+
+ sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
+
+ if (!bsg_bf_get(lpfc_mbox_hdr_emb,
+ &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) {
+ subsys = bsg_bf_get(lpfc_emb0_subcmnd_subsys,
+ &sli_cfg_mbx->un.sli_config_emb0_subsys);
+ opcode = bsg_bf_get(lpfc_emb0_subcmnd_opcode,
+ &sli_cfg_mbx->un.sli_config_emb0_subsys);
+ if (subsys == SLI_CONFIG_SUBSYS_FCOE) {
+ switch (opcode) {
+ case FCOE_OPCODE_READ_FCF:
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2957 Handled SLI_CONFIG "
+ "subsys_fcoe, opcode:x%x\n",
+ opcode);
+ rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
+ nemb_mse, dmabuf);
+ break;
+ case FCOE_OPCODE_ADD_FCF:
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2958 Handled SLI_CONFIG "
+ "subsys_fcoe, opcode:x%x\n",
+ opcode);
+ rc = lpfc_bsg_sli_cfg_write_cmd_ext(phba, job,
+ nemb_mse, dmabuf);
+ break;
+ default:
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2959 Not handled SLI_CONFIG "
+ "subsys_fcoe, opcode:x%x\n",
+ opcode);
+ rc = SLI_CONFIG_NOT_HANDLED;
+ break;
+ }
+ } else {
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2977 Handled SLI_CONFIG "
+ "subsys:x%d, opcode:x%x\n",
+ subsys, opcode);
+ rc = SLI_CONFIG_NOT_HANDLED;
+ }
+ } else {
+ subsys = bsg_bf_get(lpfc_emb1_subcmnd_subsys,
+ &sli_cfg_mbx->un.sli_config_emb1_subsys);
+ opcode = bsg_bf_get(lpfc_emb1_subcmnd_opcode,
+ &sli_cfg_mbx->un.sli_config_emb1_subsys);
+ if (subsys == SLI_CONFIG_SUBSYS_COMN) {
+ switch (opcode) {
+ case COMN_OPCODE_READ_OBJECT:
+ case COMN_OPCODE_READ_OBJECT_LIST:
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2960 Handled SLI_CONFIG "
+ "subsys_comn, opcode:x%x\n",
+ opcode);
+ rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
+ nemb_hbd, dmabuf);
+ break;
+ case COMN_OPCODE_WRITE_OBJECT:
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2961 Handled SLI_CONFIG "
+ "subsys_comn, opcode:x%x\n",
+ opcode);
+ rc = lpfc_bsg_sli_cfg_write_cmd_ext(phba, job,
+ nemb_hbd, dmabuf);
+ break;
+ default:
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2962 Not handled SLI_CONFIG "
+ "subsys_comn, opcode:x%x\n",
+ opcode);
+ rc = SLI_CONFIG_NOT_HANDLED;
+ break;
+ }
+ } else {
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2978 Handled SLI_CONFIG "
+ "subsys:x%d, opcode:x%x\n",
+ subsys, opcode);
+ rc = SLI_CONFIG_NOT_HANDLED;
+ }
+ }
+ return rc;
+}
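/*
 * Illustrative sketch, not part of this patch: a condensed model of the
 * dispatch above, reusing the subsystem/opcode values that lpfc_bsg.h
 * defines later in this patch.  The strings stand in for the read/write
 * handlers (lpfc_bsg_sli_cfg_{read,write}_cmd_ext).
 */
#include <stdio.h>

#define SLI_CONFIG_SUBSYS_FCOE		0x0C
#define FCOE_OPCODE_READ_FCF		0x08
#define FCOE_OPCODE_ADD_FCF		0x09
#define SLI_CONFIG_SUBSYS_COMN		0x01
#define COMN_OPCODE_READ_OBJECT		0xAB
#define COMN_OPCODE_WRITE_OBJECT	0xAC
#define COMN_OPCODE_READ_OBJECT_LIST	0xAD

static const char *sli_cfg_dispatch(unsigned int subsys, unsigned int opcode)
{
	if (subsys == SLI_CONFIG_SUBSYS_FCOE) {
		switch (opcode) {
		case FCOE_OPCODE_READ_FCF:	return "read cmd ext (mse)";
		case FCOE_OPCODE_ADD_FCF:	return "write cmd ext (mse)";
		}
	} else if (subsys == SLI_CONFIG_SUBSYS_COMN) {
		switch (opcode) {
		case COMN_OPCODE_READ_OBJECT:
		case COMN_OPCODE_READ_OBJECT_LIST:
						return "read cmd ext (hbd)";
		case COMN_OPCODE_WRITE_OBJECT:	return "write cmd ext (hbd)";
		}
	}
	return "not handled";
}

int main(void)
{
	printf("FCOE/0x08: %s\n", sli_cfg_dispatch(0x0C, 0x08));
	printf("COMN/0xAC: %s\n", sli_cfg_dispatch(0x01, 0xAC));
	printf("COMN/0xAE: %s\n", sli_cfg_dispatch(0x01, 0xAE));
	return 0;
}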
+
+/**
+ * lpfc_bsg_mbox_ext_abort - request to abort mbox command with ext buffers
+ * @phba: Pointer to HBA context object.
+ *
+ * This routine requests the abort of a pass-through mailbox command with
+ * multiple external buffers due to an error condition.
+ **/
+static void
+lpfc_bsg_mbox_ext_abort(struct lpfc_hba *phba)
+{
+ if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_PORT)
+ phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_ABTS;
+ else
+ lpfc_bsg_mbox_ext_session_reset(phba);
+ return;
+}
+
+/**
+ * lpfc_bsg_read_ebuf_get - get the next mailbox read external buffer
+ * @phba: Pointer to HBA context object.
+ * @job: Pointer to the job object.
+ *
+ * This routine copies the next external buffer of a multi-buffer read
+ * mailbox command back to user space through BSG.
+ **/
+static int
+lpfc_bsg_read_ebuf_get(struct lpfc_hba *phba, struct fc_bsg_job *job)
+{
+ struct lpfc_sli_config_mbox *sli_cfg_mbx;
+ struct lpfc_dmabuf *dmabuf;
+ uint8_t *pbuf;
+ uint32_t size;
+ uint32_t index;
+
+ index = phba->mbox_ext_buf_ctx.seqNum;
+ phba->mbox_ext_buf_ctx.seqNum++;
+
+ sli_cfg_mbx = (struct lpfc_sli_config_mbox *)
+ phba->mbox_ext_buf_ctx.mbx_dmabuf->virt;
+
+ if (phba->mbox_ext_buf_ctx.nembType == nemb_mse) {
+ size = bsg_bf_get(lpfc_mbox_sli_config_mse_len,
+ &sli_cfg_mbx->un.sli_config_emb0_subsys.mse[index]);
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2963 SLI_CONFIG (mse) ext-buffer rd get "
+ "buffer[%d], size:%d\n", index, size);
+ } else {
+ size = bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
+ &sli_cfg_mbx->un.sli_config_emb1_subsys.hbd[index]);
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2964 SLI_CONFIG (hbd) ext-buffer rd get "
+ "buffer[%d], size:%d\n", index, size);
+ }
+ if (list_empty(&phba->mbox_ext_buf_ctx.ext_dmabuf_list))
+ return -EPIPE;
+ dmabuf = list_first_entry(&phba->mbox_ext_buf_ctx.ext_dmabuf_list,
+ struct lpfc_dmabuf, list);
+ list_del_init(&dmabuf->list);
+ pbuf = (uint8_t *)dmabuf->virt;
+ job->reply->reply_payload_rcv_len =
+ sg_copy_from_buffer(job->reply_payload.sg_list,
+ job->reply_payload.sg_cnt,
+ pbuf, size);
+
+ lpfc_bsg_dma_page_free(phba, dmabuf);
+
+ if (phba->mbox_ext_buf_ctx.seqNum == phba->mbox_ext_buf_ctx.numBuf) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2965 SLI_CONFIG (hbd) ext-buffer rd mbox "
+ "command session done\n");
+ lpfc_bsg_mbox_ext_session_reset(phba);
+ }
+
+ job->reply->result = 0;
+ job->job_done(job);
+
+ return SLI_CONFIG_HANDLED;
+}
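/*
 * Illustrative sketch, not part of this patch: the copy-out above is
 * bounded by the reply scatterlist capacity (sg_copy_from_buffer returns
 * the byte count actually copied), and the session resets once the
 * incremented sequence number reaches the advertised buffer count.  Names
 * and types here are simplified stand-ins.
 */
#include <stdio.h>
#include <string.h>

static size_t bounded_copy(void *dst, size_t dst_len,
			   const void *src, size_t src_len)
{
	size_t n = dst_len < src_len ? dst_len : src_len;

	memcpy(dst, src, n);
	return n;	/* becomes job->reply->reply_payload_rcv_len */
}

int main(void)
{
	char ebuf[64] = "external buffer payload", out[16];
	unsigned int seq = 1, num_buf = 2;

	printf("copied %zu of %zu bytes\n",
	       bounded_copy(out, sizeof(out), ebuf, sizeof(ebuf)),
	       sizeof(ebuf));
	if (++seq == num_buf)
		printf("last buffer read: reset session context\n");
	return 0;
}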
+
+/**
+ * lpfc_bsg_write_ebuf_set - set the next mailbox write external buffer
+ * @phba: Pointer to HBA context object.
+ * @job: Pointer to the job object.
+ * @dmabuf: Pointer to a DMA buffer descriptor.
+ *
+ * This routine sets up the next mailbox write external buffer obtained
+ * from user space through BSG.
+ **/
+static int
+lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct fc_bsg_job *job,
+ struct lpfc_dmabuf *dmabuf)
+{
+ struct lpfc_sli_config_mbox *sli_cfg_mbx;
+ struct bsg_job_data *dd_data = NULL;
+ LPFC_MBOXQ_t *pmboxq = NULL;
+ MAILBOX_t *pmb;
+ enum nemb_type nemb_tp;
+ uint8_t *pbuf;
+ uint32_t size;
+ uint32_t index;
+ int rc;
+
+ index = phba->mbox_ext_buf_ctx.seqNum;
+ phba->mbox_ext_buf_ctx.seqNum++;
+ nemb_tp = phba->mbox_ext_buf_ctx.nembType;
+
+ sli_cfg_mbx = (struct lpfc_sli_config_mbox *)
+ phba->mbox_ext_buf_ctx.mbx_dmabuf->virt;
+
+ pbuf = (uint8_t *)dmabuf->virt;
+ size = job->request_payload.payload_len;
+ sg_copy_to_buffer(job->request_payload.sg_list,
+ job->request_payload.sg_cnt,
+ pbuf, size);
+
+ if (phba->mbox_ext_buf_ctx.nembType == nemb_mse) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2966 SLI_CONFIG (mse) ext-buffer wr set "
+ "buffer[%d], size:%d\n",
+ phba->mbox_ext_buf_ctx.seqNum, size);
+
+ } else {
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2967 SLI_CONFIG (hbd) ext-buffer wr set "
+ "buffer[%d], size:%d\n",
+ phba->mbox_ext_buf_ctx.seqNum, size);
+
+ }
+
+ /* set up external buffer descriptor and add to external buffer list */
+ lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, index,
+ phba->mbox_ext_buf_ctx.mbx_dmabuf,
+ dmabuf);
+ list_add_tail(&dmabuf->list, &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
+
+ if (phba->mbox_ext_buf_ctx.seqNum == phba->mbox_ext_buf_ctx.numBuf) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2968 SLI_CONFIG ext-buffer wr all %d "
+ "ebuffers received\n",
+ phba->mbox_ext_buf_ctx.numBuf);
+ /* bsg tracking structure */
+ dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
+ if (!dd_data) {
+ rc = -ENOMEM;
+ goto job_error;
+ }
+
+ /* mailbox command structure for base driver */
+ pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!pmboxq) {
+ rc = -ENOMEM;
+ goto job_error;
+ }
+ memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
+ pbuf = (uint8_t *)phba->mbox_ext_buf_ctx.mbx_dmabuf->virt;
+ pmb = &pmboxq->u.mb;
+ memcpy(pmb, pbuf, sizeof(*pmb));
+ pmb->mbxOwner = OWN_HOST;
+ pmboxq->vport = phba->pport;
+
+ /* callback for multi-buffer write mailbox command */
+ pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl;
+
+ /* context fields to callback function */
+ pmboxq->context1 = dd_data;
+ dd_data->type = TYPE_MBOX;
+ dd_data->context_un.mbox.pmboxq = pmboxq;
+ dd_data->context_un.mbox.mb = (MAILBOX_t *)pbuf;
+ dd_data->context_un.mbox.set_job = job;
+ job->dd_data = dd_data;
+
+ /* state change */
+ phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
+
+ rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
+ if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2969 Issued SLI_CONFIG ext-buffer "
+ "maibox command, rc:x%x\n", rc);
+ return 1;
+ }
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "2970 Failed to issue SLI_CONFIG ext-buffer "
+ "maibox command, rc:x%x\n", rc);
+ rc = -EPIPE;
+ goto job_error;
+ }
+
+ /* wait for additional external buffers */
+ job->reply->result = 0;
+ job->job_done(job);
+ return SLI_CONFIG_HANDLED;
+
+job_error:
+ if (pmboxq)
+ mempool_free(pmboxq, phba->mbox_mem_pool);
+ lpfc_bsg_dma_page_free(phba, dmabuf);
+ kfree(dd_data);
+
+ return rc;
+}
+
+/**
+ * lpfc_bsg_handle_sli_cfg_ebuf - handle ext buffer with sli-cfg mailbox cmd
+ * @phba: Pointer to HBA context object.
+ * @job: Pointer to the job object.
+ * @dmabuf: Pointer to a DMA buffer descriptor.
+ *
+ * This routine handles the external buffer with SLI_CONFIG (0x9B) mailbox
+ * command with multiple non-embedded external buffers.
+ **/
+static int
+lpfc_bsg_handle_sli_cfg_ebuf(struct lpfc_hba *phba, struct fc_bsg_job *job,
+ struct lpfc_dmabuf *dmabuf)
+{
+ int rc;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2971 SLI_CONFIG buffer (type:x%x)\n",
+ phba->mbox_ext_buf_ctx.mboxType);
+
+ if (phba->mbox_ext_buf_ctx.mboxType == mbox_rd) {
+ if (phba->mbox_ext_buf_ctx.state != LPFC_BSG_MBOX_DONE) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "2972 SLI_CONFIG rd buffer state "
+ "mismatch:x%x\n",
+ phba->mbox_ext_buf_ctx.state);
+ lpfc_bsg_mbox_ext_abort(phba);
+ return -EPIPE;
+ }
+ rc = lpfc_bsg_read_ebuf_get(phba, job);
+ if (rc == SLI_CONFIG_HANDLED)
+ lpfc_bsg_dma_page_free(phba, dmabuf);
+ } else { /* phba->mbox_ext_buf_ctx.mboxType == mbox_wr */
+ if (phba->mbox_ext_buf_ctx.state != LPFC_BSG_MBOX_HOST) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "2973 SLI_CONFIG wr buffer state "
+ "mismatch:x%x\n",
+ phba->mbox_ext_buf_ctx.state);
+ lpfc_bsg_mbox_ext_abort(phba);
+ return -EPIPE;
+ }
+ rc = lpfc_bsg_write_ebuf_set(phba, job, dmabuf);
+ }
+ return rc;
+}
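/*
 * Illustrative sketch, not part of this patch: the guards above pair the
 * session direction with the state it must be in -- a read buffer can only
 * be fetched after the port has completed the command (DONE), while a
 * write buffer must arrive while the host still owns the session (HOST).
 * State values below are illustrative, not the driver's.
 */
#include <stdio.h>

enum bsg_mbox_state { ST_IDLE, ST_HOST, ST_PORT, ST_DONE, ST_ABTS };
enum bsg_mbox_dir   { DIR_RD, DIR_WR };

static int ebuf_state_ok(enum bsg_mbox_dir dir, enum bsg_mbox_state st)
{
	return dir == DIR_RD ? st == ST_DONE : st == ST_HOST;
}

int main(void)
{
	printf("rd while DONE: %d\n", ebuf_state_ok(DIR_RD, ST_DONE)); /* ok */
	printf("rd while PORT: %d\n", ebuf_state_ok(DIR_RD, ST_PORT)); /* abort */
	printf("wr while HOST: %d\n", ebuf_state_ok(DIR_WR, ST_HOST)); /* ok */
	return 0;
}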
+
+/**
+ * lpfc_bsg_handle_sli_cfg_ext - handle sli-cfg mailbox with external buffer
+ * @phba: Pointer to HBA context object.
+ * @job: Pointer to the job object.
+ * @dmabuf: Pointer to a DMA buffer descriptor.
+ *
+ * This routine checks and handles non-embedded multi-buffer SLI_CONFIG
+ * (0x9B) mailbox commands and external buffers.
+ **/
+static int
+lpfc_bsg_handle_sli_cfg_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
+ struct lpfc_dmabuf *dmabuf)
+{
+ struct dfc_mbox_req *mbox_req;
+ int rc;
+
+ mbox_req =
+ (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
+
+ /* mbox command with/without single external buffer */
+ if (mbox_req->extMboxTag == 0 && mbox_req->extSeqNum == 0)
+ return SLI_CONFIG_NOT_HANDLED;
+
+ /* mbox command and first external buffer */
+ if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_IDLE) {
+ if (mbox_req->extSeqNum == 1) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2974 SLI_CONFIG mailbox: tag:%d, "
+ "seq:%d\n", mbox_req->extMboxTag,
+ mbox_req->extSeqNum);
+ rc = lpfc_bsg_handle_sli_cfg_mbox(phba, job, dmabuf);
+ return rc;
+ } else
+ goto sli_cfg_ext_error;
+ }
+
+ /*
+ * handle additional external buffers
+ */
+
+ /* check broken pipe conditions */
+ if (mbox_req->extMboxTag != phba->mbox_ext_buf_ctx.mbxTag)
+ goto sli_cfg_ext_error;
+ if (mbox_req->extSeqNum > phba->mbox_ext_buf_ctx.numBuf)
+ goto sli_cfg_ext_error;
+ if (mbox_req->extSeqNum != phba->mbox_ext_buf_ctx.seqNum + 1)
+ goto sli_cfg_ext_error;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2975 SLI_CONFIG mailbox external buffer: "
+ "extSta:x%x, tag:%d, seq:%d\n",
+ phba->mbox_ext_buf_ctx.state, mbox_req->extMboxTag,
+ mbox_req->extSeqNum);
+ rc = lpfc_bsg_handle_sli_cfg_ebuf(phba, job, dmabuf);
+ return rc;
+
+sli_cfg_ext_error:
+ /* all other cases, broken pipe */
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "2976 SLI_CONFIG mailbox broken pipe: "
+ "ctxSta:x%x, ctxNumBuf:%d "
+ "ctxTag:%d, ctxSeq:%d, tag:%d, seq:%d\n",
+ phba->mbox_ext_buf_ctx.state,
+ phba->mbox_ext_buf_ctx.numBuf,
+ phba->mbox_ext_buf_ctx.mbxTag,
+ phba->mbox_ext_buf_ctx.seqNum,
+ mbox_req->extMboxTag, mbox_req->extSeqNum);
+
+ lpfc_bsg_mbox_ext_session_reset(phba);
+
+ return -EPIPE;
+}
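/*
 * Illustrative sketch, not part of this patch: a stand-alone restatement
 * of the three broken-pipe tests above -- the request must carry the
 * session's tag, a sequence number within the advertised count, and
 * exactly one past the last sequence seen.
 */
#include <stdio.h>

struct ext_session { unsigned int tag, seq, num_buf; };

static int ebuf_req_ok(const struct ext_session *s,
		       unsigned int tag, unsigned int seq)
{
	if (tag != s->tag)	return 0;	/* foreign session */
	if (seq > s->num_buf)	return 0;	/* beyond advertised count */
	if (seq != s->seq + 1)	return 0;	/* out of order */
	return 1;
}

int main(void)
{
	struct ext_session s = { .tag = 7, .seq = 1, .num_buf = 3 };

	printf("tag 7 seq 2: %d\n", ebuf_req_ok(&s, 7, 2)); /* accepted */
	printf("tag 7 seq 3: %d\n", ebuf_req_ok(&s, 7, 3)); /* skipped seq 2 */
	printf("tag 8 seq 2: %d\n", ebuf_req_ok(&s, 8, 2)); /* wrong tag */
	return 0;
}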
+
+/**
* lpfc_bsg_issue_mbox - issues a mailbox command on behalf of an app
* @phba: Pointer to HBA context object.
* @mb: Pointer to a mailbox object.
@@ -2638,22 +4118,21 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
LPFC_MBOXQ_t *pmboxq = NULL; /* internal mailbox queue */
MAILBOX_t *pmb; /* shortcut to the pmboxq mailbox */
/* a 4k buffer to hold the mb and extended data from/to the bsg */
- MAILBOX_t *mb = NULL;
+ uint8_t *pmbx = NULL;
struct bsg_job_data *dd_data = NULL; /* bsg data tracking structure */
- uint32_t size;
- struct lpfc_dmabuf *rxbmp = NULL; /* for biu diag */
- struct lpfc_dmabufext *dmp = NULL; /* for biu diag */
- struct ulp_bde64 *rxbpl = NULL;
- struct dfc_mbox_req *mbox_req = (struct dfc_mbox_req *)
- job->request->rqst_data.h_vendor.vendor_cmd;
+ struct lpfc_dmabuf *dmabuf = NULL;
+ struct dfc_mbox_req *mbox_req;
struct READ_EVENT_LOG_VAR *rdEventLog;
uint32_t transmit_length, receive_length, mode;
+ struct lpfc_mbx_sli4_config *sli4_config;
struct lpfc_mbx_nembed_cmd *nembed_sge;
struct mbox_header *header;
struct ulp_bde64 *bde;
uint8_t *ext = NULL;
int rc = 0;
uint8_t *from;
+ uint32_t size;
+
/* in case no data is transferred */
job->reply->reply_payload_rcv_len = 0;
@@ -2665,6 +4144,18 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
goto job_done;
}
+ /*
+ * Don't allow mailbox commands to be sent when blocked or when in
+ * the middle of discovery
+ */
+ if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
+ rc = -EAGAIN;
+ goto job_done;
+ }
+
+ mbox_req =
+ (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
+
/* check if requested extended data lengths are valid */
if ((mbox_req->inExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t)) ||
(mbox_req->outExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t))) {
@@ -2672,6 +4163,32 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
goto job_done;
}
+ dmabuf = lpfc_bsg_dma_page_alloc(phba);
+ if (!dmabuf || !dmabuf->virt) {
+ rc = -ENOMEM;
+ goto job_done;
+ }
+
+ /* Get the mailbox command or external buffer from BSG */
+ pmbx = (uint8_t *)dmabuf->virt;
+ size = job->request_payload.payload_len;
+ sg_copy_to_buffer(job->request_payload.sg_list,
+ job->request_payload.sg_cnt, pmbx, size);
+
+ /* Handle possible SLI_CONFIG with non-embedded payloads */
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ rc = lpfc_bsg_handle_sli_cfg_ext(phba, job, dmabuf);
+ if (rc == SLI_CONFIG_HANDLED)
+ goto job_cont;
+ if (rc)
+ goto job_done;
+ /* SLI_CONFIG_NOT_HANDLED for other mailbox commands */
+ }
+
+ rc = lpfc_bsg_check_cmd_access(phba, (MAILBOX_t *)pmbx, vport);
+ if (rc != 0)
+ goto job_done; /* must be negative */
+
/* allocate our bsg tracking structure */
dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
if (!dd_data) {
@@ -2681,12 +4198,6 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
goto job_done;
}
- mb = kzalloc(BSG_MBOX_SIZE, GFP_KERNEL);
- if (!mb) {
- rc = -ENOMEM;
- goto job_done;
- }
-
pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!pmboxq) {
rc = -ENOMEM;
@@ -2694,17 +4205,8 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
}
memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
- size = job->request_payload.payload_len;
- sg_copy_to_buffer(job->request_payload.sg_list,
- job->request_payload.sg_cnt,
- mb, size);
-
- rc = lpfc_bsg_check_cmd_access(phba, mb, vport);
- if (rc != 0)
- goto job_done; /* must be negative */
-
pmb = &pmboxq->u.mb;
- memcpy(pmb, mb, sizeof(*pmb));
+ memcpy(pmb, pmbx, sizeof(*pmb));
pmb->mbxOwner = OWN_HOST;
pmboxq->vport = vport;
@@ -2721,30 +4223,13 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
"0x%x while in stopped state.\n",
pmb->mbxCommand);
- /* Don't allow mailbox commands to be sent when blocked
- * or when in the middle of discovery
- */
- if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
- rc = -EAGAIN;
- goto job_done;
- }
-
/* extended mailbox commands will need an extended buffer */
if (mbox_req->inExtWLen || mbox_req->outExtWLen) {
- ext = kzalloc(MAILBOX_EXT_SIZE, GFP_KERNEL);
- if (!ext) {
- rc = -ENOMEM;
- goto job_done;
- }
-
/* any data for the device? */
if (mbox_req->inExtWLen) {
- from = (uint8_t *)mb;
- from += sizeof(MAILBOX_t);
- memcpy((uint8_t *)ext, from,
- mbox_req->inExtWLen * sizeof(uint32_t));
+ from = pmbx;
+ ext = from + sizeof(MAILBOX_t);
}
-
pmboxq->context2 = ext;
pmboxq->in_ext_byte_len =
mbox_req->inExtWLen * sizeof(uint32_t);
@@ -2768,46 +4253,17 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
rc = -ERANGE;
goto job_done;
}
-
- rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
- if (!rxbmp) {
- rc = -ENOMEM;
- goto job_done;
- }
-
- rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
- if (!rxbmp->virt) {
- rc = -ENOMEM;
- goto job_done;
- }
-
- INIT_LIST_HEAD(&rxbmp->list);
- rxbpl = (struct ulp_bde64 *) rxbmp->virt;
- dmp = diag_cmd_data_alloc(phba, rxbpl, transmit_length, 0);
- if (!dmp) {
- rc = -ENOMEM;
- goto job_done;
- }
-
- INIT_LIST_HEAD(&dmp->dma.list);
pmb->un.varBIUdiag.un.s2.xmit_bde64.addrHigh =
- putPaddrHigh(dmp->dma.phys);
+ putPaddrHigh(dmabuf->phys + sizeof(MAILBOX_t));
pmb->un.varBIUdiag.un.s2.xmit_bde64.addrLow =
- putPaddrLow(dmp->dma.phys);
+ putPaddrLow(dmabuf->phys + sizeof(MAILBOX_t));
pmb->un.varBIUdiag.un.s2.rcv_bde64.addrHigh =
- putPaddrHigh(dmp->dma.phys +
- pmb->un.varBIUdiag.un.s2.
- xmit_bde64.tus.f.bdeSize);
+ putPaddrHigh(dmabuf->phys + sizeof(MAILBOX_t)
+ + pmb->un.varBIUdiag.un.s2.xmit_bde64.tus.f.bdeSize);
pmb->un.varBIUdiag.un.s2.rcv_bde64.addrLow =
- putPaddrLow(dmp->dma.phys +
- pmb->un.varBIUdiag.un.s2.
- xmit_bde64.tus.f.bdeSize);
-
- /* copy the transmit data found in the mailbox extension area */
- from = (uint8_t *)mb;
- from += sizeof(MAILBOX_t);
- memcpy((uint8_t *)dmp->dma.virt, from, transmit_length);
+ putPaddrLow(dmabuf->phys + sizeof(MAILBOX_t)
+ + pmb->un.varBIUdiag.un.s2.xmit_bde64.tus.f.bdeSize);
} else if (pmb->mbxCommand == MBX_READ_EVENT_LOG) {
rdEventLog = &pmb->un.varRdEventLog;
receive_length = rdEventLog->rcv_bde64.tus.f.bdeSize;
@@ -2823,33 +4279,10 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
/* mode zero uses a bde like biu diags command */
if (mode == 0) {
-
- /* rebuild the command for sli4 using our own buffers
- * like we do for biu diags
- */
-
- rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
- if (!rxbmp) {
- rc = -ENOMEM;
- goto job_done;
- }
-
- rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
- rxbpl = (struct ulp_bde64 *) rxbmp->virt;
- if (rxbpl) {
- INIT_LIST_HEAD(&rxbmp->list);
- dmp = diag_cmd_data_alloc(phba, rxbpl,
- receive_length, 0);
- }
-
- if (!dmp) {
- rc = -ENOMEM;
- goto job_done;
- }
-
- INIT_LIST_HEAD(&dmp->dma.list);
- pmb->un.varWords[3] = putPaddrLow(dmp->dma.phys);
- pmb->un.varWords[4] = putPaddrHigh(dmp->dma.phys);
+ pmb->un.varWords[3] = putPaddrLow(dmabuf->phys
+ + sizeof(MAILBOX_t));
+ pmb->un.varWords[4] = putPaddrHigh(dmabuf->phys
+ + sizeof(MAILBOX_t));
}
} else if (phba->sli_rev == LPFC_SLI_REV4) {
if (pmb->mbxCommand == MBX_DUMP_MEMORY) {
@@ -2860,36 +4293,14 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
/* receive length cannot be greater than mailbox
* extension size
*/
- if ((receive_length == 0) ||
- (receive_length > MAILBOX_EXT_SIZE)) {
+ if (receive_length == 0) {
rc = -ERANGE;
goto job_done;
}
-
- rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
- if (!rxbmp) {
- rc = -ENOMEM;
- goto job_done;
- }
-
- rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
- if (!rxbmp->virt) {
- rc = -ENOMEM;
- goto job_done;
- }
-
- INIT_LIST_HEAD(&rxbmp->list);
- rxbpl = (struct ulp_bde64 *) rxbmp->virt;
- dmp = diag_cmd_data_alloc(phba, rxbpl, receive_length,
- 0);
- if (!dmp) {
- rc = -ENOMEM;
- goto job_done;
- }
-
- INIT_LIST_HEAD(&dmp->dma.list);
- pmb->un.varWords[3] = putPaddrLow(dmp->dma.phys);
- pmb->un.varWords[4] = putPaddrHigh(dmp->dma.phys);
+ pmb->un.varWords[3] = putPaddrLow(dmabuf->phys
+ + sizeof(MAILBOX_t));
+ pmb->un.varWords[4] = putPaddrHigh(dmabuf->phys
+ + sizeof(MAILBOX_t));
} else if ((pmb->mbxCommand == MBX_UPDATE_CFG) &&
pmb->un.varUpdateCfg.co) {
bde = (struct ulp_bde64 *)&pmb->un.varWords[4];
@@ -2899,102 +4310,53 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
rc = -ERANGE;
goto job_done;
}
-
- rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
- if (!rxbmp) {
- rc = -ENOMEM;
- goto job_done;
- }
-
- rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
- if (!rxbmp->virt) {
- rc = -ENOMEM;
- goto job_done;
- }
-
- INIT_LIST_HEAD(&rxbmp->list);
- rxbpl = (struct ulp_bde64 *) rxbmp->virt;
- dmp = diag_cmd_data_alloc(phba, rxbpl,
- bde->tus.f.bdeSize, 0);
- if (!dmp) {
- rc = -ENOMEM;
- goto job_done;
- }
-
- INIT_LIST_HEAD(&dmp->dma.list);
- bde->addrHigh = putPaddrHigh(dmp->dma.phys);
- bde->addrLow = putPaddrLow(dmp->dma.phys);
-
- /* copy the transmit data found in the mailbox
- * extension area
- */
- from = (uint8_t *)mb;
- from += sizeof(MAILBOX_t);
- memcpy((uint8_t *)dmp->dma.virt, from,
- bde->tus.f.bdeSize);
+ bde->addrHigh = putPaddrHigh(dmabuf->phys
+ + sizeof(MAILBOX_t));
+ bde->addrLow = putPaddrLow(dmabuf->phys
+ + sizeof(MAILBOX_t));
} else if (pmb->mbxCommand == MBX_SLI4_CONFIG) {
- /* rebuild the command for sli4 using our own buffers
- * like we do for biu diags
- */
- header = (struct mbox_header *)&pmb->un.varWords[0];
- nembed_sge = (struct lpfc_mbx_nembed_cmd *)
- &pmb->un.varWords[0];
- receive_length = nembed_sge->sge[0].length;
-
- /* receive length cannot be greater than mailbox
- * extension size
- */
- if ((receive_length == 0) ||
- (receive_length > MAILBOX_EXT_SIZE)) {
- rc = -ERANGE;
- goto job_done;
- }
-
- rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
- if (!rxbmp) {
- rc = -ENOMEM;
- goto job_done;
- }
-
- rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
- if (!rxbmp->virt) {
- rc = -ENOMEM;
- goto job_done;
- }
+ /* Handling non-embedded SLI_CONFIG mailbox command */
+ sli4_config = &pmboxq->u.mqe.un.sli4_config;
+ if (!bf_get(lpfc_mbox_hdr_emb,
+ &sli4_config->header.cfg_mhdr)) {
+ /* rebuild the command for sli4 using our
+ * own buffers like we do for biu diags
+ */
+ header = (struct mbox_header *)
+ &pmb->un.varWords[0];
+ nembed_sge = (struct lpfc_mbx_nembed_cmd *)
+ &pmb->un.varWords[0];
+ receive_length = nembed_sge->sge[0].length;
+
+ /* receive length cannot be greater than
+ * mailbox extension size
+ */
+ if ((receive_length == 0) ||
+ (receive_length > MAILBOX_EXT_SIZE)) {
+ rc = -ERANGE;
+ goto job_done;
+ }
- INIT_LIST_HEAD(&rxbmp->list);
- rxbpl = (struct ulp_bde64 *) rxbmp->virt;
- dmp = diag_cmd_data_alloc(phba, rxbpl, receive_length,
- 0);
- if (!dmp) {
- rc = -ENOMEM;
- goto job_done;
+ nembed_sge->sge[0].pa_hi =
+ putPaddrHigh(dmabuf->phys
+ + sizeof(MAILBOX_t));
+ nembed_sge->sge[0].pa_lo =
+ putPaddrLow(dmabuf->phys
+ + sizeof(MAILBOX_t));
}
-
- INIT_LIST_HEAD(&dmp->dma.list);
- nembed_sge->sge[0].pa_hi = putPaddrHigh(dmp->dma.phys);
- nembed_sge->sge[0].pa_lo = putPaddrLow(dmp->dma.phys);
- /* copy the transmit data found in the mailbox
- * extension area
- */
- from = (uint8_t *)mb;
- from += sizeof(MAILBOX_t);
- memcpy((uint8_t *)dmp->dma.virt, from,
- header->cfg_mhdr.payload_length);
}
}
- dd_data->context_un.mbox.rxbmp = rxbmp;
- dd_data->context_un.mbox.dmp = dmp;
+ dd_data->context_un.mbox.dmabuffers = dmabuf;
/* setup wake call as IOCB callback */
- pmboxq->mbox_cmpl = lpfc_bsg_wake_mbox_wait;
+ pmboxq->mbox_cmpl = lpfc_bsg_issue_mbox_cmpl;
/* setup context field to pass wait_queue pointer to wake function */
pmboxq->context1 = dd_data;
dd_data->type = TYPE_MBOX;
dd_data->context_un.mbox.pmboxq = pmboxq;
- dd_data->context_un.mbox.mb = mb;
+ dd_data->context_un.mbox.mb = (MAILBOX_t *)pmbx;
dd_data->context_un.mbox.set_job = job;
dd_data->context_un.mbox.ext = ext;
dd_data->context_un.mbox.mbOffset = mbox_req->mbOffset;
@@ -3011,11 +4373,11 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
}
/* job finished, copy the data */
- memcpy(mb, pmb, sizeof(*pmb));
+ memcpy(pmbx, pmb, sizeof(*pmb));
job->reply->reply_payload_rcv_len =
sg_copy_from_buffer(job->reply_payload.sg_list,
- job->reply_payload.sg_cnt,
- mb, size);
+ job->reply_payload.sg_cnt,
+ pmbx, size);
/* not waiting mbox already done */
rc = 0;
goto job_done;
@@ -3027,22 +4389,12 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
job_done:
/* common exit for error or job completed inline */
- kfree(mb);
if (pmboxq)
mempool_free(pmboxq, phba->mbox_mem_pool);
- kfree(ext);
- if (dmp) {
- dma_free_coherent(&phba->pcidev->dev,
- dmp->size, dmp->dma.virt,
- dmp->dma.phys);
- kfree(dmp);
- }
- if (rxbmp) {
- lpfc_mbuf_free(phba, rxbmp->virt, rxbmp->phys);
- kfree(rxbmp);
- }
+ lpfc_bsg_dma_page_free(phba, dmabuf);
kfree(dd_data);
+job_cont:
return rc;
}
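/*
 * Illustrative sketch, not part of this patch: the rework above replaces
 * the per-command rxbmp/dmp allocations with a single DMA page that holds
 * the mailbox image followed by its extension/payload area.  MBOX_SZ
 * stands in for sizeof(MAILBOX_t); the 32-bit high/low split mirrors what
 * putPaddrHigh()/putPaddrLow() do in the driver.
 */
#include <stdint.h>
#include <stdio.h>

#define MBOX_SZ 256u		/* hypothetical stand-in size */

static uint32_t paddr_hi(uint64_t pa) { return (uint32_t)(pa >> 32); }
static uint32_t paddr_lo(uint64_t pa) { return (uint32_t)pa; }

int main(void)
{
	uint64_t page_phys = 0x1f0000000ULL;	  /* hypothetical bus addr */
	uint64_t ext_phys = page_phys + MBOX_SZ;  /* payload follows mbox */

	printf("extension: addrHigh=0x%08x addrLow=0x%08x\n",
	       paddr_hi(ext_phys), paddr_lo(ext_phys));
	return 0;
}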
@@ -3055,37 +4407,28 @@ lpfc_bsg_mbox_cmd(struct fc_bsg_job *job)
{
struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
struct lpfc_hba *phba = vport->phba;
+ struct dfc_mbox_req *mbox_req;
int rc = 0;
- /* in case no data is transferred */
+ /* mix-and-match backward compatibility */
job->reply->reply_payload_rcv_len = 0;
if (job->request_len <
sizeof(struct fc_bsg_request) + sizeof(struct dfc_mbox_req)) {
- lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
- "2737 Received MBOX_REQ request below "
- "minimum size\n");
- rc = -EINVAL;
- goto job_error;
- }
-
- if (job->request_payload.payload_len != BSG_MBOX_SIZE) {
- rc = -EINVAL;
- goto job_error;
- }
-
- if (job->reply_payload.payload_len != BSG_MBOX_SIZE) {
- rc = -EINVAL;
- goto job_error;
- }
-
- if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
- rc = -EAGAIN;
- goto job_error;
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2737 Mix-and-match backward compability "
+ "between MBOX_REQ old size:%d and "
+ "new request size:%d\n",
+ (int)(job->request_len -
+ sizeof(struct fc_bsg_request)),
+ (int)sizeof(struct dfc_mbox_req));
+ mbox_req = (struct dfc_mbox_req *)
+ job->request->rqst_data.h_vendor.vendor_cmd;
+ mbox_req->extMboxTag = 0;
+ mbox_req->extSeqNum = 0;
}
rc = lpfc_bsg_issue_mbox(phba, job, vport);
-job_error:
if (rc == 0) {
/* job done */
job->reply->result = 0;
@@ -3416,10 +4759,16 @@ lpfc_bsg_hst_vendor(struct fc_bsg_job *job)
rc = lpfc_bsg_send_mgmt_rsp(job);
break;
case LPFC_BSG_VENDOR_DIAG_MODE:
- rc = lpfc_bsg_diag_mode(job);
+ rc = lpfc_bsg_diag_loopback_mode(job);
+ break;
+ case LPFC_BSG_VENDOR_DIAG_MODE_END:
+ rc = lpfc_sli4_bsg_diag_mode_end(job);
+ break;
+ case LPFC_BSG_VENDOR_DIAG_RUN_LOOPBACK:
+ rc = lpfc_bsg_diag_loopback_run(job);
break;
- case LPFC_BSG_VENDOR_DIAG_TEST:
- rc = lpfc_bsg_diag_test(job);
+ case LPFC_BSG_VENDOR_LINK_DIAG_TEST:
+ rc = lpfc_sli4_bsg_link_diag_test(job);
break;
case LPFC_BSG_VENDOR_GET_MGMT_REV:
rc = lpfc_bsg_get_dfc_rev(job);
@@ -3538,6 +4887,8 @@ lpfc_bsg_timeout(struct fc_bsg_job *job)
/* the mbox completion handler can now be run */
spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
job->job_done(job);
+ if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_PORT)
+ phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_ABTS;
break;
case TYPE_MENLO:
menlo = &dd_data->context_un.menlo;
diff --git a/drivers/scsi/lpfc/lpfc_bsg.h b/drivers/scsi/lpfc/lpfc_bsg.h
index b542aca6f5a..c8c2b47ea88 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.h
+++ b/drivers/scsi/lpfc/lpfc_bsg.h
@@ -24,15 +24,17 @@
* These are the vendor unique structures passed in using the bsg
* FC_BSG_HST_VENDOR message code type.
*/
-#define LPFC_BSG_VENDOR_SET_CT_EVENT 1
-#define LPFC_BSG_VENDOR_GET_CT_EVENT 2
-#define LPFC_BSG_VENDOR_SEND_MGMT_RESP 3
-#define LPFC_BSG_VENDOR_DIAG_MODE 4
-#define LPFC_BSG_VENDOR_DIAG_TEST 5
-#define LPFC_BSG_VENDOR_GET_MGMT_REV 6
-#define LPFC_BSG_VENDOR_MBOX 7
-#define LPFC_BSG_VENDOR_MENLO_CMD 8
-#define LPFC_BSG_VENDOR_MENLO_DATA 9
+#define LPFC_BSG_VENDOR_SET_CT_EVENT 1
+#define LPFC_BSG_VENDOR_GET_CT_EVENT 2
+#define LPFC_BSG_VENDOR_SEND_MGMT_RESP 3
+#define LPFC_BSG_VENDOR_DIAG_MODE 4
+#define LPFC_BSG_VENDOR_DIAG_RUN_LOOPBACK 5
+#define LPFC_BSG_VENDOR_GET_MGMT_REV 6
+#define LPFC_BSG_VENDOR_MBOX 7
+#define LPFC_BSG_VENDOR_MENLO_CMD 8
+#define LPFC_BSG_VENDOR_MENLO_DATA 9
+#define LPFC_BSG_VENDOR_DIAG_MODE_END 10
+#define LPFC_BSG_VENDOR_LINK_DIAG_TEST 11
struct set_ct_event {
uint32_t command;
@@ -67,10 +69,25 @@ struct diag_mode_set {
uint32_t timeout;
};
+struct sli4_link_diag {
+ uint32_t command;
+ uint32_t timeout;
+ uint32_t test_id;
+ uint32_t loops;
+ uint32_t test_version;
+ uint32_t error_action;
+};
+
struct diag_mode_test {
uint32_t command;
};
+struct diag_status {
+ uint32_t mbox_status;
+ uint32_t shdr_status;
+ uint32_t shdr_add_status;
+};
+
#define LPFC_WWNN_TYPE 0
#define LPFC_WWPN_TYPE 1
@@ -92,11 +109,15 @@ struct get_mgmt_rev_reply {
};
#define BSG_MBOX_SIZE 4096 /* mailbox command plus extended data */
+
+/* BSG mailbox request header */
struct dfc_mbox_req {
uint32_t command;
uint32_t mbOffset;
uint32_t inExtWLen;
uint32_t outExtWLen;
+ uint32_t extMboxTag;
+ uint32_t extSeqNum;
};
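/*
 * Illustrative sketch, not part of this patch: the two new trailing fields
 * grow dfc_mbox_req, so the mbox entry point accepts the old, shorter
 * request and zeroes extMboxTag/extSeqNum, which means "no multi-buffer
 * session".  Layouts below mirror the header; sizes assume no padding.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct dfc_mbox_req_old {
	uint32_t command, mbOffset, inExtWLen, outExtWLen;
};

struct dfc_mbox_req_new {
	uint32_t command, mbOffset, inExtWLen, outExtWLen;
	uint32_t extMboxTag, extSeqNum;
};

int main(void)
{
	struct dfc_mbox_req_old old_req = { 0x07, 0, 0, 0 };
	struct dfc_mbox_req_new req;

	memset(&req, 0, sizeof(req));	/* ext fields default to zero */
	memcpy(&req, &old_req, sizeof(old_req));
	printf("old %zu bytes, new %zu bytes, tag %u, seq %u\n",
	       sizeof(old_req), sizeof(req), req.extMboxTag, req.extSeqNum);
	return 0;
}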
/* Used for menlo command or menlo data. The xri is only used for menlo data */
@@ -171,7 +192,7 @@ struct lpfc_sli_config_mse {
#define lpfc_mbox_sli_config_mse_len_WORD buf_len
};
-struct lpfc_sli_config_subcmd_hbd {
+struct lpfc_sli_config_hbd {
uint32_t buf_len;
#define lpfc_mbox_sli_config_ecmn_hbd_len_SHIFT 0
#define lpfc_mbox_sli_config_ecmn_hbd_len_MASK 0xffffff
@@ -194,21 +215,39 @@ struct lpfc_sli_config_hdr {
uint32_t reserved5;
};
-struct lpfc_sli_config_generic {
+struct lpfc_sli_config_emb0_subsys {
struct lpfc_sli_config_hdr sli_config_hdr;
#define LPFC_MBX_SLI_CONFIG_MAX_MSE 19
struct lpfc_sli_config_mse mse[LPFC_MBX_SLI_CONFIG_MAX_MSE];
+ uint32_t padding;
+ uint32_t word64;
+#define lpfc_emb0_subcmnd_opcode_SHIFT 0
+#define lpfc_emb0_subcmnd_opcode_MASK 0xff
+#define lpfc_emb0_subcmnd_opcode_WORD word64
+#define lpfc_emb0_subcmnd_subsys_SHIFT 8
+#define lpfc_emb0_subcmnd_subsys_MASK 0xff
+#define lpfc_emb0_subcmnd_subsys_WORD word64
+/* Subsystem FCOE (0x0C) OpCodes */
+#define SLI_CONFIG_SUBSYS_FCOE 0x0C
+#define FCOE_OPCODE_READ_FCF 0x08
+#define FCOE_OPCODE_ADD_FCF 0x09
};
-struct lpfc_sli_config_subcmnd {
+struct lpfc_sli_config_emb1_subsys {
struct lpfc_sli_config_hdr sli_config_hdr;
uint32_t word6;
-#define lpfc_subcmnd_opcode_SHIFT 0
-#define lpfc_subcmnd_opcode_MASK 0xff
-#define lpfc_subcmnd_opcode_WORD word6
-#define lpfc_subcmnd_subsys_SHIFT 8
-#define lpfc_subcmnd_subsys_MASK 0xff
-#define lpfc_subcmnd_subsys_WORD word6
+#define lpfc_emb1_subcmnd_opcode_SHIFT 0
+#define lpfc_emb1_subcmnd_opcode_MASK 0xff
+#define lpfc_emb1_subcmnd_opcode_WORD word6
+#define lpfc_emb1_subcmnd_subsys_SHIFT 8
+#define lpfc_emb1_subcmnd_subsys_MASK 0xff
+#define lpfc_emb1_subcmnd_subsys_WORD word6
+/* Subsystem COMN (0x01) OpCodes */
+#define SLI_CONFIG_SUBSYS_COMN 0x01
+#define COMN_OPCODE_READ_OBJECT 0xAB
+#define COMN_OPCODE_WRITE_OBJECT 0xAC
+#define COMN_OPCODE_READ_OBJECT_LIST 0xAD
+#define COMN_OPCODE_DELETE_OBJECT 0xAE
uint32_t timeout;
uint32_t request_length;
uint32_t word9;
@@ -222,8 +261,8 @@ struct lpfc_sli_config_subcmnd {
uint32_t rd_offset;
uint32_t obj_name[26];
uint32_t hbd_count;
-#define LPFC_MBX_SLI_CONFIG_MAX_HBD 10
- struct lpfc_sli_config_subcmd_hbd hbd[LPFC_MBX_SLI_CONFIG_MAX_HBD];
+#define LPFC_MBX_SLI_CONFIG_MAX_HBD 8
+ struct lpfc_sli_config_hbd hbd[LPFC_MBX_SLI_CONFIG_MAX_HBD];
};
struct lpfc_sli_config_mbox {
@@ -235,7 +274,11 @@ struct lpfc_sli_config_mbox {
#define lpfc_mqe_command_MASK 0x000000FF
#define lpfc_mqe_command_WORD word0
union {
- struct lpfc_sli_config_generic sli_config_generic;
- struct lpfc_sli_config_subcmnd sli_config_subcmnd;
+ struct lpfc_sli_config_emb0_subsys sli_config_emb0_subsys;
+ struct lpfc_sli_config_emb1_subsys sli_config_emb1_subsys;
} un;
};
+
+/* driver only */
+#define SLI_CONFIG_NOT_HANDLED 0
+#define SLI_CONFIG_HANDLED 1
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index f0b332f4eed..fc20c247f36 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -55,6 +55,8 @@ void lpfc_request_features(struct lpfc_hba *, struct lpfcMboxq *);
void lpfc_supported_pages(struct lpfcMboxq *);
void lpfc_pc_sli4_params(struct lpfcMboxq *);
int lpfc_pc_sli4_params_get(struct lpfc_hba *, LPFC_MBOXQ_t *);
+int lpfc_sli4_mbox_rsrc_extent(struct lpfc_hba *, struct lpfcMboxq *,
+ uint16_t, uint16_t, bool);
int lpfc_get_sli4_parameters(struct lpfc_hba *, LPFC_MBOXQ_t *);
struct lpfc_vport *lpfc_find_vport_by_did(struct lpfc_hba *, uint32_t);
void lpfc_cleanup_rcv_buffers(struct lpfc_vport *);
@@ -171,6 +173,7 @@ void lpfc_delayed_disc_tmo(unsigned long);
void lpfc_delayed_disc_timeout_handler(struct lpfc_vport *);
int lpfc_config_port_prep(struct lpfc_hba *);
+void lpfc_update_vport_wwn(struct lpfc_vport *vport);
int lpfc_config_port_post(struct lpfc_hba *);
int lpfc_hba_down_prep(struct lpfc_hba *);
int lpfc_hba_down_post(struct lpfc_hba *);
@@ -365,6 +368,10 @@ extern void lpfc_debugfs_slow_ring_trc(struct lpfc_hba *, char *, uint32_t,
uint32_t, uint32_t);
extern struct lpfc_hbq_init *lpfc_hbq_defs[];
+/* SLI4 if_type 2 externs. */
+int lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *);
+int lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *);
+
/* externs BlockGuard */
extern char *_dump_buf_data;
extern unsigned long _dump_buf_data_order;
@@ -429,3 +436,6 @@ void lpfc_cleanup_wt_rrqs(struct lpfc_hba *);
void lpfc_cleanup_vports_rrqs(struct lpfc_vport *, struct lpfc_nodelist *);
struct lpfc_node_rrq *lpfc_get_active_rrq(struct lpfc_vport *, uint16_t,
uint32_t);
+int lpfc_wr_object(struct lpfc_hba *, struct list_head *, uint32_t, uint32_t *);
+/* functions to support SR-IOV */
+int lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *, int);
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index d9edfd90d7f..779b88e1469 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -352,6 +352,8 @@ lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
icmd->ulpLe = 1;
icmd->ulpClass = CLASS3;
icmd->ulpContext = ndlp->nlp_rpi;
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
/* For GEN_REQUEST64_CR, use the RPI */
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index c93fca05860..ffe82d169b4 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -1665,7 +1665,8 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
/* Get fast-path complete queue information */
len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
"Fast-path FCP CQ information:\n");
- for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) {
+ fcp_qidx = 0;
+ do {
len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
"Associated EQID[%02d]:\n",
phba->sli4_hba.fcp_cq[fcp_qidx]->assoc_qid);
@@ -1678,7 +1679,7 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
phba->sli4_hba.fcp_cq[fcp_qidx]->entry_size,
phba->sli4_hba.fcp_cq[fcp_qidx]->host_index,
phba->sli4_hba.fcp_cq[fcp_qidx]->hba_index);
- }
+ } while (++fcp_qidx < phba->cfg_fcp_eq_count);
len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
/* Get mailbox queue information */
@@ -2012,7 +2013,8 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
goto pass_check;
}
/* FCP complete queue */
- for (qidx = 0; qidx < phba->cfg_fcp_eq_count; qidx++) {
+ qidx = 0;
+ do {
if (phba->sli4_hba.fcp_cq[qidx]->queue_id == queid) {
/* Sanity check */
rc = lpfc_idiag_que_param_check(
@@ -2024,7 +2026,7 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
phba->sli4_hba.fcp_cq[qidx];
goto pass_check;
}
- }
+ } while (++qidx < phba->cfg_fcp_eq_count);
goto error_out;
break;
case LPFC_IDIAG_MQ:
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index e2c452467c8..32a084534f3 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -250,7 +250,7 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
icmd->un.elsreq64.myID = vport->fc_myDID;
/* For ELS_REQUEST64_CR, use the VPI by default */
- icmd->ulpContext = vport->vpi + phba->vpi_base;
+ icmd->ulpContext = phba->vpi_ids[vport->vpi];
icmd->ulpCt_h = 0;
/* The CT field must be 0=INVALID_RPI for the ECHO cmd */
if (elscmd == ELS_CMD_ECHO)
@@ -454,6 +454,7 @@ lpfc_issue_reg_vfi(struct lpfc_vport *vport)
rc = -ENOMEM;
goto fail_free_dmabuf;
}
+
mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mboxq) {
rc = -ENOMEM;
@@ -6585,6 +6586,26 @@ lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi)
{
struct lpfc_vport *vport;
unsigned long flags;
+ int i;
+
+ /* The physical port is always vpi 0 - translation is unnecessary. */
+ if (vpi > 0) {
+ /*
+ * Translate the physical vpi to the logical vpi. The
+ * vport stores the logical vpi.
+ */
+ for (i = 0; i < phba->max_vpi; i++) {
+ if (vpi == phba->vpi_ids[i])
+ break;
+ }
+
+ if (i >= phba->max_vpi) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
+ "2936 Could not find Vport mapped "
+ "to vpi %d\n", vpi);
+ return NULL;
+ }
+ }
spin_lock_irqsave(&phba->hbalock, flags);
list_for_each_entry(vport, &phba->port_list, listentry) {
@@ -6641,8 +6662,9 @@ lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
vport = phba->pport;
else
vport = lpfc_find_vport_by_vpid(phba,
- icmd->unsli3.rcvsli3.vpi - phba->vpi_base);
+ icmd->unsli3.rcvsli3.vpi);
}
+
/* If there are no BDEs associated
* with this IOCB, there is nothing to do.
*/
@@ -7222,7 +7244,7 @@ lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
elsiocb->iocb.ulpCt_h = (SLI4_CT_VPI >> 1) & 1;
elsiocb->iocb.ulpCt_l = SLI4_CT_VPI & 1 ;
/* Set the ulpContext to the vpi */
- elsiocb->iocb.ulpContext = vport->vpi + phba->vpi_base;
+ elsiocb->iocb.ulpContext = phba->vpi_ids[vport->vpi];
} else {
/* For FDISC, Let FDISC rsp set the NPortID for this VPI */
icmd->ulpCt_h = 1;
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 7a35df5e203..18d0dbfda2b 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -881,7 +881,7 @@ lpfc_linkdown(struct lpfc_hba *phba)
/* Clean up any firmware default rpi's */
mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (mb) {
- lpfc_unreg_did(phba, 0xffff, 0xffffffff, mb);
+ lpfc_unreg_did(phba, 0xffff, LPFC_UNREG_ALL_DFLT_RPIS, mb);
mb->vport = vport;
mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
@@ -2690,16 +2690,7 @@ lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
memcpy((uint8_t *) &vport->fc_sparam, (uint8_t *) mp->virt,
sizeof (struct serv_parm));
- if (phba->cfg_soft_wwnn)
- u64_to_wwn(phba->cfg_soft_wwnn,
- vport->fc_sparam.nodeName.u.wwn);
- if (phba->cfg_soft_wwpn)
- u64_to_wwn(phba->cfg_soft_wwpn,
- vport->fc_sparam.portName.u.wwn);
- memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
- sizeof(vport->fc_nodename));
- memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
- sizeof(vport->fc_portname));
+ lpfc_update_vport_wwn(vport);
if (vport->port_type == LPFC_PHYSICAL_PORT) {
memcpy(&phba->wwnn, &vport->fc_nodename, sizeof(phba->wwnn));
memcpy(&phba->wwpn, &vport->fc_portname, sizeof(phba->wwnn));
@@ -3430,7 +3421,8 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
return;
}
- ndlp->nlp_rpi = mb->un.varWords[0];
+ if (phba->sli_rev < LPFC_SLI_REV4)
+ ndlp->nlp_rpi = mb->un.varWords[0];
ndlp->nlp_flag |= NLP_RPI_REGISTERED;
ndlp->nlp_type |= NLP_FABRIC;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
@@ -3504,7 +3496,8 @@ out:
return;
}
- ndlp->nlp_rpi = mb->un.varWords[0];
+ if (phba->sli_rev < LPFC_SLI_REV4)
+ ndlp->nlp_rpi = mb->un.varWords[0];
ndlp->nlp_flag |= NLP_RPI_REGISTERED;
ndlp->nlp_type |= NLP_FABRIC;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
@@ -3591,7 +3584,6 @@ lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
if (ndlp->nlp_type & NLP_FCP_INITIATOR)
rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;
-
if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN)
fc_remote_port_rolechg(rport, rport_ids.roles);
@@ -4106,11 +4098,16 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
struct lpfc_hba *phba = vport->phba;
LPFC_MBOXQ_t *mbox;
int rc;
+ uint16_t rpi;
if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (mbox) {
- lpfc_unreg_login(phba, vport->vpi, ndlp->nlp_rpi, mbox);
+ /* SLI4 ports require the physical rpi value. */
+ rpi = ndlp->nlp_rpi;
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ rpi = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
+ lpfc_unreg_login(phba, vport->vpi, rpi, mbox);
mbox->vport = vport;
mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
@@ -4179,7 +4176,8 @@ lpfc_unreg_all_rpis(struct lpfc_vport *vport)
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (mbox) {
- lpfc_unreg_login(phba, vport->vpi, 0xffff, mbox);
+ lpfc_unreg_login(phba, vport->vpi, LPFC_UNREG_ALL_RPIS_VPORT,
+ mbox);
mbox->vport = vport;
mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
mbox->context1 = NULL;
@@ -4203,7 +4201,8 @@ lpfc_unreg_default_rpis(struct lpfc_vport *vport)
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (mbox) {
- lpfc_unreg_did(phba, vport->vpi, 0xffffffff, mbox);
+ lpfc_unreg_did(phba, vport->vpi, LPFC_UNREG_ALL_DFLT_RPIS,
+ mbox);
mbox->vport = vport;
mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
mbox->context1 = NULL;
@@ -4653,10 +4652,7 @@ lpfc_disc_start(struct lpfc_vport *vport)
if (num_sent)
return;
- /*
- * For SLI3, cmpl_reg_vpi will set port_state to READY, and
- * continue discovery.
- */
+ /* Register the VPI for SLI3, NON-NPIV only. */
if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
!(vport->fc_flag & FC_PT2PT) &&
!(vport->fc_flag & FC_RSCN_MODE) &&
@@ -4943,7 +4939,7 @@ restart_disc:
if (phba->sli_rev < LPFC_SLI_REV4) {
if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
lpfc_issue_reg_vpi(phba, vport);
- else { /* NPIV Not enabled */
+ else {
lpfc_issue_clear_la(phba, vport);
vport->port_state = LPFC_VPORT_READY;
}
@@ -5069,7 +5065,8 @@ lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
pmb->context1 = NULL;
pmb->context2 = NULL;
- ndlp->nlp_rpi = mb->un.varWords[0];
+ if (phba->sli_rev < LPFC_SLI_REV4)
+ ndlp->nlp_rpi = mb->un.varWords[0];
ndlp->nlp_flag |= NLP_RPI_REGISTERED;
ndlp->nlp_type |= NLP_FABRIC;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
@@ -5354,6 +5351,17 @@ lpfc_fcf_inuse(struct lpfc_hba *phba)
for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
shost = lpfc_shost_from_vport(vports[i]);
spin_lock_irq(shost->host_lock);
+ /*
+ * If the CVL_RCVD bit is not set, then we have sent the
+ * flogi and are still waiting for a response. If dev_loss
+ * fires while we are waiting, we do not want to unreg the
+ * fcf.
+ */
+ if (!(vports[i]->fc_flag & FC_VPORT_CVL_RCVD)) {
+ spin_unlock_irq(shost->host_lock);
+ ret = 1;
+ goto out;
+ }
list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
if (NLP_CHK_NODE_ACT(ndlp) && ndlp->rport &&
(ndlp->rport->roles & FC_RPORT_ROLE_FCP_TARGET)) {
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 86b6f7e6686..9059524cf22 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -64,6 +64,8 @@
#define SLI3_IOCB_CMD_SIZE 128
#define SLI3_IOCB_RSP_SIZE 64
+#define LPFC_UNREG_ALL_RPIS_VPORT 0xffff
+#define LPFC_UNREG_ALL_DFLT_RPIS 0xffffffff
/* vendor ID used in SCSI netlink calls */
#define LPFC_NL_VENDOR_ID (SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX)
@@ -903,6 +905,8 @@ struct RRQ { /* Structure is in Big Endian format */
#define rrq_rxid_WORD rrq_exchg
};
+#define LPFC_MAX_VFN_PER_PFN 255 /* Maximum VFs allowed per ARI */
+#define LPFC_DEF_VFN_PER_PFN 0 /* Default VFs due to platform limitation */
struct RTV_RSP { /* Structure is in Big Endian format */
uint32_t ratov;
@@ -1199,7 +1203,9 @@ typedef struct {
#define PCI_DEVICE_ID_BALIUS 0xe131
#define PCI_DEVICE_ID_PROTEUS_PF 0xe180
#define PCI_DEVICE_ID_LANCER_FC 0xe200
+#define PCI_DEVICE_ID_LANCER_FC_VF 0xe208
#define PCI_DEVICE_ID_LANCER_FCOE 0xe260
+#define PCI_DEVICE_ID_LANCER_FCOE_VF 0xe268
#define PCI_DEVICE_ID_SAT_SMB 0xf011
#define PCI_DEVICE_ID_SAT_MID 0xf015
#define PCI_DEVICE_ID_RFLY 0xf095
@@ -3021,7 +3027,7 @@ typedef struct {
#define MAILBOX_EXT_SIZE (MAILBOX_EXT_WSIZE * sizeof(uint32_t))
#define MAILBOX_HBA_EXT_OFFSET 0x100
/* max mbox xmit size is a page size for sysfs IO operations */
-#define MAILBOX_MAX_XMIT_SIZE PAGE_SIZE
+#define MAILBOX_SYSFS_MAX 4096
typedef union {
uint32_t varWords[MAILBOX_CMD_WSIZE - 1]; /* first word is type/
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 4dff668ebda..11e26a26b5d 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -170,6 +170,25 @@ struct lpfc_sli_intf {
#define LPFC_PCI_FUNC3 3
#define LPFC_PCI_FUNC4 4
+/* SLI4 interface type-2 control register offsets */
+#define LPFC_CTL_PORT_SEM_OFFSET 0x400
+#define LPFC_CTL_PORT_STA_OFFSET 0x404
+#define LPFC_CTL_PORT_CTL_OFFSET 0x408
+#define LPFC_CTL_PORT_ER1_OFFSET 0x40C
+#define LPFC_CTL_PORT_ER2_OFFSET 0x410
+#define LPFC_CTL_PDEV_CTL_OFFSET 0x414
+
+/* Some SLI4 interface type-2 PDEV_CTL register bits */
+#define LPFC_CTL_PDEV_CTL_DRST 0x00000001
+#define LPFC_CTL_PDEV_CTL_FRST 0x00000002
+#define LPFC_CTL_PDEV_CTL_DD 0x00000004
+#define LPFC_CTL_PDEV_CTL_LC 0x00000008
+#define LPFC_CTL_PDEV_CTL_FRL_ALL 0x00
+#define LPFC_CTL_PDEV_CTL_FRL_FC_FCOE 0x10
+#define LPFC_CTL_PDEV_CTL_FRL_NIC 0x20
+
+#define LPFC_FW_DUMP_REQUEST (LPFC_CTL_PDEV_CTL_DD | LPFC_CTL_PDEV_CTL_FRST)
+
/* Active interrupt test count */
#define LPFC_ACT_INTR_CNT 4
@@ -210,9 +229,26 @@ struct ulp_bde64 {
struct lpfc_sli4_flags {
uint32_t word0;
-#define lpfc_fip_flag_SHIFT 0
-#define lpfc_fip_flag_MASK 0x00000001
-#define lpfc_fip_flag_WORD word0
+#define lpfc_idx_rsrc_rdy_SHIFT 0
+#define lpfc_idx_rsrc_rdy_MASK 0x00000001
+#define lpfc_idx_rsrc_rdy_WORD word0
+#define LPFC_IDX_RSRC_RDY 1
+#define lpfc_xri_rsrc_rdy_SHIFT 1
+#define lpfc_xri_rsrc_rdy_MASK 0x00000001
+#define lpfc_xri_rsrc_rdy_WORD word0
+#define LPFC_XRI_RSRC_RDY 1
+#define lpfc_rpi_rsrc_rdy_SHIFT 2
+#define lpfc_rpi_rsrc_rdy_MASK 0x00000001
+#define lpfc_rpi_rsrc_rdy_WORD word0
+#define LPFC_RPI_RSRC_RDY 1
+#define lpfc_vpi_rsrc_rdy_SHIFT 3
+#define lpfc_vpi_rsrc_rdy_MASK 0x00000001
+#define lpfc_vpi_rsrc_rdy_WORD word0
+#define LPFC_VPI_RSRC_RDY 1
+#define lpfc_vfi_rsrc_rdy_SHIFT 4
+#define lpfc_vfi_rsrc_rdy_MASK 0x00000001
+#define lpfc_vfi_rsrc_rdy_WORD word0
+#define LPFC_VFI_RSRC_RDY 1
};
struct sli4_bls_rsp {
@@ -739,6 +775,12 @@ union lpfc_sli4_cfg_shdr {
#define lpfc_mbox_hdr_version_SHIFT 0
#define lpfc_mbox_hdr_version_MASK 0x000000FF
#define lpfc_mbox_hdr_version_WORD word9
+#define lpfc_mbox_hdr_pf_num_SHIFT 16
+#define lpfc_mbox_hdr_pf_num_MASK 0x000000FF
+#define lpfc_mbox_hdr_pf_num_WORD word9
+#define lpfc_mbox_hdr_vh_num_SHIFT 24
+#define lpfc_mbox_hdr_vh_num_MASK 0x000000FF
+#define lpfc_mbox_hdr_vh_num_WORD word9
#define LPFC_Q_CREATE_VERSION_2 2
#define LPFC_Q_CREATE_VERSION_1 1
#define LPFC_Q_CREATE_VERSION_0 0
@@ -766,12 +808,22 @@ union lpfc_sli4_cfg_shdr {
} response;
};
-/* Mailbox structures */
+/* Mailbox Header structures.
+ * struct mbox_header is defined for first generation SLI4_CFG mailbox
+ * calls deployed for BE-based ports.
+ *
+ * struct sli4_mbox_header is defined for second generation SLI4
+ * ports that don't deploy the SLI4_CFG mechanism.
+ */
struct mbox_header {
struct lpfc_sli4_cfg_mhdr cfg_mhdr;
union lpfc_sli4_cfg_shdr cfg_shdr;
};
+#define LPFC_EXTENT_LOCAL 0
+#define LPFC_TIMEOUT_DEFAULT 0
+#define LPFC_EXTENT_VERSION_DEFAULT 0
+
/* Subsystem Definitions */
#define LPFC_MBOX_SUBSYSTEM_COMMON 0x1
#define LPFC_MBOX_SUBSYSTEM_FCOE 0xC
@@ -794,6 +846,13 @@ struct mbox_header {
#define LPFC_MBOX_OPCODE_QUERY_FW_CFG 0x3A
#define LPFC_MBOX_OPCODE_FUNCTION_RESET 0x3D
#define LPFC_MBOX_OPCODE_MQ_CREATE_EXT 0x5A
+#define LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO 0x9A
+#define LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT 0x9B
+#define LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT 0x9C
+#define LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT 0x9D
+#define LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG 0xA0
+#define LPFC_MBOX_OPCODE_GET_PROFILE_CONFIG 0xA4
+#define LPFC_MBOX_OPCODE_WRITE_OBJECT 0xAC
#define LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS 0xB5
/* FCoE Opcodes */
@@ -808,6 +867,8 @@ struct mbox_header {
#define LPFC_MBOX_OPCODE_FCOE_DELETE_FCF 0x0A
#define LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE 0x0B
#define LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF 0x10
+#define LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE 0x22
+#define LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_LOOPBACK 0x23
/* Mailbox command structures */
struct eq_context {
@@ -1210,6 +1271,187 @@ struct lpfc_mbx_mq_destroy {
} u;
};
+/* Start Gen 2 SLI4 Mailbox definitions: */
+
+/* Define allocate-ready Gen 2 SLI4 FCoE Resource Extent Types. */
+#define LPFC_RSC_TYPE_FCOE_VFI 0x20
+#define LPFC_RSC_TYPE_FCOE_VPI 0x21
+#define LPFC_RSC_TYPE_FCOE_RPI 0x22
+#define LPFC_RSC_TYPE_FCOE_XRI 0x23
+
+struct lpfc_mbx_get_rsrc_extent_info {
+ struct mbox_header header;
+ union {
+ struct {
+ uint32_t word4;
+#define lpfc_mbx_get_rsrc_extent_info_type_SHIFT 0
+#define lpfc_mbx_get_rsrc_extent_info_type_MASK 0x0000FFFF
+#define lpfc_mbx_get_rsrc_extent_info_type_WORD word4
+ } req;
+ struct {
+ uint32_t word4;
+#define lpfc_mbx_get_rsrc_extent_info_cnt_SHIFT 0
+#define lpfc_mbx_get_rsrc_extent_info_cnt_MASK 0x0000FFFF
+#define lpfc_mbx_get_rsrc_extent_info_cnt_WORD word4
+#define lpfc_mbx_get_rsrc_extent_info_size_SHIFT 16
+#define lpfc_mbx_get_rsrc_extent_info_size_MASK 0x0000FFFF
+#define lpfc_mbx_get_rsrc_extent_info_size_WORD word4
+ } rsp;
+ } u;
+};
+
+struct lpfc_id_range {
+ uint32_t word5;
+#define lpfc_mbx_rsrc_id_word4_0_SHIFT 0
+#define lpfc_mbx_rsrc_id_word4_0_MASK 0x0000FFFF
+#define lpfc_mbx_rsrc_id_word4_0_WORD word5
+#define lpfc_mbx_rsrc_id_word4_1_SHIFT 16
+#define lpfc_mbx_rsrc_id_word4_1_MASK 0x0000FFFF
+#define lpfc_mbx_rsrc_id_word4_1_WORD word5
+};
+
+struct lpfc_mbx_set_link_diag_state {
+ struct mbox_header header;
+ union {
+ struct {
+ uint32_t word0;
+#define lpfc_mbx_set_diag_state_diag_SHIFT 0
+#define lpfc_mbx_set_diag_state_diag_MASK 0x00000001
+#define lpfc_mbx_set_diag_state_diag_WORD word0
+#define lpfc_mbx_set_diag_state_link_num_SHIFT 16
+#define lpfc_mbx_set_diag_state_link_num_MASK 0x0000003F
+#define lpfc_mbx_set_diag_state_link_num_WORD word0
+#define lpfc_mbx_set_diag_state_link_type_SHIFT 22
+#define lpfc_mbx_set_diag_state_link_type_MASK 0x00000003
+#define lpfc_mbx_set_diag_state_link_type_WORD word0
+ } req;
+ struct {
+ uint32_t word0;
+ } rsp;
+ } u;
+};
+
+struct lpfc_mbx_set_link_diag_loopback {
+ struct mbox_header header;
+ union {
+ struct {
+ uint32_t word0;
+#define lpfc_mbx_set_diag_lpbk_type_SHIFT 0
+#define lpfc_mbx_set_diag_lpbk_type_MASK 0x00000001
+#define lpfc_mbx_set_diag_lpbk_type_WORD word0
+#define LPFC_DIAG_LOOPBACK_TYPE_DISABLE 0x0
+#define LPFC_DIAG_LOOPBACK_TYPE_INTERNAL 0x1
+#define LPFC_DIAG_LOOPBACK_TYPE_EXTERNAL 0x2
+#define lpfc_mbx_set_diag_lpbk_link_num_SHIFT 16
+#define lpfc_mbx_set_diag_lpbk_link_num_MASK 0x0000003F
+#define lpfc_mbx_set_diag_lpbk_link_num_WORD word0
+#define lpfc_mbx_set_diag_lpbk_link_type_SHIFT 22
+#define lpfc_mbx_set_diag_lpbk_link_type_MASK 0x00000003
+#define lpfc_mbx_set_diag_lpbk_link_type_WORD word0
+ } req;
+ struct {
+ uint32_t word0;
+ } rsp;
+ } u;
+};
+
+struct lpfc_mbx_run_link_diag_test {
+ struct mbox_header header;
+ union {
+ struct {
+ uint32_t word0;
+#define lpfc_mbx_run_diag_test_link_num_SHIFT 16
+#define lpfc_mbx_run_diag_test_link_num_MASK 0x0000003F
+#define lpfc_mbx_run_diag_test_link_num_WORD word0
+#define lpfc_mbx_run_diag_test_link_type_SHIFT 22
+#define lpfc_mbx_run_diag_test_link_type_MASK 0x00000003
+#define lpfc_mbx_run_diag_test_link_type_WORD word0
+ uint32_t word1;
+#define lpfc_mbx_run_diag_test_test_id_SHIFT 0
+#define lpfc_mbx_run_diag_test_test_id_MASK 0x0000FFFF
+#define lpfc_mbx_run_diag_test_test_id_WORD word1
+#define lpfc_mbx_run_diag_test_loops_SHIFT 16
+#define lpfc_mbx_run_diag_test_loops_MASK 0x0000FFFF
+#define lpfc_mbx_run_diag_test_loops_WORD word1
+ uint32_t word2;
+#define lpfc_mbx_run_diag_test_test_ver_SHIFT 0
+#define lpfc_mbx_run_diag_test_test_ver_MASK 0x0000FFFF
+#define lpfc_mbx_run_diag_test_test_ver_WORD word2
+#define lpfc_mbx_run_diag_test_err_act_SHIFT 16
+#define lpfc_mbx_run_diag_test_err_act_MASK 0x000000FF
+#define lpfc_mbx_run_diag_test_err_act_WORD word2
+ } req;
+ struct {
+ uint32_t word0;
+ } rsp;
+ } u;
+};
+
+/*
+ * struct lpfc_mbx_alloc_rsrc_extents:
+ * A mbox is generically 256 bytes long. An SLI4_CONFIG mailbox requires
+ * 6 words of header + 4 words of shared subcommand header +
+ * 1 word of Extent-Opcode-specific header = 11 words or 44 bytes total.
+ *
+ * An embedded version of SLI4_CONFIG therefore has 256 - 44 = 212 bytes
+ * for extents payload.
+ *
+ * 212/2 (bytes per extent) = 106 extents.
+ * 106/2 (extents per word) = 53 words.
+ * lpfc_id_range id is statically size to 53.
+ *
+ * This mailbox definition is used for ALLOC or GET_ALLOCATED
+ * extent ranges. For ALLOC, the type and cnt are required.
+ * For GET_ALLOCATED, only the type is required.
+ */
+struct lpfc_mbx_alloc_rsrc_extents {
+ struct mbox_header header;
+ union {
+ struct {
+ uint32_t word4;
+#define lpfc_mbx_alloc_rsrc_extents_type_SHIFT 0
+#define lpfc_mbx_alloc_rsrc_extents_type_MASK 0x0000FFFF
+#define lpfc_mbx_alloc_rsrc_extents_type_WORD word4
+#define lpfc_mbx_alloc_rsrc_extents_cnt_SHIFT 16
+#define lpfc_mbx_alloc_rsrc_extents_cnt_MASK 0x0000FFFF
+#define lpfc_mbx_alloc_rsrc_extents_cnt_WORD word4
+ } req;
+ struct {
+ uint32_t word4;
+#define lpfc_mbx_rsrc_cnt_SHIFT 0
+#define lpfc_mbx_rsrc_cnt_MASK 0x0000FFFF
+#define lpfc_mbx_rsrc_cnt_WORD word4
+ struct lpfc_id_range id[53];
+ } rsp;
+ } u;
+};
+
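The sizing argument in the comment above can be checked at compile time with the kernel's static assertion helper; a small sketch (BUILD_BUG_ON comes from <linux/bug.h>):

	/* 53 words carry 2 * 53 = 106 16-bit extent IDs, exactly filling the
	 * 212 bytes of embedded payload left after 44 bytes of headers.
	 */
	BUILD_BUG_ON(2 * 53 != (256 - 44) / 2);
	BUILD_BUG_ON(sizeof(((struct lpfc_mbx_alloc_rsrc_extents *)0)->u.rsp.id)
		     != 53 * sizeof(uint32_t));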
+/*
+ * This is the non-embedded version of ALLOC or GET RSRC_EXTENTS. Word4 in
+ * this structure shares the same SHIFT/MASK/WORD defines as word4 of the
+ * embedded lpfc_mbx_alloc_rsrc_extents structure defined above. This
+ * non-embedded structure provides for the maximum number of extents
+ * supported by the port.
+ */
+struct lpfc_mbx_nembed_rsrc_extent {
+ union lpfc_sli4_cfg_shdr cfg_shdr;
+ uint32_t word4;
+ struct lpfc_id_range id;
+};
+
+struct lpfc_mbx_dealloc_rsrc_extents {
+ struct mbox_header header;
+ struct {
+ uint32_t word4;
+#define lpfc_mbx_dealloc_rsrc_extents_type_SHIFT 0
+#define lpfc_mbx_dealloc_rsrc_extents_type_MASK 0x0000FFFF
+#define lpfc_mbx_dealloc_rsrc_extents_type_WORD word4
+ } req;
+
+};
+
+/* Start SLI4 FCoE specific mbox structures. */
+
struct lpfc_mbx_post_hdr_tmpl {
struct mbox_header header;
uint32_t word10;
@@ -1229,7 +1471,7 @@ struct sli4_sge { /* SLI-4 */
uint32_t word2;
#define lpfc_sli4_sge_offset_SHIFT 0 /* Offset of buffer - Not used*/
-#define lpfc_sli4_sge_offset_MASK 0x00FFFFFF
+#define lpfc_sli4_sge_offset_MASK 0x1FFFFFFF
#define lpfc_sli4_sge_offset_WORD word2
#define lpfc_sli4_sge_last_SHIFT 31 /* Last SEG in the SGL sets
this flag !! */
@@ -1773,61 +2015,31 @@ struct lpfc_mbx_read_rev {
struct lpfc_mbx_read_config {
uint32_t word1;
-#define lpfc_mbx_rd_conf_max_bbc_SHIFT 0
-#define lpfc_mbx_rd_conf_max_bbc_MASK 0x000000FF
-#define lpfc_mbx_rd_conf_max_bbc_WORD word1
-#define lpfc_mbx_rd_conf_init_bbc_SHIFT 8
-#define lpfc_mbx_rd_conf_init_bbc_MASK 0x000000FF
-#define lpfc_mbx_rd_conf_init_bbc_WORD word1
+#define lpfc_mbx_rd_conf_extnts_inuse_SHIFT 31
+#define lpfc_mbx_rd_conf_extnts_inuse_MASK 0x00000001
+#define lpfc_mbx_rd_conf_extnts_inuse_WORD word1
uint32_t word2;
-#define lpfc_mbx_rd_conf_nport_did_SHIFT 0
-#define lpfc_mbx_rd_conf_nport_did_MASK 0x00FFFFFF
-#define lpfc_mbx_rd_conf_nport_did_WORD word2
#define lpfc_mbx_rd_conf_topology_SHIFT 24
#define lpfc_mbx_rd_conf_topology_MASK 0x000000FF
#define lpfc_mbx_rd_conf_topology_WORD word2
- uint32_t word3;
-#define lpfc_mbx_rd_conf_ao_SHIFT 0
-#define lpfc_mbx_rd_conf_ao_MASK 0x00000001
-#define lpfc_mbx_rd_conf_ao_WORD word3
-#define lpfc_mbx_rd_conf_bb_scn_SHIFT 8
-#define lpfc_mbx_rd_conf_bb_scn_MASK 0x0000000F
-#define lpfc_mbx_rd_conf_bb_scn_WORD word3
-#define lpfc_mbx_rd_conf_cbb_scn_SHIFT 12
-#define lpfc_mbx_rd_conf_cbb_scn_MASK 0x0000000F
-#define lpfc_mbx_rd_conf_cbb_scn_WORD word3
-#define lpfc_mbx_rd_conf_mc_SHIFT 29
-#define lpfc_mbx_rd_conf_mc_MASK 0x00000001
-#define lpfc_mbx_rd_conf_mc_WORD word3
+ uint32_t rsvd_3;
uint32_t word4;
#define lpfc_mbx_rd_conf_e_d_tov_SHIFT 0
#define lpfc_mbx_rd_conf_e_d_tov_MASK 0x0000FFFF
#define lpfc_mbx_rd_conf_e_d_tov_WORD word4
- uint32_t word5;
-#define lpfc_mbx_rd_conf_lp_tov_SHIFT 0
-#define lpfc_mbx_rd_conf_lp_tov_MASK 0x0000FFFF
-#define lpfc_mbx_rd_conf_lp_tov_WORD word5
+ uint32_t rsvd_5;
uint32_t word6;
#define lpfc_mbx_rd_conf_r_a_tov_SHIFT 0
#define lpfc_mbx_rd_conf_r_a_tov_MASK 0x0000FFFF
#define lpfc_mbx_rd_conf_r_a_tov_WORD word6
- uint32_t word7;
-#define lpfc_mbx_rd_conf_r_t_tov_SHIFT 0
-#define lpfc_mbx_rd_conf_r_t_tov_MASK 0x000000FF
-#define lpfc_mbx_rd_conf_r_t_tov_WORD word7
- uint32_t word8;
-#define lpfc_mbx_rd_conf_al_tov_SHIFT 0
-#define lpfc_mbx_rd_conf_al_tov_MASK 0x0000000F
-#define lpfc_mbx_rd_conf_al_tov_WORD word8
+ uint32_t rsvd_7;
+ uint32_t rsvd_8;
uint32_t word9;
#define lpfc_mbx_rd_conf_lmt_SHIFT 0
#define lpfc_mbx_rd_conf_lmt_MASK 0x0000FFFF
#define lpfc_mbx_rd_conf_lmt_WORD word9
- uint32_t word10;
-#define lpfc_mbx_rd_conf_max_alpa_SHIFT 0
-#define lpfc_mbx_rd_conf_max_alpa_MASK 0x000000FF
-#define lpfc_mbx_rd_conf_max_alpa_WORD word10
- uint32_t word11_rsvd;
+ uint32_t rsvd_10;
+ uint32_t rsvd_11;
uint32_t word12;
#define lpfc_mbx_rd_conf_xri_base_SHIFT 0
#define lpfc_mbx_rd_conf_xri_base_MASK 0x0000FFFF
@@ -1857,9 +2069,6 @@ struct lpfc_mbx_read_config {
#define lpfc_mbx_rd_conf_vfi_count_MASK 0x0000FFFF
#define lpfc_mbx_rd_conf_vfi_count_WORD word15
uint32_t word16;
-#define lpfc_mbx_rd_conf_fcfi_base_SHIFT 0
-#define lpfc_mbx_rd_conf_fcfi_base_MASK 0x0000FFFF
-#define lpfc_mbx_rd_conf_fcfi_base_WORD word16
#define lpfc_mbx_rd_conf_fcfi_count_SHIFT 16
#define lpfc_mbx_rd_conf_fcfi_count_MASK 0x0000FFFF
#define lpfc_mbx_rd_conf_fcfi_count_WORD word16
@@ -2169,6 +2378,12 @@ struct lpfc_sli4_parameters {
#define cfg_fcoe_SHIFT 0
#define cfg_fcoe_MASK 0x00000001
#define cfg_fcoe_WORD word12
+#define cfg_ext_SHIFT 1
+#define cfg_ext_MASK 0x00000001
+#define cfg_ext_WORD word12
+#define cfg_hdrr_SHIFT 2
+#define cfg_hdrr_MASK 0x00000001
+#define cfg_hdrr_WORD word12
#define cfg_phwq_SHIFT 15
#define cfg_phwq_MASK 0x00000001
#define cfg_phwq_WORD word12
@@ -2198,6 +2413,145 @@ struct lpfc_mbx_get_sli4_parameters {
struct lpfc_sli4_parameters sli4_parameters;
};
+struct lpfc_rscr_desc_generic {
+#define LPFC_RSRC_DESC_WSIZE 18
+ uint32_t desc[LPFC_RSRC_DESC_WSIZE];
+};
+
+struct lpfc_rsrc_desc_pcie {
+ uint32_t word0;
+#define lpfc_rsrc_desc_pcie_type_SHIFT 0
+#define lpfc_rsrc_desc_pcie_type_MASK 0x000000ff
+#define lpfc_rsrc_desc_pcie_type_WORD word0
+#define LPFC_RSRC_DESC_TYPE_PCIE 0x40
+ uint32_t word1;
+#define lpfc_rsrc_desc_pcie_pfnum_SHIFT 0
+#define lpfc_rsrc_desc_pcie_pfnum_MASK 0x000000ff
+#define lpfc_rsrc_desc_pcie_pfnum_WORD word1
+ uint32_t reserved;
+ uint32_t word3;
+#define lpfc_rsrc_desc_pcie_sriov_sta_SHIFT 0
+#define lpfc_rsrc_desc_pcie_sriov_sta_MASK 0x000000ff
+#define lpfc_rsrc_desc_pcie_sriov_sta_WORD word3
+#define lpfc_rsrc_desc_pcie_pf_sta_SHIFT 8
+#define lpfc_rsrc_desc_pcie_pf_sta_MASK 0x000000ff
+#define lpfc_rsrc_desc_pcie_pf_sta_WORD word3
+#define lpfc_rsrc_desc_pcie_pf_type_SHIFT 16
+#define lpfc_rsrc_desc_pcie_pf_type_MASK 0x000000ff
+#define lpfc_rsrc_desc_pcie_pf_type_WORD word3
+ uint32_t word4;
+#define lpfc_rsrc_desc_pcie_nr_virtfn_SHIFT 0
+#define lpfc_rsrc_desc_pcie_nr_virtfn_MASK 0x0000ffff
+#define lpfc_rsrc_desc_pcie_nr_virtfn_WORD word4
+};
+
+struct lpfc_rsrc_desc_fcfcoe {
+ uint32_t word0;
+#define lpfc_rsrc_desc_fcfcoe_type_SHIFT 0
+#define lpfc_rsrc_desc_fcfcoe_type_MASK 0x000000ff
+#define lpfc_rsrc_desc_fcfcoe_type_WORD word0
+#define LPFC_RSRC_DESC_TYPE_FCFCOE 0x43
+ uint32_t word1;
+#define lpfc_rsrc_desc_fcfcoe_vfnum_SHIFT 0
+#define lpfc_rsrc_desc_fcfcoe_vfnum_MASK 0x000000ff
+#define lpfc_rsrc_desc_fcfcoe_vfnum_WORD word1
+#define lpfc_rsrc_desc_fcfcoe_pfnum_SHIFT 16
+#define lpfc_rsrc_desc_fcfcoe_pfnum_MASK 0x000007ff
+#define lpfc_rsrc_desc_fcfcoe_pfnum_WORD word1
+ uint32_t word2;
+#define lpfc_rsrc_desc_fcfcoe_rpi_cnt_SHIFT 0
+#define lpfc_rsrc_desc_fcfcoe_rpi_cnt_MASK 0x0000ffff
+#define lpfc_rsrc_desc_fcfcoe_rpi_cnt_WORD word2
+#define lpfc_rsrc_desc_fcfcoe_xri_cnt_SHIFT 16
+#define lpfc_rsrc_desc_fcfcoe_xri_cnt_MASK 0x0000ffff
+#define lpfc_rsrc_desc_fcfcoe_xri_cnt_WORD word2
+ uint32_t word3;
+#define lpfc_rsrc_desc_fcfcoe_wq_cnt_SHIFT 0
+#define lpfc_rsrc_desc_fcfcoe_wq_cnt_MASK 0x0000ffff
+#define lpfc_rsrc_desc_fcfcoe_wq_cnt_WORD word3
+#define lpfc_rsrc_desc_fcfcoe_rq_cnt_SHIFT 16
+#define lpfc_rsrc_desc_fcfcoe_rq_cnt_MASK 0x0000ffff
+#define lpfc_rsrc_desc_fcfcoe_rq_cnt_WORD word3
+ uint32_t word4;
+#define lpfc_rsrc_desc_fcfcoe_cq_cnt_SHIFT 0
+#define lpfc_rsrc_desc_fcfcoe_cq_cnt_MASK 0x0000ffff
+#define lpfc_rsrc_desc_fcfcoe_cq_cnt_WORD word4
+#define lpfc_rsrc_desc_fcfcoe_vpi_cnt_SHIFT 16
+#define lpfc_rsrc_desc_fcfcoe_vpi_cnt_MASK 0x0000ffff
+#define lpfc_rsrc_desc_fcfcoe_vpi_cnt_WORD word4
+ uint32_t word5;
+#define lpfc_rsrc_desc_fcfcoe_fcfi_cnt_SHIFT 0
+#define lpfc_rsrc_desc_fcfcoe_fcfi_cnt_MASK 0x0000ffff
+#define lpfc_rsrc_desc_fcfcoe_fcfi_cnt_WORD word5
+#define lpfc_rsrc_desc_fcfcoe_vfi_cnt_SHIFT 16
+#define lpfc_rsrc_desc_fcfcoe_vfi_cnt_MASK 0x0000ffff
+#define lpfc_rsrc_desc_fcfcoe_vfi_cnt_WORD word5
+ uint32_t word6;
+ uint32_t word7;
+ uint32_t word8;
+ uint32_t word9;
+ uint32_t word10;
+ uint32_t word11;
+ uint32_t word12;
+ uint32_t word13;
+#define lpfc_rsrc_desc_fcfcoe_lnk_nr_SHIFT 0
+#define lpfc_rsrc_desc_fcfcoe_lnk_nr_MASK 0x0000003f
+#define lpfc_rsrc_desc_fcfcoe_lnk_nr_WORD word13
+#define lpfc_rsrc_desc_fcfcoe_lnk_tp_SHIFT 6
+#define lpfc_rsrc_desc_fcfcoe_lnk_tp_MASK 0x00000003
+#define lpfc_rsrc_desc_fcfcoe_lnk_tp_WORD word13
+#define lpfc_rsrc_desc_fcfcoe_lmc_SHIFT 8
+#define lpfc_rsrc_desc_fcfcoe_lmc_MASK 0x00000001
+#define lpfc_rsrc_desc_fcfcoe_lmc_WORD word13
+#define lpfc_rsrc_desc_fcfcoe_lld_SHIFT 9
+#define lpfc_rsrc_desc_fcfcoe_lld_MASK 0x00000001
+#define lpfc_rsrc_desc_fcfcoe_lld_WORD word13
+#define lpfc_rsrc_desc_fcfcoe_eq_cnt_SHIFT 16
+#define lpfc_rsrc_desc_fcfcoe_eq_cnt_MASK 0x0000ffff
+#define lpfc_rsrc_desc_fcfcoe_eq_cnt_WORD word13
+};
+
+struct lpfc_func_cfg {
+#define LPFC_RSRC_DESC_MAX_NUM 2
+ uint32_t rsrc_desc_count;
+ struct lpfc_rscr_desc_generic desc[LPFC_RSRC_DESC_MAX_NUM];
+};
+
+struct lpfc_mbx_get_func_cfg {
+ struct mbox_header header;
+#define LPFC_CFG_TYPE_PERSISTENT_OVERRIDE 0x0
+#define LPFC_CFG_TYPE_FACTURY_DEFAULT 0x1
+#define LPFC_CFG_TYPE_CURRENT_ACTIVE 0x2
+ struct lpfc_func_cfg func_cfg;
+};
+
+struct lpfc_prof_cfg {
+#define LPFC_RSRC_DESC_MAX_NUM 2
+ uint32_t rsrc_desc_count;
+ struct lpfc_rscr_desc_generic desc[LPFC_RSRC_DESC_MAX_NUM];
+};
+
+struct lpfc_mbx_get_prof_cfg {
+ struct mbox_header header;
+#define LPFC_CFG_TYPE_PERSISTENT_OVERRIDE 0x0
+#define LPFC_CFG_TYPE_FACTURY_DEFAULT 0x1
+#define LPFC_CFG_TYPE_CURRENT_ACTIVE 0x2
+ union {
+ struct {
+ uint32_t word10;
+#define lpfc_mbx_get_prof_cfg_prof_id_SHIFT 0
+#define lpfc_mbx_get_prof_cfg_prof_id_MASK 0x000000ff
+#define lpfc_mbx_get_prof_cfg_prof_id_WORD word10
+#define lpfc_mbx_get_prof_cfg_prof_tp_SHIFT 8
+#define lpfc_mbx_get_prof_cfg_prof_tp_MASK 0x00000003
+#define lpfc_mbx_get_prof_cfg_prof_tp_WORD word10
+ } request;
+ struct {
+ struct lpfc_prof_cfg prof_cfg;
+ } response;
+ } u;
+};
+
/* Mailbox Completion Queue Error Messages */
#define MB_CQE_STATUS_SUCCESS 0x0
#define MB_CQE_STATUS_INSUFFICIENT_PRIVILEGES 0x1
@@ -2206,6 +2560,29 @@ struct lpfc_mbx_get_sli4_parameters {
#define MB_CEQ_STATUS_QUEUE_FLUSHING 0x4
#define MB_CQE_STATUS_DMA_FAILED 0x5
+#define LPFC_MBX_WR_CONFIG_MAX_BDE 8
+struct lpfc_mbx_wr_object {
+ struct mbox_header header;
+ union {
+ struct {
+ uint32_t word4;
+#define lpfc_wr_object_eof_SHIFT 31
+#define lpfc_wr_object_eof_MASK 0x00000001
+#define lpfc_wr_object_eof_WORD word4
+#define lpfc_wr_object_write_length_SHIFT 0
+#define lpfc_wr_object_write_length_MASK 0x00FFFFFF
+#define lpfc_wr_object_write_length_WORD word4
+ uint32_t write_offset;
+ uint32_t object_name[26];
+ uint32_t bde_count;
+ struct ulp_bde64 bde[LPFC_MBX_WR_CONFIG_MAX_BDE];
+ } request;
+ struct {
+ uint32_t actual_write_length;
+ } response;
+ } u;
+};
+
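Because each of the LPFC_MBX_WR_CONFIG_MAX_BDE (8) BDEs in the request describes one SLI4_PAGE_SIZE (4096-byte) DMA buffer, a single WR_OBJECT command can carry at most 32KB of object data; lpfc_write_firmware() in lpfc_init.c below loops over the image in chunks of this size. A sketch of that ceiling (the macro name is illustrative, not part of the patch):

	/* Illustrative upper bound on payload per WR_OBJECT mailbox. */
	#define LPFC_WR_OBJECT_MAX_BYTES \
		(LPFC_MBX_WR_CONFIG_MAX_BDE * SLI4_PAGE_SIZE)	/* 8 * 4096 = 32 KiB */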
/* mailbox queue entry structure */
struct lpfc_mqe {
uint32_t word0;
@@ -2241,6 +2618,9 @@ struct lpfc_mqe {
struct lpfc_mbx_cq_destroy cq_destroy;
struct lpfc_mbx_wq_destroy wq_destroy;
struct lpfc_mbx_rq_destroy rq_destroy;
+ struct lpfc_mbx_get_rsrc_extent_info rsrc_extent_info;
+ struct lpfc_mbx_alloc_rsrc_extents alloc_rsrc_extents;
+ struct lpfc_mbx_dealloc_rsrc_extents dealloc_rsrc_extents;
struct lpfc_mbx_post_sgl_pages post_sgl_pages;
struct lpfc_mbx_nembed_cmd nembed_cmd;
struct lpfc_mbx_read_rev read_rev;
@@ -2252,7 +2632,13 @@ struct lpfc_mqe {
struct lpfc_mbx_supp_pages supp_pages;
struct lpfc_mbx_pc_sli4_params sli4_params;
struct lpfc_mbx_get_sli4_parameters get_sli4_parameters;
+ struct lpfc_mbx_set_link_diag_state link_diag_state;
+ struct lpfc_mbx_set_link_diag_loopback link_diag_loopback;
+ struct lpfc_mbx_run_link_diag_test link_diag_test;
+ struct lpfc_mbx_get_func_cfg get_func_cfg;
+ struct lpfc_mbx_get_prof_cfg get_prof_cfg;
struct lpfc_mbx_nop nop;
+ struct lpfc_mbx_wr_object wr_object;
} un;
};
@@ -2458,7 +2844,7 @@ struct lpfc_bmbx_create {
#define SGL_ALIGN_SZ 64
#define SGL_PAGE_SIZE 4096
/* align SGL addr on a size boundary - adjust address up */
-#define NO_XRI ((uint16_t)-1)
+#define NO_XRI 0xffff
struct wqe_common {
uint32_t word6;
@@ -2798,9 +3184,28 @@ union lpfc_wqe {
struct gen_req64_wqe gen_req;
};
+#define LPFC_GROUP_OJECT_MAGIC_NUM 0xfeaa0001
+#define LPFC_FILE_TYPE_GROUP 0xf7
+#define LPFC_FILE_ID_GROUP 0xa2
+struct lpfc_grp_hdr {
+ uint32_t size;
+ uint32_t magic_number;
+ uint32_t word2;
+#define lpfc_grp_hdr_file_type_SHIFT 24
+#define lpfc_grp_hdr_file_type_MASK 0x000000FF
+#define lpfc_grp_hdr_file_type_WORD word2
+#define lpfc_grp_hdr_id_SHIFT 16
+#define lpfc_grp_hdr_id_MASK 0x000000FF
+#define lpfc_grp_hdr_id_WORD word2
+ uint8_t rev_name[128];
+};
+
#define FCP_COMMAND 0x0
#define FCP_COMMAND_DATA_OUT 0x1
#define ELS_COMMAND_NON_FIP 0xC
#define ELS_COMMAND_FIP 0xD
#define OTHER_COMMAND 0x8
+#define LPFC_FW_DUMP 1
+#define LPFC_FW_RESET 2
+#define LPFC_DV_RESET 3
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 7dda036a1af..148b98ddbb1 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -30,6 +30,7 @@
#include <linux/ctype.h>
#include <linux/aer.h>
#include <linux/slab.h>
+#include <linux/firmware.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
@@ -211,7 +212,6 @@ lpfc_config_port_prep(struct lpfc_hba *phba)
lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
if (!lpfc_vpd_data)
goto out_free_mbox;
-
do {
lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
@@ -309,6 +309,45 @@ lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
}
/**
+ * lpfc_update_vport_wwn - Update the fc_nodename and fc_portname from the
+ * cfg_soft_wwnn and cfg_soft_wwpn settings
+ * @vport: pointer to lpfc vport data structure.
+ *
+ * Return codes
+ * None.
+ **/
+void
+lpfc_update_vport_wwn(struct lpfc_vport *vport)
+{
+ /* If the soft name exists then update it using the service params */
+ if (vport->phba->cfg_soft_wwnn)
+ u64_to_wwn(vport->phba->cfg_soft_wwnn,
+ vport->fc_sparam.nodeName.u.wwn);
+ if (vport->phba->cfg_soft_wwpn)
+ u64_to_wwn(vport->phba->cfg_soft_wwpn,
+ vport->fc_sparam.portName.u.wwn);
+
+ /*
+ * If the name is empty or there exists a soft name
+ * then copy the service params name, otherwise use the fc name
+ */
+ if (vport->fc_nodename.u.wwn[0] == 0 || vport->phba->cfg_soft_wwnn)
+ memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
+ sizeof(struct lpfc_name));
+ else
+ memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename,
+ sizeof(struct lpfc_name));
+
+ if (vport->fc_portname.u.wwn[0] == 0 || vport->phba->cfg_soft_wwpn)
+ memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
+ sizeof(struct lpfc_name));
+ else
+ memcpy(&vport->fc_sparam.portName, &vport->fc_portname,
+ sizeof(struct lpfc_name));
+}
+
+/**
* lpfc_config_port_post - Perform lpfc initialization after config port
* @phba: pointer to lpfc hba data structure.
*
@@ -377,17 +416,7 @@ lpfc_config_port_post(struct lpfc_hba *phba)
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
pmb->context1 = NULL;
-
- if (phba->cfg_soft_wwnn)
- u64_to_wwn(phba->cfg_soft_wwnn,
- vport->fc_sparam.nodeName.u.wwn);
- if (phba->cfg_soft_wwpn)
- u64_to_wwn(phba->cfg_soft_wwpn,
- vport->fc_sparam.portName.u.wwn);
- memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
- sizeof (struct lpfc_name));
- memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
- sizeof (struct lpfc_name));
+ lpfc_update_vport_wwn(vport);
/* Update the fc_host data structures with new wwn. */
fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
@@ -573,7 +602,6 @@ lpfc_config_port_post(struct lpfc_hba *phba)
/* Clear all pending interrupts */
writel(0xffffffff, phba->HAregaddr);
readl(phba->HAregaddr); /* flush */
-
phba->link_state = LPFC_HBA_ERROR;
if (rc != MBX_BUSY)
mempool_free(pmb, phba->mbox_mem_pool);
@@ -1755,7 +1783,9 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
&& descp && descp[0] != '\0')
return;
- if (phba->lmt & LMT_10Gb)
+ if (phba->lmt & LMT_16Gb)
+ max_speed = 16;
+ else if (phba->lmt & LMT_10Gb)
max_speed = 10;
else if (phba->lmt & LMT_8Gb)
max_speed = 8;
@@ -1922,12 +1952,13 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
"Fibre Channel Adapter"};
break;
case PCI_DEVICE_ID_LANCER_FC:
- oneConnect = 1;
- m = (typeof(m)){"Undefined", "PCIe", "Fibre Channel Adapter"};
+ case PCI_DEVICE_ID_LANCER_FC_VF:
+ m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"};
break;
case PCI_DEVICE_ID_LANCER_FCOE:
+ case PCI_DEVICE_ID_LANCER_FCOE_VF:
oneConnect = 1;
- m = (typeof(m)){"Undefined", "PCIe", "FCoE"};
+ m = (typeof(m)){"OCe50100", "PCIe", "FCoE"};
break;
default:
m = (typeof(m)){"Unknown", "", ""};
@@ -1936,7 +1967,8 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
if (mdp && mdp[0] == '\0')
snprintf(mdp, 79,"%s", m.name);
- /* oneConnect hba requires special processing, they are all initiators
+ /*
+ * oneConnect hba requires special processing, they are all initiators
* and we put the port number on the end
*/
if (descp && descp[0] == '\0') {
@@ -2656,6 +2688,7 @@ lpfc_scsi_free(struct lpfc_hba *phba)
kfree(io);
phba->total_iocbq_bufs--;
}
+
spin_unlock_irq(&phba->hbalock);
return 0;
}
@@ -3612,6 +3645,7 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
"2718 Clear Virtual Link Received for VPI 0x%x"
" tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag);
+
vport = lpfc_find_vport_by_vpid(phba,
acqe_fip->index - phba->vpi_base);
ndlp = lpfc_sli4_perform_vport_cvl(vport);
@@ -3935,6 +3969,10 @@ lpfc_enable_pci_dev(struct lpfc_hba *phba)
pci_try_set_mwi(pdev);
pci_save_state(pdev);
+ /* PCIe EEH recovery on powerpc platforms needs fundamental reset */
+ if (pci_find_capability(pdev, PCI_CAP_ID_EXP))
+ pdev->needs_freset = 1;
+
return 0;
out_disable_device:
@@ -3997,6 +4035,36 @@ lpfc_reset_hba(struct lpfc_hba *phba)
}
/**
+ * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions
+ * @phba: pointer to lpfc hba data structure.
+ * @nr_vfn: number of virtual functions to be enabled.
+ *
+ * This function enables the PCI SR-IOV virtual functions to a physical
+ * function. It invokes the PCI SR-IOV api with the @nr_vfn provided to
+ * enable the number of virtual functions to the physical function. As
+ * not all devices support SR-IOV, failure of the pci_enable_sriov() API
+ * call is not considered an error condition for most devices.
+ **/
+int
+lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn)
+{
+ struct pci_dev *pdev = phba->pcidev;
+ int rc;
+
+ rc = pci_enable_sriov(pdev, nr_vfn);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "2806 Failed to enable sriov on this device "
+ "with vfn number nr_vf:%d, rc:%d\n",
+ nr_vfn, rc);
+	} else {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+				"2807 Successfully enabled sriov on this "
+				"device with vfn number nr_vf:%d\n", nr_vfn);
+	}
+ return rc;
+}
+
+/**
* lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev.
* @phba: pointer to lpfc hba data structure.
*
@@ -4011,6 +4079,7 @@ static int
lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
{
struct lpfc_sli *psli;
+ int rc;
/*
* Initialize timers used by driver
@@ -4085,6 +4154,23 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
return -ENOMEM;
+ /*
+ * Enable sr-iov virtual functions if supported and configured
+ * through the module parameter.
+ */
+ if (phba->cfg_sriov_nr_virtfn > 0) {
+ rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
+ phba->cfg_sriov_nr_virtfn);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "2808 Requested number of SR-IOV "
+ "virtual functions (%d) is not "
+ "supported\n",
+ phba->cfg_sriov_nr_virtfn);
+ phba->cfg_sriov_nr_virtfn = 0;
+ }
+ }
+
return 0;
}
@@ -4161,6 +4247,14 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
phba->fcf.redisc_wait.data = (unsigned long)phba;
/*
+ * Control structure for handling external multi-buffer mailbox
+ * command pass-through.
+ */
+ memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0,
+ sizeof(struct lpfc_mbox_ext_buf_ctx));
+ INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
+
+ /*
* We need to do a READ_CONFIG mailbox command here before
* calling lpfc_get_cfgparam. For VFs this will report the
* MAX_XRI, MAX_VPI, MAX_RPI, MAX_IOCB, and MAX_VFI settings.
@@ -4233,7 +4327,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
spin_lock_init(&phba->sli4_hba.abts_sgl_list_lock);
/*
- * Initialize dirver internal slow-path work queues
+ * Initialize driver internal slow-path work queues
*/
/* Driver internel slow-path CQ Event pool */
@@ -4249,6 +4343,12 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
/* Receive queue CQ Event work queue list */
INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);
+ /* Initialize extent block lists. */
+ INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list);
+ INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list);
+ INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list);
+ INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list);
+
/* Initialize the driver internal SLI layer lists. */
lpfc_sli_setup(phba);
lpfc_sli_queue_setup(phba);
@@ -4323,9 +4423,19 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
}
/*
* Get sli4 parameters that override parameters from Port capabilities.
- * If this call fails it is not a critical error so continue loading.
+ * If this call fails, it isn't critical unless the SLI4 parameters come
+ * back in conflict.
*/
- lpfc_get_sli4_parameters(phba, mboxq);
+ rc = lpfc_get_sli4_parameters(phba, mboxq);
+ if (rc) {
+ if (phba->sli4_hba.extents_in_use &&
+ phba->sli4_hba.rpi_hdrs_in_use) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2999 Unsupported SLI4 Parameters "
+ "Extents and RPI headers enabled.\n");
+ goto out_free_bsmbx;
+ }
+ }
mempool_free(mboxq, phba->mbox_mem_pool);
/* Create all the SLI4 queues */
rc = lpfc_sli4_queue_create(phba);
@@ -4350,7 +4460,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
"1430 Failed to initialize sgl list.\n");
goto out_free_sgl_list;
}
-
rc = lpfc_sli4_init_rpi_hdrs(phba);
if (rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -4366,6 +4475,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2759 Failed allocate memory for FCF round "
"robin failover bmask\n");
+ rc = -ENOMEM;
goto out_remove_rpi_hdrs;
}
@@ -4375,6 +4485,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2572 Failed allocate memory for fast-path "
"per-EQ handle array\n");
+ rc = -ENOMEM;
goto out_free_fcf_rr_bmask;
}
@@ -4384,9 +4495,27 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2573 Failed allocate memory for msi-x "
"interrupt vector entries\n");
+ rc = -ENOMEM;
goto out_free_fcp_eq_hdl;
}
+ /*
+ * Enable sr-iov virtual functions if supported and configured
+ * through the module parameter.
+ */
+ if (phba->cfg_sriov_nr_virtfn > 0) {
+ rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
+ phba->cfg_sriov_nr_virtfn);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "3020 Requested number of SR-IOV "
+ "virtual functions (%d) is not "
+ "supported\n",
+ phba->cfg_sriov_nr_virtfn);
+ phba->cfg_sriov_nr_virtfn = 0;
+ }
+ }
+
return rc;
out_free_fcp_eq_hdl:
@@ -4449,6 +4578,9 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
lpfc_sli4_cq_event_release_all(phba);
lpfc_sli4_cq_event_pool_destroy(phba);
+ /* Release resource identifiers. */
+ lpfc_sli4_dealloc_resource_identifiers(phba);
+
/* Free the bsmbx region. */
lpfc_destroy_bootstrap_mbox(phba);
@@ -4649,6 +4781,7 @@ lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
"Unloading driver.\n", __func__);
goto out_free_iocbq;
}
+ iocbq_entry->sli4_lxritag = NO_XRI;
iocbq_entry->sli4_xritag = NO_XRI;
spin_lock_irq(&phba->hbalock);
@@ -4746,7 +4879,7 @@ lpfc_init_sgl_list(struct lpfc_hba *phba)
els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
- "2400 lpfc_init_sgl_list els %d.\n",
+ "2400 ELS XRI count %d.\n",
els_xri_cnt);
/* Initialize and populate the sglq list per host/VF. */
INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list);
@@ -4779,7 +4912,6 @@ lpfc_init_sgl_list(struct lpfc_hba *phba)
phba->sli4_hba.scsi_xri_max =
phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
phba->sli4_hba.scsi_xri_cnt = 0;
-
phba->sli4_hba.lpfc_scsi_psb_array =
kzalloc((sizeof(struct lpfc_scsi_buf *) *
phba->sli4_hba.scsi_xri_max), GFP_KERNEL);
@@ -4802,13 +4934,6 @@ lpfc_init_sgl_list(struct lpfc_hba *phba)
goto out_free_mem;
}
- sglq_entry->sli4_xritag = lpfc_sli4_next_xritag(phba);
- if (sglq_entry->sli4_xritag == NO_XRI) {
- kfree(sglq_entry);
- printk(KERN_ERR "%s: failed to allocate XRI.\n"
- "Unloading driver.\n", __func__);
- goto out_free_mem;
- }
sglq_entry->buff_type = GEN_BUFF_TYPE;
sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, &sglq_entry->phys);
if (sglq_entry->virt == NULL) {
@@ -4857,24 +4982,20 @@ int
lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
{
int rc = 0;
- int longs;
- uint16_t rpi_count;
struct lpfc_rpi_hdr *rpi_hdr;
INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
-
/*
- * Provision an rpi bitmask range for discovery. The total count
- * is the difference between max and base + 1.
+ * If the SLI4 port supports extents, posting the rpi header isn't
+ * required. Set the expected maximum count and let the actual value
+ * get set when extents are fully allocated.
*/
- rpi_count = phba->sli4_hba.max_cfg_param.rpi_base +
- phba->sli4_hba.max_cfg_param.max_rpi - 1;
-
- longs = ((rpi_count) + BITS_PER_LONG - 1) / BITS_PER_LONG;
- phba->sli4_hba.rpi_bmask = kzalloc(longs * sizeof(unsigned long),
- GFP_KERNEL);
- if (!phba->sli4_hba.rpi_bmask)
- return -ENOMEM;
+ if (!phba->sli4_hba.rpi_hdrs_in_use) {
+ phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
+ return rc;
+ }
+ if (phba->sli4_hba.extents_in_use)
+ return -EIO;
rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
if (!rpi_hdr) {
@@ -4908,11 +5029,28 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
struct lpfc_rpi_hdr *rpi_hdr;
uint32_t rpi_count;
+ /*
+ * If the SLI4 port supports extents, posting the rpi header isn't
+ * required. Set the expected maximum count and let the actual value
+ * get set when extents are fully allocated.
+ */
+ if (!phba->sli4_hba.rpi_hdrs_in_use)
+ return NULL;
+ if (phba->sli4_hba.extents_in_use)
+ return NULL;
+
+ /* The limit on the logical index is just the max_rpi count. */
rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base +
- phba->sli4_hba.max_cfg_param.max_rpi - 1;
+ phba->sli4_hba.max_cfg_param.max_rpi - 1;
spin_lock_irq(&phba->hbalock);
- curr_rpi_range = phba->sli4_hba.next_rpi;
+ /*
+ * Establish the starting RPI in this header block. The starting
+ * rpi is normalized to a zero base because the physical rpi is
+ * port based.
+ */
+ curr_rpi_range = phba->sli4_hba.next_rpi -
+ phba->sli4_hba.max_cfg_param.rpi_base;
spin_unlock_irq(&phba->hbalock);
/*
@@ -4925,6 +5063,8 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
else
rpi_count = LPFC_RPI_HDR_COUNT;
+ if (!rpi_count)
+ return NULL;
/*
* First allocate the protocol header region for the port. The
* port expects a 4KB DMA-mapped memory region that is 4K aligned.
@@ -4957,12 +5097,14 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
rpi_hdr->page_count = 1;
spin_lock_irq(&phba->hbalock);
- rpi_hdr->start_rpi = phba->sli4_hba.next_rpi;
+
+ /* The rpi_hdr stores the logical index only. */
+ rpi_hdr->start_rpi = curr_rpi_range;
list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);
/*
- * The next_rpi stores the next module-64 rpi value to post
- * in any subsequent rpi memory region postings.
+ * The next_rpi stores the next logical module-64 rpi value used
+ * to post physical rpis in subsequent rpi postings.
*/
phba->sli4_hba.next_rpi += rpi_count;
spin_unlock_irq(&phba->hbalock);
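To make the logical/physical normalization concrete, a worked example with hypothetical READ_CONFIG values:

	/* Hypothetical numbers: READ_CONFIG reported rpi_base = 4096, and one
	 * 64-entry header block has already been posted, so next_rpi = 4160.
	 * The block created here therefore starts at logical index:
	 */
	curr_rpi_range = 4160 - 4096;	/* start_rpi = 64 */

The driver later maps a logical index back to its physical rpi through the sli4_hba.rpi_ids[] table, as the lpfc_mbox.c changes below show.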
@@ -4981,15 +5123,18 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
* @phba: pointer to lpfc hba data structure.
*
* This routine is invoked to remove all memory resources allocated
- * to support rpis. This routine presumes the caller has released all
- * rpis consumed by fabric or port logins and is prepared to have
- * the header pages removed.
+ * to support rpis for SLI4 ports not supporting extents. This routine
+ * presumes the caller has released all rpis consumed by fabric or port
+ * logins and is prepared to have the header pages removed.
**/
void
lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
{
struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;
+ if (!phba->sli4_hba.rpi_hdrs_in_use)
+ goto exit;
+
list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
&phba->sli4_hba.lpfc_rpi_hdr_list, list) {
list_del(&rpi_hdr->list);
@@ -4998,9 +5143,9 @@ lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
kfree(rpi_hdr->dmabuf);
kfree(rpi_hdr);
}
-
- phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base;
- memset(phba->sli4_hba.rpi_bmask, 0, sizeof(*phba->sli4_hba.rpi_bmask));
+ exit:
+ /* There are no rpis available to the port now. */
+ phba->sli4_hba.next_rpi = 0;
}
/**
@@ -5487,7 +5632,8 @@ lpfc_sli4_post_status_check(struct lpfc_hba *phba)
/* Final checks. The port status should be clean. */
if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
&reg_data.word0) ||
- bf_get(lpfc_sliport_status_err, &reg_data)) {
+ (bf_get(lpfc_sliport_status_err, &reg_data) &&
+ !bf_get(lpfc_sliport_status_rn, &reg_data))) {
phba->work_status[0] =
readl(phba->sli4_hba.u.if_type2.
ERR1regaddr);
@@ -5741,7 +5887,12 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
{
LPFC_MBOXQ_t *pmb;
struct lpfc_mbx_read_config *rd_config;
- uint32_t rc = 0;
+ union lpfc_sli4_cfg_shdr *shdr;
+ uint32_t shdr_status, shdr_add_status;
+ struct lpfc_mbx_get_func_cfg *get_func_cfg;
+ struct lpfc_rsrc_desc_fcfcoe *desc;
+ uint32_t desc_count;
+ int length, i, rc = 0;
pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!pmb) {
@@ -5763,6 +5914,8 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
rc = -EIO;
} else {
rd_config = &pmb->u.mqe.un.rd_config;
+ phba->sli4_hba.extents_in_use =
+ bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config);
phba->sli4_hba.max_cfg_param.max_xri =
bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
phba->sli4_hba.max_cfg_param.xri_base =
@@ -5781,8 +5934,6 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
phba->sli4_hba.max_cfg_param.max_fcfi =
bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
- phba->sli4_hba.max_cfg_param.fcfi_base =
- bf_get(lpfc_mbx_rd_conf_fcfi_base, rd_config);
phba->sli4_hba.max_cfg_param.max_eq =
bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
phba->sli4_hba.max_cfg_param.max_rq =
@@ -5800,11 +5951,13 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
(phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
phba->max_vports = phba->max_vpi;
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
- "2003 cfg params XRI(B:%d M:%d), "
+ "2003 cfg params Extents? %d "
+ "XRI(B:%d M:%d), "
"VPI(B:%d M:%d) "
"VFI(B:%d M:%d) "
"RPI(B:%d M:%d) "
- "FCFI(B:%d M:%d)\n",
+ "FCFI(Count:%d)\n",
+ phba->sli4_hba.extents_in_use,
phba->sli4_hba.max_cfg_param.xri_base,
phba->sli4_hba.max_cfg_param.max_xri,
phba->sli4_hba.max_cfg_param.vpi_base,
@@ -5813,10 +5966,11 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
phba->sli4_hba.max_cfg_param.max_vfi,
phba->sli4_hba.max_cfg_param.rpi_base,
phba->sli4_hba.max_cfg_param.max_rpi,
- phba->sli4_hba.max_cfg_param.fcfi_base,
phba->sli4_hba.max_cfg_param.max_fcfi);
}
- mempool_free(pmb, phba->mbox_mem_pool);
+
+ if (rc)
+ goto read_cfg_out;
/* Reset the DFT_HBA_Q_DEPTH to the max xri */
if (phba->cfg_hba_queue_depth >
@@ -5825,6 +5979,65 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
phba->cfg_hba_queue_depth =
phba->sli4_hba.max_cfg_param.max_xri -
lpfc_sli4_get_els_iocb_cnt(phba);
+
+ if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+ LPFC_SLI_INTF_IF_TYPE_2)
+ goto read_cfg_out;
+
+ /* get the pf# and vf# for SLI4 if_type 2 port */
+ length = (sizeof(struct lpfc_mbx_get_func_cfg) -
+ sizeof(struct lpfc_sli4_cfg_mhdr));
+ lpfc_sli4_config(phba, pmb, LPFC_MBOX_SUBSYSTEM_COMMON,
+ LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG,
+ length, LPFC_SLI4_MBX_EMBED);
+
+ rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
+ shdr = (union lpfc_sli4_cfg_shdr *)
+ &pmb->u.mqe.un.sli4_config.header.cfg_shdr;
+ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+ if (rc || shdr_status || shdr_add_status) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"3026 Mailbox failed, mbxCmd x%x "
+ "GET_FUNCTION_CONFIG, mbxStatus x%x\n",
+ bf_get(lpfc_mqe_command, &pmb->u.mqe),
+ bf_get(lpfc_mqe_status, &pmb->u.mqe));
+ rc = -EIO;
+ goto read_cfg_out;
+ }
+
+	/* search for fc_fcoe resource descriptor */
+ get_func_cfg = &pmb->u.mqe.un.get_func_cfg;
+ desc_count = get_func_cfg->func_cfg.rsrc_desc_count;
+
+ for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) {
+ desc = (struct lpfc_rsrc_desc_fcfcoe *)
+ &get_func_cfg->func_cfg.desc[i];
+ if (LPFC_RSRC_DESC_TYPE_FCFCOE ==
+ bf_get(lpfc_rsrc_desc_pcie_type, desc)) {
+ phba->sli4_hba.iov.pf_number =
+ bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc);
+ phba->sli4_hba.iov.vf_number =
+ bf_get(lpfc_rsrc_desc_fcfcoe_vfnum, desc);
+ break;
+ }
+ }
+
+	if (i < LPFC_RSRC_DESC_MAX_NUM) {
+		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+				"3027 GET_FUNCTION_CONFIG: pf_number:%d, "
+				"vf_number:%d\n", phba->sli4_hba.iov.pf_number,
+				phba->sli4_hba.iov.vf_number);
+	} else {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "3028 GET_FUNCTION_CONFIG: failed to find "
+				"Resource Descriptor:x%x\n",
+ LPFC_RSRC_DESC_TYPE_FCFCOE);
+ rc = -EIO;
+ }
+
+read_cfg_out:
+ mempool_free(pmb, phba->mbox_mem_pool);
return rc;
}
@@ -6229,8 +6442,10 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
phba->sli4_hba.mbx_cq = NULL;
/* Release FCP response complete queue */
- for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
+ fcp_qidx = 0;
+ do
lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]);
+ while (++fcp_qidx < phba->cfg_fcp_eq_count);
kfree(phba->sli4_hba.fcp_cq);
phba->sli4_hba.fcp_cq = NULL;
@@ -6353,16 +6568,24 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
phba->sli4_hba.sp_eq->queue_id);
/* Set up fast-path FCP Response Complete Queue */
- for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) {
+ fcp_cqidx = 0;
+ do {
if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0526 Fast-path FCP CQ (%d) not "
"allocated\n", fcp_cqidx);
goto out_destroy_fcp_cq;
}
- rc = lpfc_cq_create(phba, phba->sli4_hba.fcp_cq[fcp_cqidx],
- phba->sli4_hba.fp_eq[fcp_cqidx],
- LPFC_WCQ, LPFC_FCP);
+ if (phba->cfg_fcp_eq_count)
+ rc = lpfc_cq_create(phba,
+ phba->sli4_hba.fcp_cq[fcp_cqidx],
+ phba->sli4_hba.fp_eq[fcp_cqidx],
+ LPFC_WCQ, LPFC_FCP);
+ else
+ rc = lpfc_cq_create(phba,
+ phba->sli4_hba.fcp_cq[fcp_cqidx],
+ phba->sli4_hba.sp_eq,
+ LPFC_WCQ, LPFC_FCP);
if (rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0527 Failed setup of fast-path FCP "
@@ -6371,12 +6594,15 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
}
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"2588 FCP CQ setup: cq[%d]-id=%d, "
- "parent eq[%d]-id=%d\n",
+ "parent %seq[%d]-id=%d\n",
fcp_cqidx,
phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id,
+ (phba->cfg_fcp_eq_count) ? "" : "sp_",
fcp_cqidx,
- phba->sli4_hba.fp_eq[fcp_cqidx]->queue_id);
- }
+ (phba->cfg_fcp_eq_count) ?
+ phba->sli4_hba.fp_eq[fcp_cqidx]->queue_id :
+ phba->sli4_hba.sp_eq->queue_id);
+ } while (++fcp_cqidx < phba->cfg_fcp_eq_count);
/*
* Set up all the Work Queues (WQs)
@@ -6445,7 +6671,9 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
fcp_cq_index,
phba->sli4_hba.fcp_cq[fcp_cq_index]->queue_id);
/* Round robin FCP Work Queue's Completion Queue assignment */
- fcp_cq_index = ((fcp_cq_index + 1) % phba->cfg_fcp_eq_count);
+ if (phba->cfg_fcp_eq_count)
+ fcp_cq_index = ((fcp_cq_index + 1) %
+ phba->cfg_fcp_eq_count);
}
/*
@@ -6827,6 +7055,8 @@ lpfc_pci_function_reset(struct lpfc_hba *phba)
if (rdy_chk < 1000)
break;
}
+ /* delay driver action following IF_TYPE_2 function reset */
+ msleep(100);
break;
case LPFC_SLI_INTF_IF_TYPE_1:
default:
@@ -7419,11 +7649,15 @@ enable_msix_vectors:
/*
* Assign MSI-X vectors to interrupt handlers
*/
-
- /* The first vector must associated to slow-path handler for MQ */
- rc = request_irq(phba->sli4_hba.msix_entries[0].vector,
- &lpfc_sli4_sp_intr_handler, IRQF_SHARED,
- LPFC_SP_DRIVER_HANDLER_NAME, phba);
+ if (vectors > 1)
+ rc = request_irq(phba->sli4_hba.msix_entries[0].vector,
+ &lpfc_sli4_sp_intr_handler, IRQF_SHARED,
+ LPFC_SP_DRIVER_HANDLER_NAME, phba);
+ else
+ /* All Interrupts need to be handled by one EQ */
+ rc = request_irq(phba->sli4_hba.msix_entries[0].vector,
+ &lpfc_sli4_intr_handler, IRQF_SHARED,
+ LPFC_DRIVER_NAME, phba);
if (rc) {
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
"0485 MSI-X slow-path request_irq failed "
@@ -7765,6 +7999,7 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
{
int wait_cnt = 0;
LPFC_MBOXQ_t *mboxq;
+ struct pci_dev *pdev = phba->pcidev;
lpfc_stop_hba_timers(phba);
phba->sli4_hba.intr_enable = 0;
@@ -7804,6 +8039,10 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
/* Disable PCI subsystem interrupt */
lpfc_sli4_disable_intr(phba);
+ /* Disable SR-IOV if enabled */
+ if (phba->cfg_sriov_nr_virtfn)
+ pci_disable_sriov(pdev);
+
/* Stop kthread signal shall trigger work_done one more time */
kthread_stop(phba->worker_thread);
@@ -7878,6 +8117,11 @@ lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params);
sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params);
sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params);
+
+ /* Make sure that sge_supp_len can be handled by the driver */
+ if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
+ sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
+
return rc;
}
@@ -7902,6 +8146,13 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
int length;
struct lpfc_sli4_parameters *mbx_sli4_parameters;
+ /*
+ * By default, the driver assumes the SLI4 port requires RPI
+ * header postings. The SLI4_PARAM response will correct this
+ * assumption.
+ */
+ phba->sli4_hba.rpi_hdrs_in_use = 1;
+
/* Read the port's SLI4 Config Parameters */
length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
sizeof(struct lpfc_sli4_cfg_mhdr));
@@ -7938,6 +8189,13 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
mbx_sli4_parameters);
sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align,
mbx_sli4_parameters);
+ phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters);
+ phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters);
+
+ /* Make sure that sge_supp_len can be handled by the driver */
+ if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
+ sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
+
return 0;
}
@@ -8173,6 +8431,10 @@ lpfc_pci_remove_one_s3(struct pci_dev *pdev)
lpfc_debugfs_terminate(vport);
+ /* Disable SR-IOV if enabled */
+ if (phba->cfg_sriov_nr_virtfn)
+ pci_disable_sriov(pdev);
+
/* Disable interrupt */
lpfc_sli_disable_intr(phba);
@@ -8565,6 +8827,97 @@ lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
}
/**
+ * lpfc_write_firmware - attempt to write a firmware image to the port
+ * @phba: pointer to lpfc hba data structure.
+ * @fw: pointer to firmware image returned from request_firmware.
+ *
+ * Returns the number of bytes written if write is successful.
+ * Returns a negative error value if there were errors.
+ * Returns 0 if firmware matches currently active firmware on port.
+ **/
+int
+lpfc_write_firmware(struct lpfc_hba *phba, const struct firmware *fw)
+{
+ char fwrev[32];
+ struct lpfc_grp_hdr *image = (struct lpfc_grp_hdr *)fw->data;
+ struct list_head dma_buffer_list;
+ int i, rc = 0;
+ struct lpfc_dmabuf *dmabuf, *next;
+ uint32_t offset = 0, temp_offset = 0;
+
+ INIT_LIST_HEAD(&dma_buffer_list);
+ if ((image->magic_number != LPFC_GROUP_OJECT_MAGIC_NUM) ||
+ (bf_get(lpfc_grp_hdr_file_type, image) != LPFC_FILE_TYPE_GROUP) ||
+ (bf_get(lpfc_grp_hdr_id, image) != LPFC_FILE_ID_GROUP) ||
+ (image->size != fw->size)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3022 Invalid FW image found. "
+ "Magic:%d Type:%x ID:%x\n",
+ image->magic_number,
+ bf_get(lpfc_grp_hdr_file_type, image),
+ bf_get(lpfc_grp_hdr_id, image));
+ return -EINVAL;
+ }
+ lpfc_decode_firmware_rev(phba, fwrev, 1);
+ if (strncmp(fwrev, image->rev_name, strnlen(fwrev, 16))) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3023 Updating Firmware. Current Version:%s "
+ "New Version:%s\n",
+ fwrev, image->rev_name);
+ for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) {
+ dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
+ GFP_KERNEL);
+ if (!dmabuf) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
+ SLI4_PAGE_SIZE,
+ &dmabuf->phys,
+ GFP_KERNEL);
+ if (!dmabuf->virt) {
+ kfree(dmabuf);
+ rc = -ENOMEM;
+ goto out;
+ }
+ list_add_tail(&dmabuf->list, &dma_buffer_list);
+ }
+ while (offset < fw->size) {
+ temp_offset = offset;
+ list_for_each_entry(dmabuf, &dma_buffer_list, list) {
+				if (temp_offset + SLI4_PAGE_SIZE > fw->size) {
+					/* final partial page: copy only the tail */
+					memcpy(dmabuf->virt,
+					       fw->data + temp_offset,
+					       fw->size - temp_offset);
+					temp_offset = fw->size;
+					break;
+				}
+				/* copy the page before advancing the cursor */
+				memcpy(dmabuf->virt, fw->data + temp_offset,
+				       SLI4_PAGE_SIZE);
+				temp_offset += SLI4_PAGE_SIZE;
+ }
+ rc = lpfc_wr_object(phba, &dma_buffer_list,
+ (fw->size - offset), &offset);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3024 Firmware update failed. "
+ "%d\n", rc);
+ goto out;
+ }
+ }
+ rc = offset;
+ }
+out:
+ list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) {
+ list_del(&dmabuf->list);
+ dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
+ dmabuf->virt, dmabuf->phys);
+ kfree(dmabuf);
+ }
+ return rc;
+}
+
+/**
* lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
* @pdev: pointer to PCI device
* @pid: pointer to PCI device identifier
@@ -8591,6 +8944,10 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
int error;
uint32_t cfg_mode, intr_mode;
int mcnt;
+ int adjusted_fcp_eq_count;
+ int fcp_qidx;
+ const struct firmware *fw;
+	char file_name[16];
/* Allocate memory for HBA structure */
phba = lpfc_hba_alloc(pdev);
@@ -8688,11 +9045,25 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
error = -ENODEV;
goto out_free_sysfs_attr;
}
- /* Default to single FCP EQ for non-MSI-X */
+ /* Default to single EQ for non-MSI-X */
if (phba->intr_type != MSIX)
- phba->cfg_fcp_eq_count = 1;
- else if (phba->sli4_hba.msix_vec_nr < phba->cfg_fcp_eq_count)
- phba->cfg_fcp_eq_count = phba->sli4_hba.msix_vec_nr - 1;
+ adjusted_fcp_eq_count = 0;
+ else if (phba->sli4_hba.msix_vec_nr <
+ phba->cfg_fcp_eq_count + 1)
+ adjusted_fcp_eq_count = phba->sli4_hba.msix_vec_nr - 1;
+ else
+ adjusted_fcp_eq_count = phba->cfg_fcp_eq_count;
+ /* Free unused EQs */
+ for (fcp_qidx = adjusted_fcp_eq_count;
+ fcp_qidx < phba->cfg_fcp_eq_count;
+ fcp_qidx++) {
+ lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]);
+ /* do not delete the first fcp_cq */
+ if (fcp_qidx)
+ lpfc_sli4_queue_free(
+ phba->sli4_hba.fcp_cq[fcp_qidx]);
+ }
+ phba->cfg_fcp_eq_count = adjusted_fcp_eq_count;
/* Set up SLI-4 HBA */
if (lpfc_sli4_hba_setup(phba)) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -8731,6 +9102,14 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
/* Perform post initialization setup */
lpfc_post_init_setup(phba);
+ /* check for firmware upgrade or downgrade */
+ snprintf(file_name, 16, "%s.grp", phba->ModelName);
+ error = request_firmware(&fw, file_name, &phba->pcidev->dev);
+ if (!error) {
+ lpfc_write_firmware(phba, fw);
+ release_firmware(fw);
+ }
+
/* Check if there are static vports to be created. */
lpfc_create_static_vport(phba);
@@ -9498,6 +9877,10 @@ static struct pci_device_id lpfc_id_table[] = {
PCI_ANY_ID, PCI_ANY_ID, },
{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE,
PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FC_VF,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE_VF,
+ PCI_ANY_ID, PCI_ANY_ID, },
{ 0 }
};
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index e6ce9033f85..55676702835 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -610,7 +610,8 @@ lpfc_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, int vpi)
mb->un.varRdSparm.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm);
mb->un.varRdSparm.un.sp64.addrHigh = putPaddrHigh(mp->phys);
mb->un.varRdSparm.un.sp64.addrLow = putPaddrLow(mp->phys);
- mb->un.varRdSparm.vpi = vpi + phba->vpi_base;
+ if (phba->sli_rev >= LPFC_SLI_REV3)
+ mb->un.varRdSparm.vpi = phba->vpi_ids[vpi];
/* save address for completion */
pmb->context1 = mp;
@@ -643,9 +644,10 @@ lpfc_unreg_did(struct lpfc_hba * phba, uint16_t vpi, uint32_t did,
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
mb->un.varUnregDID.did = did;
- if (vpi != 0xffff)
- vpi += phba->vpi_base;
mb->un.varUnregDID.vpi = vpi;
+ if ((vpi != 0xffff) &&
+ (phba->sli_rev == LPFC_SLI_REV4))
+ mb->un.varUnregDID.vpi = phba->vpi_ids[vpi];
mb->mbxCommand = MBX_UNREG_D_ID;
mb->mbxOwner = OWN_HOST;
@@ -738,12 +740,10 @@ lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
mb->un.varRegLogin.rpi = 0;
- if (phba->sli_rev == LPFC_SLI_REV4) {
- mb->un.varRegLogin.rpi = rpi;
- if (mb->un.varRegLogin.rpi == LPFC_RPI_ALLOC_ERROR)
- return 1;
- }
- mb->un.varRegLogin.vpi = vpi + phba->vpi_base;
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ mb->un.varRegLogin.rpi = phba->sli4_hba.rpi_ids[rpi];
+ if (phba->sli_rev >= LPFC_SLI_REV3)
+ mb->un.varRegLogin.vpi = phba->vpi_ids[vpi];
mb->un.varRegLogin.did = did;
mb->mbxOwner = OWN_HOST;
/* Get a buffer to hold NPorts Service Parameters */
@@ -757,7 +757,7 @@ lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
"0302 REG_LOGIN: no buffers, VPI:%d DID:x%x, "
"rpi x%x\n", vpi, did, rpi);
- return (1);
+ return 1;
}
INIT_LIST_HEAD(&mp->list);
sparam = mp->virt;
@@ -773,7 +773,7 @@ lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
mb->un.varRegLogin.un.sp64.addrHigh = putPaddrHigh(mp->phys);
mb->un.varRegLogin.un.sp64.addrLow = putPaddrLow(mp->phys);
- return (0);
+ return 0;
}
/**
@@ -789,6 +789,9 @@ lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
*
* This routine prepares the mailbox command for unregistering remote port
* login.
+ *
+ * For SLI4 ports, the rpi passed to this function must be the physical
+ * rpi value, not the logical index.
**/
void
lpfc_unreg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t rpi,
@@ -799,9 +802,10 @@ lpfc_unreg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t rpi,
mb = &pmb->u.mb;
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
- mb->un.varUnregLogin.rpi = (uint16_t) rpi;
+ mb->un.varUnregLogin.rpi = rpi;
mb->un.varUnregLogin.rsvd1 = 0;
- mb->un.varUnregLogin.vpi = vpi + phba->vpi_base;
+ if (phba->sli_rev >= LPFC_SLI_REV3)
+ mb->un.varUnregLogin.vpi = phba->vpi_ids[vpi];
mb->mbxCommand = MBX_UNREG_LOGIN;
mb->mbxOwner = OWN_HOST;
@@ -825,9 +829,16 @@ lpfc_sli4_unreg_all_rpis(struct lpfc_vport *vport)
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (mbox) {
- lpfc_unreg_login(phba, vport->vpi,
- vport->vpi + phba->vpi_base, mbox);
- mbox->u.mb.un.varUnregLogin.rsvd1 = 0x4000 ;
+ /*
+ * For SLI4 functions, the rpi field is overloaded for
+ * the vport context unreg all. This routine passes
+ * 0 for the rpi field in lpfc_unreg_login for compatibility
+ * with SLI3 and then overrides the rpi field with the
+ * expected value for SLI4.
+ */
+ lpfc_unreg_login(phba, vport->vpi, phba->vpi_ids[vport->vpi],
+ mbox);
+ mbox->u.mb.un.varUnregLogin.rsvd1 = 0x4000;
mbox->vport = vport;
mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
mbox->context1 = NULL;
@@ -865,9 +876,13 @@ lpfc_reg_vpi(struct lpfc_vport *vport, LPFC_MBOXQ_t *pmb)
if ((phba->sli_rev == LPFC_SLI_REV4) &&
!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI))
mb->un.varRegVpi.upd = 1;
- mb->un.varRegVpi.vpi = vport->vpi + vport->phba->vpi_base;
+
+ mb->un.varRegVpi.vpi = phba->vpi_ids[vport->vpi];
mb->un.varRegVpi.sid = vport->fc_myDID;
- mb->un.varRegVpi.vfi = vport->vfi + vport->phba->vfi_base;
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ mb->un.varRegVpi.vfi = phba->sli4_hba.vfi_ids[vport->vfi];
+ else
+ mb->un.varRegVpi.vfi = vport->vfi + vport->phba->vfi_base;
memcpy(mb->un.varRegVpi.wwn, &vport->fc_portname,
sizeof(struct lpfc_name));
mb->un.varRegVpi.wwn[0] = cpu_to_le32(mb->un.varRegVpi.wwn[0]);
@@ -901,10 +916,10 @@ lpfc_unreg_vpi(struct lpfc_hba *phba, uint16_t vpi, LPFC_MBOXQ_t *pmb)
MAILBOX_t *mb = &pmb->u.mb;
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
- if (phba->sli_rev < LPFC_SLI_REV4)
- mb->un.varUnregVpi.vpi = vpi + phba->vpi_base;
- else
- mb->un.varUnregVpi.sli4_vpi = vpi + phba->vpi_base;
+ if (phba->sli_rev == LPFC_SLI_REV3)
+ mb->un.varUnregVpi.vpi = phba->vpi_ids[vpi];
+ else if (phba->sli_rev >= LPFC_SLI_REV4)
+ mb->un.varUnregVpi.sli4_vpi = phba->vpi_ids[vpi];
mb->mbxCommand = MBX_UNREG_VPI;
mb->mbxOwner = OWN_HOST;
@@ -1735,12 +1750,12 @@ lpfc_sli4_config(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
return length;
}
- /* Setup for the none-embedded mbox command */
+ /* Setup for the non-embedded mbox command */
pcount = (SLI4_PAGE_ALIGN(length))/SLI4_PAGE_SIZE;
pcount = (pcount > LPFC_SLI4_MBX_SGE_MAX_PAGES) ?
LPFC_SLI4_MBX_SGE_MAX_PAGES : pcount;
/* Allocate record for keeping SGE virtual addresses */
- mbox->sge_array = kmalloc(sizeof(struct lpfc_mbx_nembed_sge_virt),
+ mbox->sge_array = kzalloc(sizeof(struct lpfc_mbx_nembed_sge_virt),
GFP_KERNEL);
if (!mbox->sge_array) {
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
@@ -1790,12 +1805,87 @@ lpfc_sli4_config(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
/* The sub-header is in DMA memory, which needs endian converstion */
if (cfg_shdr)
lpfc_sli_pcimem_bcopy(cfg_shdr, cfg_shdr,
- sizeof(union lpfc_sli4_cfg_shdr));
-
+ sizeof(union lpfc_sli4_cfg_shdr));
return alloc_len;
}
/**
+ * lpfc_sli4_mbox_rsrc_extent - Initialize the opcode resource extent.
+ * @phba: pointer to lpfc hba data structure.
+ * @mbox: pointer to an allocated lpfc mbox resource.
+ * @exts_count: the number of extents, if required, to allocate.
+ * @rsrc_type: the resource extent type.
+ * @emb: true if LPFC_SLI4_MBX_EMBED. false if LPFC_SLI4_MBX_NEMBED.
+ *
+ * This routine completes the subcommand header for SLI4 resource extent
+ * mailbox commands. It is called after lpfc_sli4_config. The caller must
+ * pass an allocated mailbox and the attributes required to initialize the
+ * mailbox correctly.
+ *
+ * Return: the actual length of the mbox command allocated.
+ **/
+int
+lpfc_sli4_mbox_rsrc_extent(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
+ uint16_t exts_count, uint16_t rsrc_type, bool emb)
+{
+ uint8_t opcode = 0;
+ struct lpfc_mbx_nembed_rsrc_extent *n_rsrc_extnt = NULL;
+ void *virtaddr = NULL;
+
+ /* Set up SLI4 ioctl command header fields */
+ if (emb == LPFC_SLI4_MBX_NEMBED) {
+ /* Get the first SGE entry from the non-embedded DMA memory */
+ virtaddr = mbox->sge_array->addr[0];
+ if (virtaddr == NULL)
+ return 1;
+ n_rsrc_extnt = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
+ }
+
+ /*
+ * The resource type is common to all extent Opcodes and resides in the
+ * same position.
+ */
+ if (emb == LPFC_SLI4_MBX_EMBED)
+ bf_set(lpfc_mbx_alloc_rsrc_extents_type,
+ &mbox->u.mqe.un.alloc_rsrc_extents.u.req,
+ rsrc_type);
+ else {
+ /* This is DMA data. Byteswap is required. */
+ bf_set(lpfc_mbx_alloc_rsrc_extents_type,
+ n_rsrc_extnt, rsrc_type);
+ lpfc_sli_pcimem_bcopy(&n_rsrc_extnt->word4,
+ &n_rsrc_extnt->word4,
+ sizeof(uint32_t));
+ }
+
+ /* Complete the initialization for the particular Opcode. */
+ opcode = lpfc_sli4_mbox_opcode_get(phba, mbox);
+ switch (opcode) {
+ case LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT:
+ if (emb == LPFC_SLI4_MBX_EMBED)
+ bf_set(lpfc_mbx_alloc_rsrc_extents_cnt,
+ &mbox->u.mqe.un.alloc_rsrc_extents.u.req,
+ exts_count);
+ else
+ bf_set(lpfc_mbx_alloc_rsrc_extents_cnt,
+ n_rsrc_extnt, exts_count);
+ break;
+ case LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT:
+ case LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO:
+ case LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT:
+		/* Initialization is complete. */
+ break;
+ default:
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+ "2929 Resource Extent Opcode x%x is "
+ "unsupported\n", opcode);
+ return 1;
+ }
+
+ return 0;
+}
+
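A typical caller pairs this routine with lpfc_sli4_config(); a minimal sketch for the embedded ALLOC case (error handling trimmed, and extnt_cnt/rsrc_type stand in for values the caller already holds):

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = sizeof(struct lpfc_mbx_alloc_rsrc_extents) -
		 sizeof(struct lpfc_sli4_cfg_mhdr);
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT,
			 length, LPFC_SLI4_MBX_EMBED);
	rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, extnt_cnt, rsrc_type,
					LPFC_SLI4_MBX_EMBED);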
+/**
* lpfc_sli4_mbox_opcode_get - Get the opcode from a sli4 mailbox command
* @phba: pointer to lpfc hba data structure.
* @mbox: pointer to lpfc mbox command.
@@ -1939,9 +2029,12 @@ lpfc_init_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport)
bf_set(lpfc_init_vfi_vr, init_vfi, 1);
bf_set(lpfc_init_vfi_vt, init_vfi, 1);
bf_set(lpfc_init_vfi_vp, init_vfi, 1);
- bf_set(lpfc_init_vfi_vfi, init_vfi, vport->vfi + vport->phba->vfi_base);
- bf_set(lpfc_init_vpi_vpi, init_vfi, vport->vpi + vport->phba->vpi_base);
- bf_set(lpfc_init_vfi_fcfi, init_vfi, vport->phba->fcf.fcfi);
+ bf_set(lpfc_init_vfi_vfi, init_vfi,
+ vport->phba->sli4_hba.vfi_ids[vport->vfi]);
+ bf_set(lpfc_init_vpi_vpi, init_vfi,
+ vport->phba->vpi_ids[vport->vpi]);
+ bf_set(lpfc_init_vfi_fcfi, init_vfi,
+ vport->phba->fcf.fcfi);
}
/**
@@ -1964,9 +2057,10 @@ lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys)
reg_vfi = &mbox->u.mqe.un.reg_vfi;
bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_REG_VFI);
bf_set(lpfc_reg_vfi_vp, reg_vfi, 1);
- bf_set(lpfc_reg_vfi_vfi, reg_vfi, vport->vfi + vport->phba->vfi_base);
+ bf_set(lpfc_reg_vfi_vfi, reg_vfi,
+ vport->phba->sli4_hba.vfi_ids[vport->vfi]);
bf_set(lpfc_reg_vfi_fcfi, reg_vfi, vport->phba->fcf.fcfi);
- bf_set(lpfc_reg_vfi_vpi, reg_vfi, vport->vpi + vport->phba->vpi_base);
+ bf_set(lpfc_reg_vfi_vpi, reg_vfi, vport->phba->vpi_ids[vport->vpi]);
memcpy(reg_vfi->wwn, &vport->fc_portname, sizeof(struct lpfc_name));
reg_vfi->wwn[0] = cpu_to_le32(reg_vfi->wwn[0]);
reg_vfi->wwn[1] = cpu_to_le32(reg_vfi->wwn[1]);
@@ -1997,9 +2091,9 @@ lpfc_init_vpi(struct lpfc_hba *phba, struct lpfcMboxq *mbox, uint16_t vpi)
memset(mbox, 0, sizeof(*mbox));
bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_INIT_VPI);
bf_set(lpfc_init_vpi_vpi, &mbox->u.mqe.un.init_vpi,
- vpi + phba->vpi_base);
+ phba->vpi_ids[vpi]);
bf_set(lpfc_init_vpi_vfi, &mbox->u.mqe.un.init_vpi,
- phba->pport->vfi + phba->vfi_base);
+ phba->sli4_hba.vfi_ids[phba->pport->vfi]);
}
/**
@@ -2019,7 +2113,7 @@ lpfc_unreg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport)
memset(mbox, 0, sizeof(*mbox));
bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_UNREG_VFI);
bf_set(lpfc_unreg_vfi_vfi, &mbox->u.mqe.un.unreg_vfi,
- vport->vfi + vport->phba->vfi_base);
+ vport->phba->sli4_hba.vfi_ids[vport->vfi]);
}
/**
@@ -2131,12 +2225,14 @@ lpfc_unreg_fcfi(struct lpfcMboxq *mbox, uint16_t fcfi)
void
lpfc_resume_rpi(struct lpfcMboxq *mbox, struct lpfc_nodelist *ndlp)
{
+ struct lpfc_hba *phba = ndlp->phba;
struct lpfc_mbx_resume_rpi *resume_rpi;
memset(mbox, 0, sizeof(*mbox));
resume_rpi = &mbox->u.mqe.un.resume_rpi;
bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_RESUME_RPI);
- bf_set(lpfc_resume_rpi_index, resume_rpi, ndlp->nlp_rpi);
+ bf_set(lpfc_resume_rpi_index, resume_rpi,
+ phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
bf_set(lpfc_resume_rpi_ii, resume_rpi, RESUME_INDEX_RPI);
resume_rpi->event_tag = ndlp->phba->fc_eventTag;
}
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index cbb48ee8b0b..10d5b5e4149 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -62,7 +62,6 @@ int
lpfc_mem_alloc(struct lpfc_hba *phba, int align)
{
struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
- int longs;
int i;
if (phba->sli_rev == LPFC_SLI_REV4)
@@ -138,17 +137,8 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align)
phba->lpfc_hrb_pool = NULL;
phba->lpfc_drb_pool = NULL;
}
- /* vpi zero is reserved for the physical port so add 1 to max */
- longs = ((phba->max_vpi + 1) + BITS_PER_LONG - 1) / BITS_PER_LONG;
- phba->vpi_bmask = kzalloc(longs * sizeof(unsigned long), GFP_KERNEL);
- if (!phba->vpi_bmask)
- goto fail_free_dbq_pool;
return 0;
-
- fail_free_dbq_pool:
- pci_pool_destroy(phba->lpfc_drb_pool);
- phba->lpfc_drb_pool = NULL;
fail_free_hrb_pool:
pci_pool_destroy(phba->lpfc_hrb_pool);
phba->lpfc_hrb_pool = NULL;
@@ -191,9 +181,6 @@ lpfc_mem_free(struct lpfc_hba *phba)
int i;
struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
- /* Free VPI bitmask memory */
- kfree(phba->vpi_bmask);
-
/* Free HBQ pools */
lpfc_sli_hbqbuf_free_all(phba);
if (phba->lpfc_drb_pool)
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 0d92d4205ea..2ddd02f7c60 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -350,11 +350,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ndlp->nlp_maxframe =
((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb;
- /*
- * Need to unreg_login if we are already in one of these states and
- * change to NPR state. This will block the port until after the ACC
- * completes and the reg_login is issued and completed.
- */
+ /* No need to reg_login if we are already in one of these states. */
switch (ndlp->nlp_state) {
case NLP_STE_NPR_NODE:
if (!(ndlp->nlp_flag & NLP_NPR_ADISC))
@@ -363,9 +359,8 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
case NLP_STE_PRLI_ISSUE:
case NLP_STE_UNMAPPED_NODE:
case NLP_STE_MAPPED_NODE:
- lpfc_unreg_rpi(vport, ndlp);
- ndlp->nlp_prev_state = ndlp->nlp_state;
- lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+ lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL);
+ return 1;
}
if ((vport->fc_flag & FC_PT2PT) &&
@@ -657,6 +652,7 @@ lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
lpfc_unreg_rpi(vport, ndlp);
return 0;
}
+
/**
* lpfc_release_rpi - Release a RPI by issuing unreg_login mailbox cmd.
* @phba : Pointer to lpfc_hba structure.
@@ -1399,8 +1395,11 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
if (mb->mbxStatus) {
/* RegLogin failed */
lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
- "0246 RegLogin failed Data: x%x x%x x%x\n",
- did, mb->mbxStatus, vport->port_state);
+ "0246 RegLogin failed Data: x%x x%x x%x x%x "
+ "x%x\n",
+ did, mb->mbxStatus, vport->port_state,
+ mb->un.varRegLogin.vpi,
+ mb->un.varRegLogin.rpi);
/*
* If RegLogin failed due to lack of HBA resources do not
* retry discovery.
@@ -1424,7 +1423,10 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
return ndlp->nlp_state;
}
- ndlp->nlp_rpi = mb->un.varWords[0];
+ /* SLI4 ports have preallocated logical rpis. */
+ if (vport->phba->sli_rev < LPFC_SLI_REV4)
+ ndlp->nlp_rpi = mb->un.varWords[0];
+
ndlp->nlp_flag |= NLP_RPI_REGISTERED;
/* Only if we are not a fabric nport do we issue PRLI */
@@ -2025,7 +2027,9 @@ lpfc_cmpl_reglogin_npr_node(struct lpfc_vport *vport,
MAILBOX_t *mb = &pmb->u.mb;
if (!mb->mbxStatus) {
- ndlp->nlp_rpi = mb->un.varWords[0];
+ /* SLI4 ports have preallocated logical rpis. */
+ if (vport->phba->sli_rev < LPFC_SLI_REV4)
+ ndlp->nlp_rpi = mb->un.varWords[0];
ndlp->nlp_flag |= NLP_RPI_REGISTERED;
} else {
if (ndlp->nlp_flag & NLP_NODEV_REMOVE) {
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 84e4481b240..3ccc97496eb 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -743,7 +743,14 @@ lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba)
if (bcnt == 0)
continue;
/* Now, post the SCSI buffer list sgls as a block */
- status = lpfc_sli4_post_scsi_sgl_block(phba, &sblist, bcnt);
+ if (!phba->sli4_hba.extents_in_use)
+ status = lpfc_sli4_post_scsi_sgl_block(phba,
+ &sblist,
+ bcnt);
+ else
+ status = lpfc_sli4_post_scsi_sgl_blk_ext(phba,
+ &sblist,
+ bcnt);
/* Reset SCSI buffer count for next round of posting */
bcnt = 0;
while (!list_empty(&sblist)) {
@@ -787,7 +794,7 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
dma_addr_t pdma_phys_fcp_cmd;
dma_addr_t pdma_phys_fcp_rsp;
dma_addr_t pdma_phys_bpl, pdma_phys_bpl1;
- uint16_t iotag, last_xritag = NO_XRI;
+ uint16_t iotag, last_xritag = NO_XRI, lxri = 0;
int status = 0, index;
int bcnt;
int non_sequential_xri = 0;
@@ -823,13 +830,15 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
break;
}
- psb->cur_iocbq.sli4_xritag = lpfc_sli4_next_xritag(phba);
- if (psb->cur_iocbq.sli4_xritag == NO_XRI) {
+ lxri = lpfc_sli4_next_xritag(phba);
+ if (lxri == NO_XRI) {
pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
psb->data, psb->dma_handle);
kfree(psb);
break;
}
+ psb->cur_iocbq.sli4_lxritag = lxri;
+ psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
if (last_xritag != NO_XRI
&& psb->cur_iocbq.sli4_xritag != (last_xritag+1)) {
non_sequential_xri = 1;
@@ -861,6 +870,7 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
*/
sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd));
sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd));
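+ /* Pull word2 into host order so bf_set can update the bit fields. */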
+ sgl->word2 = le32_to_cpu(sgl->word2);
bf_set(lpfc_sli4_sge_last, sgl, 0);
sgl->word2 = cpu_to_le32(sgl->word2);
sgl->sge_len = cpu_to_le32(sizeof(struct fcp_cmnd));
@@ -869,6 +879,7 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
/* Setup the physical region for the FCP RSP */
sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp));
sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp));
+ sgl->word2 = le32_to_cpu(sgl->word2);
bf_set(lpfc_sli4_sge_last, sgl, 1);
sgl->word2 = cpu_to_le32(sgl->word2);
sgl->sge_len = cpu_to_le32(sizeof(struct fcp_rsp));
@@ -914,7 +925,21 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
}
}
if (bcnt) {
- status = lpfc_sli4_post_scsi_sgl_block(phba, &sblist, bcnt);
+ if (!phba->sli4_hba.extents_in_use)
+ status = lpfc_sli4_post_scsi_sgl_block(phba,
+ &sblist,
+ bcnt);
+ else
+ status = lpfc_sli4_post_scsi_sgl_blk_ext(phba,
+ &sblist,
+ bcnt);
+
+ if (status) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ "3021 SCSI SGL post error %d\n",
+ status);
+ bcnt = 0;
+ }
/* Reset SCSI buffer count for next round of posting */
while (!list_empty(&sblist)) {
list_remove_head(&sblist, psb, struct lpfc_scsi_buf,
@@ -2081,6 +2106,7 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
dma_len = sg_dma_len(sgel);
sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
+ sgl->word2 = le32_to_cpu(sgl->word2);
if ((num_bde + 1) == nseg)
bf_set(lpfc_sli4_sge_last, sgl, 1);
else
@@ -2794,6 +2820,9 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
* of the scsi_cmnd request_buffer
*/
piocbq->iocb.ulpContext = pnode->nlp_rpi;
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ piocbq->iocb.ulpContext =
+ phba->sli4_hba.rpi_ids[pnode->nlp_rpi];
if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
piocbq->iocb.ulpFCP2Rcvy = 1;
else
@@ -2807,7 +2836,7 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
}
/**
- * lpfc_scsi_prep_task_mgmt_cmnd - Convert SLI3 scsi TM cmd to FCP info unit
+ * lpfc_scsi_prep_task_mgmt_cmd - Convert SLI3 scsi TM cmd to FCP info unit
* @vport: The virtual port for which this call is being executed.
* @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
* @lun: Logical unit number.
@@ -2851,6 +2880,10 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd);
piocb->ulpCommand = CMD_FCP_ICMND64_CR;
piocb->ulpContext = ndlp->nlp_rpi;
+ if (vport->phba->sli_rev == LPFC_SLI_REV4) {
+ piocb->ulpContext =
+ vport->phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
+ }
if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
piocb->ulpFCP2Rcvy = 1;
}
@@ -3405,9 +3438,10 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,
lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
"0702 Issue %s to TGT %d LUN %d "
- "rpi x%x nlp_flag x%x\n",
+ "rpi x%x nlp_flag x%x Data: x%x x%x\n",
lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id,
- pnode->nlp_rpi, pnode->nlp_flag);
+ pnode->nlp_rpi, pnode->nlp_flag, iocbq->sli4_xritag,
+ iocbq->iocb_flag);
status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
iocbq, iocbqrsp, lpfc_cmd->timeout);
@@ -3419,10 +3453,12 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,
ret = FAILED;
lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
- "0727 TMF %s to TGT %d LUN %d failed (%d, %d)\n",
+ "0727 TMF %s to TGT %d LUN %d failed (%d, %d) "
+ "iocb_flag x%x\n",
lpfc_taskmgmt_name(task_mgmt_cmd),
tgt_id, lun_id, iocbqrsp->iocb.ulpStatus,
- iocbqrsp->iocb.un.ulpWord[4]);
+ iocbqrsp->iocb.un.ulpWord[4],
+ iocbq->iocb_flag);
} else if (status == IOCB_BUSY)
ret = FAILED;
else
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index fd5835e1c03..98999bbd8cb 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -65,6 +65,9 @@ static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
struct lpfc_iocbq *);
static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
struct hbq_dmabuf *);
+static int lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *, struct lpfc_queue *,
+ struct lpfc_cqe *);
+
static IOCB_t *
lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
{
@@ -456,7 +459,6 @@ __lpfc_sli_get_iocbq(struct lpfc_hba *phba)
struct lpfc_iocbq * iocbq = NULL;
list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
-
if (iocbq)
phba->iocb_cnt++;
if (phba->iocb_cnt > phba->iocb_max)
@@ -479,13 +481,10 @@ __lpfc_sli_get_iocbq(struct lpfc_hba *phba)
static struct lpfc_sglq *
__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
- uint16_t adj_xri;
struct lpfc_sglq *sglq;
- adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
- if (adj_xri > phba->sli4_hba.max_cfg_param.max_xri)
- return NULL;
- sglq = phba->sli4_hba.lpfc_sglq_active_list[adj_xri];
- phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = NULL;
+
+ sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
+ phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL;
return sglq;
}
@@ -504,12 +503,9 @@ __lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
struct lpfc_sglq *
__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
- uint16_t adj_xri;
struct lpfc_sglq *sglq;
- adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
- if (adj_xri > phba->sli4_hba.max_cfg_param.max_xri)
- return NULL;
- sglq = phba->sli4_hba.lpfc_sglq_active_list[adj_xri];
+
+ sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
return sglq;
}
@@ -532,7 +528,6 @@ static int
__lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
{
- uint16_t adj_xri;
struct lpfc_node_rrq *rrq;
int empty;
uint32_t did = 0;
@@ -553,21 +548,19 @@ __lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
/*
* set the active bit even if there is no mem available.
*/
- adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
-
if (NLP_CHK_FREE_REQ(ndlp))
goto out;
if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
goto out;
- if (test_and_set_bit(adj_xri, ndlp->active_rrqs.xri_bitmap))
+ if (test_and_set_bit(xritag, ndlp->active_rrqs.xri_bitmap))
goto out;
rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL);
if (rrq) {
rrq->send_rrq = send_rrq;
- rrq->xritag = xritag;
+ rrq->xritag = phba->sli4_hba.xri_ids[xritag];
rrq->rrq_stop_time = jiffies + HZ * (phba->fc_ratov + 1);
rrq->ndlp = ndlp;
rrq->nlp_DID = ndlp->nlp_DID;
@@ -603,7 +596,6 @@ lpfc_clr_rrq_active(struct lpfc_hba *phba,
uint16_t xritag,
struct lpfc_node_rrq *rrq)
{
- uint16_t adj_xri;
struct lpfc_nodelist *ndlp = NULL;
if ((rrq->vport) && NLP_CHK_NODE_ACT(rrq->ndlp))
@@ -619,8 +611,7 @@ lpfc_clr_rrq_active(struct lpfc_hba *phba,
if (!ndlp)
goto out;
- adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
- if (test_and_clear_bit(adj_xri, ndlp->active_rrqs.xri_bitmap)) {
+ if (test_and_clear_bit(xritag, ndlp->active_rrqs.xri_bitmap)) {
rrq->send_rrq = 0;
rrq->xritag = 0;
rrq->rrq_stop_time = 0;
@@ -796,12 +787,9 @@ int
lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
uint16_t xritag)
{
- uint16_t adj_xri;
-
- adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
if (!ndlp)
return 0;
- if (test_bit(adj_xri, ndlp->active_rrqs.xri_bitmap))
+ if (test_bit(xritag, ndlp->active_rrqs.xri_bitmap))
return 1;
else
return 0;
@@ -841,7 +829,7 @@ lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
* @piocb: Pointer to the iocbq.
*
* This function is called with hbalock held. This function
- * Gets a new driver sglq object from the sglq list. If the
+ * gets a new driver sglq object from the sglq list. If the
 * list is not empty, it returns a pointer to the newly removed sglq
 * object; otherwise it returns NULL.
**/
@@ -851,7 +839,6 @@ __lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
struct list_head *lpfc_sgl_list = &phba->sli4_hba.lpfc_sgl_list;
struct lpfc_sglq *sglq = NULL;
struct lpfc_sglq *start_sglq = NULL;
- uint16_t adj_xri;
struct lpfc_scsi_buf *lpfc_cmd;
struct lpfc_nodelist *ndlp;
int found = 0;
@@ -870,8 +857,6 @@ __lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
while (!found) {
if (!sglq)
return NULL;
- adj_xri = sglq->sli4_xritag -
- phba->sli4_hba.max_cfg_param.xri_base;
if (lpfc_test_rrq_active(phba, ndlp, sglq->sli4_xritag)) {
/* This xri has an rrq outstanding for this DID.
* put it back in the list and get another xri.
@@ -888,7 +873,7 @@ __lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
}
sglq->ndlp = ndlp;
found = 1;
- phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = sglq;
+ phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
sglq->state = SGL_ALLOCATED;
}
return sglq;
@@ -944,7 +929,8 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
if (iocbq->sli4_xritag == NO_XRI)
sglq = NULL;
else
- sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_xritag);
+ sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag);
+
if (sglq) {
if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
(sglq->state != SGL_XRI_ABORTED)) {
@@ -971,6 +957,7 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
* Clean all volatile data fields, preserve iotag and node struct.
*/
memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
+ iocbq->sli4_lxritag = NO_XRI;
iocbq->sli4_xritag = NO_XRI;
list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}
@@ -2113,7 +2100,7 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
!pmb->u.mb.mbxStatus) {
rpi = pmb->u.mb.un.varWords[0];
- vpi = pmb->u.mb.un.varRegLogin.vpi - phba->vpi_base;
+ vpi = pmb->u.mb.un.varRegLogin.vpi;
lpfc_unreg_login(phba, vpi, rpi, pmb);
pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
@@ -3881,8 +3868,10 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba)
list_del_init(&phba->sli4_hba.els_cq->list);
for (qindx = 0; qindx < phba->cfg_fcp_wq_count; qindx++)
list_del_init(&phba->sli4_hba.fcp_wq[qindx]->list);
- for (qindx = 0; qindx < phba->cfg_fcp_eq_count; qindx++)
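+ /*
+ * Use a do/while here so at least one fcp_cq list entry is
+ * deleted even when cfg_fcp_eq_count is 0 (presumably the
+ * single interrupt vector case).
+ */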
+ qindx = 0;
+ do
list_del_init(&phba->sli4_hba.fcp_cq[qindx]->list);
+ while (++qindx < phba->cfg_fcp_eq_count);
spin_unlock_irq(&phba->hbalock);
/* Now physically reset the device */
@@ -4318,6 +4307,7 @@ lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
continue;
} else if (rc)
break;
+
phba->link_state = LPFC_INIT_MBX_CMDS;
lpfc_config_port(phba, pmb);
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
@@ -4421,7 +4411,8 @@ int
lpfc_sli_hba_setup(struct lpfc_hba *phba)
{
uint32_t rc;
- int mode = 3;
+ int mode = 3, i;
+ int longs;
switch (lpfc_sli_mode) {
case 2:
@@ -4491,6 +4482,35 @@ lpfc_sli_hba_setup(struct lpfc_hba *phba)
if (rc)
goto lpfc_sli_hba_setup_error;
+ /* Initialize VPIs. */
+ if (phba->sli_rev == LPFC_SLI_REV3) {
+ /*
+ * The VPI bitmask and physical ID array are allocated
+ * and initialized once only - at driver load. A port
+ * reset doesn't need to reinitialize this memory.
+ */
+ if ((phba->vpi_bmask == NULL) && (phba->vpi_ids == NULL)) {
+ longs = (phba->max_vpi + BITS_PER_LONG) / BITS_PER_LONG;
+ phba->vpi_bmask = kzalloc(longs * sizeof(unsigned long),
+ GFP_KERNEL);
+ if (!phba->vpi_bmask) {
+ rc = -ENOMEM;
+ goto lpfc_sli_hba_setup_error;
+ }
+
+ phba->vpi_ids = kzalloc(
+ (phba->max_vpi+1) * sizeof(uint16_t),
+ GFP_KERNEL);
+ if (!phba->vpi_ids) {
+ kfree(phba->vpi_bmask);
+ rc = -ENOMEM;
+ goto lpfc_sli_hba_setup_error;
+ }
+ for (i = 0; i < phba->max_vpi; i++)
+ phba->vpi_ids[i] = i;
+ }
+ }
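+ /*
+ * Example for the block above (hypothetical values): with a
+ * max_vpi of 63 this allocates a one-long bitmask and an
+ * identity map, vpi_ids[i] = i.
+ */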
+
/* Init HBQs */
if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
rc = lpfc_sli_hbq_setup(phba);
@@ -4677,9 +4697,11 @@ lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM);
lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM);
- for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++)
+ fcp_eqidx = 0;
+ do
lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx],
LPFC_QUEUE_REARM);
+ while (++fcp_eqidx < phba->cfg_fcp_eq_count);
lpfc_sli4_eq_release(phba->sli4_hba.sp_eq, LPFC_QUEUE_REARM);
for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++)
lpfc_sli4_eq_release(phba->sli4_hba.fp_eq[fcp_eqidx],
@@ -4687,6 +4709,803 @@ lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
}
/**
+ * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count.
+ * @phba: Pointer to HBA context object.
+ * @type: The resource extent type.
+ * @extnt_count: buffer to hold the port's available extent count.
+ * @extnt_size: buffer to hold the element count per extent.
+ *
+ * This function reads the number and size of the available resource
+ * extents from the port for the given resource type.
+ **/
+static int
+lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
+ uint16_t *extnt_count, uint16_t *extnt_size)
+{
+ int rc = 0;
+ uint32_t length;
+ uint32_t mbox_tmo;
+ struct lpfc_mbx_get_rsrc_extent_info *rsrc_info;
+ LPFC_MBOXQ_t *mbox;
+
+ mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+
+ /* Find out how many extents are available for this resource type */
+ length = (sizeof(struct lpfc_mbx_get_rsrc_extent_info) -
+ sizeof(struct lpfc_sli4_cfg_mhdr));
+ lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+ LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO,
+ length, LPFC_SLI4_MBX_EMBED);
+
+ /* Send an extents count of 0 - the GET doesn't use it. */
+ rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
+ LPFC_SLI4_MBX_EMBED);
+ if (unlikely(rc)) {
+ rc = -EIO;
+ goto err_exit;
+ }
+
+ if (!phba->sli4_hba.intr_enable)
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+ else {
+ mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
+ rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
+ }
+ if (unlikely(rc)) {
+ rc = -EIO;
+ goto err_exit;
+ }
+
+ rsrc_info = &mbox->u.mqe.un.rsrc_extent_info;
+ if (bf_get(lpfc_mbox_hdr_status,
+ &rsrc_info->header.cfg_shdr.response)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
+ "2930 Failed to get resource extents "
+ "Status 0x%x Add'l Status 0x%x\n",
+ bf_get(lpfc_mbox_hdr_status,
+ &rsrc_info->header.cfg_shdr.response),
+ bf_get(lpfc_mbox_hdr_add_status,
+ &rsrc_info->header.cfg_shdr.response));
+ rc = -EIO;
+ goto err_exit;
+ }
+
+ *extnt_count = bf_get(lpfc_mbx_get_rsrc_extent_info_cnt,
+ &rsrc_info->u.rsp);
+ *extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size,
+ &rsrc_info->u.rsp);
+ err_exit:
+ mempool_free(mbox, phba->mbox_mem_pool);
+ return rc;
+}
+
+/**
+ * lpfc_sli4_chk_avail_extnt_rsrc - Check for available SLI4 resource extents.
+ * @phba: Pointer to HBA context object.
+ * @type: The extent type to check.
+ *
+ * This function reads the current available extents from the port and checks
+ * if the extent count or extent size has changed since the last access.
+ * Callers use this routine after a port reset to determine whether
+ * extents need to be reprovisioned.
+ *
+ * Returns:
+ * -Error: an error indicates a problem.
+ * 1: Extent count or size has changed.
+ * 0: No changes.
+ **/
+static int
+lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type)
+{
+ uint16_t curr_ext_cnt, rsrc_ext_cnt;
+ uint16_t size_diff, rsrc_ext_size;
+ int rc = 0;
+ struct lpfc_rsrc_blks *rsrc_entry;
+ struct list_head *rsrc_blk_list = NULL;
+
+ size_diff = 0;
+ curr_ext_cnt = 0;
+ rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
+ &rsrc_ext_cnt,
+ &rsrc_ext_size);
+ if (unlikely(rc))
+ return -EIO;
+
+ switch (type) {
+ case LPFC_RSC_TYPE_FCOE_RPI:
+ rsrc_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
+ break;
+ case LPFC_RSC_TYPE_FCOE_VPI:
+ rsrc_blk_list = &phba->lpfc_vpi_blk_list;
+ break;
+ case LPFC_RSC_TYPE_FCOE_XRI:
+ rsrc_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
+ break;
+ case LPFC_RSC_TYPE_FCOE_VFI:
+ rsrc_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
+ break;
+ default:
+ break;
+ }
+
+ list_for_each_entry(rsrc_entry, rsrc_blk_list, list) {
+ curr_ext_cnt++;
+ if (rsrc_entry->rsrc_size != rsrc_ext_size)
+ size_diff++;
+ }
+
+ if (curr_ext_cnt != rsrc_ext_cnt || size_diff != 0)
+ rc = 1;
+
+ return rc;
+}
+
+/**
+ * lpfc_sli4_cfg_post_extnts - Post the extent allocation request mailbox.
+ * @phba: Pointer to HBA context object.
+ * @extnt_cnt: number of available extents.
+ * @type: the extent type (rpi, xri, vfi, vpi).
+ * @emb: buffer to hold either MBX_EMBED or MBX_NEMBED operation.
+ * @mbox: pointer to the caller's allocated mailbox structure.
+ *
+ * This function executes the extent allocation request and determines
+ * the amount of memory needed to hold the allocated extents. It is the
+ * caller's responsibility to evaluate the response.
+ *
+ * Returns:
+ * -Error: Error value describes the condition found.
+ * 0: if successful
+ **/
+static int
+lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t *extnt_cnt,
+ uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox)
+{
+ int rc = 0;
+ uint32_t req_len;
+ uint32_t emb_len;
+ uint32_t alloc_len, mbox_tmo;
+
+ /* Calculate the total requested length of the dma memory */
+ req_len = *extnt_cnt * sizeof(uint16_t);
+
+ /*
+ * Calculate the size of an embedded mailbox. The uint32_t
+ * accounts for the extents-specific word.
+ */
+ emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
+ sizeof(uint32_t);
+
+ /*
+ * Presume the allocation and response will fit into an embedded
+ * mailbox. If not true, reconfigure to a non-embedded mailbox.
+ */
+ *emb = LPFC_SLI4_MBX_EMBED;
+ if (req_len > emb_len) {
+ req_len = *extnt_cnt * sizeof(uint16_t) +
+ sizeof(union lpfc_sli4_cfg_shdr) +
+ sizeof(uint32_t);
+ *emb = LPFC_SLI4_MBX_NEMBED;
+ }
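+ /*
+ * For example (hypothetical counts): a request for 16 extents
+ * needs only 32 bytes of id data and stays embedded, while a
+ * request for several hundred extents exceeds emb_len and is
+ * switched to a non-embedded (externally DMA'd) mailbox.
+ */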
+
+ alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+ LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT,
+ req_len, *emb);
+ if (alloc_len < req_len) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "9000 Allocated DMA memory size (x%x) is "
+ "less than the requested DMA memory "
+ "size (x%x)\n", alloc_len, req_len);
+ return -ENOMEM;
+ }
+ rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, *extnt_cnt, type, *emb);
+ if (unlikely(rc))
+ return -EIO;
+
+ if (!phba->sli4_hba.intr_enable)
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+ else {
+ mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
+ rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
+ }
+
+ if (unlikely(rc))
+ rc = -EIO;
+ return rc;
+}
+
+/**
+ * lpfc_sli4_alloc_extent - Allocate an SLI4 resource extent.
+ * @phba: Pointer to HBA context object.
+ * @type: The resource extent type to allocate.
+ *
+ * This function allocates all available extents for the specified
+ * resource type and sets up the driver's bookkeeping (id bitmask,
+ * id array, and extent block list) for that type.
+ **/
+static int
+lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type)
+{
+ bool emb = false;
+ uint16_t rsrc_id_cnt, rsrc_cnt, rsrc_size;
+ uint16_t rsrc_id, rsrc_start, j, k;
+ uint16_t *ids;
+ int i, rc;
+ unsigned long longs;
+ unsigned long *bmask;
+ struct lpfc_rsrc_blks *rsrc_blks;
+ LPFC_MBOXQ_t *mbox;
+ uint32_t length;
+ struct lpfc_id_range *id_array = NULL;
+ void *virtaddr = NULL;
+ struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
+ struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
+ struct list_head *ext_blk_list;
+
+ rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
+ &rsrc_cnt,
+ &rsrc_size);
+ if (unlikely(rc))
+ return -EIO;
+
+ if ((rsrc_cnt == 0) || (rsrc_size == 0)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
+ "3009 No available Resource Extents "
+ "for resource type 0x%x: Count: 0x%x, "
+ "Size 0x%x\n", type, rsrc_cnt,
+ rsrc_size);
+ return -ENOMEM;
+ }
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT,
+ "2903 Available Resource Extents "
+ "for resource type 0x%x: Count: 0x%x, "
+ "Size 0x%x\n", type, rsrc_cnt,
+ rsrc_size);
+
+ mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+
+ rc = lpfc_sli4_cfg_post_extnts(phba, &rsrc_cnt, type, &emb, mbox);
+ if (unlikely(rc)) {
+ rc = -EIO;
+ goto err_exit;
+ }
+
+ /*
+ * Figure out where the response is located. Then get local pointers
+ * to the response data. The port is not guaranteed to grant the full
+ * requested extent count, so update the local variable with the
+ * count actually allocated by the port.
+ */
+ if (emb == LPFC_SLI4_MBX_EMBED) {
+ rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
+ id_array = &rsrc_ext->u.rsp.id[0];
+ rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
+ } else {
+ virtaddr = mbox->sge_array->addr[0];
+ n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
+ rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
+ id_array = &n_rsrc->id;
+ }
+
+ longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) / BITS_PER_LONG;
+ rsrc_id_cnt = rsrc_cnt * rsrc_size;
+
+ /*
+ * Based on the resource size and count, correct the base and max
+ * resource values.
+ */
+ length = sizeof(struct lpfc_rsrc_blks);
+ switch (type) {
+ case LPFC_RSC_TYPE_FCOE_RPI:
+ phba->sli4_hba.rpi_bmask = kzalloc(longs *
+ sizeof(unsigned long),
+ GFP_KERNEL);
+ if (unlikely(!phba->sli4_hba.rpi_bmask)) {
+ rc = -ENOMEM;
+ goto err_exit;
+ }
+ phba->sli4_hba.rpi_ids = kzalloc(rsrc_id_cnt *
+ sizeof(uint16_t),
+ GFP_KERNEL);
+ if (unlikely(!phba->sli4_hba.rpi_ids)) {
+ kfree(phba->sli4_hba.rpi_bmask);
+ rc = -ENOMEM;
+ goto err_exit;
+ }
+
+ /*
+ * The next_rpi was initialized with the maximum available
+ * count but the port may allocate a smaller number. Catch
+ * that case and update the next_rpi.
+ */
+ phba->sli4_hba.next_rpi = rsrc_id_cnt;
+
+ /* Initialize local ptrs for common extent processing later. */
+ bmask = phba->sli4_hba.rpi_bmask;
+ ids = phba->sli4_hba.rpi_ids;
+ ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
+ break;
+ case LPFC_RSC_TYPE_FCOE_VPI:
+ phba->vpi_bmask = kzalloc(longs *
+ sizeof(unsigned long),
+ GFP_KERNEL);
+ if (unlikely(!phba->vpi_bmask)) {
+ rc = -ENOMEM;
+ goto err_exit;
+ }
+ phba->vpi_ids = kzalloc(rsrc_id_cnt *
+ sizeof(uint16_t),
+ GFP_KERNEL);
+ if (unlikely(!phba->vpi_ids)) {
+ kfree(phba->vpi_bmask);
+ rc = -ENOMEM;
+ goto err_exit;
+ }
+
+ /* Initialize local ptrs for common extent processing later. */
+ bmask = phba->vpi_bmask;
+ ids = phba->vpi_ids;
+ ext_blk_list = &phba->lpfc_vpi_blk_list;
+ break;
+ case LPFC_RSC_TYPE_FCOE_XRI:
+ phba->sli4_hba.xri_bmask = kzalloc(longs *
+ sizeof(unsigned long),
+ GFP_KERNEL);
+ if (unlikely(!phba->sli4_hba.xri_bmask)) {
+ rc = -ENOMEM;
+ goto err_exit;
+ }
+ phba->sli4_hba.xri_ids = kzalloc(rsrc_id_cnt *
+ sizeof(uint16_t),
+ GFP_KERNEL);
+ if (unlikely(!phba->sli4_hba.xri_ids)) {
+ kfree(phba->sli4_hba.xri_bmask);
+ rc = -ENOMEM;
+ goto err_exit;
+ }
+
+ /* Initialize local ptrs for common extent processing later. */
+ bmask = phba->sli4_hba.xri_bmask;
+ ids = phba->sli4_hba.xri_ids;
+ ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
+ break;
+ case LPFC_RSC_TYPE_FCOE_VFI:
+ phba->sli4_hba.vfi_bmask = kzalloc(longs *
+ sizeof(unsigned long),
+ GFP_KERNEL);
+ if (unlikely(!phba->sli4_hba.vfi_bmask)) {
+ rc = -ENOMEM;
+ goto err_exit;
+ }
+ phba->sli4_hba.vfi_ids = kzalloc(rsrc_id_cnt *
+ sizeof(uint16_t),
+ GFP_KERNEL);
+ if (unlikely(!phba->sli4_hba.vfi_ids)) {
+ kfree(phba->sli4_hba.vfi_bmask);
+ rc = -ENOMEM;
+ goto err_exit;
+ }
+
+ /* Initialize local ptrs for common extent processing later. */
+ bmask = phba->sli4_hba.vfi_bmask;
+ ids = phba->sli4_hba.vfi_ids;
+ ext_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
+ break;
+ default:
+ /* Unsupported resource type. Fail the call. */
+ id_array = NULL;
+ bmask = NULL;
+ ids = NULL;
+ ext_blk_list = NULL;
+ rc = -EIO;
+ goto err_exit;
+ }
+
+ /*
+ * Complete initializing the extent configuration with the
+ * allocated ids assigned to this function. The bitmask serves
+ * as an index into the array and manages the available ids. The
+ * array just stores the ids communicated to the port via the wqes.
+ */
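+ /*
+ * For example (hypothetical ids): if the port grants two extents
+ * of size 16 starting at ids 100 and 200, then ids[0..15] map to
+ * 100..115, ids[16..31] map to 200..215, and bit i in bmask
+ * tracks whether logical id i (physical id ids[i]) is in use.
+ */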
+ for (i = 0, j = 0, k = 0; i < rsrc_cnt; i++) {
+ if ((i % 2) == 0)
+ rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_0,
+ &id_array[k]);
+ else
+ rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_1,
+ &id_array[k]);
+
+ rsrc_blks = kzalloc(length, GFP_KERNEL);
+ if (unlikely(!rsrc_blks)) {
+ rc = -ENOMEM;
+ kfree(bmask);
+ kfree(ids);
+ goto err_exit;
+ }
+ rsrc_blks->rsrc_start = rsrc_id;
+ rsrc_blks->rsrc_size = rsrc_size;
+ list_add_tail(&rsrc_blks->list, ext_blk_list);
+ rsrc_start = rsrc_id;
+ if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0))
+ phba->sli4_hba.scsi_xri_start = rsrc_start +
+ lpfc_sli4_get_els_iocb_cnt(phba);
+
+ while (rsrc_id < (rsrc_start + rsrc_size)) {
+ ids[j] = rsrc_id;
+ rsrc_id++;
+ j++;
+ }
+ /* Entire word processed. Get next word.*/
+ if ((i % 2) == 1)
+ k++;
+ }
+ err_exit:
+ lpfc_sli4_mbox_cmd_free(phba, mbox);
+ return rc;
+}
+
+/**
+ * lpfc_sli4_dealloc_extent - Deallocate an SLI4 resource extent.
+ * @phba: Pointer to HBA context object.
+ * @type: the extent's type.
+ *
+ * This function deallocates all extents of a particular resource type.
+ * SLI4 does not allow for deallocating a particular extent range. It
+ * is the caller's responsibility to release all kernel memory resources.
+ **/
+static int
+lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
+{
+ int rc;
+ uint32_t length, mbox_tmo = 0;
+ LPFC_MBOXQ_t *mbox;
+ struct lpfc_mbx_dealloc_rsrc_extents *dealloc_rsrc;
+ struct lpfc_rsrc_blks *rsrc_blk, *rsrc_blk_next;
+
+ mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+
+ /*
+ * This function sends an embedded mailbox because it only sends the
+ * resource type. All extents of this type are released by the
+ * port.
+ */
+ length = (sizeof(struct lpfc_mbx_dealloc_rsrc_extents) -
+ sizeof(struct lpfc_sli4_cfg_mhdr));
+ lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+ LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT,
+ length, LPFC_SLI4_MBX_EMBED);
+
+ /* Send an extents count of 0 - the dealloc doesn't use it. */
+ rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
+ LPFC_SLI4_MBX_EMBED);
+ if (unlikely(rc)) {
+ rc = -EIO;
+ goto out_free_mbox;
+ }
+ if (!phba->sli4_hba.intr_enable)
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+ else {
+ mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
+ rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
+ }
+ if (unlikely(rc)) {
+ rc = -EIO;
+ goto out_free_mbox;
+ }
+
+ dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents;
+ if (bf_get(lpfc_mbox_hdr_status,
+ &dealloc_rsrc->header.cfg_shdr.response)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
+ "2919 Failed to release resource extents "
+ "for type %d - Status 0x%x Add'l Status 0x%x. "
+ "Resource memory not released.\n",
+ type,
+ bf_get(lpfc_mbox_hdr_status,
+ &dealloc_rsrc->header.cfg_shdr.response),
+ bf_get(lpfc_mbox_hdr_add_status,
+ &dealloc_rsrc->header.cfg_shdr.response));
+ rc = -EIO;
+ goto out_free_mbox;
+ }
+
+ /* Release kernel memory resources for the specific type. */
+ switch (type) {
+ case LPFC_RSC_TYPE_FCOE_VPI:
+ kfree(phba->vpi_bmask);
+ kfree(phba->vpi_ids);
+ bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
+ list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
+ &phba->lpfc_vpi_blk_list, list) {
+ list_del_init(&rsrc_blk->list);
+ kfree(rsrc_blk);
+ }
+ break;
+ case LPFC_RSC_TYPE_FCOE_XRI:
+ kfree(phba->sli4_hba.xri_bmask);
+ kfree(phba->sli4_hba.xri_ids);
+ bf_set(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
+ list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
+ &phba->sli4_hba.lpfc_xri_blk_list, list) {
+ list_del_init(&rsrc_blk->list);
+ kfree(rsrc_blk);
+ }
+ break;
+ case LPFC_RSC_TYPE_FCOE_VFI:
+ kfree(phba->sli4_hba.vfi_bmask);
+ kfree(phba->sli4_hba.vfi_ids);
+ bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
+ list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
+ &phba->sli4_hba.lpfc_vfi_blk_list, list) {
+ list_del_init(&rsrc_blk->list);
+ kfree(rsrc_blk);
+ }
+ break;
+ case LPFC_RSC_TYPE_FCOE_RPI:
+ /* RPI bitmask and physical id array are cleaned up earlier. */
+ list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
+ &phba->sli4_hba.lpfc_rpi_blk_list, list) {
+ list_del_init(&rsrc_blk->list);
+ kfree(rsrc_blk);
+ }
+ break;
+ default:
+ break;
+ }
+
+ bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
+
+ out_free_mbox:
+ mempool_free(mbox, phba->mbox_mem_pool);
+ return rc;
+}
+
+/**
+ * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource extents.
+ * @phba: Pointer to HBA context object.
+ *
+ * This function allocates all SLI4 resource identifiers.
+ **/
+int
+lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
+{
+ int i, rc, error = 0;
+ uint16_t count, base;
+ unsigned long longs;
+
+ if (phba->sli4_hba.extents_in_use) {
+ /*
+ * The port supports resource extents. The XRI, VPI, VFI, RPI
+ * resource extent count must be read and allocated before
+ * provisioning the resource id arrays.
+ */
+ if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
+ LPFC_IDX_RSRC_RDY) {
+ /*
+ * Extent-based resources are set - the driver could
+ * be in a port reset. Figure out if any corrective
+ * actions need to be taken.
+ */
+ rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
+ LPFC_RSC_TYPE_FCOE_VFI);
+ if (rc != 0)
+ error++;
+ rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
+ LPFC_RSC_TYPE_FCOE_VPI);
+ if (rc != 0)
+ error++;
+ rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
+ LPFC_RSC_TYPE_FCOE_XRI);
+ if (rc != 0)
+ error++;
+ rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
+ LPFC_RSC_TYPE_FCOE_RPI);
+ if (rc != 0)
+ error++;
+
+ /*
+ * It's possible that the number of resources
+ * provided to this port instance changed between
+ * resets. Detect this condition and reallocate
+ * resources. Otherwise, there is no action.
+ */
+ if (error) {
+ lpfc_printf_log(phba, KERN_INFO,
+ LOG_MBOX | LOG_INIT,
+ "2931 Detected extent resource "
+ "change. Reallocating all "
+ "extents.\n");
+ rc = lpfc_sli4_dealloc_extent(phba,
+ LPFC_RSC_TYPE_FCOE_VFI);
+ rc = lpfc_sli4_dealloc_extent(phba,
+ LPFC_RSC_TYPE_FCOE_VPI);
+ rc = lpfc_sli4_dealloc_extent(phba,
+ LPFC_RSC_TYPE_FCOE_XRI);
+ rc = lpfc_sli4_dealloc_extent(phba,
+ LPFC_RSC_TYPE_FCOE_RPI);
+ } else
+ return 0;
+ }
+
+ rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
+ if (unlikely(rc))
+ goto err_exit;
+
+ rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
+ if (unlikely(rc))
+ goto err_exit;
+
+ rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
+ if (unlikely(rc))
+ goto err_exit;
+
+ rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
+ if (unlikely(rc))
+ goto err_exit;
+ bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
+ LPFC_IDX_RSRC_RDY);
+ return rc;
+ } else {
+ /*
+ * The port does not support resource extents. The XRI, VPI,
+ * VFI, RPI resource ids were determined from READ_CONFIG.
+ * Just allocate the bitmasks and provision the resource id
+ * arrays. If a port reset is active, the resources don't
+ * need any action - just exit.
+ */
+ if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
+ LPFC_IDX_RSRC_RDY)
+ return 0;
+
+ /* RPIs. */
+ count = phba->sli4_hba.max_cfg_param.max_rpi;
+ base = phba->sli4_hba.max_cfg_param.rpi_base;
+ longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
+ phba->sli4_hba.rpi_bmask = kzalloc(longs *
+ sizeof(unsigned long),
+ GFP_KERNEL);
+ if (unlikely(!phba->sli4_hba.rpi_bmask)) {
+ rc = -ENOMEM;
+ goto err_exit;
+ }
+ phba->sli4_hba.rpi_ids = kzalloc(count *
+ sizeof(uint16_t),
+ GFP_KERNEL);
+ if (unlikely(!phba->sli4_hba.rpi_ids)) {
+ rc = -ENOMEM;
+ goto free_rpi_bmask;
+ }
+
+ for (i = 0; i < count; i++)
+ phba->sli4_hba.rpi_ids[i] = base + i;
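+ /*
+ * Example (hypothetical READ_CONFIG values): an rpi_base of 0
+ * with max_rpi 64 yields rpi_ids[0..63] = 0..63, so logical and
+ * physical ids coincide when extents are not in use.
+ */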
+
+ /* VPIs. */
+ count = phba->sli4_hba.max_cfg_param.max_vpi;
+ base = phba->sli4_hba.max_cfg_param.vpi_base;
+ longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
+ phba->vpi_bmask = kzalloc(longs *
+ sizeof(unsigned long),
+ GFP_KERNEL);
+ if (unlikely(!phba->vpi_bmask)) {
+ rc = -ENOMEM;
+ goto free_rpi_ids;
+ }
+ phba->vpi_ids = kzalloc(count *
+ sizeof(uint16_t),
+ GFP_KERNEL);
+ if (unlikely(!phba->vpi_ids)) {
+ rc = -ENOMEM;
+ goto free_vpi_bmask;
+ }
+
+ for (i = 0; i < count; i++)
+ phba->vpi_ids[i] = base + i;
+
+ /* XRIs. */
+ count = phba->sli4_hba.max_cfg_param.max_xri;
+ base = phba->sli4_hba.max_cfg_param.xri_base;
+ longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
+ phba->sli4_hba.xri_bmask = kzalloc(longs *
+ sizeof(unsigned long),
+ GFP_KERNEL);
+ if (unlikely(!phba->sli4_hba.xri_bmask)) {
+ rc = -ENOMEM;
+ goto free_vpi_ids;
+ }
+ phba->sli4_hba.xri_ids = kzalloc(count *
+ sizeof(uint16_t),
+ GFP_KERNEL);
+ if (unlikely(!phba->sli4_hba.xri_ids)) {
+ rc = -ENOMEM;
+ goto free_xri_bmask;
+ }
+
+ for (i = 0; i < count; i++)
+ phba->sli4_hba.xri_ids[i] = base + i;
+
+ /* VFIs. */
+ count = phba->sli4_hba.max_cfg_param.max_vfi;
+ base = phba->sli4_hba.max_cfg_param.vfi_base;
+ longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
+ phba->sli4_hba.vfi_bmask = kzalloc(longs *
+ sizeof(unsigned long),
+ GFP_KERNEL);
+ if (unlikely(!phba->sli4_hba.vfi_bmask)) {
+ rc = -ENOMEM;
+ goto free_xri_ids;
+ }
+ phba->sli4_hba.vfi_ids = kzalloc(count *
+ sizeof(uint16_t),
+ GFP_KERNEL);
+ if (unlikely(!phba->sli4_hba.vfi_ids)) {
+ rc = -ENOMEM;
+ goto free_vfi_bmask;
+ }
+
+ for (i = 0; i < count; i++)
+ phba->sli4_hba.vfi_ids[i] = base + i;
+
+ /*
+ * Mark all resources ready. An HBA reset doesn't need to
+ * repeat this initialization.
+ */
+ bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
+ LPFC_IDX_RSRC_RDY);
+ return 0;
+ }
+
+ free_vfi_bmask:
+ kfree(phba->sli4_hba.vfi_bmask);
+ free_xri_ids:
+ kfree(phba->sli4_hba.xri_ids);
+ free_xri_bmask:
+ kfree(phba->sli4_hba.xri_bmask);
+ free_vpi_ids:
+ kfree(phba->vpi_ids);
+ free_vpi_bmask:
+ kfree(phba->vpi_bmask);
+ free_rpi_ids:
+ kfree(phba->sli4_hba.rpi_ids);
+ free_rpi_bmask:
+ kfree(phba->sli4_hba.rpi_bmask);
+ err_exit:
+ return rc;
+}
+
+/**
+ * lpfc_sli4_dealloc_resource_identifiers - Deallocate all SLI4 resource extents.
+ * @phba: Pointer to HBA context object.
+ *
+ * This function releases all SLI4 resource identifiers, deallocating
+ * the port's extents when resources are provisioned via extents.
+ **/
+int
+lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
+{
+ if (phba->sli4_hba.extents_in_use) {
+ lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
+ lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
+ lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
+ lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
+ } else {
+ kfree(phba->vpi_bmask);
+ kfree(phba->vpi_ids);
+ bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
+ kfree(phba->sli4_hba.xri_bmask);
+ kfree(phba->sli4_hba.xri_ids);
+ bf_set(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
+ kfree(phba->sli4_hba.vfi_bmask);
+ kfree(phba->sli4_hba.vfi_ids);
+ bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
+ bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
+ }
+
+ return 0;
+}
+
+/**
* lpfc_sli4_hba_setup - SLI4 device initialization PCI function
* @phba: Pointer to HBA context object.
*
@@ -4708,10 +5527,6 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
struct lpfc_vport *vport = phba->pport;
struct lpfc_dmabuf *mp;
- /*
- * TODO: Why does this routine execute these task in a different
- * order from probe?
- */
/* Perform a PCI function reset to start from clean */
rc = lpfc_pci_function_reset(phba);
if (unlikely(rc))
@@ -4740,7 +5555,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
* to read FCoE param config regions
*/
if (lpfc_sli4_read_fcoe_params(phba, mboxq))
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
+ lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT,
"2570 Failed to read FCoE parameters\n");
/* Issue READ_REV to collect vpd and FW information. */
@@ -4873,6 +5688,18 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
spin_unlock_irq(&phba->hbalock);
+ /*
+ * Allocate all resources (xri, rpi, vpi, vfi) now. Subsequent
+ * calls depend on these resources to complete port setup.
+ */
+ rc = lpfc_sli4_alloc_resource_identifiers(phba);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ "2920 Failed to alloc Resource IDs "
+ "rc = x%x\n", rc);
+ goto out_free_mbox;
+ }
+
/* Read the port's service parameters. */
rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
if (rc) {
@@ -4906,35 +5733,37 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
goto out_free_mbox;
}
- if (phba->cfg_soft_wwnn)
- u64_to_wwn(phba->cfg_soft_wwnn,
- vport->fc_sparam.nodeName.u.wwn);
- if (phba->cfg_soft_wwpn)
- u64_to_wwn(phba->cfg_soft_wwpn,
- vport->fc_sparam.portName.u.wwn);
- memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
- sizeof(struct lpfc_name));
- memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
- sizeof(struct lpfc_name));
+ lpfc_update_vport_wwn(vport);
/* Update the fc_host data structures with new wwn. */
fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
/* Register SGL pool to the device using non-embedded mailbox command */
- rc = lpfc_sli4_post_sgl_list(phba);
- if (unlikely(rc)) {
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
- "0582 Error %d during sgl post operation\n",
- rc);
- rc = -ENODEV;
- goto out_free_mbox;
+ if (!phba->sli4_hba.extents_in_use) {
+ rc = lpfc_sli4_post_els_sgl_list(phba);
+ if (unlikely(rc)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ "0582 Error %d during els sgl post "
+ "operation\n", rc);
+ rc = -ENODEV;
+ goto out_free_mbox;
+ }
+ } else {
+ rc = lpfc_sli4_post_els_sgl_list_ext(phba);
+ if (unlikely(rc)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ "2560 Error %d during els sgl post "
+ "operation\n", rc);
+ rc = -ENODEV;
+ goto out_free_mbox;
+ }
}
/* Register SCSI SGL pool to the device */
rc = lpfc_sli4_repost_scsi_sgl_list(phba);
if (unlikely(rc)) {
- lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
"0383 Error %d during scsi sgl post "
"operation\n", rc);
/* Some Scsi buffers were moved to the abort scsi list */
@@ -5747,10 +6576,15 @@ lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
lpfc_sli_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
sizeof(struct lpfc_mcqe));
mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);
-
- /* Prefix the mailbox status with range x4000 to note SLI4 status. */
+ /*
+ * When the CQE status indicates a failure and the mailbox status
+ * indicates success then copy the CQE status into the mailbox status
+ * (and prefix it with x4000).
+ */
if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
- bf_set(lpfc_mqe_status, mb, LPFC_MBX_ERROR_RANGE | mcqe_status);
+ if (bf_get(lpfc_mqe_status, mb) == MBX_SUCCESS)
+ bf_set(lpfc_mqe_status, mb,
+ (LPFC_MBX_ERROR_RANGE | mcqe_status));
rc = MBXERR_ERROR;
} else
lpfc_sli4_swap_str(phba, mboxq);
@@ -5819,7 +6653,7 @@ lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
else
rc = -EIO;
if (rc != MBX_SUCCESS)
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
"(%d):2541 Mailbox command x%x "
"(x%x) cannot issue Data: x%x x%x\n",
mboxq->vport ? mboxq->vport->vpi : 0,
@@ -6307,6 +7141,7 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
sgl->addr_hi = bpl->addrHigh;
sgl->addr_lo = bpl->addrLow;
+ sgl->word2 = le32_to_cpu(sgl->word2);
if ((i+1) == numBdes)
bf_set(lpfc_sli4_sge_last, sgl, 1);
else
@@ -6343,6 +7178,7 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
cpu_to_le32(icmd->un.genreq64.bdl.addrHigh);
sgl->addr_lo =
cpu_to_le32(icmd->un.genreq64.bdl.addrLow);
+ sgl->word2 = le32_to_cpu(sgl->word2);
bf_set(lpfc_sli4_sge_last, sgl, 1);
sgl->word2 = cpu_to_le32(sgl->word2);
sgl->sge_len =
@@ -6474,7 +7310,8 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)
>> LPFC_FIP_ELS_ID_SHIFT);
}
- bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com, ndlp->nlp_rpi);
+ bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com,
+ phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1);
bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ);
@@ -6623,14 +7460,15 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
iocbq->iocb.ulpContext);
if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l)
bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
- iocbq->vport->vpi + phba->vpi_base);
+ phba->vpi_ids[iocbq->vport->vpi]);
bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1);
bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE);
bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1);
bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com,
LPFC_WQE_LENLOC_WORD3);
bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);
- bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp, ndlp->nlp_rpi);
+ bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
+ phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
command_type = OTHER_COMMAND;
break;
case CMD_CLOSE_XRI_CN:
@@ -6729,6 +7567,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
return IOCB_ERROR;
break;
}
+
bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
wqe->generic.wqe_com.abort_tag = abort_tag;
@@ -6776,7 +7615,7 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
return IOCB_BUSY;
}
} else {
- sglq = __lpfc_sli_get_sglq(phba, piocb);
+ sglq = __lpfc_sli_get_sglq(phba, piocb);
if (!sglq) {
if (!(flag & SLI_IOCB_RET_IOCB)) {
__lpfc_sli_ringtx_put(phba,
@@ -6789,11 +7628,11 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
}
}
} else if (piocb->iocb_flag & LPFC_IO_FCP) {
- sglq = NULL; /* These IO's already have an XRI and
- * a mapped sgl.
- */
+ /* These IOs already have an XRI and a mapped sgl. */
+ sglq = NULL;
} else {
- /* This is a continuation of a commandi,(CX) so this
+ /*
+ * This is a continuation of a commandi,(CX) so this
* sglq is on the active list
*/
sglq = __lpfc_get_active_sglq(phba, piocb->sli4_xritag);
@@ -6802,8 +7641,8 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
}
if (sglq) {
+ piocb->sli4_lxritag = sglq->sli4_lxritag;
piocb->sli4_xritag = sglq->sli4_xritag;
-
if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocb, sglq))
return IOCB_ERROR;
}
@@ -9799,7 +10638,12 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
break;
case LPFC_WCQ:
while ((cqe = lpfc_sli4_cq_get(cq))) {
- workposted |= lpfc_sli4_sp_handle_cqe(phba, cq, cqe);
+ if (cq->subtype == LPFC_FCP)
+ workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq,
+ cqe);
+ else
+ workposted |= lpfc_sli4_sp_handle_cqe(phba, cq,
+ cqe);
if (!(++ecount % LPFC_GET_QE_REL_INT))
lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
}
@@ -11446,6 +12290,7 @@ lpfc_sli4_post_sgl(struct lpfc_hba *phba,
LPFC_MBOXQ_t *mbox;
int rc;
uint32_t shdr_status, shdr_add_status;
+ uint32_t mbox_tmo;
union lpfc_sli4_cfg_shdr *shdr;
if (xritag == NO_XRI) {
@@ -11479,8 +12324,10 @@ lpfc_sli4_post_sgl(struct lpfc_hba *phba,
cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
if (!phba->sli4_hba.intr_enable)
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
- else
- rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
+ else {
+ mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
+ rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
+ }
/* The IOCTL status is embedded in the mailbox subheader. */
shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
@@ -11498,6 +12345,76 @@ lpfc_sli4_post_sgl(struct lpfc_hba *phba,
}
/**
+ * lpfc_sli4_alloc_xri - Allocate an available xri.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to allocate the next available logical xri
+ * from the driver's xri bitmask. Because the index is logical, the
+ * search starts at 0 on every call. This routine takes the hbalock
+ * internally, so it must be called without the lock held.
+ *
+ * Return codes
+ * the allocated logical xri - successful
+ * NO_XRI - no xri is available.
+ */
+uint16_t
+lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
+{
+ unsigned long xri;
+
+ /*
+ * Fetch the next logical xri. Because this index is logical,
+ * the driver starts at 0 each time.
+ */
+ spin_lock_irq(&phba->hbalock);
+ xri = find_next_zero_bit(phba->sli4_hba.xri_bmask,
+ phba->sli4_hba.max_cfg_param.max_xri, 0);
+ if (xri >= phba->sli4_hba.max_cfg_param.max_xri) {
+ spin_unlock_irq(&phba->hbalock);
+ return NO_XRI;
+ } else {
+ set_bit(xri, phba->sli4_hba.xri_bmask);
+ phba->sli4_hba.max_cfg_param.xri_used++;
+ phba->sli4_hba.xri_count++;
+ }
+
+ spin_unlock_irq(&phba->hbalock);
+ return xri;
+}
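+
+/*
+ * Typical usage (sketch, mirroring lpfc_new_scsi_buf_s4 earlier in
+ * this patch): obtain a logical xri and map it to its physical id
+ * before building a request.
+ *
+ *	lxri = lpfc_sli4_alloc_xri(phba);
+ *	if (lxri == NO_XRI)
+ *		return -ENOMEM;
+ *	iocbq->sli4_lxritag = lxri;
+ *	iocbq->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
+ */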
+
+/**
+ * __lpfc_sli4_free_xri - Release an xri for reuse (lock held).
+ * @phba: pointer to lpfc hba data structure.
+ * @xri: the logical xri to release.
+ *
+ * This routine is invoked to release an xri to the pool of
+ * available xris maintained by the driver. The caller must hold
+ * the hbalock.
+ **/
+void
+__lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
+{
+ if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) {
+ phba->sli4_hba.xri_count--;
+ phba->sli4_hba.max_cfg_param.xri_used--;
+ }
+}
+
+/**
+ * lpfc_sli4_free_xri - Release an xri for reuse.
+ * @phba: pointer to lpfc hba data structure.
+ * @xri: the logical xri to release.
+ *
+ * This routine is invoked to release an xri to the pool of
+ * available xris maintained by the driver.
+ **/
+void
+lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
+{
+ spin_lock_irq(&phba->hbalock);
+ __lpfc_sli4_free_xri(phba, xri);
+ spin_unlock_irq(&phba->hbalock);
+}
+
+/**
* lpfc_sli4_next_xritag - Get an xritag for the io
* @phba: Pointer to HBA context object.
*
@@ -11510,30 +12427,23 @@ lpfc_sli4_post_sgl(struct lpfc_hba *phba,
uint16_t
lpfc_sli4_next_xritag(struct lpfc_hba *phba)
{
- uint16_t xritag;
+ uint16_t xri_index;
- spin_lock_irq(&phba->hbalock);
- xritag = phba->sli4_hba.next_xri;
- if ((xritag != (uint16_t) -1) && xritag <
- (phba->sli4_hba.max_cfg_param.max_xri
- + phba->sli4_hba.max_cfg_param.xri_base)) {
- phba->sli4_hba.next_xri++;
- phba->sli4_hba.max_cfg_param.xri_used++;
- spin_unlock_irq(&phba->hbalock);
- return xritag;
- }
- spin_unlock_irq(&phba->hbalock);
- lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ xri_index = lpfc_sli4_alloc_xri(phba);
+ if (xri_index != NO_XRI)
+ return xri_index;
+
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"2004 Failed to allocate XRI.last XRITAG is %d"
" Max XRI is %d, Used XRI is %d\n",
- phba->sli4_hba.next_xri,
+ xri_index,
phba->sli4_hba.max_cfg_param.max_xri,
phba->sli4_hba.max_cfg_param.xri_used);
- return -1;
+ return NO_XRI;
}
/**
- * lpfc_sli4_post_sgl_list - post a block of sgl list to the firmware.
+ * lpfc_sli4_post_els_sgl_list - post a block of ELS sgls to the port.
* @phba: pointer to lpfc hba data structure.
*
* This routine is invoked to post a block of driver's sgl pages to the
@@ -11542,7 +12452,7 @@ lpfc_sli4_next_xritag(struct lpfc_hba *phba)
* stopped.
**/
int
-lpfc_sli4_post_sgl_list(struct lpfc_hba *phba)
+lpfc_sli4_post_els_sgl_list(struct lpfc_hba *phba)
{
struct lpfc_sglq *sglq_entry;
struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
@@ -11551,7 +12461,7 @@ lpfc_sli4_post_sgl_list(struct lpfc_hba *phba)
LPFC_MBOXQ_t *mbox;
uint32_t reqlen, alloclen, pg_pairs;
uint32_t mbox_tmo;
- uint16_t xritag_start = 0;
+ uint16_t xritag_start = 0, lxri = 0;
int els_xri_cnt, rc = 0;
uint32_t shdr_status, shdr_add_status;
union lpfc_sli4_cfg_shdr *shdr;
@@ -11568,11 +12478,8 @@ lpfc_sli4_post_sgl_list(struct lpfc_hba *phba)
return -ENOMEM;
}
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
- if (!mbox) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "2560 Failed to allocate mbox cmd memory\n");
+ if (!mbox)
return -ENOMEM;
- }
/* Allocate DMA memory and set up the non-embedded mailbox command */
alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
@@ -11587,15 +12494,30 @@ lpfc_sli4_post_sgl_list(struct lpfc_hba *phba)
lpfc_sli4_mbox_cmd_free(phba, mbox);
return -ENOMEM;
}
- /* Get the first SGE entry from the non-embedded DMA memory */
- viraddr = mbox->sge_array->addr[0];
-
/* Set up the SGL pages in the non-embedded DMA pages */
+ viraddr = mbox->sge_array->addr[0];
sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
sgl_pg_pairs = &sgl->sgl_pg_pairs;
for (pg_pairs = 0; pg_pairs < els_xri_cnt; pg_pairs++) {
sglq_entry = phba->sli4_hba.lpfc_els_sgl_array[pg_pairs];
+
+ /*
+ * Assign the sglq a physical xri only if the driver has not
+ * initialized those resources. A port reset only needs
+ * the sglq's posted.
+ */
+ if (bf_get(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags) !=
+ LPFC_XRI_RSRC_RDY) {
+ lxri = lpfc_sli4_next_xritag(phba);
+ if (lxri == NO_XRI) {
+ lpfc_sli4_mbox_cmd_free(phba, mbox);
+ return -ENOMEM;
+ }
+ sglq_entry->sli4_lxritag = lxri;
+ sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
+ }
+
/* Set up the sge entry */
sgl_pg_pairs->sgl_pg0_addr_lo =
cpu_to_le32(putPaddrLow(sglq_entry->phys));
@@ -11605,16 +12527,17 @@ lpfc_sli4_post_sgl_list(struct lpfc_hba *phba)
cpu_to_le32(putPaddrLow(0));
sgl_pg_pairs->sgl_pg1_addr_hi =
cpu_to_le32(putPaddrHigh(0));
+
/* Keep the first xritag on the list */
if (pg_pairs == 0)
xritag_start = sglq_entry->sli4_xritag;
sgl_pg_pairs++;
}
+
+ /* Complete initialization and perform endian conversion. */
bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
bf_set(lpfc_post_sgl_pages_xricnt, sgl, els_xri_cnt);
- /* Perform endian conversion if necessary */
sgl->word0 = cpu_to_le32(sgl->word0);
-
if (!phba->sli4_hba.intr_enable)
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
else {
@@ -11633,6 +12556,181 @@ lpfc_sli4_post_sgl_list(struct lpfc_hba *phba)
shdr_status, shdr_add_status, rc);
rc = -ENXIO;
}
+
+ if (rc == 0)
+ bf_set(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags,
+ LPFC_XRI_RSRC_RDY);
+ return rc;
+}
+
+/**
+ * lpfc_sli4_post_els_sgl_list_ext - post a block of ELS sgls to the port.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to post a block of driver's sgl pages to the
+ * HBA using non-embedded mailbox command. No Lock is held. This routine
+ * is only called when the driver is loading and after all IO has been
+ * stopped.
+ **/
+int
+lpfc_sli4_post_els_sgl_list_ext(struct lpfc_hba *phba)
+{
+ struct lpfc_sglq *sglq_entry;
+ struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
+ struct sgl_page_pairs *sgl_pg_pairs;
+ void *viraddr;
+ LPFC_MBOXQ_t *mbox;
+ uint32_t reqlen, alloclen, index;
+ uint32_t mbox_tmo;
+ uint16_t rsrc_start, rsrc_size, els_xri_cnt;
+ uint16_t xritag_start = 0, lxri = 0;
+ struct lpfc_rsrc_blks *rsrc_blk;
+ int cnt, ttl_cnt, rc = 0;
+ int loop_cnt;
+ uint32_t shdr_status, shdr_add_status;
+ union lpfc_sli4_cfg_shdr *shdr;
+
+ /* The number of sgls to be posted */
+ els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
+
+ reqlen = els_xri_cnt * sizeof(struct sgl_page_pairs) +
+ sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
+ if (reqlen > SLI4_PAGE_SIZE) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "2989 Block sgl registration required DMA "
+ "size (%d) great than a page\n", reqlen);
+ return -ENOMEM;
+ }
+
+ cnt = 0;
+ ttl_cnt = 0;
+ list_for_each_entry(rsrc_blk, &phba->sli4_hba.lpfc_xri_blk_list,
+ list) {
+ rsrc_start = rsrc_blk->rsrc_start;
+ rsrc_size = rsrc_blk->rsrc_size;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "3014 Working ELS Extent start %d, cnt %d\n",
+ rsrc_start, rsrc_size);
+
+ loop_cnt = min(els_xri_cnt, rsrc_size);
+ if (ttl_cnt + loop_cnt >= els_xri_cnt) {
+ loop_cnt = els_xri_cnt - ttl_cnt;
+ ttl_cnt = els_xri_cnt;
+ }
+
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+ /*
+ * Allocate DMA memory and set up the non-embedded mailbox
+ * command.
+ */
+ alloclen = lpfc_sli4_config(phba, mbox,
+ LPFC_MBOX_SUBSYSTEM_FCOE,
+ LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
+ reqlen, LPFC_SLI4_MBX_NEMBED);
+ if (alloclen < reqlen) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2987 Allocated DMA memory size (%d) "
+ "is less than the requested DMA memory "
+ "size (%d)\n", alloclen, reqlen);
+ lpfc_sli4_mbox_cmd_free(phba, mbox);
+ return -ENOMEM;
+ }
+
+ /* Set up the SGL pages in the non-embedded DMA pages */
+ viraddr = mbox->sge_array->addr[0];
+ sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
+ sgl_pg_pairs = &sgl->sgl_pg_pairs;
+
+ /*
+ * The starting resource may not begin at zero. Control
+	 * the loop variables via the block resource parameters,
+ * but handle the sge pointers with a zero-based index
+ * that doesn't get reset per loop pass.
+ */
+ for (index = rsrc_start;
+ index < rsrc_start + loop_cnt;
+ index++) {
+ sglq_entry = phba->sli4_hba.lpfc_els_sgl_array[cnt];
+
+ /*
+ * Assign the sglq a physical xri only if the driver
+ * has not initialized those resources. A port reset
+			 * only needs the sglqs posted.
+ */
+ if (bf_get(lpfc_xri_rsrc_rdy,
+ &phba->sli4_hba.sli4_flags) !=
+ LPFC_XRI_RSRC_RDY) {
+ lxri = lpfc_sli4_next_xritag(phba);
+ if (lxri == NO_XRI) {
+ lpfc_sli4_mbox_cmd_free(phba, mbox);
+ rc = -ENOMEM;
+ goto err_exit;
+ }
+ sglq_entry->sli4_lxritag = lxri;
+ sglq_entry->sli4_xritag =
+ phba->sli4_hba.xri_ids[lxri];
+ }
+
+ /* Set up the sge entry */
+ sgl_pg_pairs->sgl_pg0_addr_lo =
+ cpu_to_le32(putPaddrLow(sglq_entry->phys));
+ sgl_pg_pairs->sgl_pg0_addr_hi =
+ cpu_to_le32(putPaddrHigh(sglq_entry->phys));
+ sgl_pg_pairs->sgl_pg1_addr_lo =
+ cpu_to_le32(putPaddrLow(0));
+ sgl_pg_pairs->sgl_pg1_addr_hi =
+ cpu_to_le32(putPaddrHigh(0));
+
+ /* Track the starting physical XRI for the mailbox. */
+ if (index == rsrc_start)
+ xritag_start = sglq_entry->sli4_xritag;
+ sgl_pg_pairs++;
+ cnt++;
+ }
+
+ /* Complete initialization and perform endian conversion. */
+ rsrc_blk->rsrc_used += loop_cnt;
+ bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
+ bf_set(lpfc_post_sgl_pages_xricnt, sgl, loop_cnt);
+ sgl->word0 = cpu_to_le32(sgl->word0);
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "3015 Post ELS Extent SGL, start %d, "
+ "cnt %d, used %d\n",
+ xritag_start, loop_cnt, rsrc_blk->rsrc_used);
+ if (!phba->sli4_hba.intr_enable)
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+ else {
+ mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
+ rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
+ }
+ shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
+ shdr_status = bf_get(lpfc_mbox_hdr_status,
+ &shdr->response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
+ &shdr->response);
+ if (rc != MBX_TIMEOUT)
+ lpfc_sli4_mbox_cmd_free(phba, mbox);
+ if (shdr_status || shdr_add_status || rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "2988 POST_SGL_BLOCK mailbox "
+ "command failed status x%x "
+ "add_status x%x mbx status x%x\n",
+ shdr_status, shdr_add_status, rc);
+ rc = -ENXIO;
+ goto err_exit;
+ }
+ if (ttl_cnt >= els_xri_cnt)
+ break;
+ }
+
+ err_exit:
+ if (rc == 0)
+ bf_set(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags,
+ LPFC_XRI_RSRC_RDY);
return rc;
}
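
As a rough model of the extent walk above, the sketch below clamps the per-extent count so the running total never exceeds the number of sgls to post. Extent starts, sizes, and counts are made up for illustration only.

#include <stdio.h>

struct rsrc_blk { int start, size; };

int main(void)
{
	/* Two hypothetical extents of 16 XRIs each; 20 sgls to post. */
	struct rsrc_blk blks[] = { { 0x100, 16 }, { 0x300, 16 } };
	int els_xri_cnt = 20, ttl_cnt = 0;

	for (int b = 0; b < 2; b++) {
		int loop_cnt = blks[b].size < els_xri_cnt ?
			       blks[b].size : els_xri_cnt;

		/* Clamp the final pass so the total never overshoots. */
		if (ttl_cnt + loop_cnt >= els_xri_cnt)
			loop_cnt = els_xri_cnt - ttl_cnt;
		ttl_cnt += loop_cnt;

		printf("extent 0x%x: post %d sgls (total %d)\n",
		       blks[b].start, loop_cnt, ttl_cnt);
		if (ttl_cnt >= els_xri_cnt)
			break;
	}
	return 0;
}
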
@@ -11693,6 +12791,7 @@ lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba, struct list_head *sblist,
lpfc_sli4_mbox_cmd_free(phba, mbox);
return -ENOMEM;
}
+
/* Get the first SGE entry from the non-embedded DMA memory */
viraddr = mbox->sge_array->addr[0];
@@ -11748,6 +12847,169 @@ lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba, struct list_head *sblist,
}
/**
+ * lpfc_sli4_post_scsi_sgl_blk_ext - post a block of scsi sgls to the port.
+ * @phba: pointer to lpfc hba data structure.
+ * @sblist: pointer to scsi buffer list.
+ * @cnt: number of scsi buffers on the list.
+ *
+ * This routine is invoked to post a block of @cnt scsi sgl pages from a
+ * SCSI buffer list @sblist to the HBA using a non-embedded mailbox command.
+ * No lock is held.
+ *
+ **/
+int
+lpfc_sli4_post_scsi_sgl_blk_ext(struct lpfc_hba *phba, struct list_head *sblist,
+ int cnt)
+{
+ struct lpfc_scsi_buf *psb = NULL;
+ struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
+ struct sgl_page_pairs *sgl_pg_pairs;
+ void *viraddr;
+ LPFC_MBOXQ_t *mbox;
+ uint32_t reqlen, alloclen, pg_pairs;
+ uint32_t mbox_tmo;
+ uint16_t xri_start = 0, scsi_xri_start;
+ uint16_t rsrc_range;
+ int rc = 0, avail_cnt;
+ uint32_t shdr_status, shdr_add_status;
+ dma_addr_t pdma_phys_bpl1;
+ union lpfc_sli4_cfg_shdr *shdr;
+ struct lpfc_rsrc_blks *rsrc_blk;
+ uint32_t xri_cnt = 0;
+
+ /* Calculate the total requested length of the dma memory */
+ reqlen = cnt * sizeof(struct sgl_page_pairs) +
+ sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
+ if (reqlen > SLI4_PAGE_SIZE) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "2932 Block sgl registration required DMA "
+			"size (%d) greater than a page\n", reqlen);
+ return -ENOMEM;
+ }
+
+ /*
+	 * The use of extents requires the driver to post the sgl pages
+	 * in multiple postings so that each posting stays within one
+	 * contiguous resource extent.
+ */
+ psb = list_prepare_entry(psb, sblist, list);
+ scsi_xri_start = phba->sli4_hba.scsi_xri_start;
+ list_for_each_entry(rsrc_blk, &phba->sli4_hba.lpfc_xri_blk_list,
+ list) {
+ rsrc_range = rsrc_blk->rsrc_start + rsrc_blk->rsrc_size;
+ if (rsrc_range < scsi_xri_start)
+ continue;
+ else if (rsrc_blk->rsrc_used >= rsrc_blk->rsrc_size)
+ continue;
+ else
+ avail_cnt = rsrc_blk->rsrc_size - rsrc_blk->rsrc_used;
+
+ reqlen = (avail_cnt * sizeof(struct sgl_page_pairs)) +
+ sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
+ /*
+ * Allocate DMA memory and set up the non-embedded mailbox
+ * command. The mbox is used to post an SGL page per loop
+ * but the DMA memory has a use-once semantic so the mailbox
+ * is used and freed per loop pass.
+ */
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2933 Failed to allocate mbox cmd "
+ "memory\n");
+ return -ENOMEM;
+ }
+ alloclen = lpfc_sli4_config(phba, mbox,
+ LPFC_MBOX_SUBSYSTEM_FCOE,
+ LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
+ reqlen,
+ LPFC_SLI4_MBX_NEMBED);
+ if (alloclen < reqlen) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2934 Allocated DMA memory size (%d) "
+ "is less than the requested DMA memory "
+ "size (%d)\n", alloclen, reqlen);
+ lpfc_sli4_mbox_cmd_free(phba, mbox);
+ return -ENOMEM;
+ }
+
+ /* Get the first SGE entry from the non-embedded DMA memory */
+ viraddr = mbox->sge_array->addr[0];
+
+ /* Set up the SGL pages in the non-embedded DMA pages */
+ sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
+ sgl_pg_pairs = &sgl->sgl_pg_pairs;
+
+ /* pg_pairs tracks posted SGEs per loop iteration. */
+ pg_pairs = 0;
+ list_for_each_entry_continue(psb, sblist, list) {
+ /* Set up the sge entry */
+ sgl_pg_pairs->sgl_pg0_addr_lo =
+ cpu_to_le32(putPaddrLow(psb->dma_phys_bpl));
+ sgl_pg_pairs->sgl_pg0_addr_hi =
+ cpu_to_le32(putPaddrHigh(psb->dma_phys_bpl));
+ if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
+ pdma_phys_bpl1 = psb->dma_phys_bpl +
+ SGL_PAGE_SIZE;
+ else
+ pdma_phys_bpl1 = 0;
+ sgl_pg_pairs->sgl_pg1_addr_lo =
+ cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
+ sgl_pg_pairs->sgl_pg1_addr_hi =
+ cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
+ /* Keep the first xri for this extent. */
+ if (pg_pairs == 0)
+ xri_start = psb->cur_iocbq.sli4_xritag;
+ sgl_pg_pairs++;
+ pg_pairs++;
+ xri_cnt++;
+
+ /*
+ * Track two exit conditions - the loop has constructed
+ * all of the caller's SGE pairs or all available
+ * resource IDs in this extent are consumed.
+ */
+ if ((xri_cnt == cnt) || (pg_pairs >= avail_cnt))
+ break;
+ }
+ rsrc_blk->rsrc_used += pg_pairs;
+ bf_set(lpfc_post_sgl_pages_xri, sgl, xri_start);
+ bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "3016 Post SCSI Extent SGL, start %d, cnt %d "
+ "blk use %d\n",
+ xri_start, pg_pairs, rsrc_blk->rsrc_used);
+ /* Perform endian conversion if necessary */
+ sgl->word0 = cpu_to_le32(sgl->word0);
+ if (!phba->sli4_hba.intr_enable)
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+ else {
+ mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
+ rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
+ }
+ shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
+ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
+ &shdr->response);
+ if (rc != MBX_TIMEOUT)
+ lpfc_sli4_mbox_cmd_free(phba, mbox);
+ if (shdr_status || shdr_add_status || rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "2935 POST_SGL_BLOCK mailbox command "
+ "failed status x%x add_status x%x "
+ "mbx status x%x\n",
+ shdr_status, shdr_add_status, rc);
+ return -ENXIO;
+ }
+
+ /* Post only what is requested. */
+ if (xri_cnt >= cnt)
+ break;
+ }
+ return rc;
+}
+
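The loop above skips extents that sit below the SCSI XRI range or are already full, then posts until either the caller's count is met or the extent's free ids are consumed. A small userspace model of that selection logic follows; all numbers are hypothetical.

#include <stdio.h>

struct rsrc_blk { int start, size, used; };

int main(void)
{
	/* Hypothetical extents: the first sits below the SCSI XRI range,
	 * the second is mostly used already. */
	struct rsrc_blk blks[] = {
		{ 0x000, 32, 0 }, { 0x100, 32, 26 }, { 0x200, 32, 0 },
	};
	int scsi_xri_start = 0x100, cnt = 10, xri_cnt = 0;

	for (int b = 0; b < 3 && xri_cnt < cnt; b++) {
		struct rsrc_blk *rb = &blks[b];

		/* Skip extents below the SCSI range or already full. */
		if (rb->start + rb->size < scsi_xri_start ||
		    rb->used >= rb->size)
			continue;

		int avail = rb->size - rb->used;
		int post = (cnt - xri_cnt < avail) ? cnt - xri_cnt : avail;

		rb->used += post;
		xri_cnt += post;
		printf("extent 0x%x: posted %d, used now %d\n",
		       rb->start, post, rb->used);
	}
	return 0;
}
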
+/**
* lpfc_fc_frame_check - Check that this frame is a valid frame to handle
* @phba: pointer to lpfc_hba struct that the frame was received on
* @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
@@ -12137,6 +13399,28 @@ lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
}
/**
+ * lpfc_sli4_xri_inrange - check xri is in range of xris owned by driver.
+ * @phba: Pointer to HBA context object.
+ * @xri: xri id in transaction.
+ *
+ * This function validates that the xri maps to the known range of XRIs
+ * allocated and used by the driver.
+ **/
+static uint16_t
+lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
+ uint16_t xri)
+{
+ int i;
+
+ for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) {
+ if (xri == phba->sli4_hba.xri_ids[i])
+ return i;
+ }
+ return NO_XRI;
+}
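
Since physical XRIs are no longer a contiguous zero-based range, the old base/limit comparison becomes a reverse lookup through xri_ids[]. A standalone C sketch of the same linear scan, with hypothetical names and ids:

#include <stdint.h>
#include <stdio.h>

#define NO_XRI 0xffff

/* Reverse lookup: physical XRI -> zero-based logical index, or NO_XRI
 * when the id is not one the driver owns. */
static uint16_t xri_inrange(const uint16_t *ids, int max, uint16_t xri)
{
	for (int i = 0; i < max; i++)
		if (ids[i] == xri)
			return (uint16_t)i;
	return NO_XRI;
}

int main(void)
{
	uint16_t ids[4] = { 0x210, 0x211, 0x212, 0x213 };

	printf("0x212 -> logical %u\n", (unsigned)xri_inrange(ids, 4, 0x212));
	printf("0x999 -> 0x%x\n", (unsigned)xri_inrange(ids, 4, 0x999));
	return 0;
}
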
+
+/**
* lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort
* @phba: Pointer to HBA context object.
* @fc_hdr: pointer to a FC frame header.
@@ -12169,9 +13453,7 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_hba *phba,
"SID:x%x\n", oxid, sid);
return;
}
- if (rxid >= phba->sli4_hba.max_cfg_param.xri_base
- && rxid <= (phba->sli4_hba.max_cfg_param.max_xri
- + phba->sli4_hba.max_cfg_param.xri_base))
+ if (lpfc_sli4_xri_inrange(phba, rxid))
lpfc_set_rrq_active(phba, ndlp, rxid, oxid, 0);
/* Allocate buffer for rsp iocb */
@@ -12194,12 +13476,13 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_hba *phba,
icmd->ulpBdeCount = 0;
icmd->ulpLe = 1;
icmd->ulpClass = CLASS3;
- icmd->ulpContext = ndlp->nlp_rpi;
+ icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
ctiocb->context1 = ndlp;
ctiocb->iocb_cmpl = NULL;
ctiocb->vport = phba->pport;
ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
+ ctiocb->sli4_lxritag = NO_XRI;
ctiocb->sli4_xritag = NO_XRI;
/* If the oxid maps to the FCP XRI range or if it is out of range,
@@ -12380,8 +13663,8 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
first_iocbq->iocb.ulpContext = be16_to_cpu(fc_hdr->fh_ox_id);
- first_iocbq->iocb.unsli3.rcvsli3.vpi =
- vport->vpi + vport->phba->vpi_base;
+ /* iocbq is prepped for internal consumption. Logical vpi. */
+ first_iocbq->iocb.unsli3.rcvsli3.vpi = vport->vpi;
/* put the first buffer into the first IOCBq */
first_iocbq->context2 = &seq_dmabuf->dbuf;
first_iocbq->context3 = NULL;
@@ -12461,7 +13744,7 @@ lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
&phba->sli.ring[LPFC_ELS_RING],
iocbq, fc_hdr->fh_r_ctl,
fc_hdr->fh_type))
- lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"2540 Ring %d handler: unexpected Rctl "
"x%x Type x%x received\n",
LPFC_ELS_RING,
@@ -12558,9 +13841,24 @@ lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
{
struct lpfc_rpi_hdr *rpi_page;
uint32_t rc = 0;
+ uint16_t lrpi = 0;
+
+ /* SLI4 ports that support extents do not require RPI headers. */
+ if (!phba->sli4_hba.rpi_hdrs_in_use)
+ goto exit;
+ if (phba->sli4_hba.extents_in_use)
+ return -EIO;
- /* Post all rpi memory regions to the port. */
list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
+ /*
+ * Assign the rpi headers a physical rpi only if the driver
+ * has not initialized those resources. A port reset only
+ * needs the headers posted.
+ */
+ if (bf_get(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags) !=
+ LPFC_RPI_RSRC_RDY)
+ rpi_page->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
+
rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page);
if (rc != MBX_SUCCESS) {
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
@@ -12571,6 +13869,9 @@ lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
}
}
+ exit:
+ bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags,
+ LPFC_RPI_RSRC_RDY);
return rc;
}
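
One way to read this hunk: a physical start rpi is bound to a header page only on first initialization, while a port reset merely re-posts the same pages. A speculative userspace model of that ready-flag gate, with all names hypothetical:

#include <stdbool.h>
#include <stdio.h>

/* Speculative model: bind the physical id once, then only re-post. */
static bool rpi_rsrc_rdy;

static void post_all_hdrs(int *start_rpi, const int *rpi_ids)
{
	if (!rpi_rsrc_rdy)
		*start_rpi = rpi_ids[0];	/* bind physical id once */
	/* ...the real driver issues POST_HDR_TEMPLATE here... */
	rpi_rsrc_rdy = true;
}

int main(void)
{
	int rpi_ids[] = { 0x40, 0x41 };
	int start_rpi = 0;

	post_all_hdrs(&start_rpi, rpi_ids);	/* initial load: binds 0x40 */
	post_all_hdrs(&start_rpi, rpi_ids);	/* reset path: post only */
	printf("start_rpi = 0x%x\n", start_rpi);
	return 0;
}
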
@@ -12594,10 +13895,15 @@ lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
LPFC_MBOXQ_t *mboxq;
struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl;
uint32_t rc = 0;
- uint32_t mbox_tmo;
uint32_t shdr_status, shdr_add_status;
union lpfc_sli4_cfg_shdr *shdr;
+ /* SLI4 ports that support extents do not require RPI headers. */
+ if (!phba->sli4_hba.rpi_hdrs_in_use)
+ return rc;
+ if (phba->sli4_hba.extents_in_use)
+ return -EIO;
+
/* The port is notified of the header region via a mailbox command. */
mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mboxq) {
@@ -12609,16 +13915,19 @@ lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
/* Post all rpi memory regions to the port. */
hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl;
- mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
sizeof(struct lpfc_mbx_post_hdr_tmpl) -
sizeof(struct lpfc_sli4_cfg_mhdr),
LPFC_SLI4_MBX_EMBED);
- bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
- hdr_tmpl, rpi_page->page_count);
+
+ /* Post the physical rpi to the port for this rpi header. */
bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
rpi_page->start_rpi);
+ bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
+ hdr_tmpl, rpi_page->page_count);
+
hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
@@ -12653,22 +13962,21 @@ lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
int
lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
{
- int rpi;
- uint16_t max_rpi, rpi_base, rpi_limit;
- uint16_t rpi_remaining;
+ unsigned long rpi;
+ uint16_t max_rpi, rpi_limit;
+ uint16_t rpi_remaining, lrpi = 0;
struct lpfc_rpi_hdr *rpi_hdr;
max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
- rpi_base = phba->sli4_hba.max_cfg_param.rpi_base;
rpi_limit = phba->sli4_hba.next_rpi;
/*
- * The valid rpi range is not guaranteed to be zero-based. Start
- * the search at the rpi_base as reported by the port.
+ * Fetch the next logical rpi. Because this index is logical,
+ * the driver starts at 0 each time.
*/
spin_lock_irq(&phba->hbalock);
- rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, rpi_base);
- if (rpi >= rpi_limit || rpi < rpi_base)
+ rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0);
+ if (rpi >= rpi_limit)
rpi = LPFC_RPI_ALLOC_ERROR;
else {
set_bit(rpi, phba->sli4_hba.rpi_bmask);
@@ -12678,7 +13986,7 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
/*
* Don't try to allocate more rpi header regions if the device limit
- * on available rpis max has been exhausted.
+ * has been exhausted.
*/
if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
(phba->sli4_hba.rpi_count >= max_rpi)) {
@@ -12687,13 +13995,21 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
}
/*
+ * RPI header postings are not required for SLI4 ports capable of
+ * extents.
+ */
+ if (!phba->sli4_hba.rpi_hdrs_in_use) {
+ spin_unlock_irq(&phba->hbalock);
+ return rpi;
+ }
+
+ /*
* If the driver is running low on rpi resources, allocate another
* page now. Note that the next_rpi value is used because
* it represents how many are actually in use whereas max_rpi notes
* how many are supported max by the device.
*/
- rpi_remaining = phba->sli4_hba.next_rpi - rpi_base -
- phba->sli4_hba.rpi_count;
+ rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count;
spin_unlock_irq(&phba->hbalock);
if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
@@ -12702,6 +14018,8 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
"2002 Error Could not grow rpi "
"count\n");
} else {
+ lrpi = rpi_hdr->start_rpi;
+ rpi_hdr->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
}
}
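
With extents in use the rpi bitmap becomes zero-based, so the allocation scans from bit 0 and the low-water-mark math drops the old rpi_base term. A simplified userspace sketch of that path, using a plain byte array in place of the kernel's rpi_bmask/find_next_zero_bit machinery; the constants are stand-ins:

#include <stdio.h>

#define RPI_ALLOC_ERROR -1
#define LOW_WATER_MARK 2	/* stands in for LPFC_RPI_LOW_WATER_MARK */

/* Zero-based scan: with extents, the logical rpi range starts at 0. */
static int alloc_rpi(unsigned char *bmask, int limit)
{
	for (int rpi = 0; rpi < limit; rpi++) {
		if (!bmask[rpi]) {
			bmask[rpi] = 1;
			return rpi;
		}
	}
	return RPI_ALLOC_ERROR;
}

int main(void)
{
	unsigned char bmask[8] = { 0 };
	int next_rpi = 8, rpi_count = 0;

	int rpi = alloc_rpi(bmask, next_rpi);
	if (rpi != RPI_ALLOC_ERROR)
		rpi_count++;
	/* Low-water check without the old rpi_base term. */
	if (next_rpi - rpi_count < LOW_WATER_MARK)
		puts("low on rpis: grow another header page");
	printf("allocated logical rpi %d\n", rpi);
	return 0;
}
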
@@ -12751,6 +14069,8 @@ void
lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
{
kfree(phba->sli4_hba.rpi_bmask);
+ kfree(phba->sli4_hba.rpi_ids);
+ bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
}
/**
@@ -13490,6 +14810,96 @@ out:
}
/**
+ * lpfc_wr_object - write an object to the firmware
+ * @phba: pointer to lpfc hba data structure.
+ * @dmabuf_list: list of dmabufs to write to the port.
+ * @size: the total number of bytes to write to the port.
+ * @offset: the current offset to be used to start the transfer.
+ *
+ * This routine will create a wr_object mailbox command to send to the port.
+ * The mailbox command will be constructed using the dma buffers described in
+ * @dmabuf_list to create a list of BDEs. This routine will fill in as many
+ * BDEs as the embedded mailbox can support. The @offset variable will be
+ * used to indicate the starting offset of the transfer and will also return
+ * the offset after the write object mailbox has completed. @size is used to
+ * determine the end of the object and whether the eof bit should be set.
+ *
+ * Returns 0 if successful; @offset will then contain the new offset to use
+ * for the next write.
+ * Returns a negative value for error cases.
+ **/
+int
+lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
+ uint32_t size, uint32_t *offset)
+{
+ struct lpfc_mbx_wr_object *wr_object;
+ LPFC_MBOXQ_t *mbox;
+ int rc = 0, i = 0;
+ uint32_t shdr_status, shdr_add_status;
+ uint32_t mbox_tmo;
+ union lpfc_sli4_cfg_shdr *shdr;
+ struct lpfc_dmabuf *dmabuf;
+ uint32_t written = 0;
+
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+
+ lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+ LPFC_MBOX_OPCODE_WRITE_OBJECT,
+ sizeof(struct lpfc_mbx_wr_object) -
+ sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
+
+ wr_object = (struct lpfc_mbx_wr_object *)&mbox->u.mqe.un.wr_object;
+ wr_object->u.request.write_offset = *offset;
+ sprintf((uint8_t *)wr_object->u.request.object_name, "/");
+ wr_object->u.request.object_name[0] =
+ cpu_to_le32(wr_object->u.request.object_name[0]);
+ bf_set(lpfc_wr_object_eof, &wr_object->u.request, 0);
+ list_for_each_entry(dmabuf, dmabuf_list, list) {
+ if (i >= LPFC_MBX_WR_CONFIG_MAX_BDE || written >= size)
+ break;
+ wr_object->u.request.bde[i].addrLow = putPaddrLow(dmabuf->phys);
+ wr_object->u.request.bde[i].addrHigh =
+ putPaddrHigh(dmabuf->phys);
+ if (written + SLI4_PAGE_SIZE >= size) {
+ wr_object->u.request.bde[i].tus.f.bdeSize =
+ (size - written);
+ written += (size - written);
+ bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1);
+ } else {
+ wr_object->u.request.bde[i].tus.f.bdeSize =
+ SLI4_PAGE_SIZE;
+ written += SLI4_PAGE_SIZE;
+ }
+ i++;
+ }
+ wr_object->u.request.bde_count = i;
+ bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written);
+ if (!phba->sli4_hba.intr_enable)
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+ else {
+ mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
+ rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
+ }
+ /* The IOCTL status is embedded in the mailbox subheader. */
+ shdr = (union lpfc_sli4_cfg_shdr *) &wr_object->header.cfg_shdr;
+ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+ if (rc != MBX_TIMEOUT)
+ mempool_free(mbox, phba->mbox_mem_pool);
+ if (shdr_status || shdr_add_status || rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3025 Write Object mailbox failed with "
+ "status x%x add_status x%x, mbx status x%x\n",
+ shdr_status, shdr_add_status, rc);
+ rc = -ENXIO;
+ } else
+ *offset += wr_object->u.response.actual_write_length;
+ return rc;
+}
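
The BDE loop above carves the object into SLI4_PAGE_SIZE chunks, trims the final chunk to the remaining size, and sets the eof bit on it. A self-contained C model of just that chunking arithmetic; the constants below are stand-ins, not the driver's values.

#include <stdio.h>

#define PAGE_SZ 4096	/* stands in for SLI4_PAGE_SIZE */
#define MAX_BDE 7	/* stands in for LPFC_MBX_WR_CONFIG_MAX_BDE */

int main(void)
{
	unsigned size = 10000, written = 0;
	int i = 0, eof = 0;

	while (i < MAX_BDE && written < size) {
		unsigned chunk = PAGE_SZ;

		/* Last chunk: trim to the remainder and flag end-of-file. */
		if (written + PAGE_SZ >= size) {
			chunk = size - written;
			eof = 1;
		}
		written += chunk;
		printf("bde[%d]: %u bytes%s\n", i, chunk, eof ? " (eof)" : "");
		i++;
	}
	printf("wrote %u of %u, eof=%d\n", written, size, eof);
	return 0;
}
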
+
+/**
* lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands.
* @vport: pointer to vport data structure.
*
@@ -13644,7 +15054,7 @@ lpfc_drain_txq(struct lpfc_hba *phba)
* never happen
*/
sglq = __lpfc_clear_active_sglq(phba,
- sglq->sli4_xritag);
+ sglq->sli4_lxritag);
spin_unlock_irqrestore(&phba->hbalock, iflags);
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"2823 txq empty and txq_cnt is %d\n ",
@@ -13656,6 +15066,7 @@ lpfc_drain_txq(struct lpfc_hba *phba)
/* The xri and iocb resources secured,
* attempt to issue request
*/
+ piocbq->sli4_lxritag = sglq->sli4_lxritag;
piocbq->sli4_xritag = sglq->sli4_xritag;
if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq))
fail_msg = "to convert bpl to sgl";
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index 453577c21c1..a0075b0af14 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -52,6 +52,7 @@ struct lpfc_iocbq {
struct list_head clist;
struct list_head dlist;
uint16_t iotag; /* pre-assigned IO tag */
+ uint16_t sli4_lxritag; /* logical pre-assigned XRI. */
uint16_t sli4_xritag; /* pre-assigned XRI, (OXID) tag. */
struct lpfc_cq_event cq_event;
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 1a3cbf88f2c..4b1703554a2 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -310,7 +310,6 @@ struct lpfc_max_cfg_param {
uint16_t vfi_base;
uint16_t vfi_used;
uint16_t max_fcfi;
- uint16_t fcfi_base;
uint16_t fcfi_used;
uint16_t max_eq;
uint16_t max_rq;
@@ -365,6 +364,11 @@ struct lpfc_pc_sli4_params {
uint8_t rqv;
};
+struct lpfc_iov {
+ uint32_t pf_number;
+ uint32_t vf_number;
+};
+
/* SLI4 HBA data structure entries */
struct lpfc_sli4_hba {
void __iomem *conf_regs_memmap_p; /* Kernel memory mapped address for
@@ -444,10 +448,13 @@ struct lpfc_sli4_hba {
uint32_t intr_enable;
struct lpfc_bmbx bmbx;
struct lpfc_max_cfg_param max_cfg_param;
+ uint16_t extents_in_use; /* must allocate resource extents. */
+ uint16_t rpi_hdrs_in_use; /* must post rpi hdrs if set. */
uint16_t next_xri; /* last_xri - max_cfg_param.xri_base = used */
uint16_t next_rpi;
uint16_t scsi_xri_max;
uint16_t scsi_xri_cnt;
+ uint16_t scsi_xri_start;
struct list_head lpfc_free_sgl_list;
struct list_head lpfc_sgl_list;
struct lpfc_sglq **lpfc_els_sgl_array;
@@ -458,7 +465,17 @@ struct lpfc_sli4_hba {
struct lpfc_sglq **lpfc_sglq_active_list;
struct list_head lpfc_rpi_hdr_list;
unsigned long *rpi_bmask;
+ uint16_t *rpi_ids;
uint16_t rpi_count;
+ struct list_head lpfc_rpi_blk_list;
+ unsigned long *xri_bmask;
+ uint16_t *xri_ids;
+ uint16_t xri_count;
+ struct list_head lpfc_xri_blk_list;
+ unsigned long *vfi_bmask;
+ uint16_t *vfi_ids;
+ uint16_t vfi_count;
+ struct list_head lpfc_vfi_blk_list;
struct lpfc_sli4_flags sli4_flags;
struct list_head sp_queue_event;
struct list_head sp_cqe_event_pool;
@@ -467,6 +484,7 @@ struct lpfc_sli4_hba {
struct list_head sp_els_xri_aborted_work_queue;
struct list_head sp_unsol_work_queue;
struct lpfc_sli4_link link_state;
+ struct lpfc_iov iov;
spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */
spinlock_t abts_sgl_list_lock; /* list of aborted els IOs */
};
@@ -490,6 +508,7 @@ struct lpfc_sglq {
enum lpfc_sgl_state state;
struct lpfc_nodelist *ndlp; /* ndlp associated with IO */
uint16_t iotag; /* pre-assigned IO tag */
+ uint16_t sli4_lxritag; /* logical pre-assigned xri. */
uint16_t sli4_xritag; /* pre-assigned XRI, (OXID) tag. */
struct sli4_sge *sgl; /* pre-assigned SGL */
void *virt; /* virtual address. */
@@ -504,6 +523,13 @@ struct lpfc_rpi_hdr {
uint32_t start_rpi;
};
+struct lpfc_rsrc_blks {
+ struct list_head list;
+ uint16_t rsrc_start;
+ uint16_t rsrc_size;
+ uint16_t rsrc_used;
+};
+
/*
* SLI4 specific function prototypes
*/
@@ -543,8 +569,11 @@ int lpfc_sli4_post_sgl(struct lpfc_hba *, dma_addr_t, dma_addr_t, uint16_t);
int lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *);
uint16_t lpfc_sli4_next_xritag(struct lpfc_hba *);
int lpfc_sli4_post_async_mbox(struct lpfc_hba *);
-int lpfc_sli4_post_sgl_list(struct lpfc_hba *phba);
+int lpfc_sli4_post_els_sgl_list(struct lpfc_hba *phba);
+int lpfc_sli4_post_els_sgl_list_ext(struct lpfc_hba *phba);
int lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *, struct list_head *, int);
+int lpfc_sli4_post_scsi_sgl_blk_ext(struct lpfc_hba *, struct list_head *,
+ int);
struct lpfc_cq_event *__lpfc_sli4_cq_event_alloc(struct lpfc_hba *);
struct lpfc_cq_event *lpfc_sli4_cq_event_alloc(struct lpfc_hba *);
void __lpfc_sli4_cq_event_release(struct lpfc_hba *, struct lpfc_cq_event *);
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index 30ba5440c67..1feb551a57b 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -83,7 +83,7 @@ inline void lpfc_vport_set_state(struct lpfc_vport *vport,
static int
lpfc_alloc_vpi(struct lpfc_hba *phba)
{
- int vpi;
+ unsigned long vpi;
spin_lock_irq(&phba->hbalock);
/* Start at bit 1 because vpi zero is reserved for the physical port */