author     Linus Torvalds <torvalds@linux-foundation.org>   2010-03-18 16:54:31 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2010-03-18 16:54:31 -0700
commit     961cde93dee2658000ead32abffb8ddf0727abe0 (patch)
tree       2419e204132abe2ec2bb7f08bd20042573cc9bd6 /drivers/scsi/lpfc
parent     f82c37e7bb4c4d9b6a476c642d5c2d2efbd6f240 (diff)
parent     0d9dc7c8b9b7fa0f53647423b41056ee1beed735 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6: (69 commits)
[SCSI] scsi_transport_fc: Fix synchronization issue while deleting vport
[SCSI] bfa: Update the driver version to 2.1.2.1.
[SCSI] bfa: Remove unused header files and did some cleanup.
[SCSI] bfa: Handle SCSI IO underrun case.
[SCSI] bfa: FCS and include file changes.
[SCSI] bfa: Modified the portstats get/clear logic
[SCSI] bfa: Replace bfa_get_attr() with specific APIs
[SCSI] bfa: New portlog entries for events (FIP/FLOGI/FDISC/LOGO).
[SCSI] bfa: Rename pport to fcport in BFA FCS.
[SCSI] bfa: IOC fixes, check for IOC down condition.
[SCSI] bfa: In MSIX mode, ignore spurious RME interrupts when FCoE ports are in FW mismatch state.
[SCSI] bfa: Fix Command Queue (CPE) full condition check and ack CPE interrupt.
[SCSI] bfa: IOC recovery fix in fcmode.
[SCSI] bfa: AEN and byte alignment fixes.
[SCSI] bfa: Introduce a link notification state machine.
[SCSI] bfa: Added firmware save clear feature for BFA driver.
[SCSI] bfa: FCS authentication related changes.
[SCSI] bfa: PCI VPD, FIP and include file changes.
[SCSI] bfa: Fix to copy fpma MAC when requested by user space application.
[SCSI] bfa: RPORT state machine: direct attach mode fix.
...
Diffstat (limited to 'drivers/scsi/lpfc')
-rw-r--r--  drivers/scsi/lpfc/lpfc.h          |  10
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c     |   7
-rw-r--r--  drivers/scsi/lpfc/lpfc_bsg.c      | 332
-rw-r--r--  drivers/scsi/lpfc/lpfc_bsg.h      |  12
-rw-r--r--  drivers/scsi/lpfc/lpfc_crtn.h     |   7
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c      | 142
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c  | 527
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c     | 277
-rw-r--r--  drivers/scsi/lpfc/lpfc_logmsg.h   |   1
-rw-r--r--  drivers/scsi/lpfc/lpfc_mbox.c     |   8
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c     |  49
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c      | 413
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.h      |   1
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli4.h     |  38
-rw-r--r--  drivers/scsi/lpfc/lpfc_version.h  |   2
-rw-r--r--  drivers/scsi/lpfc/lpfc_vport.c    |   7
16 files changed, 1556 insertions, 277 deletions
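The lpfc_bsg.c changes below funnel three kinds of outstanding requests (iocb, mailbox, and the new menlo commands) through a single tracking object, struct bsg_job_data, whose type field records which member of its context union is live; the timeout and completion paths dispatch on that tag. A minimal standalone sketch of the pattern, with simplified stand-in types rather than the driver's definitions:

#include <stdio.h>

/* Simplified stand-ins for the driver's per-request context types. */
struct iocb_ctx  { int iotag; };
struct mbox_ctx  { int offset; };
struct menlo_ctx { int xri; };	/* exchange id, as in struct menlo_response */

enum ctx_type { TYPE_IOCB = 2, TYPE_MBOX = 3, TYPE_MENLO = 4 };

/* One object per outstanding job; 'type' says which union member is live. */
struct job_data {
	enum ctx_type type;
	union {
		struct iocb_ctx  iocb;
		struct mbox_ctx  mbox;
		struct menlo_ctx menlo;
	} u;
};

/* A timeout handler never guesses: it switches on the tag first. */
static void job_timeout(const struct job_data *dd)
{
	switch (dd->type) {
	case TYPE_MENLO:
		printf("abort menlo exchange, xri %d\n", dd->u.menlo.xri);
		break;
	case TYPE_IOCB:
		printf("abort iocb, iotag %d\n", dd->u.iocb.iotag);
		break;
	case TYPE_MBOX:
	default:
		printf("generic cleanup\n");
		break;
	}
}

int main(void)
{
	struct job_data dd = { .type = TYPE_MENLO, .u = { .menlo = { .xri = 7 } } };
	job_timeout(&dd);
	return 0;
}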
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index 84b696463a5..565e16dd74f 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h @@ -37,6 +37,9 @@ struct lpfc_sli2_slim; the NameServer before giving up. */ #define LPFC_CMD_PER_LUN 3 /* max outstanding cmds per lun */ #define LPFC_DEFAULT_SG_SEG_CNT 64 /* sg element count per scsi cmnd */ +#define LPFC_DEFAULT_MENLO_SG_SEG_CNT 128 /* sg element count per scsi + cmnd for menlo needs nearly twice as for firmware + downloads using bsg */ #define LPFC_DEFAULT_PROT_SG_SEG_CNT 4096 /* sg protection elements count */ #define LPFC_MAX_SG_SEG_CNT 4096 /* sg element count per scsi cmnd */ #define LPFC_MAX_PROT_SG_SEG_CNT 4096 /* prot sg element count per scsi cmd*/ @@ -509,7 +512,6 @@ struct lpfc_hba { int (*lpfc_hba_down_link) (struct lpfc_hba *); - /* SLI4 specific HBA data structure */ struct lpfc_sli4_hba sli4_hba; @@ -623,6 +625,9 @@ struct lpfc_hba { uint32_t cfg_log_verbose; uint32_t cfg_aer_support; uint32_t cfg_suppress_link_up; +#define LPFC_INITIALIZE_LINK 0 /* do normal init_link mbox */ +#define LPFC_DELAY_INIT_LINK 1 /* layered driver hold off */ +#define LPFC_DELAY_INIT_LINK_INDEFINITELY 2 /* wait, manual intervention */ lpfc_vpd_t vpd; /* vital product data */ @@ -804,6 +809,9 @@ struct lpfc_hba { struct list_head ct_ev_waiters; struct unsol_rcv_ct_ctx ct_ctx[64]; uint32_t ctx_idx; + + uint8_t menlo_flag; /* menlo generic flags */ +#define HBA_MENLO_SUPPORT 0x1 /* HBA supports menlo commands */ }; static inline struct Scsi_Host * diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index c992e8328f9..64cd17eedb6 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -1939,7 +1939,9 @@ static DEVICE_ATTR(lpfc_enable_npiv, S_IRUGO, # 0x2 = never bring up link # Default value is 0. 
*/ -LPFC_ATTR_R(suppress_link_up, 0, 0, 2, "Suppress Link Up at initialization"); +LPFC_ATTR_R(suppress_link_up, LPFC_INITIALIZE_LINK, LPFC_INITIALIZE_LINK, + LPFC_DELAY_INIT_LINK_INDEFINITELY, + "Suppress Link Up at initialization"); /* # lpfc_nodev_tmo: If set, it will hold all I/O errors on devices that disappear @@ -1966,8 +1968,7 @@ lpfc_nodev_tmo_show(struct device *dev, struct device_attribute *attr, { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; - int val = 0; - val = vport->cfg_devloss_tmo; + return snprintf(buf, PAGE_SIZE, "%d\n", vport->cfg_devloss_tmo); } diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c index f3f1bf1a0a7..692c29f6048 100644 --- a/drivers/scsi/lpfc/lpfc_bsg.c +++ b/drivers/scsi/lpfc/lpfc_bsg.c @@ -83,15 +83,28 @@ struct lpfc_bsg_mbox { struct fc_bsg_job *set_job; }; +#define MENLO_DID 0x0000FC0E + +struct lpfc_bsg_menlo { + struct lpfc_iocbq *cmdiocbq; + struct lpfc_iocbq *rspiocbq; + struct lpfc_dmabuf *bmp; + + /* job waiting for this iocb to finish */ + struct fc_bsg_job *set_job; +}; + #define TYPE_EVT 1 #define TYPE_IOCB 2 #define TYPE_MBOX 3 +#define TYPE_MENLO 4 struct bsg_job_data { uint32_t type; union { struct lpfc_bsg_event *evt; struct lpfc_bsg_iocb iocb; struct lpfc_bsg_mbox mbox; + struct lpfc_bsg_menlo menlo; } context_un; }; @@ -2456,6 +2469,18 @@ static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba, case MBX_PORT_IOV_CONTROL: break; case MBX_SET_VARIABLE: + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "1226 mbox: set_variable 0x%x, 0x%x\n", + mb->un.varWords[0], + mb->un.varWords[1]); + if ((mb->un.varWords[0] == SETVAR_MLOMNT) + && (mb->un.varWords[1] == 1)) { + phba->wait_4_mlo_maint_flg = 1; + } else if (mb->un.varWords[0] == SETVAR_MLORST) { + phba->link_flag &= ~LS_LOOPBACK_MODE; + phba->fc_topology = TOPOLOGY_PT_PT; + } + break; case MBX_RUN_BIU_DIAG64: case MBX_READ_EVENT_LOG: case MBX_READ_SPARM64: @@ -2638,6 +2663,297 @@ job_error: } /** + * lpfc_bsg_menlo_cmd_cmp - lpfc_menlo_cmd completion handler + * @phba: Pointer to HBA context object. + * @cmdiocbq: Pointer to command iocb. + * @rspiocbq: Pointer to response iocb. + * + * This function is the completion handler for iocbs issued using + * lpfc_menlo_cmd function. This function is called by the + * ring event handler function without any lock held. This function + * can be called from both worker thread context and interrupt + * context. This function also can be called from another thread which + * cleans up the SLI layer objects. + * This function copies the contents of the response iocb to the + * response iocb memory object provided by the caller of + * lpfc_sli_issue_iocb_wait and then wakes up the thread which + * sleeps for the iocb completion. 
+ **/ +static void +lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba, + struct lpfc_iocbq *cmdiocbq, + struct lpfc_iocbq *rspiocbq) +{ + struct bsg_job_data *dd_data; + struct fc_bsg_job *job; + IOCB_t *rsp; + struct lpfc_dmabuf *bmp; + struct lpfc_bsg_menlo *menlo; + unsigned long flags; + struct menlo_response *menlo_resp; + int rc = 0; + + spin_lock_irqsave(&phba->ct_ev_lock, flags); + dd_data = cmdiocbq->context1; + if (!dd_data) { + spin_unlock_irqrestore(&phba->ct_ev_lock, flags); + return; + } + + menlo = &dd_data->context_un.menlo; + job = menlo->set_job; + job->dd_data = NULL; /* so timeout handler does not reply */ + + spin_lock_irqsave(&phba->hbalock, flags); + cmdiocbq->iocb_flag |= LPFC_IO_WAKE; + if (cmdiocbq->context2 && rspiocbq) + memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb, + &rspiocbq->iocb, sizeof(IOCB_t)); + spin_unlock_irqrestore(&phba->hbalock, flags); + + bmp = menlo->bmp; + rspiocbq = menlo->rspiocbq; + rsp = &rspiocbq->iocb; + + pci_unmap_sg(phba->pcidev, job->request_payload.sg_list, + job->request_payload.sg_cnt, DMA_TO_DEVICE); + pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list, + job->reply_payload.sg_cnt, DMA_FROM_DEVICE); + + /* always return the xri, this would be used in the case + * of a menlo download to allow the data to be sent as a continuation + * of the exchange. + */ + menlo_resp = (struct menlo_response *) + job->reply->reply_data.vendor_reply.vendor_rsp; + menlo_resp->xri = rsp->ulpContext; + if (rsp->ulpStatus) { + if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) { + switch (rsp->un.ulpWord[4] & 0xff) { + case IOERR_SEQUENCE_TIMEOUT: + rc = -ETIMEDOUT; + break; + case IOERR_INVALID_RPI: + rc = -EFAULT; + break; + default: + rc = -EACCES; + break; + } + } else + rc = -EACCES; + } else + job->reply->reply_payload_rcv_len = + rsp->un.genreq64.bdl.bdeSize; + + lpfc_mbuf_free(phba, bmp->virt, bmp->phys); + lpfc_sli_release_iocbq(phba, rspiocbq); + lpfc_sli_release_iocbq(phba, cmdiocbq); + kfree(bmp); + kfree(dd_data); + /* make error code available to userspace */ + job->reply->result = rc; + /* complete the job back to userspace */ + job->job_done(job); + spin_unlock_irqrestore(&phba->ct_ev_lock, flags); + return; +} + +/** + * lpfc_menlo_cmd - send an ioctl for menlo hardware + * @job: fc_bsg_job to handle + * + * This function issues a gen request 64 CR ioctl for all menlo cmd requests, + * all the command completions will return the xri for the command. + * For menlo data requests a gen request 64 CX is used to continue the exchange + * supplied in the menlo request header xri field. 
+ **/ +static int +lpfc_menlo_cmd(struct fc_bsg_job *job) +{ + struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata; + struct lpfc_hba *phba = vport->phba; + struct lpfc_iocbq *cmdiocbq, *rspiocbq; + IOCB_t *cmd, *rsp; + int rc = 0; + struct menlo_command *menlo_cmd; + struct menlo_response *menlo_resp; + struct lpfc_dmabuf *bmp = NULL; + int request_nseg; + int reply_nseg; + struct scatterlist *sgel = NULL; + int numbde; + dma_addr_t busaddr; + struct bsg_job_data *dd_data; + struct ulp_bde64 *bpl = NULL; + + /* in case no data is returned return just the return code */ + job->reply->reply_payload_rcv_len = 0; + + if (job->request_len < + sizeof(struct fc_bsg_request) + + sizeof(struct menlo_command)) { + lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, + "2784 Received MENLO_CMD request below " + "minimum size\n"); + rc = -ERANGE; + goto no_dd_data; + } + + if (job->reply_len < + sizeof(struct fc_bsg_request) + sizeof(struct menlo_response)) { + lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, + "2785 Received MENLO_CMD reply below " + "minimum size\n"); + rc = -ERANGE; + goto no_dd_data; + } + + if (!(phba->menlo_flag & HBA_MENLO_SUPPORT)) { + lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, + "2786 Adapter does not support menlo " + "commands\n"); + rc = -EPERM; + goto no_dd_data; + } + + menlo_cmd = (struct menlo_command *) + job->request->rqst_data.h_vendor.vendor_cmd; + + menlo_resp = (struct menlo_response *) + job->reply->reply_data.vendor_reply.vendor_rsp; + + /* allocate our bsg tracking structure */ + dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL); + if (!dd_data) { + lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, + "2787 Failed allocation of dd_data\n"); + rc = -ENOMEM; + goto no_dd_data; + } + + bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); + if (!bmp) { + rc = -ENOMEM; + goto free_dd; + } + + cmdiocbq = lpfc_sli_get_iocbq(phba); + if (!cmdiocbq) { + rc = -ENOMEM; + goto free_bmp; + } + + rspiocbq = lpfc_sli_get_iocbq(phba); + if (!rspiocbq) { + rc = -ENOMEM; + goto free_cmdiocbq; + } + + rsp = &rspiocbq->iocb; + + bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys); + if (!bmp->virt) { + rc = -ENOMEM; + goto free_rspiocbq; + } + + INIT_LIST_HEAD(&bmp->list); + bpl = (struct ulp_bde64 *) bmp->virt; + request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list, + job->request_payload.sg_cnt, DMA_TO_DEVICE); + for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) { + busaddr = sg_dma_address(sgel); + bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64; + bpl->tus.f.bdeSize = sg_dma_len(sgel); + bpl->tus.w = cpu_to_le32(bpl->tus.w); + bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr)); + bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr)); + bpl++; + } + + reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list, + job->reply_payload.sg_cnt, DMA_FROM_DEVICE); + for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) { + busaddr = sg_dma_address(sgel); + bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I; + bpl->tus.f.bdeSize = sg_dma_len(sgel); + bpl->tus.w = cpu_to_le32(bpl->tus.w); + bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr)); + bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr)); + bpl++; + } + + cmd = &cmdiocbq->iocb; + cmd->un.genreq64.bdl.ulpIoTag32 = 0; + cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys); + cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys); + cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64; + cmd->un.genreq64.bdl.bdeSize = + (request_nseg + reply_nseg) * sizeof(struct ulp_bde64); + 
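/*
 * Aside: the two for_each_sg() loops above translate DMA-mapped
 * scatterlist segments into little-endian 64-bit buffer descriptor
 * entries (struct ulp_bde64). A standalone sketch of that packing,
 * using plain arrays and a simplified descriptor layout instead of
 * the driver's real types (illustrative only, not the lpfc API):
 */
#include <stdint.h>
#include <endian.h>	/* htole32() is the userspace analogue of cpu_to_le32() */

struct bde64 {			/* simplified stand-in for struct ulp_bde64 */
	uint32_t addr_low;	/* low 32 bits of the segment's DMA address */
	uint32_t addr_high;	/* high 32 bits of the segment's DMA address */
	uint32_t len_flags;	/* segment length; real code ORs in BUFF_TYPE_* */
};

static void pack_bpl(struct bde64 *bpl, const uint64_t *dma_addr,
		     const uint32_t *dma_len, int nseg)
{
	int i;

	for (i = 0; i < nseg; i++) {
		bpl[i].addr_low  = htole32((uint32_t)(dma_addr[i] & 0xffffffffu));
		bpl[i].addr_high = htole32((uint32_t)(dma_addr[i] >> 32));
		bpl[i].len_flags = htole32(dma_len[i]);
	}
}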
cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA); + cmd->un.genreq64.w5.hcsw.Dfctl = 0; + cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CMD; + cmd->un.genreq64.w5.hcsw.Type = MENLO_TRANSPORT_TYPE; /* 0xfe */ + cmd->ulpBdeCount = 1; + cmd->ulpClass = CLASS3; + cmd->ulpOwner = OWN_CHIP; + cmd->ulpLe = 1; /* Limited Edition */ + cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC; + cmdiocbq->vport = phba->pport; + /* We want the firmware to timeout before we do */ + cmd->ulpTimeout = MENLO_TIMEOUT - 5; + cmdiocbq->context3 = bmp; + cmdiocbq->context2 = rspiocbq; + cmdiocbq->iocb_cmpl = lpfc_bsg_menlo_cmd_cmp; + cmdiocbq->context1 = dd_data; + cmdiocbq->context2 = rspiocbq; + if (menlo_cmd->cmd == LPFC_BSG_VENDOR_MENLO_CMD) { + cmd->ulpCommand = CMD_GEN_REQUEST64_CR; + cmd->ulpPU = MENLO_PU; /* 3 */ + cmd->un.ulpWord[4] = MENLO_DID; /* 0x0000FC0E */ + cmd->ulpContext = MENLO_CONTEXT; /* 0 */ + } else { + cmd->ulpCommand = CMD_GEN_REQUEST64_CX; + cmd->ulpPU = 1; + cmd->un.ulpWord[4] = 0; + cmd->ulpContext = menlo_cmd->xri; + } + + dd_data->type = TYPE_MENLO; + dd_data->context_un.menlo.cmdiocbq = cmdiocbq; + dd_data->context_un.menlo.rspiocbq = rspiocbq; + dd_data->context_un.menlo.set_job = job; + dd_data->context_un.menlo.bmp = bmp; + + rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, + MENLO_TIMEOUT - 5); + if (rc == IOCB_SUCCESS) + return 0; /* done for now */ + + /* iocb failed so cleanup */ + pci_unmap_sg(phba->pcidev, job->request_payload.sg_list, + job->request_payload.sg_cnt, DMA_TO_DEVICE); + pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list, + job->reply_payload.sg_cnt, DMA_FROM_DEVICE); + + lpfc_mbuf_free(phba, bmp->virt, bmp->phys); + +free_rspiocbq: + lpfc_sli_release_iocbq(phba, rspiocbq); +free_cmdiocbq: + lpfc_sli_release_iocbq(phba, cmdiocbq); +free_bmp: + kfree(bmp); +free_dd: + kfree(dd_data); +no_dd_data: + /* make error code available to userspace */ + job->reply->result = rc; + job->dd_data = NULL; + return rc; +} +/** * lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job * @job: fc_bsg_job to handle **/ @@ -2669,6 +2985,10 @@ lpfc_bsg_hst_vendor(struct fc_bsg_job *job) case LPFC_BSG_VENDOR_MBOX: rc = lpfc_bsg_mbox_cmd(job); break; + case LPFC_BSG_VENDOR_MENLO_CMD: + case LPFC_BSG_VENDOR_MENLO_DATA: + rc = lpfc_menlo_cmd(job); + break; default: rc = -EINVAL; job->reply->reply_payload_rcv_len = 0; @@ -2728,6 +3048,7 @@ lpfc_bsg_timeout(struct fc_bsg_job *job) struct lpfc_bsg_event *evt; struct lpfc_bsg_iocb *iocb; struct lpfc_bsg_mbox *mbox; + struct lpfc_bsg_menlo *menlo; struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; struct bsg_job_data *dd_data; unsigned long flags; @@ -2775,6 +3096,17 @@ lpfc_bsg_timeout(struct fc_bsg_job *job) spin_unlock_irqrestore(&phba->ct_ev_lock, flags); job->job_done(job); break; + case TYPE_MENLO: + menlo = &dd_data->context_un.menlo; + cmdiocb = menlo->cmdiocbq; + /* hint to completion handler that the job timed out */ + job->reply->result = -EAGAIN; + spin_unlock_irqrestore(&phba->ct_ev_lock, flags); + /* this will call our completion handler */ + spin_lock_irq(&phba->hbalock); + lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb); + spin_unlock_irq(&phba->hbalock); + break; default: spin_unlock_irqrestore(&phba->ct_ev_lock, flags); break; diff --git a/drivers/scsi/lpfc/lpfc_bsg.h b/drivers/scsi/lpfc/lpfc_bsg.h index 6c8f87e39b9..5bc630819b9 100644 --- a/drivers/scsi/lpfc/lpfc_bsg.h +++ b/drivers/scsi/lpfc/lpfc_bsg.h @@ -31,6 +31,8 @@ #define LPFC_BSG_VENDOR_DIAG_TEST 5 #define LPFC_BSG_VENDOR_GET_MGMT_REV 6 #define 
LPFC_BSG_VENDOR_MBOX 7 +#define LPFC_BSG_VENDOR_MENLO_CMD 8 +#define LPFC_BSG_VENDOR_MENLO_DATA 9 struct set_ct_event { uint32_t command; @@ -96,3 +98,13 @@ struct dfc_mbox_req { uint8_t mbOffset; }; +/* Used for menlo command or menlo data. The xri is only used for menlo data */ +struct menlo_command { + uint32_t cmd; + uint32_t xri; +}; + +struct menlo_response { + uint32_t xri; /* return the xri of the iocb exchange */ +}; + diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h index 6f0fb51eb46..5087c4211b4 100644 --- a/drivers/scsi/lpfc/lpfc_crtn.h +++ b/drivers/scsi/lpfc/lpfc_crtn.h @@ -63,6 +63,7 @@ void lpfc_linkdown_port(struct lpfc_vport *); void lpfc_port_link_failure(struct lpfc_vport *); void lpfc_mbx_cmpl_read_la(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_init_vpi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *); +void lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *); void lpfc_retry_pport_discovery(struct lpfc_hba *); void lpfc_mbx_cmpl_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); @@ -221,6 +222,10 @@ void lpfc_unregister_fcf_rescan(struct lpfc_hba *); void lpfc_unregister_unused_fcf(struct lpfc_hba *); int lpfc_sli4_redisc_fcf_table(struct lpfc_hba *); void lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *); +void lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *); +uint16_t lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *); +int lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *, uint16_t); +void lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *, uint16_t); int lpfc_mem_alloc(struct lpfc_hba *, int align); void lpfc_mem_free(struct lpfc_hba *); @@ -385,7 +390,7 @@ void lpfc_parse_fcoe_conf(struct lpfc_hba *, uint8_t *, uint32_t); int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int); void lpfc_start_fdiscs(struct lpfc_hba *phba); struct lpfc_vport *lpfc_find_vport_by_vpid(struct lpfc_hba *, uint16_t); - +struct lpfc_sglq *__lpfc_get_active_sglq(struct lpfc_hba *, uint16_t); #define ScsiResult(host_code, scsi_code) (((host_code) << 16) | scsi_code) #define HBA_EVENT_RSCN 5 #define HBA_EVENT_LINK_UP 2 diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index 2a40a6eabf4..ee980bd6686 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -771,6 +771,7 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, struct lpfc_nodelist *ndlp = cmdiocb->context1; struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp; struct serv_parm *sp; + uint16_t fcf_index; int rc; /* Check to see if link went down during discovery */ @@ -788,6 +789,54 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, vport->port_state); if (irsp->ulpStatus) { + /* + * In case of FIP mode, perform round robin FCF failover + * due to new FCF discovery + */ + if ((phba->hba_flag & HBA_FIP_SUPPORT) && + (phba->fcf.fcf_flag & FCF_DISCOVERY)) { + lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS, + "2611 FLOGI failed on registered " + "FCF record fcf_index:%d, trying " + "to perform round robin failover\n", + phba->fcf.current_rec.fcf_indx); + fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba); + if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE) { + /* + * Exhausted the eligible FCF record list, + * fail through to retry FLOGI on current + * FCF record. 
+ */ + lpfc_printf_log(phba, KERN_WARNING, + LOG_FIP | LOG_ELS, + "2760 FLOGI exhausted FCF " + "round robin failover list, " + "retry FLOGI on the current " + "registered FCF index:%d\n", + phba->fcf.current_rec.fcf_indx); + spin_lock_irq(&phba->hbalock); + phba->fcf.fcf_flag &= ~FCF_DISCOVERY; + spin_unlock_irq(&phba->hbalock); + } else { + rc = lpfc_sli4_fcf_rr_read_fcf_rec(phba, + fcf_index); + if (rc) { + lpfc_printf_log(phba, KERN_WARNING, + LOG_FIP | LOG_ELS, + "2761 FLOGI round " + "robin FCF failover " + "read FCF failed " + "rc:x%x, fcf_index:" + "%d\n", rc, + phba->fcf.current_rec.fcf_indx); + spin_lock_irq(&phba->hbalock); + phba->fcf.fcf_flag &= ~FCF_DISCOVERY; + spin_unlock_irq(&phba->hbalock); + } else + goto out; + } + } + /* Check for retry */ if (lpfc_els_retry(phba, cmdiocb, rspiocb)) goto out; @@ -806,9 +855,8 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, } /* FLOGI failure */ - lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, - "0100 FLOGI failure Data: x%x x%x " - "x%x\n", + lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, + "0100 FLOGI failure Status:x%x/x%x TMO:x%x\n", irsp->ulpStatus, irsp->un.ulpWord[4], irsp->ulpTimeout); goto flogifail; @@ -842,8 +890,18 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, else rc = lpfc_cmpl_els_flogi_nport(vport, ndlp, sp); - if (!rc) + if (!rc) { + /* Mark the FCF discovery process done */ + lpfc_printf_vlog(vport, KERN_INFO, LOG_FIP | LOG_ELS, + "2769 FLOGI successful on FCF record: " + "current_fcf_index:x%x, terminate FCF " + "round robin failover process\n", + phba->fcf.current_rec.fcf_indx); + spin_lock_irq(&phba->hbalock); + phba->fcf.fcf_flag &= ~FCF_DISCOVERY; + spin_unlock_irq(&phba->hbalock); goto out; + } } flogifail: @@ -1409,6 +1467,10 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, goto out; } /* PLOGI failed */ + lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, + "2753 PLOGI failure DID:%06X Status:x%x/x%x\n", + ndlp->nlp_DID, irsp->ulpStatus, + irsp->un.ulpWord[4]); /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ if (lpfc_error_lost_link(irsp)) rc = NLP_STE_FREED_NODE; @@ -1577,6 +1639,10 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, goto out; } /* PRLI failed */ + lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, + "2754 PRLI failure DID:%06X Status:x%x/x%x\n", + ndlp->nlp_DID, irsp->ulpStatus, + irsp->un.ulpWord[4]); /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ if (lpfc_error_lost_link(irsp)) goto out; @@ -1860,6 +1926,10 @@ lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, goto out; } /* ADISC failed */ + lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, + "2755 ADISC failure DID:%06X Status:x%x/x%x\n", + ndlp->nlp_DID, irsp->ulpStatus, + irsp->un.ulpWord[4]); /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ if (!lpfc_error_lost_link(irsp)) lpfc_disc_state_machine(vport, ndlp, cmdiocb, @@ -2009,6 +2079,10 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* ELS command is being retried */ goto out; /* LOGO failed */ + lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, + "2756 LOGO failure DID:%06X Status:x%x/x%x\n", + ndlp->nlp_DID, irsp->ulpStatus, + irsp->un.ulpWord[4]); /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ if (lpfc_error_lost_link(irsp)) goto out; @@ -5989,7 +6063,12 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) if (phba->sli_rev < LPFC_SLI_REV4) lpfc_issue_fabric_reglogin(vport); else { - lpfc_start_fdiscs(phba); + /* + * If the physical port 
is instantiated using + * FDISC, do not start vport discovery. + */ + if (vport->port_state != LPFC_FDISC) + lpfc_start_fdiscs(phba); lpfc_do_scr_ns_plogi(phba, vport); } } else @@ -6055,21 +6134,18 @@ mbox_err_exit: } /** - * lpfc_retry_pport_discovery - Start timer to retry FLOGI. + * lpfc_cancel_all_vport_retry_delay_timer - Cancel all vport retry delay timer * @phba: pointer to lpfc hba data structure. * - * This routine abort all pending discovery commands and - * start a timer to retry FLOGI for the physical port - * discovery. + * This routine cancels the retry delay timers to all the vports. **/ void -lpfc_retry_pport_discovery(struct lpfc_hba *phba) +lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *phba) { struct lpfc_vport **vports; struct lpfc_nodelist *ndlp; - struct Scsi_Host *shost; - int i; uint32_t link_state; + int i; /* Treat this failure as linkdown for all vports */ link_state = phba->link_state; @@ -6087,13 +6163,30 @@ lpfc_retry_pport_discovery(struct lpfc_hba *phba) } lpfc_destroy_vport_work_array(phba, vports); } +} + +/** + * lpfc_retry_pport_discovery - Start timer to retry FLOGI. + * @phba: pointer to lpfc hba data structure. + * + * This routine abort all pending discovery commands and + * start a timer to retry FLOGI for the physical port + * discovery. + **/ +void +lpfc_retry_pport_discovery(struct lpfc_hba *phba) +{ + struct lpfc_nodelist *ndlp; + struct Scsi_Host *shost; + + /* Cancel the all vports retry delay retry timers */ + lpfc_cancel_all_vport_retry_delay_timer(phba); /* If fabric require FLOGI, then re-instantiate physical login */ ndlp = lpfc_findnode_did(phba->pport, Fabric_DID); if (!ndlp) return; - shost = lpfc_shost_from_vport(phba->pport); mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ); spin_lock_irq(shost->host_lock); @@ -6219,7 +6312,8 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, lpfc_mbx_unreg_vpi(vport); spin_lock_irq(shost->host_lock); vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; - vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; + if (phba->sli_rev == LPFC_SLI_REV4) + vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; spin_unlock_irq(shost->host_lock); } @@ -6797,21 +6891,27 @@ lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba, struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; unsigned long iflag = 0; - spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock, iflag); + spin_lock_irqsave(&phba->hbalock, iflag); + spin_lock(&phba->sli4_hba.abts_sgl_list_lock); list_for_each_entry_safe(sglq_entry, sglq_next, &phba->sli4_hba.lpfc_abts_els_sgl_list, list) { if (sglq_entry->sli4_xritag == xri) { list_del(&sglq_entry->list); - spin_unlock_irqrestore( - &phba->sli4_hba.abts_sgl_list_lock, - iflag); - spin_lock_irqsave(&phba->hbalock, iflag); - list_add_tail(&sglq_entry->list, &phba->sli4_hba.lpfc_sgl_list); + sglq_entry->state = SGL_FREED; + spin_unlock(&phba->sli4_hba.abts_sgl_list_lock); spin_unlock_irqrestore(&phba->hbalock, iflag); return; } } - spin_unlock_irqrestore(&phba->sli4_hba.abts_sgl_list_lock, iflag); + spin_unlock(&phba->sli4_hba.abts_sgl_list_lock); + sglq_entry = __lpfc_get_active_sglq(phba, xri); + if (!sglq_entry || (sglq_entry->sli4_xritag != xri)) { + spin_unlock_irqrestore(&phba->hbalock, iflag); + return; + } + sglq_entry->state = SGL_XRI_ABORTED; + spin_unlock_irqrestore(&phba->hbalock, iflag); + return; } diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index 2359d0bfb73..c555e3b7f20 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ 
-1481,8 +1481,6 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba, int lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf) { - LPFC_MBOXQ_t *mbox; - int rc; /* * If the Link is up and no FCoE events while in the * FCF discovery, no need to restart FCF discovery. @@ -1491,86 +1489,70 @@ lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf) (phba->fcoe_eventtag == phba->fcoe_eventtag_at_fcf_scan)) return 0; + lpfc_printf_log(phba, KERN_INFO, LOG_FIP, + "2768 Pending link or FCF event during current " + "handling of the previous event: link_state:x%x, " + "evt_tag_at_scan:x%x, evt_tag_current:x%x\n", + phba->link_state, phba->fcoe_eventtag_at_fcf_scan, + phba->fcoe_eventtag); + spin_lock_irq(&phba->hbalock); phba->fcf.fcf_flag &= ~FCF_AVAILABLE; spin_unlock_irq(&phba->hbalock); - if (phba->link_state >= LPFC_LINK_UP) - lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST); - else { + if (phba->link_state >= LPFC_LINK_UP) { + lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, + "2780 Restart FCF table scan due to " + "pending FCF event:evt_tag_at_scan:x%x, " + "evt_tag_current:x%x\n", + phba->fcoe_eventtag_at_fcf_scan, + phba->fcoe_eventtag); + lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST); + } else { /* * Do not continue FCF discovery and clear FCF_DISC_INPROGRESS * flag */ spin_lock_irq(&phba->hbalock); phba->hba_flag &= ~FCF_DISC_INPROGRESS; - phba->fcf.fcf_flag &= ~FCF_REDISC_FOV; + phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV | FCF_DISCOVERY); spin_unlock_irq(&phba->hbalock); } + /* Unregister the currently registered FCF if required */ if (unreg_fcf) { spin_lock_irq(&phba->hbalock); phba->fcf.fcf_flag &= ~FCF_REGISTERED; spin_unlock_irq(&phba->hbalock); - mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); - if (!mbox) { - lpfc_printf_log(phba, KERN_ERR, - LOG_DISCOVERY|LOG_MBOX, - "2610 UNREG_FCFI mbox allocation failed\n"); - return 1; - } - lpfc_unreg_fcfi(mbox, phba->fcf.fcfi); - mbox->vport = phba->pport; - mbox->mbox_cmpl = lpfc_unregister_fcfi_cmpl; - rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); - if (rc == MBX_NOT_FINISHED) { - lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX, - "2611 UNREG_FCFI issue mbox failed\n"); - mempool_free(mbox, phba->mbox_mem_pool); - } + lpfc_sli4_unregister_fcf(phba); } - return 1; } /** - * lpfc_mbx_cmpl_read_fcf_record - Completion handler for read_fcf mbox. + * lpfc_sli4_fcf_rec_mbox_parse - parse non-embedded fcf record mailbox command * @phba: pointer to lpfc hba data structure. * @mboxq: pointer to mailbox object. + * @next_fcf_index: pointer to holder of next fcf index. * - * This function iterate through all the fcf records available in - * HBA and choose the optimal FCF record for discovery. After finding - * the FCF for discovery it register the FCF record and kick start - * discovery. - * If FCF_IN_USE flag is set in currently used FCF, the routine try to - * use a FCF record which match fabric name and mac address of the - * currently used FCF record. - * If the driver support only one FCF, it will try to use the FCF record - * used by BOOT_BIOS. + * This routine parses the non-embedded fcf mailbox command by performing the + * necessary error checking, non-embedded read FCF record mailbox command + * SGE parsing, and endianness swapping. + * + * Returns the pointer to the new FCF record in the non-embedded mailbox + * command DMA memory if successful, otherwise NULL. 
*/ -void -lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) +static struct fcf_record * +lpfc_sli4_fcf_rec_mbox_parse(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, + uint16_t *next_fcf_index) { void *virt_addr; dma_addr_t phys_addr; - uint8_t *bytep; struct lpfc_mbx_sge sge; struct lpfc_mbx_read_fcf_tbl *read_fcf; uint32_t shdr_status, shdr_add_status; union lpfc_sli4_cfg_shdr *shdr; struct fcf_record *new_fcf_record; - uint32_t boot_flag, addr_mode; - uint32_t next_fcf_index; - struct lpfc_fcf_rec *fcf_rec = NULL; - unsigned long iflags; - uint16_t vlan_id; - int rc; - - /* If there is pending FCoE event restart FCF table scan */ - if (lpfc_check_pending_fcoe_event(phba, 0)) { - lpfc_sli4_mbox_cmd_free(phba, mboxq); - return; - } /* Get the first SGE entry from the non-embedded DMA memory. This * routine only uses a single SGE. @@ -1581,59 +1563,183 @@ lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, "2524 Failed to get the non-embedded SGE " "virtual address\n"); - goto out; + return NULL; } virt_addr = mboxq->sge_array->addr[0]; shdr = (union lpfc_sli4_cfg_shdr *)virt_addr; shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); - shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, - &shdr->response); - /* - * The FCF Record was read and there is no reason for the driver - * to maintain the FCF record data or memory. Instead, just need - * to book keeping the FCFIs can be used. - */ + shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); if (shdr_status || shdr_add_status) { - if (shdr_status == STATUS_FCF_TABLE_EMPTY) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + if (shdr_status == STATUS_FCF_TABLE_EMPTY) + lpfc_printf_log(phba, KERN_ERR, LOG_FIP, "2726 READ_FCF_RECORD Indicates empty " "FCF table.\n"); - } else { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + else + lpfc_printf_log(phba, KERN_ERR, LOG_FIP, "2521 READ_FCF_RECORD mailbox failed " - "with status x%x add_status x%x, mbx\n", - shdr_status, shdr_add_status); - } - goto out; + "with status x%x add_status x%x, " + "mbx\n", shdr_status, shdr_add_status); + return NULL; } - /* Interpreting the returned information of FCF records */ + + /* Interpreting the returned information of the FCF record */ read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr; lpfc_sli_pcimem_bcopy(read_fcf, read_fcf, sizeof(struct lpfc_mbx_read_fcf_tbl)); - next_fcf_index = bf_get(lpfc_mbx_read_fcf_tbl_nxt_vindx, read_fcf); - + *next_fcf_index = bf_get(lpfc_mbx_read_fcf_tbl_nxt_vindx, read_fcf); new_fcf_record = (struct fcf_record *)(virt_addr + sizeof(struct lpfc_mbx_read_fcf_tbl)); lpfc_sli_pcimem_bcopy(new_fcf_record, new_fcf_record, sizeof(struct fcf_record)); - bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr); + return new_fcf_record; +} + +/** + * lpfc_sli4_log_fcf_record_info - Log the information of a fcf record + * @phba: pointer to lpfc hba data structure. + * @fcf_record: pointer to the fcf record. + * @vlan_id: the lowest vlan identifier associated to this fcf record. + * @next_fcf_index: the index to the next fcf record in hba's fcf table. + * + * This routine logs the detailed FCF record if the LOG_FIP logging is + enabled. 
+ **/ +static void +lpfc_sli4_log_fcf_record_info(struct lpfc_hba *phba, + struct fcf_record *fcf_record, + uint16_t vlan_id, + uint16_t next_fcf_index) +{ + lpfc_printf_log(phba, KERN_INFO, LOG_FIP, + "2764 READ_FCF_RECORD:\n" + "\tFCF_Index : x%x\n" + "\tFCF_Avail : x%x\n" + "\tFCF_Valid : x%x\n" + "\tFIP_Priority : x%x\n" + "\tMAC_Provider : x%x\n" + "\tLowest VLANID : x%x\n" + "\tFCF_MAC Addr : x%x:%x:%x:%x:%x:%x\n" + "\tFabric_Name : x%x:%x:%x:%x:%x:%x:%x:%x\n" + "\tSwitch_Name : x%x:%x:%x:%x:%x:%x:%x:%x\n" + "\tNext_FCF_Index: x%x\n", + bf_get(lpfc_fcf_record_fcf_index, fcf_record), + bf_get(lpfc_fcf_record_fcf_avail, fcf_record), + bf_get(lpfc_fcf_record_fcf_valid, fcf_record), + fcf_record->fip_priority, + bf_get(lpfc_fcf_record_mac_addr_prov, fcf_record), + vlan_id, + bf_get(lpfc_fcf_record_mac_0, fcf_record), + bf_get(lpfc_fcf_record_mac_1, fcf_record), + bf_get(lpfc_fcf_record_mac_2, fcf_record), + bf_get(lpfc_fcf_record_mac_3, fcf_record), + bf_get(lpfc_fcf_record_mac_4, fcf_record), + bf_get(lpfc_fcf_record_mac_5, fcf_record), + bf_get(lpfc_fcf_record_fab_name_0, fcf_record), + bf_get(lpfc_fcf_record_fab_name_1, fcf_record), + bf_get(lpfc_fcf_record_fab_name_2, fcf_record), + bf_get(lpfc_fcf_record_fab_name_3, fcf_record), + bf_get(lpfc_fcf_record_fab_name_4, fcf_record), + bf_get(lpfc_fcf_record_fab_name_5, fcf_record), + bf_get(lpfc_fcf_record_fab_name_6, fcf_record), + bf_get(lpfc_fcf_record_fab_name_7, fcf_record), + bf_get(lpfc_fcf_record_switch_name_0, fcf_record), + bf_get(lpfc_fcf_record_switch_name_1, fcf_record), + bf_get(lpfc_fcf_record_switch_name_2, fcf_record), + bf_get(lpfc_fcf_record_switch_name_3, fcf_record), + bf_get(lpfc_fcf_record_switch_name_4, fcf_record), + bf_get(lpfc_fcf_record_switch_name_5, fcf_record), + bf_get(lpfc_fcf_record_switch_name_6, fcf_record), + bf_get(lpfc_fcf_record_switch_name_7, fcf_record), + next_fcf_index); +} + +/** + * lpfc_mbx_cmpl_fcf_scan_read_fcf_rec - fcf scan read_fcf mbox cmpl handler. + * @phba: pointer to lpfc hba data structure. + * @mboxq: pointer to mailbox object. + * + * This function iterates through all the fcf records available in + * HBA and chooses the optimal FCF record for discovery. After finding + * the FCF for discovery it registers the FCF record and kick-starts + * discovery. + * If FCF_IN_USE flag is set in currently used FCF, the routine tries to + * use an FCF record which matches fabric name and mac address of the + * currently used FCF record. + * If the driver supports only one FCF, it will try to use the FCF record + * used by BOOT_BIOS. 
+ */ +void +lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) +{ + struct fcf_record *new_fcf_record; + uint32_t boot_flag, addr_mode; + uint16_t fcf_index, next_fcf_index; + struct lpfc_fcf_rec *fcf_rec = NULL; + uint16_t vlan_id; + int rc; + + /* If there is pending FCoE event restart FCF table scan */ + if (lpfc_check_pending_fcoe_event(phba, 0)) { + lpfc_sli4_mbox_cmd_free(phba, mboxq); + return; + } + + /* Parse the FCF record from the non-embedded mailbox command */ + new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq, + &next_fcf_index); + if (!new_fcf_record) { + lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, + "2765 Mailbox command READ_FCF_RECORD " + "failed to retrieve a FCF record.\n"); + /* Let next new FCF event trigger fast failover */ + spin_lock_irq(&phba->hbalock); + phba->hba_flag &= ~FCF_DISC_INPROGRESS; + spin_unlock_irq(&phba->hbalock); + lpfc_sli4_mbox_cmd_free(phba, mboxq); + return; + } + + /* Check the FCF record against the connection list */ rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag, &addr_mode, &vlan_id); + + /* Log the FCF record information if turned on */ + lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id, + next_fcf_index); + /* * If the fcf record does not match with connect list entries - * read the next entry. + * read the next entry; otherwise, this is an eligible FCF + * record for round robin FCF failover. */ - if (!rc) + if (!rc) { + lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, + "2781 FCF record fcf_index:x%x failed FCF " + "connection list check, fcf_avail:x%x, " + "fcf_valid:x%x\n", + bf_get(lpfc_fcf_record_fcf_index, + new_fcf_record), + bf_get(lpfc_fcf_record_fcf_avail, + new_fcf_record), + bf_get(lpfc_fcf_record_fcf_valid, + new_fcf_record)); goto read_next_fcf; + } else { + fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record); + rc = lpfc_sli4_fcf_rr_index_set(phba, fcf_index); + if (rc) + goto read_next_fcf; + } + /* * If this is not the first FCF discovery of the HBA, use last * FCF record for the discovery. The condition that a rescan * matches the in-use FCF record: fabric name, switch name, mac * address, and vlan_id. */ - spin_lock_irqsave(&phba->hbalock, iflags); + spin_lock_irq(&phba->hbalock); if (phba->fcf.fcf_flag & FCF_IN_USE) { if (lpfc_fab_name_match(phba->fcf.current_rec.fabric_name, new_fcf_record) && @@ -1649,8 +1755,9 @@ lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) __lpfc_sli4_stop_fcf_redisc_wait_timer(phba); else if (phba->fcf.fcf_flag & FCF_REDISC_FOV) /* If in fast failover, mark it's completed */ - phba->fcf.fcf_flag &= ~FCF_REDISC_FOV; - spin_unlock_irqrestore(&phba->hbalock, iflags); + phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV | + FCF_DISCOVERY); + spin_unlock_irq(&phba->hbalock); goto out; } /* @@ -1661,7 +1768,7 @@ lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) * next candidate. */ if (!(phba->fcf.fcf_flag & FCF_REDISC_FOV)) { - spin_unlock_irqrestore(&phba->hbalock, iflags); + spin_unlock_irq(&phba->hbalock); goto read_next_fcf; } } @@ -1669,14 +1776,9 @@ lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) * Update on failover FCF record only if it's in FCF fast-failover * period; otherwise, update on current FCF record. 
*/ - if (phba->fcf.fcf_flag & FCF_REDISC_FOV) { - /* Fast FCF failover only to the same fabric name */ - if (lpfc_fab_name_match(phba->fcf.current_rec.fabric_name, - new_fcf_record)) - fcf_rec = &phba->fcf.failover_rec; - else - goto read_next_fcf; - } else + if (phba->fcf.fcf_flag & FCF_REDISC_FOV) + fcf_rec = &phba->fcf.failover_rec; + else fcf_rec = &phba->fcf.current_rec; if (phba->fcf.fcf_flag & FCF_AVAILABLE) { @@ -1689,7 +1791,7 @@ lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) /* Choose this FCF record */ __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record, addr_mode, vlan_id, BOOT_ENABLE); - spin_unlock_irqrestore(&phba->hbalock, iflags); + spin_unlock_irq(&phba->hbalock); goto read_next_fcf; } /* @@ -1698,20 +1800,19 @@ lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) * the next FCF record. */ if (!boot_flag && (fcf_rec->flag & BOOT_ENABLE)) { - spin_unlock_irqrestore(&phba->hbalock, iflags); + spin_unlock_irq(&phba->hbalock); goto read_next_fcf; } /* * If the new hba FCF record has lower priority value * than the driver FCF record, use the new record. */ - if (lpfc_fab_name_match(fcf_rec->fabric_name, new_fcf_record) && - (new_fcf_record->fip_priority < fcf_rec->priority)) { + if (new_fcf_record->fip_priority < fcf_rec->priority) { /* Choose this FCF record */ __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record, addr_mode, vlan_id, 0); } - spin_unlock_irqrestore(&phba->hbalock, iflags); + spin_unlock_irq(&phba->hbalock); goto read_next_fcf; } /* @@ -1724,7 +1825,7 @@ lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) BOOT_ENABLE : 0)); phba->fcf.fcf_flag |= FCF_AVAILABLE; } - spin_unlock_irqrestore(&phba->hbalock, iflags); + spin_unlock_irq(&phba->hbalock); goto read_next_fcf; read_next_fcf: @@ -1740,9 +1841,22 @@ read_next_fcf: * FCF scan inprogress, and do nothing */ if (!(phba->fcf.failover_rec.flag & RECORD_VALID)) { - spin_lock_irqsave(&phba->hbalock, iflags); + lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, + "2782 No suitable FCF record " + "found during this round of " + "post FCF rediscovery scan: " + "fcf_evt_tag:x%x, fcf_index: " + "x%x\n", + phba->fcoe_eventtag_at_fcf_scan, + bf_get(lpfc_fcf_record_fcf_index, + new_fcf_record)); + /* + * Let next new FCF event trigger fast + * failover + */ + spin_lock_irq(&phba->hbalock); phba->hba_flag &= ~FCF_DISC_INPROGRESS; - spin_unlock_irqrestore(&phba->hbalock, iflags); + spin_unlock_irq(&phba->hbalock); return; } /* @@ -1754,16 +1868,23 @@ read_next_fcf: * record. */ - /* unregister the current in-use FCF record */ + /* Unregister the current in-use FCF record */ lpfc_unregister_fcf(phba); - /* replace in-use record with the new record */ + + /* Replace in-use record with the new record */ memcpy(&phba->fcf.current_rec, &phba->fcf.failover_rec, sizeof(struct lpfc_fcf_rec)); /* mark the FCF fast failover completed */ - spin_lock_irqsave(&phba->hbalock, iflags); + spin_lock_irq(&phba->hbalock); phba->fcf.fcf_flag &= ~FCF_REDISC_FOV; - spin_unlock_irqrestore(&phba->hbalock, iflags); + spin_unlock_irq(&phba->hbalock); + /* + * Set up the initial registered FCF index for FLOGI + * round robin FCF failover. + */ + phba->fcf.fcf_rr_init_indx = + phba->fcf.failover_rec.fcf_indx; /* Register to the new FCF record */ lpfc_register_fcf(phba); } else { @@ -1776,13 +1897,25 @@ read_next_fcf: return; /* * Otherwise, initial scan or post linkdown rescan, - * register with the best fit FCF record found so - * far through the scanning process. 
+ * register with the best FCF record found so far + * through the FCF scanning process. + */ + + /* mark the initial FCF discovery completed */ + spin_lock_irq(&phba->hbalock); + phba->fcf.fcf_flag &= ~FCF_INIT_DISC; + spin_unlock_irq(&phba->hbalock); + /* + * Set up the initial registered FCF index for FLOGI + * round robin FCF failover */ + phba->fcf.fcf_rr_init_indx = + phba->fcf.current_rec.fcf_indx; + /* Register to the new FCF record */ lpfc_register_fcf(phba); } } else - lpfc_sli4_read_fcf_record(phba, next_fcf_index); + lpfc_sli4_fcf_scan_read_fcf_rec(phba, next_fcf_index); return; out: @@ -1793,6 +1926,141 @@ out: } /** + * lpfc_mbx_cmpl_fcf_rr_read_fcf_rec - fcf round robin read_fcf mbox cmpl hdler + * @phba: pointer to lpfc hba data structure. + * @mboxq: pointer to mailbox object. + * + * This is the callback function for FLOGI failure round robin FCF failover + * read FCF record mailbox command from the eligible FCF record bmask for + * performing the failover. If the FCF read back is not valid/available, it + * fails through to retrying FLOGI to the currently registered FCF again. + * Otherwise, if the FCF read back is valid and available, it will set the + * newly read FCF record to the failover FCF record, unregister currently + * registered FCF record, copy the failover FCF record to the current + * FCF record, and then register the current FCF record before proceeding + * to trying FLOGI on the new failover FCF. + */ +void +lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) +{ + struct fcf_record *new_fcf_record; + uint32_t boot_flag, addr_mode; + uint16_t next_fcf_index; + uint16_t current_fcf_index; + uint16_t vlan_id; + + /* If link state is not up, stop the round robin failover process */ + if (phba->link_state < LPFC_LINK_UP) { + spin_lock_irq(&phba->hbalock); + phba->fcf.fcf_flag &= ~FCF_DISCOVERY; + spin_unlock_irq(&phba->hbalock); + lpfc_sli4_mbox_cmd_free(phba, mboxq); + return; + } + + /* Parse the FCF record from the non-embedded mailbox command */ + new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq, + &next_fcf_index); + if (!new_fcf_record) { + lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, + "2766 Mailbox command READ_FCF_RECORD " + "failed to retrieve a FCF record.\n"); + goto out; + } + + /* Get the needed parameters from FCF record */ + lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag, + &addr_mode, &vlan_id); + + /* Log the FCF record information if turned on */ + lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id, + next_fcf_index); + + /* Upload new FCF record to the failover FCF record */ + spin_lock_irq(&phba->hbalock); + __lpfc_update_fcf_record(phba, &phba->fcf.failover_rec, + new_fcf_record, addr_mode, vlan_id, + (boot_flag ? BOOT_ENABLE : 0)); + spin_unlock_irq(&phba->hbalock); + + current_fcf_index = phba->fcf.current_rec.fcf_indx; + + /* Unregister the current in-use FCF record */ + lpfc_unregister_fcf(phba); + + /* Replace in-use record with the new record */ + memcpy(&phba->fcf.current_rec, &phba->fcf.failover_rec, + sizeof(struct lpfc_fcf_rec)); + + lpfc_printf_log(phba, KERN_INFO, LOG_FIP, + "2783 FLOGI round robin FCF failover from FCF " + "(index:x%x) to FCF (index:x%x).\n", + current_fcf_index, + bf_get(lpfc_fcf_record_fcf_index, new_fcf_record)); + +out: + lpfc_sli4_mbox_cmd_free(phba, mboxq); + lpfc_register_fcf(phba); +} + +/** + * lpfc_mbx_cmpl_read_fcf_rec - read fcf completion handler. + * @phba: pointer to lpfc hba data structure. + * @mboxq: pointer to mailbox object. 
+ * + * This is the callback function of read FCF record mailbox command for + * updating the eligible FCF bmask for FLOGI failure round robin FCF + * failover when a new FCF event happened. If the FCF read back is + * valid/available and it passes the connection list check, it updates + * the bmask for the eligible FCF record for round robin failover. + */ +void +lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) +{ + struct fcf_record *new_fcf_record; + uint32_t boot_flag, addr_mode; + uint16_t fcf_index, next_fcf_index; + uint16_t vlan_id; + int rc; + + /* If link state is not up, no need to proceed */ + if (phba->link_state < LPFC_LINK_UP) + goto out; + + /* If FCF discovery period is over, no need to proceed */ + if (phba->fcf.fcf_flag & FCF_DISCOVERY) + goto out; + + /* Parse the FCF record from the non-embedded mailbox command */ + new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq, + &next_fcf_index); + if (!new_fcf_record) { + lpfc_printf_log(phba, KERN_INFO, LOG_FIP, + "2767 Mailbox command READ_FCF_RECORD " + "failed to retrieve a FCF record.\n"); + goto out; + } + + /* Check the connection list for eligibility */ + rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag, + &addr_mode, &vlan_id); + + /* Log the FCF record information if turned on */ + lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id, + next_fcf_index); + + if (!rc) + goto out; + + /* Update the eligible FCF record index bmask */ + fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record); + rc = lpfc_sli4_fcf_rr_index_set(phba, fcf_index); + +out: + lpfc_sli4_mbox_cmd_free(phba, mboxq); +} + +/** * lpfc_init_vpi_cmpl - Completion handler for init_vpi mbox command. * @phba: pointer to lpfc hba data structure. * @mboxq: pointer to mailbox data structure. 
@@ -2024,8 +2292,6 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la) int rc; struct fcf_record *fcf_record; - sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); - spin_lock_irq(&phba->hbalock); switch (la->UlnkSpeed) { case LA_1GHZ_LINK: @@ -2117,18 +2383,24 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la) spin_unlock_irq(&phba->hbalock); lpfc_linkup(phba); - if (sparam_mbox) { - lpfc_read_sparam(phba, sparam_mbox, 0); - sparam_mbox->vport = vport; - sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam; - rc = lpfc_sli_issue_mbox(phba, sparam_mbox, MBX_NOWAIT); - if (rc == MBX_NOT_FINISHED) { - mp = (struct lpfc_dmabuf *) sparam_mbox->context1; - lpfc_mbuf_free(phba, mp->virt, mp->phys); - kfree(mp); - mempool_free(sparam_mbox, phba->mbox_mem_pool); - goto out; - } + sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!sparam_mbox) + goto out; + + rc = lpfc_read_sparam(phba, sparam_mbox, 0); + if (rc) { + mempool_free(sparam_mbox, phba->mbox_mem_pool); + goto out; + } + sparam_mbox->vport = vport; + sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam; + rc = lpfc_sli_issue_mbox(phba, sparam_mbox, MBX_NOWAIT); + if (rc == MBX_NOT_FINISHED) { + mp = (struct lpfc_dmabuf *) sparam_mbox->context1; + lpfc_mbuf_free(phba, mp->virt, mp->phys); + kfree(mp); + mempool_free(sparam_mbox, phba->mbox_mem_pool); + goto out; } if (!(phba->hba_flag & HBA_FCOE_SUPPORT)) { @@ -2186,10 +2458,20 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la) spin_unlock_irq(&phba->hbalock); return; } + /* This is the initial FCF discovery scan */ + phba->fcf.fcf_flag |= FCF_INIT_DISC; spin_unlock_irq(&phba->hbalock); - rc = lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST); - if (rc) + lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, + "2778 Start FCF table scan at linkup\n"); + + rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, + LPFC_FCOE_FCF_GET_FIRST); + if (rc) { + spin_lock_irq(&phba->hbalock); + phba->fcf.fcf_flag &= ~FCF_INIT_DISC; + spin_unlock_irq(&phba->hbalock); goto out; + } } return; @@ -3379,8 +3661,12 @@ lpfc_unreg_hba_rpis(struct lpfc_hba *phba) shost = lpfc_shost_from_vport(vports[i]); spin_lock_irq(shost->host_lock); list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) { - if (ndlp->nlp_flag & NLP_RPI_VALID) + if (ndlp->nlp_flag & NLP_RPI_VALID) { + /* The mempool_alloc might sleep */ + spin_unlock_irq(shost->host_lock); lpfc_unreg_rpi(vports[i], ndlp); + spin_lock_irq(shost->host_lock); + } } spin_unlock_irq(shost->host_lock); } @@ -4756,6 +5042,7 @@ lpfc_unregister_fcf_rescan(struct lpfc_hba *phba) return; /* Reset HBA FCF states after successful unregister FCF */ phba->fcf.fcf_flag = 0; + phba->fcf.current_rec.flag = 0; /* * If driver is not unloading, check if there is any other @@ -4765,13 +5052,21 @@ lpfc_unregister_fcf_rescan(struct lpfc_hba *phba) (phba->link_state < LPFC_LINK_UP)) return; - rc = lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST); + /* This is considered as the initial FCF discovery scan */ + spin_lock_irq(&phba->hbalock); + phba->fcf.fcf_flag |= FCF_INIT_DISC; + spin_unlock_irq(&phba->hbalock); + rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST); - if (rc) + if (rc) { + spin_lock_irq(&phba->hbalock); + phba->fcf.fcf_flag &= ~FCF_INIT_DISC; + spin_unlock_irq(&phba->hbalock); lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX, "2553 lpfc_unregister_unused_fcf failed " "to read FCF record HBA state x%x\n", phba->pport->port_state); + } } /** diff --git 
a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index d29ac7c317d..ea44239eeb3 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -350,7 +350,12 @@ lpfc_config_port_post(struct lpfc_hba *phba) mb = &pmb->u.mb; /* Get login parameters for NID. */ - lpfc_read_sparam(phba, pmb, 0); + rc = lpfc_read_sparam(phba, pmb, 0); + if (rc) { + mempool_free(pmb, phba->mbox_mem_pool); + return -ENOMEM; + } + pmb->vport = vport; if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, @@ -359,7 +364,7 @@ lpfc_config_port_post(struct lpfc_hba *phba) mb->mbxCommand, mb->mbxStatus); phba->link_state = LPFC_HBA_ERROR; mp = (struct lpfc_dmabuf *) pmb->context1; - mempool_free( pmb, phba->mbox_mem_pool); + mempool_free(pmb, phba->mbox_mem_pool); lpfc_mbuf_free(phba, mp->virt, mp->phys); kfree(mp); return -EIO; @@ -544,7 +549,7 @@ lpfc_config_port_post(struct lpfc_hba *phba) mempool_free(pmb, phba->mbox_mem_pool); return -EIO; } - } else if (phba->cfg_suppress_link_up == 0) { + } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) { lpfc_init_link(phba, pmb, phba->cfg_topology, phba->cfg_link_speed); pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; @@ -571,6 +576,11 @@ lpfc_config_port_post(struct lpfc_hba *phba) } /* MBOX buffer will be freed in mbox compl */ pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!pmb) { + phba->link_state = LPFC_HBA_ERROR; + return -ENOMEM; + } + lpfc_config_async(phba, pmb, LPFC_ELS_RING); pmb->mbox_cmpl = lpfc_config_async_cmpl; pmb->vport = phba->pport; @@ -588,6 +598,11 @@ lpfc_config_port_post(struct lpfc_hba *phba) /* Get Option rom version */ pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!pmb) { + phba->link_state = LPFC_HBA_ERROR; + return -ENOMEM; + } + lpfc_dump_wakeup_param(phba, pmb); pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl; pmb->vport = phba->pport; @@ -652,7 +667,7 @@ lpfc_hba_init_link(struct lpfc_hba *phba) mempool_free(pmb, phba->mbox_mem_pool); return -EIO; } - phba->cfg_suppress_link_up = 0; + phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK; return 0; } @@ -807,6 +822,8 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba) LIST_HEAD(aborts); int ret; unsigned long iflag = 0; + struct lpfc_sglq *sglq_entry = NULL; + ret = lpfc_hba_down_post_s3(phba); if (ret) return ret; @@ -822,6 +839,10 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba) * list. 
*/ spin_lock(&phba->sli4_hba.abts_sgl_list_lock); + list_for_each_entry(sglq_entry, + &phba->sli4_hba.lpfc_abts_els_sgl_list, list) + sglq_entry->state = SGL_FREED; + list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list, &phba->sli4_hba.lpfc_sgl_list); spin_unlock(&phba->sli4_hba.abts_sgl_list_lock); @@ -2178,8 +2199,10 @@ lpfc_stop_vport_timers(struct lpfc_vport *vport) void __lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba) { - /* Clear pending FCF rediscovery wait timer */ - phba->fcf.fcf_flag &= ~FCF_REDISC_PEND; + /* Clear pending FCF rediscovery wait and failover in progress flags */ + phba->fcf.fcf_flag &= ~(FCF_REDISC_PEND | + FCF_DEAD_DISC | + FCF_ACVL_DISC); /* Now, try to stop the timer */ del_timer(&phba->fcf.redisc_wait); } @@ -2576,6 +2599,14 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) init_timer(&vport->els_tmofunc); vport->els_tmofunc.function = lpfc_els_timeout; vport->els_tmofunc.data = (unsigned long)vport; + if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) { + phba->menlo_flag |= HBA_MENLO_SUPPORT; + /* check for menlo minimum sg count */ + if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT) { + phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT; + shost->sg_tablesize = phba->cfg_sg_seg_cnt; + } + } error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev); if (error) @@ -2912,6 +2943,9 @@ lpfc_sli4_fcf_redisc_wait_tmo(unsigned long ptr) /* FCF rediscovery event to worker thread */ phba->fcf.fcf_flag |= FCF_REDISC_EVT; spin_unlock_irq(&phba->hbalock); + lpfc_printf_log(phba, KERN_INFO, LOG_FIP, + "2776 FCF rediscover wait timer expired, post " + "a worker thread event for FCF table scan\n"); /* wake up worker thread */ lpfc_worker_wake_up(phba); } @@ -3183,6 +3217,68 @@ out_free_pmb: } /** + * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport + * @vport: pointer to vport data structure. + * + * This routine is to perform Clear Virtual Link (CVL) on a vport in + * response to a CVL event. + * + * Return the pointer to the ndlp with the vport if successful, otherwise + * return NULL. + **/ +static struct lpfc_nodelist * +lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport) +{ + struct lpfc_nodelist *ndlp; + struct Scsi_Host *shost; + struct lpfc_hba *phba; + + if (!vport) + return NULL; + ndlp = lpfc_findnode_did(vport, Fabric_DID); + if (!ndlp) + return NULL; + phba = vport->phba; + if (!phba) + return NULL; + if (phba->pport->port_state <= LPFC_FLOGI) + return NULL; + /* If virtual link is not yet instantiated ignore CVL */ + if (vport->port_state <= LPFC_FDISC) + return NULL; + shost = lpfc_shost_from_vport(vport); + if (!shost) + return NULL; + lpfc_linkdown_port(vport); + lpfc_cleanup_pending_mbox(vport); + spin_lock_irq(shost->host_lock); + vport->fc_flag |= FC_VPORT_CVL_RCVD; + spin_unlock_irq(shost->host_lock); + + return ndlp; +} + +/** + * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports + * @vport: pointer to lpfc hba data structure. + * + * This routine is to perform Clear Virtual Link (CVL) on all vports in + * response to a FCF dead event. 
+ **/ +static void +lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba) +{ + struct lpfc_vport **vports; + int i; + + vports = lpfc_create_vport_work_array(phba); + if (vports) + for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) + lpfc_sli4_perform_vport_cvl(vports[i]); + lpfc_destroy_vport_work_array(phba, vports); +} + +/** * lpfc_sli4_async_fcoe_evt - Process the asynchronous fcoe event * @phba: pointer to lpfc hba data structure. * @acqe_link: pointer to the async fcoe completion queue entry. @@ -3198,7 +3294,6 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba, struct lpfc_vport *vport; struct lpfc_nodelist *ndlp; struct Scsi_Host *shost; - uint32_t link_state; int active_vlink_present; struct lpfc_vport **vports; int i; @@ -3208,10 +3303,11 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba, switch (event_type) { case LPFC_FCOE_EVENT_TYPE_NEW_FCF: case LPFC_FCOE_EVENT_TYPE_FCF_PARAM_MOD: - lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, - "2546 New FCF found index 0x%x tag 0x%x\n", - acqe_fcoe->index, - acqe_fcoe->event_tag); + lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, + "2546 New FCF found/FCF parameter modified event: " + "evt_tag:x%x, fcf_index:x%x\n", + acqe_fcoe->event_tag, acqe_fcoe->index); + spin_lock_irq(&phba->hbalock); if ((phba->fcf.fcf_flag & FCF_SCAN_DONE) || (phba->hba_flag & FCF_DISC_INPROGRESS)) { @@ -3222,6 +3318,7 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba, spin_unlock_irq(&phba->hbalock); break; } + if (phba->fcf.fcf_flag & FCF_REDISC_EVT) { /* * If fast FCF failover rescan event is pending, @@ -3232,12 +3329,33 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba, } spin_unlock_irq(&phba->hbalock); - /* Read the FCF table and re-discover SAN. */ - rc = lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST); + if ((phba->fcf.fcf_flag & FCF_DISCOVERY) && + !(phba->fcf.fcf_flag & FCF_REDISC_FOV)) { + /* + * During period of FCF discovery, read the FCF + * table record indexed by the event to update + * FCF round robin failover eligible FCF bmask. + */ + lpfc_printf_log(phba, KERN_INFO, LOG_FIP | + LOG_DISCOVERY, + "2779 Read new FCF record with " + "fcf_index:x%x for updating FCF " + "round robin failover bmask\n", + acqe_fcoe->index); + rc = lpfc_sli4_read_fcf_rec(phba, acqe_fcoe->index); + } + + /* Otherwise, scan the entire FCF table and re-discover SAN */ + lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, + "2770 Start FCF table scan due to new FCF " + "event: evt_tag:x%x, fcf_index:x%x\n", + acqe_fcoe->event_tag, acqe_fcoe->index); + rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, + LPFC_FCOE_FCF_GET_FIRST); if (rc) - lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, - "2547 Read FCF record failed 0x%x\n", - rc); + lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, + "2547 Issue FCF scan read FCF mailbox " + "command failed 0x%x\n", rc); break; case LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL: @@ -3248,47 +3366,63 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba, break; case LPFC_FCOE_EVENT_TYPE_FCF_DEAD: - lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, + lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, "2549 FCF disconnected from network index 0x%x" " tag 0x%x\n", acqe_fcoe->index, acqe_fcoe->event_tag); /* If the event is not for currently used fcf do nothing */ if (phba->fcf.current_rec.fcf_indx != acqe_fcoe->index) break; - /* - * Currently, driver support only one FCF - so treat this as - * a link down, but save the link state because we don't want - * it to be changed to Link Down unless it is already down. 
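lpfc_sli4_perform_all_vport_cvl() above uses the driver's usual snapshot idiom: copy the live vport list into a NULL-terminated work array, walk it with a bounded index, then release the snapshot so vports can come and go while the walk runs. A simplified userspace model; vport, snapshot_vports and release_vports are hypothetical names standing in for the lpfc_*_vport_work_array helpers:

    #include <stdio.h>
    #include <stdlib.h>

    struct vport { int id; };

    #define MAX_VPORTS 4

    /* models lpfc_create_vport_work_array(): NULL-terminated snapshot,
     * or NULL if the allocation fails */
    static struct vport **snapshot_vports(struct vport *live, int n)
    {
        struct vport **arr = calloc(MAX_VPORTS + 1, sizeof(*arr));
        int i;

        if (!arr)
            return NULL;
        for (i = 0; i < n && i < MAX_VPORTS; i++)
            arr[i] = &live[i];
        return arr;
    }

    static void release_vports(struct vport **arr)
    {
        free(arr);      /* models lpfc_destroy_vport_work_array() */
    }

    static void perform_all_vport_cvl(struct vport *live, int n)
    {
        struct vport **vports = snapshot_vports(live, n);
        int i;

        if (vports)
            /* bounded index AND NULL check, as in the driver loop */
            for (i = 0; i <= MAX_VPORTS && vports[i] != NULL; i++)
                printf("CVL on vport %d\n", vports[i]->id);
        release_vports(vports);
    }

    int main(void)
    {
        struct vport live[2] = { { 1 }, { 2 } };
        perform_all_vport_cvl(live, 2);
        return 0;
    }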
+ /* We request port to rediscover the entire FCF table for + * a fast recovery from case that the current FCF record + * is no longer valid if we are not in the middle of FCF + * failover process already. */ - link_state = phba->link_state; - lpfc_linkdown(phba); - phba->link_state = link_state; - /* Unregister FCF if no devices connected to it */ - lpfc_unregister_unused_fcf(phba); + spin_lock_irq(&phba->hbalock); + if (phba->fcf.fcf_flag & FCF_DISCOVERY) { + spin_unlock_irq(&phba->hbalock); + /* Update FLOGI FCF failover eligible FCF bmask */ + lpfc_sli4_fcf_rr_index_clear(phba, acqe_fcoe->index); + break; + } + /* Mark the fast failover process in progress */ + phba->fcf.fcf_flag |= FCF_DEAD_DISC; + spin_unlock_irq(&phba->hbalock); + lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, + "2771 Start FCF fast failover process due to " + "FCF DEAD event: evt_tag:x%x, fcf_index:x%x " + "\n", acqe_fcoe->event_tag, acqe_fcoe->index); + rc = lpfc_sli4_redisc_fcf_table(phba); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_FIP | + LOG_DISCOVERY, + "2772 Issue FCF rediscover mailbox " + "command failed, fail through to FCF " + "dead event\n"); + spin_lock_irq(&phba->hbalock); + phba->fcf.fcf_flag &= ~FCF_DEAD_DISC; + spin_unlock_irq(&phba->hbalock); + /* + * Last resort will fail over by treating this + * as a link down to FCF registration. + */ + lpfc_sli4_fcf_dead_failthrough(phba); + } else + /* Handling fast FCF failover to a DEAD FCF event + * is considered equivalent to receiving CVL to all + * vports. + */ + lpfc_sli4_perform_all_vport_cvl(phba); break; case LPFC_FCOE_EVENT_TYPE_CVL: - lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, + lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, "2718 Clear Virtual Link Received for VPI 0x%x" " tag 0x%x\n", acqe_fcoe->index, acqe_fcoe->event_tag); vport = lpfc_find_vport_by_vpid(phba, acqe_fcoe->index - phba->vpi_base); - if (!vport) - break; - ndlp = lpfc_findnode_did(vport, Fabric_DID); + ndlp = lpfc_sli4_perform_vport_cvl(vport); if (!ndlp) break; - shost = lpfc_shost_from_vport(vport); - if (phba->pport->port_state <= LPFC_FLOGI) - break; - /* If virtual link is not yet instantiated ignore CVL */ - if (vport->port_state <= LPFC_FDISC) - break; - - lpfc_linkdown_port(vport); - lpfc_cleanup_pending_mbox(vport); - spin_lock_irq(shost->host_lock); - vport->fc_flag |= FC_VPORT_CVL_RCVD; - spin_unlock_irq(shost->host_lock); active_vlink_present = 0; vports = lpfc_create_vport_work_array(phba); @@ -3311,6 +3445,7 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba, * re-instantiate the Vlink using FDISC. */ mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ); + shost = lpfc_shost_from_vport(vport); spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_DELAY_TMO; spin_unlock_irq(shost->host_lock); @@ -3321,15 +3456,38 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba, * Otherwise, we request port to rediscover * the entire FCF table for a fast recovery * from possible case that the current FCF - * is no longer valid. + * is no longer valid if we are not already + * in the FCF failover process.
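The FCF DEAD branch above is a small state machine guarded by the hbalock: bail out if a fast failover is already in flight, set FCF_DEAD_DISC, ask the port to rediscover, and clear the flag again before falling back when the mailbox cannot be issued. A sketch of that protocol with a pthread mutex standing in for the spinlock; the helper names are illustrative, while the flag values mirror the lpfc_sli4.h definitions shown later in this diff:

    #include <pthread.h>
    #include <stdio.h>

    #define FCF_DEAD_DISC 0x20
    #define FCF_DISCOVERY 0x70   /* INIT | DEAD | ACVL, per lpfc_sli4.h */

    static pthread_mutex_t hbalock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned int fcf_flag;

    /* return nonzero here to exercise the fallback path */
    static int issue_redisc_mbox(void) { return 0; }

    static void dead_failthrough(void)
    {
        puts("fall back: treat FCF DEAD as a link down");
    }

    static void handle_fcf_dead(void)
    {
        pthread_mutex_lock(&hbalock);
        if (fcf_flag & FCF_DISCOVERY) {      /* failover already running */
            pthread_mutex_unlock(&hbalock);
            return;
        }
        fcf_flag |= FCF_DEAD_DISC;           /* mark fast failover started */
        pthread_mutex_unlock(&hbalock);

        if (issue_redisc_mbox()) {
            /* cannot ask the port to rediscover: undo the mark, then
             * fall through to link-down style recovery */
            pthread_mutex_lock(&hbalock);
            fcf_flag &= ~FCF_DEAD_DISC;
            pthread_mutex_unlock(&hbalock);
            dead_failthrough();
        }
    }

    int main(void)
    {
        handle_fcf_dead();
        return 0;
    }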
*/ + spin_lock_irq(&phba->hbalock); + if (phba->fcf.fcf_flag & FCF_DISCOVERY) { + spin_unlock_irq(&phba->hbalock); + break; + } + /* Mark the fast failover process in progress */ + phba->fcf.fcf_flag |= FCF_ACVL_DISC; + spin_unlock_irq(&phba->hbalock); + lpfc_printf_log(phba, KERN_INFO, LOG_FIP | + LOG_DISCOVERY, + "2773 Start FCF fast failover due " + "to CVL event: evt_tag:x%x\n", + acqe_fcoe->event_tag); rc = lpfc_sli4_redisc_fcf_table(phba); - if (rc) + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_FIP | + LOG_DISCOVERY, + "2774 Issue FCF rediscover " + "mailbox command failed, fail " + "through to CVL event\n"); + spin_lock_irq(&phba->hbalock); + phba->fcf.fcf_flag &= ~FCF_ACVL_DISC; + spin_unlock_irq(&phba->hbalock); /* * Last resort will be re-try on the * the current registered FCF entry. */ lpfc_retry_pport_discovery(phba); + } } break; default: @@ -3426,11 +3584,14 @@ void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba) spin_unlock_irq(&phba->hbalock); /* Scan FCF table from the first entry to re-discover SAN */ - rc = lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST); + lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, + "2777 Start FCF table scan after FCF " + "rediscovery quiescent period over\n"); + rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST); if (rc) - lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, - "2747 Post FCF rediscovery read FCF record " - "failed 0x%x\n", rc); + lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, + "2747 Issue FCF scan read FCF mailbox " + "command failed 0x%x\n", rc); } /** @@ -3722,6 +3883,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) int rc, i, hbq_count, buf_size, dma_buf_size, max_buf_size; uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0}; struct lpfc_mqe *mqe; + int longs; /* Before proceed, wait for POST done and device ready */ rc = lpfc_sli4_post_status_check(phba); @@ -3898,13 +4060,24 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) goto out_free_active_sgl; } + /* Allocate eligible FCF bmask memory for FCF round robin failover */ + longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG; + phba->fcf.fcf_rr_bmask = kzalloc(longs * sizeof(unsigned long), + GFP_KERNEL); + if (!phba->fcf.fcf_rr_bmask) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2759 Failed allocate memory for FCF round " + "robin failover bmask\n"); + goto out_remove_rpi_hdrs; + } + phba->sli4_hba.fcp_eq_hdl = kzalloc((sizeof(struct lpfc_fcp_eq_hdl) * phba->cfg_fcp_eq_count), GFP_KERNEL); if (!phba->sli4_hba.fcp_eq_hdl) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "2572 Failed allocate memory for fast-path " "per-EQ handle array\n"); - goto out_remove_rpi_hdrs; + goto out_free_fcf_rr_bmask; } phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) * @@ -3957,6 +4130,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) out_free_fcp_eq_hdl: kfree(phba->sli4_hba.fcp_eq_hdl); +out_free_fcf_rr_bmask: + kfree(phba->fcf.fcf_rr_bmask); out_remove_rpi_hdrs: lpfc_sli4_remove_rpi_hdrs(phba); out_free_active_sgl: @@ -4002,6 +4177,9 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba) lpfc_sli4_remove_rpi_hdrs(phba); lpfc_sli4_remove_rpis(phba); + /* Free eligible FCF index bmask */ + kfree(phba->fcf.fcf_rr_bmask); + /* Free the ELS sgl list */ lpfc_free_active_sgl(phba); lpfc_free_sgl_list(phba); @@ -4397,6 +4575,7 @@ lpfc_init_sgl_list(struct lpfc_hba *phba) /* The list order is used by later block SGL registration */ spin_lock_irq(&phba->hbalock); + sglq_entry->state = 
SGL_FREED; list_add_tail(&sglq_entry->list, &phba->sli4_hba.lpfc_sgl_list); phba->sli4_hba.lpfc_els_sgl_array[i] = sglq_entry; phba->sli4_hba.total_sglq_bufs++; diff --git a/drivers/scsi/lpfc/lpfc_logmsg.h b/drivers/scsi/lpfc/lpfc_logmsg.h index 954ba57970a..bb59e927312 100644 --- a/drivers/scsi/lpfc/lpfc_logmsg.h +++ b/drivers/scsi/lpfc/lpfc_logmsg.h @@ -35,6 +35,7 @@ #define LOG_VPORT 0x00004000 /* NPIV events */ #define LOF_SECURITY 0x00008000 /* Security events */ #define LOG_EVENT 0x00010000 /* CT,TEMP,DUMP, logging */ +#define LOG_FIP 0x00020000 /* FIP events */ #define LOG_ALL_MSG 0xffffffff /* LOG all messages */ #define lpfc_printf_vlog(vport, level, mask, fmt, arg...) \ diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c index 6c4dce1a30c..1e61ae3bc4e 100644 --- a/drivers/scsi/lpfc/lpfc_mbox.c +++ b/drivers/scsi/lpfc/lpfc_mbox.c @@ -1748,7 +1748,7 @@ lpfc_sli4_mbox_opcode_get(struct lpfc_hba *phba, struct lpfcMboxq *mbox) } /** - * lpfc_sli4_mbx_read_fcf_record - Allocate and construct read fcf mbox cmd + * lpfc_sli4_mbx_read_fcf_rec - Allocate and construct read fcf mbox cmd * @phba: pointer to lpfc hba data structure. * @fcf_index: index to fcf table. * @@ -1759,9 +1759,9 @@ lpfc_sli4_mbox_opcode_get(struct lpfc_hba *phba, struct lpfcMboxq *mbox) * NULL. **/ int -lpfc_sli4_mbx_read_fcf_record(struct lpfc_hba *phba, - struct lpfcMboxq *mboxq, - uint16_t fcf_index) +lpfc_sli4_mbx_read_fcf_rec(struct lpfc_hba *phba, + struct lpfcMboxq *mboxq, + uint16_t fcf_index) { void *virt_addr; dma_addr_t phys_addr; diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index 483fb74bc59..b16bb2c9978 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -620,23 +620,40 @@ lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba, uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri); struct lpfc_scsi_buf *psb, *next_psb; unsigned long iflag = 0; + struct lpfc_iocbq *iocbq; + int i; - spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock, iflag); + spin_lock_irqsave(&phba->hbalock, iflag); + spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock); list_for_each_entry_safe(psb, next_psb, &phba->sli4_hba.lpfc_abts_scsi_buf_list, list) { if (psb->cur_iocbq.sli4_xritag == xri) { list_del(&psb->list); psb->exch_busy = 0; psb->status = IOSTAT_SUCCESS; - spin_unlock_irqrestore( - &phba->sli4_hba.abts_scsi_buf_list_lock, - iflag); + spin_unlock( + &phba->sli4_hba.abts_scsi_buf_list_lock); + spin_unlock_irqrestore(&phba->hbalock, iflag); lpfc_release_scsi_buf_s4(phba, psb); return; } } - spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock, - iflag); + spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock); + for (i = 1; i <= phba->sli.last_iotag; i++) { + iocbq = phba->sli.iocbq_lookup[i]; + + if (!(iocbq->iocb_flag & LPFC_IO_FCP) || + (iocbq->iocb_flag & LPFC_IO_LIBDFC)) + continue; + if (iocbq->sli4_xritag != xri) + continue; + psb = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq); + psb->exch_busy = 0; + spin_unlock_irqrestore(&phba->hbalock, iflag); + return; + + } + spin_unlock_irqrestore(&phba->hbalock, iflag); } /** @@ -1006,6 +1023,7 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) struct scatterlist *sgel = NULL; struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl; + struct lpfc_iocbq *iocbq = &lpfc_cmd->cur_iocbq; IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb; struct ulp_bde64 *data_bde = iocb_cmd->unsli3.fcp_ext.dbde; dma_addr_t physaddr; @@ -1056,6 
+1074,7 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) physaddr = sg_dma_address(sgel); if (phba->sli_rev == 3 && !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) && + !(iocbq->iocb_flag & DSS_SECURITY_OP) && nseg <= LPFC_EXT_DATA_BDE_COUNT) { data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64; data_bde->tus.f.bdeSize = sg_dma_len(sgel); @@ -1082,7 +1101,8 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) * explicitly reinitialized since all iocb memory resources are reused. */ if (phba->sli_rev == 3 && - !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) { + !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) && + !(iocbq->iocb_flag & DSS_SECURITY_OP)) { if (num_bde > LPFC_EXT_DATA_BDE_COUNT) { /* * The extended IOCB format can only fit 3 BDE or a BPL. @@ -1107,6 +1127,7 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) } else { iocb_cmd->un.fcpi64.bdl.bdeSize = ((num_bde + 2) * sizeof(struct ulp_bde64)); + iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1); } fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd)); @@ -2079,8 +2100,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, if (resp_info & RSP_LEN_VALID) { rsplen = be32_to_cpu(fcprsp->rspRspLen); - if ((rsplen != 0 && rsplen != 4 && rsplen != 8) || - (fcprsp->rspInfo3 != RSP_NO_FAILURE)) { + if (rsplen != 0 && rsplen != 4 && rsplen != 8) { lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, "2719 Invalid response length: " "tgt x%x lun x%x cmnd x%x rsplen x%x\n", @@ -2090,6 +2110,17 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, host_status = DID_ERROR; goto out; } + if (fcprsp->rspInfo3 != RSP_NO_FAILURE) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, + "2757 Protocol failure detected during " + "processing of FCP I/O op: " + "tgt x%x lun x%x cmnd x%x rspInfo3 x%x\n", + cmnd->device->id, + cmnd->device->lun, cmnd->cmnd[0], + fcprsp->rspInfo3); + host_status = DID_ERROR; + goto out; + } } if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) { diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 35e3b96d4e0..fe6660ca645 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -494,7 +494,7 @@ __lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag) * * Returns sglq pointer = success, NULL = Failure.
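The lpfc_handle_fcp_err() change above splits one compound test so an invalid rspRspLen and a protocol failure reported in rspInfo3 each get their own diagnostic (messages 2719 vs 2757). The validation logic reduced to plain C; per the FCP spec a response code of 0 means no failure, which is what RSP_NO_FAILURE denotes here:

    #include <stdio.h>

    #define RSP_NO_FAILURE 0x00  /* FCP response code: no failure */

    /* returns 0 if the FCP_RSP is acceptable, -1 otherwise */
    static int check_fcp_rsp(unsigned int rsplen, unsigned char rsp_info3)
    {
        /* FCP allows only 0, 4 or 8 bytes of response info */
        if (rsplen != 0 && rsplen != 4 && rsplen != 8) {
            fprintf(stderr, "invalid response length %u\n", rsplen);
            return -1;
        }
        /* reported separately so the log pinpoints the cause */
        if (rsp_info3 != RSP_NO_FAILURE) {
            fprintf(stderr, "protocol failure, rspInfo3 0x%x\n", rsp_info3);
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        check_fcp_rsp(3, RSP_NO_FAILURE);        /* rejected: bad length */
        check_fcp_rsp(8, 0x01);                  /* rejected: failure code */
        return check_fcp_rsp(8, RSP_NO_FAILURE); /* accepted: exit 0 */
    }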
**/ -static struct lpfc_sglq * +struct lpfc_sglq * __lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag) { uint16_t adj_xri; @@ -526,6 +526,7 @@ __lpfc_sli_get_sglq(struct lpfc_hba *phba) return NULL; adj_xri = sglq->sli4_xritag - phba->sli4_hba.max_cfg_param.xri_base; phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = sglq; + sglq->state = SGL_ALLOCATED; return sglq; } @@ -580,15 +581,18 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) else sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_xritag); if (sglq) { - if (iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) { + if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) && + (sglq->state != SGL_XRI_ABORTED)) { spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock, iflag); list_add(&sglq->list, &phba->sli4_hba.lpfc_abts_els_sgl_list); spin_unlock_irqrestore( &phba->sli4_hba.abts_sgl_list_lock, iflag); - } else + } else { + sglq->state = SGL_FREED; list_add(&sglq->list, &phba->sli4_hba.lpfc_sgl_list); + } } @@ -2258,41 +2262,56 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, spin_unlock_irqrestore(&phba->hbalock, iflag); } - if ((phba->sli_rev == LPFC_SLI_REV4) && - (saveq->iocb_flag & LPFC_EXCHANGE_BUSY)) { - /* Set cmdiocb flag for the exchange - * busy so sgl (xri) will not be - * released until the abort xri is - * received from hba, clear the - * LPFC_DRIVER_ABORTED bit in case - * it was driver initiated abort. - */ - spin_lock_irqsave(&phba->hbalock, - iflag); - cmdiocbp->iocb_flag &= - ~LPFC_DRIVER_ABORTED; - cmdiocbp->iocb_flag |= - LPFC_EXCHANGE_BUSY; - spin_unlock_irqrestore(&phba->hbalock, - iflag); - cmdiocbp->iocb.ulpStatus = - IOSTAT_LOCAL_REJECT; - cmdiocbp->iocb.un.ulpWord[4] = - IOERR_ABORT_REQUESTED; - /* - * For SLI4, irsiocb contains NO_XRI - * in sli_xritag, it shall not affect - * releasing sgl (xri) process. - */ - saveq->iocb.ulpStatus = - IOSTAT_LOCAL_REJECT; - saveq->iocb.un.ulpWord[4] = - IOERR_SLI_ABORTED; - spin_lock_irqsave(&phba->hbalock, - iflag); - saveq->iocb_flag |= LPFC_DELAY_MEM_FREE; - spin_unlock_irqrestore(&phba->hbalock, - iflag); + if (phba->sli_rev == LPFC_SLI_REV4) { + if (saveq->iocb_flag & + LPFC_EXCHANGE_BUSY) { + /* Set cmdiocb flag for the + * exchange busy so sgl (xri) + * will not be released until + * the abort xri is received + * from hba. + */ + spin_lock_irqsave( + &phba->hbalock, iflag); + cmdiocbp->iocb_flag |= + LPFC_EXCHANGE_BUSY; + spin_unlock_irqrestore( + &phba->hbalock, iflag); + } + if (cmdiocbp->iocb_flag & + LPFC_DRIVER_ABORTED) { + /* + * Clear LPFC_DRIVER_ABORTED + * bit in case it was driver + * initiated abort. + */ + spin_lock_irqsave( + &phba->hbalock, iflag); + cmdiocbp->iocb_flag &= + ~LPFC_DRIVER_ABORTED; + spin_unlock_irqrestore( + &phba->hbalock, iflag); + cmdiocbp->iocb.ulpStatus = + IOSTAT_LOCAL_REJECT; + cmdiocbp->iocb.un.ulpWord[4] = + IOERR_ABORT_REQUESTED; + /* + * For SLI4, irsiocb contains + * NO_XRI in sli_xritag, it + * shall not affect releasing + * sgl (xri) process. 
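The sglq changes above give each entry a three-state lifecycle so the release path can tell a normally completed exchange from one whose XRI the hardware still owns: SGL_FREED -> SGL_ALLOCATED on get, then on release either straight back to SGL_FREED or parked on the aborted list until the XRI-aborted event arrives. A compact model of that release decision, with list membership reduced to a flag:

    #include <stdio.h>

    enum sgl_state { SGL_FREED, SGL_ALLOCATED, SGL_XRI_ABORTED };

    struct sglq {
        enum sgl_state state;
        int on_abts_list;  /* stands in for lpfc_abts_els_sgl_list membership */
    };

    /* exchange_busy mirrors LPFC_EXCHANGE_BUSY in the iocb flags */
    static void release_sglq(struct sglq *q, int exchange_busy)
    {
        if (exchange_busy && q->state != SGL_XRI_ABORTED) {
            /* hardware still owns the XRI: park until the abort event */
            q->on_abts_list = 1;
        } else {
            q->state = SGL_FREED;  /* safe to reuse immediately */
            q->on_abts_list = 0;
        }
    }

    int main(void)
    {
        struct sglq q = { SGL_ALLOCATED, 0 };
        release_sglq(&q, 1);  /* busy exchange: parked, not freed */
        printf("parked=%d state=%d\n", q.on_abts_list, q.state);
        return 0;
    }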
+ */ + saveq->iocb.ulpStatus = + IOSTAT_LOCAL_REJECT; + saveq->iocb.un.ulpWord[4] = + IOERR_SLI_ABORTED; + spin_lock_irqsave( + &phba->hbalock, iflag); + saveq->iocb_flag |= + LPFC_DELAY_MEM_FREE; + spin_unlock_irqrestore( + &phba->hbalock, iflag); + } } } (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq); @@ -2515,14 +2534,16 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba, cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring, &rspiocbq); - if ((cmdiocbq) && (cmdiocbq->iocb_cmpl)) { - spin_unlock_irqrestore(&phba->hbalock, - iflag); - (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, - &rspiocbq); - spin_lock_irqsave(&phba->hbalock, - iflag); - } + if (unlikely(!cmdiocbq)) + break; + if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) + cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED; + if (cmdiocbq->iocb_cmpl) { + spin_unlock_irqrestore(&phba->hbalock, iflag); + (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, + &rspiocbq); + spin_lock_irqsave(&phba->hbalock, iflag); + } break; case LPFC_UNSOL_IOCB: spin_unlock_irqrestore(&phba->hbalock, iflag); @@ -3091,6 +3112,12 @@ lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask) /* Check to see if any errors occurred during init */ if ((status & HS_FFERM) || (i >= 20)) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2751 Adapter failed to restart, " + "status reg x%x, FW Data: A8 x%x AC x%x\n", + status, + readl(phba->MBslimaddr + 0xa8), + readl(phba->MBslimaddr + 0xac)); phba->link_state = LPFC_HBA_ERROR; retval = 1; } @@ -3278,6 +3305,9 @@ lpfc_sli_brdkill(struct lpfc_hba *phba) if (retval != MBX_SUCCESS) { if (retval != MBX_BUSY) mempool_free(pmb, phba->mbox_mem_pool); + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "2752 KILL_BOARD command failed retval %d\n", + retval); spin_lock_irq(&phba->hbalock); phba->link_flag &= ~LS_IGNORE_ERATT; spin_unlock_irq(&phba->hbalock); @@ -4035,7 +4065,7 @@ lpfc_sli_hba_setup(struct lpfc_hba *phba) lpfc_sli_hba_setup_error: phba->link_state = LPFC_HBA_ERROR; - lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0445 Firmware initialization failed\n"); return rc; } @@ -4388,7 +4418,13 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) spin_unlock_irq(&phba->hbalock); /* Read the port's service parameters. 
*/ - lpfc_read_sparam(phba, mboxq, vport->vpi); + rc = lpfc_read_sparam(phba, mboxq, vport->vpi); + if (rc) { + phba->link_state = LPFC_HBA_ERROR; + rc = -ENOMEM; + goto out_free_vpd; + } + mboxq->vport = vport; rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); mp = (struct lpfc_dmabuf *) mboxq->context1; @@ -4483,6 +4519,10 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) /* Post receive buffers to the device */ lpfc_sli4_rb_setup(phba); + /* Reset HBA FCF states after HBA reset */ + phba->fcf.fcf_flag = 0; + phba->fcf.current_rec.flag = 0; + /* Start the ELS watchdog timer */ mod_timer(&vport->els_tmofunc, jiffies + HZ * (phba->fc_ratov * 2)); @@ -7436,6 +7476,7 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba, { wait_queue_head_t *pdone_q; unsigned long iflags; + struct lpfc_scsi_buf *lpfc_cmd; spin_lock_irqsave(&phba->hbalock, iflags); cmdiocbq->iocb_flag |= LPFC_IO_WAKE; @@ -7443,6 +7484,14 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba, memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb, &rspiocbq->iocb, sizeof(IOCB_t)); + /* Set the exchange busy flag for task management commands */ + if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) && + !(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) { + lpfc_cmd = container_of(cmdiocbq, struct lpfc_scsi_buf, + cur_iocbq); + lpfc_cmd->exch_busy = rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY; + } + pdone_q = cmdiocbq->context_un.wait_queue; if (pdone_q) wake_up(pdone_q); @@ -9061,6 +9110,12 @@ lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, /* Fake the irspiocb and copy necessary response information */ lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe); + if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) { + spin_lock_irqsave(&phba->hbalock, iflags); + cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED; + spin_unlock_irqrestore(&phba->hbalock, iflags); + } + /* Pass the cmd_iocb and the rsp state to the upper layer */ (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq); } @@ -11941,15 +11996,19 @@ lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba, } /** - * lpfc_sli4_read_fcf_record - Read the driver's default FCF Record. + * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan. * @phba: pointer to lpfc hba data structure. * @fcf_index: FCF table entry offset. * - * This routine is invoked to read up to @fcf_num of FCF record from the - * device starting with the given @fcf_index. + * This routine is invoked to scan the entire FCF table by reading FCF + * record and processing it one at a time starting from the @fcf_index + * for initial FCF discovery or fast FCF failover rediscovery. + * + * Return 0 if the mailbox command is submitted successfully, non-zero + * otherwise.
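lpfc_sli_wake_iocb_wait() above, like the new xri-aborted scan in lpfc_scsi.c earlier in this diff, recovers the enclosing SCSI buffer from its embedded iocbq with container_of(). The macro is plain pointer arithmetic over offsetof(); a standalone demonstration with simplified types:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct iocbq { unsigned int flags; };

    struct scsi_buf {
        int exch_busy;
        struct iocbq cur_iocbq;  /* embedded, as in struct lpfc_scsi_buf */
    };

    int main(void)
    {
        struct scsi_buf buf = { 0, { 0 } };
        struct iocbq *iocb = &buf.cur_iocbq;

        /* walk back from the member to its containing structure */
        struct scsi_buf *owner = container_of(iocb, struct scsi_buf, cur_iocbq);

        owner->exch_busy = 1;
        printf("recovered owner, exch_busy=%d\n", buf.exch_busy);
        return 0;
    }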
**/ int -lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index) +lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index) { int rc = 0, error; LPFC_MBOXQ_t *mboxq; @@ -11961,17 +12020,17 @@ lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index) "2000 Failed to allocate mbox for " "READ_FCF cmd\n"); error = -ENOMEM; - goto fail_fcfscan; + goto fail_fcf_scan; } /* Construct the read FCF record mailbox command */ - rc = lpfc_sli4_mbx_read_fcf_record(phba, mboxq, fcf_index); + rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index); if (rc) { error = -EINVAL; - goto fail_fcfscan; + goto fail_fcf_scan; } /* Issue the mailbox command asynchronously */ mboxq->vport = phba->pport; - mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_record; + mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec; rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); if (rc == MBX_NOT_FINISHED) error = -EIO; @@ -11979,9 +12038,13 @@ lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index) spin_lock_irq(&phba->hbalock); phba->hba_flag |= FCF_DISC_INPROGRESS; spin_unlock_irq(&phba->hbalock); + /* Reset FCF round robin index bmask for new scan */ + if (fcf_index == LPFC_FCOE_FCF_GET_FIRST) + memset(phba->fcf.fcf_rr_bmask, 0, + sizeof(*phba->fcf.fcf_rr_bmask)); error = 0; } -fail_fcfscan: +fail_fcf_scan: if (error) { if (mboxq) lpfc_sli4_mbox_cmd_free(phba, mboxq); @@ -11994,6 +12057,181 @@ fail_fcfscan: } /** + * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for round robin fcf. + * @phba: pointer to lpfc hba data structure. + * @fcf_index: FCF table entry offset. + * + * This routine is invoked to read an FCF record indicated by @fcf_index + * and to use it for FLOGI round robin FCF failover. + * + * Return 0 if the mailbox command is submitted successfully, non-zero + * otherwise. + **/ +int +lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index) +{ + int rc = 0, error; + LPFC_MBOXQ_t *mboxq; + + mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mboxq) { + lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT, + "2763 Failed to allocate mbox for " + "READ_FCF cmd\n"); + error = -ENOMEM; + goto fail_fcf_read; + } + /* Construct the read FCF record mailbox command */ + rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index); + if (rc) { + error = -EINVAL; + goto fail_fcf_read; + } + /* Issue the mailbox command asynchronously */ + mboxq->vport = phba->pport; + mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec; + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); + if (rc == MBX_NOT_FINISHED) + error = -EIO; + else + error = 0; + +fail_fcf_read: + if (error && mboxq) + lpfc_sli4_mbox_cmd_free(phba, mboxq); + return error; +} + +/** + * lpfc_sli4_read_fcf_rec - Read hba fcf record for update eligible fcf bmask. + * @phba: pointer to lpfc hba data structure. + * @fcf_index: FCF table entry offset. + * + * This routine is invoked to read an FCF record indicated by @fcf_index to + * determine whether it's eligible for FLOGI round robin failover list. + * + * Return 0 if the mailbox command is submitted successfully, non-zero + * otherwise.
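All three read-FCF helpers in this region share one shape: allocate a mailbox, construct the command, attach a completion handler, submit without waiting, and free the mailbox only on the error path, since on success the completion owns it. The goto-unwind shape in miniature; every name below is hypothetical, and the completion "fires" synchronously only to keep the model leak-free:

    #include <stdlib.h>
    #include <errno.h>

    struct mbox { void (*cmpl)(struct mbox *); };

    static struct mbox *mbox_alloc(void) { return calloc(1, sizeof(struct mbox)); }
    static int build_read_fcf(struct mbox *m, unsigned idx) { (void)m; (void)idx; return 0; }
    static void on_read_fcf_done(struct mbox *m) { free(m); }

    /* models MBX_NOWAIT submission; the real completion runs later */
    static int submit_nowait(struct mbox *m) { m->cmpl(m); return 0; }

    static int read_fcf_rec_async(unsigned fcf_index)
    {
        struct mbox *m = mbox_alloc();
        int error;

        if (!m) {
            error = -ENOMEM;
            goto fail;
        }
        error = build_read_fcf(m, fcf_index) ? -EINVAL : 0;
        if (error)
            goto fail;
        m->cmpl = on_read_fcf_done;   /* on success, completion owns m */
        error = submit_nowait(m) ? -EIO : 0;
    fail:
        if (error && m)
            free(m);                  /* unwind only on failure */
        return error;
    }

    int main(void)
    {
        return read_fcf_rec_async(0);
    }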
+ **/ +int +lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index) +{ + int rc = 0, error; + LPFC_MBOXQ_t *mboxq; + + mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mboxq) { + lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT, + "2758 Failed to allocate mbox for " + "READ_FCF cmd\n"); + error = -ENOMEM; + goto fail_fcf_read; + } + /* Construct the read FCF record mailbox command */ + rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index); + if (rc) { + error = -EINVAL; + goto fail_fcf_read; + } + /* Issue the mailbox command asynchronously */ + mboxq->vport = phba->pport; + mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec; + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); + if (rc == MBX_NOT_FINISHED) + error = -EIO; + else + error = 0; + +fail_fcf_read: + if (error && mboxq) + lpfc_sli4_mbox_cmd_free(phba, mboxq); + return error; +} + +/** + * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index + * @phba: pointer to lpfc hba data structure. + * + * This routine is to get the next eligible FCF record index in a round + * robin fashion. If the next eligible FCF record index equals to the + * initial round robin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF) + * shall be returned, otherwise, the next eligible FCF record's index + * shall be returned. + **/ +uint16_t +lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba) +{ + uint16_t next_fcf_index; + + /* Search from the currently registered FCF index */ + next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask, + LPFC_SLI4_FCF_TBL_INDX_MAX, + phba->fcf.current_rec.fcf_indx); + /* Wrap around condition on phba->fcf.fcf_rr_bmask */ + if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) + next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask, + LPFC_SLI4_FCF_TBL_INDX_MAX, 0); + /* Round robin failover stop condition */ + if (next_fcf_index == phba->fcf.fcf_rr_init_indx) + return LPFC_FCOE_FCF_NEXT_NONE; + + return next_fcf_index; +} + +/** + * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index + * @phba: pointer to lpfc hba data structure. + * + * This routine sets the FCF record index into the eligible bmask for + * round robin failover search. It checks to make sure that the index + * does not go beyond the range of the driver allocated bmask dimension + * before setting the bit. + * + * Returns 0 if the index bit is successfully set, otherwise, it returns + * -EINVAL. + **/ +int +lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index) +{ + if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { + lpfc_printf_log(phba, KERN_ERR, LOG_FIP, + "2610 HBA FCF index reached driver's " + "book keeping dimension: fcf_index:%d, " + "driver_bmask_max:%d\n", + fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX); + return -EINVAL; + } + /* Set the eligible FCF record index bmask */ + set_bit(fcf_index, phba->fcf.fcf_rr_bmask); + + return 0; +} + +/** + * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index + * @phba: pointer to lpfc hba data structure. + * + * This routine clears the FCF record index from the eligible bmask for + * round robin failover search. It checks to make sure that the index + * does not go beyond the range of the driver allocated bmask dimension + * before clearing the bit.
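lpfc_sli4_fcf_rr_next_index_get() above is the core of the round robin failover: find the next set bit at or after the current index, wrap to bit 0 when the search runs off the end of the bmask, and report exhaustion when the walk returns to the starting index. A userspace model with a tiny single-word find_next_bit; NEXT_NONE plays the LPFC_FCOE_FCF_NEXT_NONE (0xFFFF) sentinel role:

    #include <stdio.h>

    #define TBL_MAX   32u
    #define NEXT_NONE 0xFFFFu

    /* smallest set-bit index >= off, or size if none (models find_next_bit) */
    static unsigned find_next_bit32(unsigned long bm, unsigned size, unsigned off)
    {
        for (; off < size; off++)
            if (bm & (1UL << off))
                return off;
        return size;
    }

    static unsigned rr_next_index(unsigned long bmask, unsigned cur, unsigned init)
    {
        unsigned next = find_next_bit32(bmask, TBL_MAX, cur);

        if (next >= TBL_MAX)                        /* wrap around */
            next = find_next_bit32(bmask, TBL_MAX, 0);
        if (next == init)                           /* full circle: stop */
            return NEXT_NONE;
        return next;
    }

    int main(void)
    {
        unsigned long eligible = (1UL << 3) | (1UL << 9); /* FCFs 3 and 9 */
        printf("%u\n", rr_next_index(eligible, 4, 3));    /* -> 9 */
        printf("%u\n", rr_next_index(eligible, 10, 3));   /* wraps to 3 == init -> 65535 */
        return 0;
    }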
+ **/ +void +lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index) +{ + if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { + lpfc_printf_log(phba, KERN_ERR, LOG_FIP, + "2762 HBA FCF index goes beyond driver's " + "book keeping dimension: fcf_index:%d, " + "driver_bmask_max:%d\n", + fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX); + return; + } + /* Clear the eligible FCF record index bmask */ + clear_bit(fcf_index, phba->fcf.fcf_rr_bmask); +} + +/** * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table * @phba: pointer to lpfc hba data structure. * @@ -12014,21 +12252,40 @@ lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox) shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &redisc_fcf->header.cfg_shdr.response); if (shdr_status || shdr_add_status) { - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, LOG_FIP, "2746 Requesting for FCF rediscovery failed " "status x%x add_status x%x\n", shdr_status, shdr_add_status); - /* - * Request failed, last resort to re-try current - * registered FCF entry - */ - lpfc_retry_pport_discovery(phba); - } else + if (phba->fcf.fcf_flag & FCF_ACVL_DISC) { + spin_lock_irq(&phba->hbalock); + phba->fcf.fcf_flag &= ~FCF_ACVL_DISC; + spin_unlock_irq(&phba->hbalock); + /* + * CVL event triggered FCF rediscover request failed, + * last resort to re-try current registered FCF entry. + */ + lpfc_retry_pport_discovery(phba); + } else { + spin_lock_irq(&phba->hbalock); + phba->fcf.fcf_flag &= ~FCF_DEAD_DISC; + spin_unlock_irq(&phba->hbalock); + /* + * DEAD FCF event triggered FCF rediscover request + * failed, last resort to fail over as a link down + * to FCF registration. + */ + lpfc_sli4_fcf_dead_failthrough(phba); + } + } else { + lpfc_printf_log(phba, KERN_INFO, LOG_FIP, + "2775 Start FCF rediscovery quiescent period " + "wait timer before scanning FCF table\n"); /* * Start FCF rediscovery wait timer for pending FCF * before rescanning the FCF record table. */ lpfc_fcf_redisc_wait_start_timer(phba); + } mempool_free(mbox, phba->mbox_mem_pool); } @@ -12047,6 +12304,9 @@ lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba) struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf; int rc, length; + /* Cancel retry delay timers for all vports before FCF rediscover */ + lpfc_cancel_all_vport_retry_delay_timer(phba); + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mbox) { lpfc_printf_log(phba, KERN_ERR, LOG_SLI, @@ -12078,6 +12338,31 @@ lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba) } /** + * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event + * @phba: pointer to lpfc hba data structure. + * + * This function is the failover routine as a last resort to the FCF DEAD + * event when driver failed to perform fast FCF failover. + **/ +void +lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba) +{ + uint32_t link_state; + + /* + * Last resort as FCF DEAD event failover will treat this as + * a link down, but save the link state because we don't want + * it to be changed to Link Down unless it is already down. + */ + link_state = phba->link_state; + lpfc_linkdown(phba); + phba->link_state = link_state; + + /* Unregister FCF if no devices connected to it */ + lpfc_unregister_unused_fcf(phba); +} + +/** * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled. + * @phba: pointer to lpfc hba data structure.
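lpfc_sli4_fcf_dead_failthrough() above preserves a subtle invariant: it wants the teardown side effects of lpfc_linkdown() without recording Link Down as the new state unless the link really was already down. Saving and restoring the state around the call captures that; the sketch below models it with an enum and an out parameter:

    #include <stdio.h>

    enum link_state { LINK_DOWN, LINK_UP, HBA_ERROR };

    static void linkdown_teardown(enum link_state *state)
    {
        /* tear down logins etc.; as a side effect records LINK_DOWN */
        *state = LINK_DOWN;
    }

    static void fcf_dead_failthrough(enum link_state *state)
    {
        enum link_state saved = *state; /* may legitimately be LINK_DOWN */

        linkdown_teardown(state);
        *state = saved;                 /* keep the externally visible state */
    }

    int main(void)
    {
        enum link_state s = LINK_UP;
        fcf_dead_failthrough(&s);
        printf("state after failthrough: %d\n", s);  /* still LINK_UP */
        return 0;
    }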
* diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h index dfcf5437d1f..b4a639c4761 100644 --- a/drivers/scsi/lpfc/lpfc_sli.h +++ b/drivers/scsi/lpfc/lpfc_sli.h @@ -62,6 +62,7 @@ struct lpfc_iocbq { #define LPFC_DELAY_MEM_FREE 0x20 /* Defer free'ing of FC data */ #define LPFC_EXCHANGE_BUSY 0x40 /* SLI4 hba reported XB in response */ #define LPFC_USE_FCPWQIDX 0x80 /* Submit to specified FCPWQ index */ +#define DSS_SECURITY_OP 0x100 /* security IO */ #define LPFC_FIP_ELS_ID_MASK 0xc000 /* ELS_ID range 0-3, non-shifted mask */ #define LPFC_FIP_ELS_ID_SHIFT 14 diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h index 86308836600..4a35e7b9bc5 100644 --- a/drivers/scsi/lpfc/lpfc_sli4.h +++ b/drivers/scsi/lpfc/lpfc_sli4.h @@ -153,15 +153,27 @@ struct lpfc_fcf { #define FCF_REGISTERED 0x02 /* FCF registered with FW */ #define FCF_SCAN_DONE 0x04 /* FCF table scan done */ #define FCF_IN_USE 0x08 /* At least one discovery completed */ -#define FCF_REDISC_PEND 0x10 /* FCF rediscovery pending */ -#define FCF_REDISC_EVT 0x20 /* FCF rediscovery event to worker thread */ -#define FCF_REDISC_FOV 0x40 /* Post FCF rediscovery fast failover */ +#define FCF_INIT_DISC 0x10 /* Initial FCF discovery */ +#define FCF_DEAD_DISC 0x20 /* FCF DEAD fast FCF failover discovery */ +#define FCF_ACVL_DISC 0x40 /* All CVL fast FCF failover discovery */ +#define FCF_DISCOVERY (FCF_INIT_DISC | FCF_DEAD_DISC | FCF_ACVL_DISC) +#define FCF_REDISC_PEND 0x80 /* FCF rediscovery pending */ +#define FCF_REDISC_EVT 0x100 /* FCF rediscovery event to worker thread */ +#define FCF_REDISC_FOV 0x200 /* Post FCF rediscovery fast failover */ uint32_t addr_mode; + uint16_t fcf_rr_init_indx; struct lpfc_fcf_rec current_rec; struct lpfc_fcf_rec failover_rec; struct timer_list redisc_wait; + unsigned long *fcf_rr_bmask; /* Eligible FCF indexes for RR failover */ }; +/* + * Maximum FCF table index; it is for driver internal bookkeeping and + * just needs to be no less than the supported HBA's FCF table size. + */ +#define LPFC_SLI4_FCF_TBL_INDX_MAX 32 + #define LPFC_REGION23_SIGNATURE "RG23" #define LPFC_REGION23_VERSION 1 #define LPFC_REGION23_LAST_REC 0xff @@ -431,11 +443,18 @@ enum lpfc_sge_type { SCSI_BUFF_TYPE }; +enum lpfc_sgl_state { + SGL_FREED, + SGL_ALLOCATED, + SGL_XRI_ABORTED +}; + struct lpfc_sglq { /* lpfc_sglqs are used in double linked lists */ struct list_head list; struct list_head clist; enum lpfc_sge_type buff_type; /* is this a scsi sgl */ + enum lpfc_sgl_state state; uint16_t iotag; /* pre-assigned IO tag */ uint16_t sli4_xritag; /* pre-assigned XRI, (OXID) tag.
*/ struct sli4_sge *sgl; /* pre-assigned SGL */ @@ -463,8 +482,8 @@ void lpfc_sli4_mbox_cmd_free(struct lpfc_hba *, struct lpfcMboxq *); void lpfc_sli4_mbx_sge_set(struct lpfcMboxq *, uint32_t, dma_addr_t, uint32_t); void lpfc_sli4_mbx_sge_get(struct lpfcMboxq *, uint32_t, struct lpfc_mbx_sge *); -int lpfc_sli4_mbx_read_fcf_record(struct lpfc_hba *, struct lpfcMboxq *, - uint16_t); +int lpfc_sli4_mbx_read_fcf_rec(struct lpfc_hba *, struct lpfcMboxq *, + uint16_t); void lpfc_sli4_hba_reset(struct lpfc_hba *); struct lpfc_queue *lpfc_sli4_queue_alloc(struct lpfc_hba *, uint32_t, @@ -523,8 +542,13 @@ int lpfc_sli4_init_vpi(struct lpfc_hba *, uint16_t); uint32_t lpfc_sli4_cq_release(struct lpfc_queue *, bool); uint32_t lpfc_sli4_eq_release(struct lpfc_queue *, bool); void lpfc_sli4_fcfi_unreg(struct lpfc_hba *, uint16_t); -int lpfc_sli4_read_fcf_record(struct lpfc_hba *, uint16_t); -void lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *, LPFC_MBOXQ_t *); +int lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *, uint16_t); +int lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *, uint16_t); +int lpfc_sli4_read_fcf_rec(struct lpfc_hba *, uint16_t); +void lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *, LPFC_MBOXQ_t *); +void lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *, LPFC_MBOXQ_t *); +void lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *, LPFC_MBOXQ_t *); +int lpfc_sli4_unregister_fcf(struct lpfc_hba *); int lpfc_sli4_post_status_check(struct lpfc_hba *); uint8_t lpfc_sli4_mbox_opcode_get(struct lpfc_hba *, struct lpfcMboxq *); diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h index ac276aa46fb..013deec5dae 100644 --- a/drivers/scsi/lpfc/lpfc_version.h +++ b/drivers/scsi/lpfc/lpfc_version.h @@ -18,7 +18,7 @@ * included with this package. * *******************************************************************/ -#define LPFC_DRIVER_VERSION "8.3.9" +#define LPFC_DRIVER_VERSION "8.3.10" #define LPFC_DRIVER_NAME "lpfc" #define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp" #define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp" diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c index dc86e873102..869f76cbc58 100644 --- a/drivers/scsi/lpfc/lpfc_vport.c +++ b/drivers/scsi/lpfc/lpfc_vport.c @@ -123,7 +123,12 @@ lpfc_vport_sparm(struct lpfc_hba *phba, struct lpfc_vport *vport) } mb = &pmb->u.mb; - lpfc_read_sparam(phba, pmb, vport->vpi); + rc = lpfc_read_sparam(phba, pmb, vport->vpi); + if (rc) { + mempool_free(pmb, phba->mbox_mem_pool); + return -ENOMEM; + } + /* * Grab buffer pointer and clear context1 so we can use * lpfc_sli_issue_box_wait |