Diffstat (limited to 'drivers/scsi/lpfc/lpfc_init.c')
-rw-r--r-- | drivers/scsi/lpfc/lpfc_init.c | 401
1 file changed, 325 insertions(+), 76 deletions(-)
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index cd9697edf86..da9ba06ad58 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -621,6 +621,7 @@ lpfc_config_port_post(struct lpfc_hba *phba)
 /**
  * lpfc_hba_init_link - Initialize the FC link
  * @phba: pointer to lpfc hba data structure.
+ * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
  *
  * This routine will issue the INIT_LINK mailbox command call.
  * It is available to other drivers through the lpfc_hba data
@@ -632,7 +633,7 @@ lpfc_config_port_post(struct lpfc_hba *phba)
  *   Any other value - error
  **/
 int
-lpfc_hba_init_link(struct lpfc_hba *phba)
+lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
 {
 	struct lpfc_vport *vport = phba->pport;
 	LPFC_MBOXQ_t *pmb;
@@ -651,7 +652,7 @@ lpfc_hba_init_link(struct lpfc_hba *phba)
 		       phba->cfg_link_speed);
 	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
 	lpfc_set_loopback_flag(phba);
-	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
+	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
 	if (rc != MBX_SUCCESS) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 			"0498 Adapter failed to init, mbxCmd x%x "
@@ -664,17 +665,21 @@ lpfc_hba_init_link(struct lpfc_hba *phba)
 		writel(0xffffffff, phba->HAregaddr);
 		readl(phba->HAregaddr); /* flush */
 		phba->link_state = LPFC_HBA_ERROR;
-		if (rc != MBX_BUSY)
+		if (rc != MBX_BUSY || flag == MBX_POLL)
 			mempool_free(pmb, phba->mbox_mem_pool);
 		return -EIO;
 	}
 	phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK;
+	if (flag == MBX_POLL)
+		mempool_free(pmb, phba->mbox_mem_pool);
 
 	return 0;
 }
 
 /**
  * lpfc_hba_down_link - this routine downs the FC link
+ * @phba: pointer to lpfc hba data structure.
+ * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
  *
  * This routine will issue the DOWN_LINK mailbox command call.
  * It is available to other drivers through the lpfc_hba data
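The hunk above changes mailbox-buffer ownership with the new issue mode: under MBX_POLL the call is synchronous and the caller always frees the mailbox, while under MBX_NOWAIT ownership passes to the mailbox queue (and ultimately its completion handler) unless the issue fails outright. A minimal standalone C sketch of that rule follows; issue_mbox(), init_link(), and struct mboxq are simplified stand-ins for illustration, not the lpfc API.

#include <stdio.h>
#include <stdlib.h>

enum { MBX_POLL, MBX_NOWAIT };
enum { MBX_SUCCESS, MBX_BUSY, MBX_NOT_FINISHED };

struct mboxq { int cmd; };

/* Stand-in for lpfc_sli_issue_mbox(): with MBX_NOWAIT the queue takes
 * ownership; here the "completion handler" runs immediately and frees
 * the box. With MBX_POLL the caller keeps ownership throughout. */
static int issue_mbox(struct mboxq *mb, int flag)
{
	if (flag == MBX_POLL)
		return MBX_SUCCESS;	/* ran synchronously, caller still owns mb */
	free(mb);			/* queued: completion handler frees it */
	return MBX_BUSY;
}

static int init_link(int flag)
{
	struct mboxq *mb = malloc(sizeof(*mb));
	int rc;

	if (!mb)
		return -1;
	mb->cmd = 0;
	rc = issue_mbox(mb, flag);
	if (rc != MBX_SUCCESS) {
		/* mirror the hunk: MBX_BUSY means the queue owns the box,
		 * except under MBX_POLL, where the caller always frees */
		if (rc != MBX_BUSY || flag == MBX_POLL)
			free(mb);
		return -1;
	}
	if (flag == MBX_POLL)
		free(mb);	/* synchronous: completion already ran */
	return 0;
}

int main(void)
{
	printf("poll: %d, nowait: %d\n",
	       init_link(MBX_POLL), init_link(MBX_NOWAIT));
	return 0;
}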
@@ -685,7 +690,7 @@ lpfc_hba_init_link(struct lpfc_hba *phba)
  *   Any other value - error
  **/
 int
-lpfc_hba_down_link(struct lpfc_hba *phba)
+lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag)
 {
 	LPFC_MBOXQ_t *pmb;
 	int rc;
@@ -701,7 +706,7 @@ lpfc_hba_down_link(struct lpfc_hba *phba)
 		"0491 Adapter Link is disabled.\n");
 	lpfc_down_link(phba, pmb);
 	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
-	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
+	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
 	if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -711,6 +716,9 @@ lpfc_hba_down_link(struct lpfc_hba *phba)
 		mempool_free(pmb, phba->mbox_mem_pool);
 		return -EIO;
 	}
+	if (flag == MBX_POLL)
+		mempool_free(pmb, phba->mbox_mem_pool);
+
 	return 0;
 }
 
@@ -1024,27 +1032,46 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
 	/* If there is no heart beat outstanding, issue a heartbeat command */
 	if (phba->cfg_enable_hba_heartbeat) {
 		if (!phba->hb_outstanding) {
-			pmboxq = mempool_alloc(phba->mbox_mem_pool,GFP_KERNEL);
-			if (!pmboxq) {
-				mod_timer(&phba->hb_tmofunc,
-					  jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
-				return;
-			}
+			if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) &&
+				(list_empty(&psli->mboxq))) {
+				pmboxq = mempool_alloc(phba->mbox_mem_pool,
+							GFP_KERNEL);
+				if (!pmboxq) {
+					mod_timer(&phba->hb_tmofunc,
+						 jiffies +
+						 HZ * LPFC_HB_MBOX_INTERVAL);
+					return;
+				}
 
-			lpfc_heart_beat(phba, pmboxq);
-			pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
-			pmboxq->vport = phba->pport;
-			retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
+				lpfc_heart_beat(phba, pmboxq);
+				pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
+				pmboxq->vport = phba->pport;
+				retval = lpfc_sli_issue_mbox(phba, pmboxq,
+						MBX_NOWAIT);
+
+				if (retval != MBX_BUSY &&
+					retval != MBX_SUCCESS) {
+					mempool_free(pmboxq,
+							phba->mbox_mem_pool);
+					mod_timer(&phba->hb_tmofunc,
+						jiffies +
+						HZ * LPFC_HB_MBOX_INTERVAL);
+					return;
+				}
+				phba->skipped_hb = 0;
+				phba->hb_outstanding = 1;
+			} else if (time_before_eq(phba->last_completion_time,
+					phba->skipped_hb)) {
+				lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+					"2857 Last completion time not "
+					"updated in %d ms\n",
+					jiffies_to_msecs(jiffies
+						 - phba->last_completion_time));
+			} else
+				phba->skipped_hb = jiffies;
 
-			if (retval != MBX_BUSY && retval != MBX_SUCCESS) {
-				mempool_free(pmboxq, phba->mbox_mem_pool);
-				mod_timer(&phba->hb_tmofunc,
-					  jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
-				return;
-			}
 			mod_timer(&phba->hb_tmofunc,
 				 jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
-			phba->hb_outstanding = 1;
 			return;
 		} else {
 			/*
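The reworked heartbeat handler above only issues a HEARTBEAT mailbox command when the single mailbox channel is idle; otherwise it records when the heartbeat was skipped and treats any later mailbox completion as proof of life, warning only if nothing has completed since the last skip. A standalone sketch of that bookkeeping, with a monotonic clock and stand-in fields in place of jiffies and the phba state (hypothetical names, not the driver's code):

#include <stdio.h>
#include <stdbool.h>
#include <time.h>

static long now_ms(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000L + ts.tv_nsec / 1000000L;
}

struct hba_state {
	bool mbox_busy;		/* LPFC_SLI_MBOX_ACTIVE analogue */
	long last_completion;	/* last_completion_time analogue */
	long skipped_hb;	/* time of the last skipped beat */
};

static void heartbeat_tick(struct hba_state *h)
{
	if (!h->mbox_busy) {
		/* channel free: issue a real heartbeat command here */
		h->skipped_hb = 0;
	} else if (h->last_completion <= h->skipped_hb) {
		/* busy and nothing completed since we last skipped */
		printf("no completion in %ld ms\n",
		       now_ms() - h->last_completion);
	} else {
		h->skipped_hb = now_ms();	/* busy, but traffic flows */
	}
}

int main(void)
{
	struct hba_state h = { .mbox_busy = true, .last_completion = now_ms() };
	heartbeat_tick(&h);	/* first skip: records skipped_hb */
	heartbeat_tick(&h);	/* still busy, nothing new: warns */
	return 0;
}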
@@ -1818,6 +1845,10 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
 		m = (typeof(m)){"LPSe12002-ML1-E", "PCIe",
 				"EmulexSecure Fibre"};
 		break;
+	case PCI_DEVICE_ID_BALIUS:
+		m = (typeof(m)){"LPVe12002", "PCIe Shared I/O",
+				"Fibre Channel Adapter"};
+		break;
 	default:
 		m = (typeof(m)){"Unknown", "", ""};
 		break;
@@ -2279,10 +2310,32 @@ static void
 lpfc_block_mgmt_io(struct lpfc_hba * phba)
 {
 	unsigned long iflag;
+	uint8_t actcmd = MBX_HEARTBEAT;
+	unsigned long timeout;
+
 
 	spin_lock_irqsave(&phba->hbalock, iflag);
 	phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
+	if (phba->sli.mbox_active)
+		actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
 	spin_unlock_irqrestore(&phba->hbalock, iflag);
+	/* Determine how long we might wait for the active mailbox
+	 * command to be gracefully completed by firmware.
+	 */
+	timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, actcmd) * 1000) +
+				   jiffies;
+	/* Wait for the outstanding mailbox command to complete */
+	while (phba->sli.mbox_active) {
+		/* Check active mailbox complete status every 2ms */
+		msleep(2);
+		if (time_after(jiffies, timeout)) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"2813 Mgmt IO is Blocked %x "
+				"- mbox cmd %x still active\n",
+				phba->sli.sli_flag, actcmd);
+			break;
+		}
+	}
 }
 
 /**
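lpfc_block_mgmt_io() above now waits, bounded by the active command's own mailbox timeout, for that command to finish before management I/O is considered fully blocked. The generic pattern is a deadline computed up front plus a short-sleep poll loop; a standalone sketch follows, where wait_for_mbox() and the active flag are stand-ins, not lpfc calls.

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static long now_ms(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000L + ts.tv_nsec / 1000000L;
}

static void sleep_ms(long ms)
{
	struct timespec ts = { ms / 1000, (ms % 1000) * 1000000L };
	nanosleep(&ts, NULL);
}

/* Returns true if the command finished before the deadline. */
static bool wait_for_mbox(volatile bool *active, long tmo_ms)
{
	long deadline = now_ms() + tmo_ms;

	while (*active) {
		sleep_ms(2);		/* check completion every 2 ms */
		if (now_ms() > deadline)
			return false;	/* give up; log and press on */
	}
	return true;
}

int main(void)
{
	volatile bool active = true;	/* nobody completes it in this demo */

	if (!wait_for_mbox(&active, 20))
		printf("mbox still active after timeout\n");
	return 0;
}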
@@ -3247,10 +3300,10 @@ lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
 		if (!ndlp)
 			return 0;
 	}
-	if (phba->pport->port_state <= LPFC_FLOGI)
+	if (phba->pport->port_state < LPFC_FLOGI)
 		return NULL;
 	/* If virtual link is not yet instantiated ignore CVL */
-	if (vport->port_state <= LPFC_FDISC)
+	if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC))
 		return NULL;
 	shost = lpfc_shost_from_vport(vport);
 	if (!shost)
@@ -3323,29 +3376,7 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
 			"evt_tag:x%x, fcf_index:x%x\n",
 			acqe_fcoe->event_tag, acqe_fcoe->index);
-		spin_lock_irq(&phba->hbalock);
-		if ((phba->fcf.fcf_flag & FCF_SCAN_DONE) ||
-		    (phba->hba_flag & FCF_DISC_INPROGRESS)) {
-			/*
-			 * If the current FCF is in discovered state or
-			 * FCF discovery is in progress, do nothing.
-			 */
-			spin_unlock_irq(&phba->hbalock);
-			break;
-		}
-
-		if (phba->fcf.fcf_flag & FCF_REDISC_EVT) {
-			/*
-			 * If fast FCF failover rescan event is pending,
-			 * do nothing.
-			 */
-			spin_unlock_irq(&phba->hbalock);
-			break;
-		}
-		spin_unlock_irq(&phba->hbalock);
-
-		if ((phba->fcf.fcf_flag & FCF_DISCOVERY) &&
-		    !(phba->fcf.fcf_flag & FCF_REDISC_FOV)) {
+		if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
 			/*
 			 * During period of FCF discovery, read the FCF
 			 * table record indexed by the event to update
@@ -3360,6 +3391,25 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
 			rc = lpfc_sli4_read_fcf_rec(phba,
 						    acqe_fcoe->index);
 		}
+		/* If the FCF discovery is in progress, do nothing. */
+		spin_lock_irq(&phba->hbalock);
+		if (phba->hba_flag & FCF_DISC_INPROGRESS) {
+			spin_unlock_irq(&phba->hbalock);
+			break;
+		}
+		/* If fast FCF failover rescan event is pending, do nothing */
+		if (phba->fcf.fcf_flag & FCF_REDISC_EVT) {
+			spin_unlock_irq(&phba->hbalock);
+			break;
+		}
+
+		/* If the FCF has been in discovered state, do nothing. */
+		if (phba->fcf.fcf_flag & FCF_SCAN_DONE) {
+			spin_unlock_irq(&phba->hbalock);
+			break;
+		}
+		spin_unlock_irq(&phba->hbalock);
+
 		/* Otherwise, scan the entire FCF table and re-discover SAN */
 		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
 			"2770 Start FCF table scan due to new FCF "
@@ -3385,13 +3435,9 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
 			"2549 FCF disconnected from network index 0x%x"
 			" tag 0x%x\n", acqe_fcoe->index,
 			acqe_fcoe->event_tag);
-		/* If the event is not for currently used fcf do nothing */
-		if (phba->fcf.current_rec.fcf_indx != acqe_fcoe->index)
-			break;
-		/* We request port to rediscover the entire FCF table for
-		 * a fast recovery from case that the current FCF record
-		 * is no longer valid if we are not in the middle of FCF
-		 * failover process already.
+		/*
+		 * If we are in the middle of FCF failover process, clear
+		 * the corresponding FCF bit in the roundrobin bitmap.
 		 */
 		spin_lock_irq(&phba->hbalock);
 		if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
@@ -3400,9 +3446,23 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
 			lpfc_sli4_fcf_rr_index_clear(phba, acqe_fcoe->index);
 			break;
 		}
+		spin_unlock_irq(&phba->hbalock);
+
+		/* If the event is not for currently used fcf do nothing */
+		if (phba->fcf.current_rec.fcf_indx != acqe_fcoe->index)
+			break;
+
+		/*
+		 * Otherwise, request the port to rediscover the entire FCF
+		 * table for a fast recovery in case the current FCF is no
+		 * longer valid, as we are not already in the middle of the
+		 * FCF failover process.
+		 */
+		spin_lock_irq(&phba->hbalock);
 		/* Mark the fast failover process in progress */
 		phba->fcf.fcf_flag |= FCF_DEAD_DISC;
 		spin_unlock_irq(&phba->hbalock);
+
 		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
 			"2771 Start FCF fast failover process due to "
 			"FCF DEAD event: evt_tag:x%x, fcf_index:x%x "
@@ -3422,12 +3482,16 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
 			 * as a link down to FCF registration.
 			 */
 			lpfc_sli4_fcf_dead_failthrough(phba);
-		} else
-			/* Handling fast FCF failover to a DEAD FCF event
-			 * is considered equalivant to receiving CVL to all
-			 * vports.
+		} else {
+			/* Reset FCF roundrobin bmask for new discovery */
+			memset(phba->fcf.fcf_rr_bmask, 0,
+			       sizeof(*phba->fcf.fcf_rr_bmask));
+			/*
+			 * Handling fast FCF failover to a DEAD FCF event is
+			 * considered equivalent to receiving CVL to all vports.
 			 */
 			lpfc_sli4_perform_all_vport_cvl(phba);
+		}
 		break;
 	case LPFC_FCOE_EVENT_TYPE_CVL:
 		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
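Several hunks above maintain a round-robin eligibility bitmap over FCF table indexes: a DEAD event during failover clears just that FCF's bit, while a fresh discovery resets the whole mask. A standalone sketch with a plain uint64_t standing in for the driver's fcf_rr_bmask (the helper names and indexes are hypothetical):

#include <stdint.h>
#include <stdio.h>

#define FCF_TBL_MAX 64

static uint64_t fcf_rr_bmask;

static void fcf_rr_index_set(unsigned idx)   { fcf_rr_bmask |= 1ULL << idx; }
static void fcf_rr_index_clear(unsigned idx) { fcf_rr_bmask &= ~(1ULL << idx); }
static void fcf_rr_reset(void)               { fcf_rr_bmask = 0; }

/* Pick the next eligible FCF index after 'cur', wrapping around. */
static int fcf_rr_next(unsigned cur)
{
	for (unsigned i = 1; i <= FCF_TBL_MAX; i++) {
		unsigned idx = (cur + i) % FCF_TBL_MAX;
		if (fcf_rr_bmask & (1ULL << idx))
			return (int)idx;
	}
	return -1;			/* no eligible FCF left */
}

int main(void)
{
	fcf_rr_index_set(3);
	fcf_rr_index_set(7);
	fcf_rr_index_clear(3);		/* FCF 3 reported DEAD mid-failover */
	printf("next FCF: %d\n", fcf_rr_next(3));	/* -> 7 */
	fcf_rr_reset();			/* start a fresh discovery */
	printf("next FCF: %d\n", fcf_rr_next(3));	/* -> -1 */
	return 0;
}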
@@ -3502,7 +3566,13 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
 			 * the current registered FCF entry.
 			 */
 			lpfc_retry_pport_discovery(phba);
-		}
+		} else
+			/*
+			 * Reset FCF roundrobin bmask for new
+			 * discovery.
+			 */
+			memset(phba->fcf.fcf_rr_bmask, 0,
+			       sizeof(*phba->fcf.fcf_rr_bmask));
 		}
 		break;
 	default:
@@ -4885,6 +4955,7 @@ lpfc_create_shost(struct lpfc_hba *phba)
 	phba->fc_altov = FF_DEF_ALTOV;
 	phba->fc_arbtov = FF_DEF_ARBTOV;
 
+	atomic_set(&phba->sdev_cnt, 0);
 	vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
 	if (!vport)
 		return -ENODEV;
@@ -5533,9 +5604,12 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
 	mempool_free(pmb, phba->mbox_mem_pool);
 
 	/* Reset the DFT_HBA_Q_DEPTH to the max xri */
-	if (phba->cfg_hba_queue_depth > (phba->sli4_hba.max_cfg_param.max_xri))
+	if (phba->cfg_hba_queue_depth >
+	    (phba->sli4_hba.max_cfg_param.max_xri -
+	     lpfc_sli4_get_els_iocb_cnt(phba)))
 		phba->cfg_hba_queue_depth =
-			phba->sli4_hba.max_cfg_param.max_xri;
+			phba->sli4_hba.max_cfg_param.max_xri -
+			lpfc_sli4_get_els_iocb_cnt(phba);
 	return rc;
 }
@@ -6993,22 +7067,28 @@ lpfc_sli_disable_intr(struct lpfc_hba *phba)
 static int
 lpfc_sli4_enable_msix(struct lpfc_hba *phba)
 {
-	int rc, index;
+	int vectors, rc, index;
 
 	/* Set up MSI-X multi-message vectors */
 	for (index = 0; index < phba->sli4_hba.cfg_eqn; index++)
 		phba->sli4_hba.msix_entries[index].entry = index;
 
 	/* Configure MSI-X capability structure */
+	vectors = phba->sli4_hba.cfg_eqn;
+enable_msix_vectors:
 	rc = pci_enable_msix(phba->pcidev, phba->sli4_hba.msix_entries,
-			     phba->sli4_hba.cfg_eqn);
-	if (rc) {
+			     vectors);
+	if (rc > 1) {
+		vectors = rc;
+		goto enable_msix_vectors;
+	} else if (rc) {
 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
 				"0484 PCI enable MSI-X failed (%d)\n", rc);
 		goto msi_fail_out;
 	}
+
 	/* Log MSI-X vector assignment */
-	for (index = 0; index < phba->sli4_hba.cfg_eqn; index++)
+	for (index = 0; index < vectors; index++)
 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
 				"0489 MSI-X entry[%d]: vector=x%x "
 				"message=%d\n", index,
@@ -7030,7 +7110,7 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
 	}
 
 	/* The rest of the vector(s) are associated to fast-path handler(s) */
-	for (index = 1; index < phba->sli4_hba.cfg_eqn; index++) {
+	for (index = 1; index < vectors; index++) {
 		phba->sli4_hba.fcp_eq_hdl[index - 1].idx = index - 1;
 		phba->sli4_hba.fcp_eq_hdl[index - 1].phba = phba;
 		rc = request_irq(phba->sli4_hba.msix_entries[index].vector,
@@ -7044,6 +7124,7 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
 			goto cfg_fail_out;
 		}
 	}
+	phba->sli4_hba.msix_vec_nr = vectors;
 
 	return rc;
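The enable-MSI-X hunks above add the negotiation loop the old pci_enable_msix() contract required: a positive return value meant only that many vectors were available, and the caller retried with that count (a return of exactly 1 still falls through to the failure path here). A standalone sketch of the loop follows; try_enable() fakes a device that only has four vectors and is not a real PCI call. Later kernels express the same negotiation through the min/max arguments of pci_alloc_irq_vectors().

#include <stdio.h>

/* Stand-in: 0 = success, >0 = only that many vectors available. */
static int try_enable(int requested)
{
	const int available = 4;
	return (requested <= available) ? 0 : available;
}

static int enable_msix(int requested)
{
	int vectors = requested;
	int rc;

retry:
	rc = try_enable(vectors);
	if (rc > 1) {		/* fewer vectors offered: retry with them */
		vectors = rc;
		goto retry;
	} else if (rc) {
		return -1;	/* hard failure, or a single vector: give up */
	}
	return vectors;		/* negotiated vector count */
}

int main(void)
{
	printf("got %d of 8 requested vectors\n", enable_msix(8));
	return 0;
}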
@@ -7077,9 +7158,10 @@ lpfc_sli4_disable_msix(struct lpfc_hba *phba)
 	/* Free up MSI-X multi-message vectors */
 	free_irq(phba->sli4_hba.msix_entries[0].vector, phba);
 
-	for (index = 1; index < phba->sli4_hba.cfg_eqn; index++)
+	for (index = 1; index < phba->sli4_hba.msix_vec_nr; index++)
 		free_irq(phba->sli4_hba.msix_entries[index].vector,
 			 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
+
 	/* Disable MSI-X */
 	pci_disable_msix(phba->pcidev);
@@ -7121,6 +7203,7 @@ lpfc_sli4_enable_msi(struct lpfc_hba *phba)
 		pci_disable_msi(phba->pcidev);
 		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
 				"0490 MSI request_irq failed (%d)\n", rc);
+		return rc;
 	}
 
 	for (index = 0; index < phba->cfg_fcp_eq_count; index++) {
@@ -7128,7 +7211,7 @@ lpfc_sli4_enable_msi(struct lpfc_hba *phba)
 		phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
 	}
 
-	return rc;
+	return 0;
 }
 
 /**
@@ -7839,6 +7922,9 @@ lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
 	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 			"2710 PCI channel disable preparing for reset\n");
 
+	/* Block any management I/Os to the device */
+	lpfc_block_mgmt_io(phba);
+
 	/* Block all SCSI devices' I/Os on the host */
 	lpfc_scsi_dev_block(phba);
@@ -7848,6 +7934,7 @@ lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
 	/* Disable interrupt and pci device */
 	lpfc_sli_disable_intr(phba);
 	pci_disable_device(phba->pcidev);
+
 	/* Flush all driver's outstanding SCSI I/Os as we are to reset */
 	lpfc_sli_flush_fcp_rings(phba);
 }
@@ -7861,7 +7948,7 @@ lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
  * pending I/Os.
 **/
 static void
-lpfc_prep_dev_for_perm_failure(struct lpfc_hba *phba)
+lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba)
 {
 	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 			"2711 PCI channel permanent disable for failure\n");
@@ -7910,7 +7997,7 @@ lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
 		return PCI_ERS_RESULT_NEED_RESET;
 	case pci_channel_io_perm_failure:
 		/* Permanent failure, prepare for device down */
-		lpfc_prep_dev_for_perm_failure(phba);
+		lpfc_sli_prep_dev_for_perm_failure(phba);
 		return PCI_ERS_RESULT_DISCONNECT;
 	default:
 		/* Unknown state, prepare and request slot reset */
@@ -7979,7 +8066,8 @@ lpfc_io_slot_reset_s3(struct pci_dev *pdev)
 	} else
 		phba->intr_mode = intr_mode;
 
-	/* Take device offline; this will perform cleanup */
+	/* Take device offline, it will perform cleanup */
+	lpfc_offline_prep(phba);
 	lpfc_offline(phba);
 	lpfc_sli_brdrestart(phba);
@@ -8110,8 +8198,12 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
 	}
 
 	/* Initialize and populate the iocb list per host */
-	error = lpfc_init_iocb_list(phba,
-			phba->sli4_hba.max_cfg_param.max_xri);
+
+	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+			"2821 initialize iocb list %d.\n",
+			phba->cfg_iocb_cnt*1024);
+	error = lpfc_init_iocb_list(phba, phba->cfg_iocb_cnt*1024);
+
 	if (error) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"1413 Failed to initialize iocb list.\n");
@@ -8160,6 +8252,8 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
 	/* Default to single FCP EQ for non-MSI-X */
 	if (phba->intr_type != MSIX)
 		phba->cfg_fcp_eq_count = 1;
+	else if (phba->sli4_hba.msix_vec_nr < phba->cfg_fcp_eq_count)
+		phba->cfg_fcp_eq_count = phba->sli4_hba.msix_vec_nr - 1;
 	/* Set up SLI-4 HBA */
 	if (lpfc_sli4_hba_setup(phba)) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
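The probe-time clamp just above ties the number of fast-path FCP event queues to what MSI-X actually granted: vector 0 serves the slow path, so at most (vectors - 1) fast-path EQs remain, and without MSI-X everything collapses to a single EQ. A minimal sketch of that arithmetic, with stand-in names for the lpfc config fields:

#include <stdio.h>
#include <stdbool.h>

static int clamp_fcp_eq_count(bool msix, int msix_vectors, int cfg_fcp_eq)
{
	if (!msix)
		return 1;			/* single shared EQ */
	if (msix_vectors < cfg_fcp_eq)
		return msix_vectors - 1;	/* vector 0 is the slow path */
	return cfg_fcp_eq;
}

int main(void)
{
	printf("%d\n", clamp_fcp_eq_count(true, 4, 8));   /* -> 3 */
	printf("%d\n", clamp_fcp_eq_count(false, 0, 8));  /* -> 1 */
	return 0;
}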
@@ -8321,7 +8415,7 @@ lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg)
 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
 
 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-			"0298 PCI device Power Management suspend.\n");
+			"2843 PCI device Power Management suspend.\n");
 
 	/* Bring down the device */
 	lpfc_offline_prep(phba);
@@ -8412,6 +8506,84 @@ lpfc_pci_resume_one_s4(struct pci_dev *pdev)
 }
 
 /**
+ * lpfc_sli4_prep_dev_for_recover - Prepare SLI4 device for pci slot recover
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is called to prepare the SLI4 device for PCI slot recover. It
+ * aborts all the outstanding SCSI I/Os to the pci device.
+ **/
+static void
+lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba)
+{
+	struct lpfc_sli *psli = &phba->sli;
+	struct lpfc_sli_ring *pring;
+
+	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"2828 PCI channel I/O abort preparing for recovery\n");
+	/*
+	 * There may be errored I/Os through HBA, abort all I/Os on txcmplq
+	 * and let the SCSI mid-layer to retry them to recover.
+	 */
+	pring = &psli->ring[psli->fcp_ring];
+	lpfc_sli_abort_iocb_ring(phba, pring);
+}
+
+/**
+ * lpfc_sli4_prep_dev_for_reset - Prepare SLI4 device for pci slot reset
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is called to prepare the SLI4 device for PCI slot reset. It
+ * disables the device interrupt and pci device, and aborts the internal FCP
+ * pending I/Os.
+ **/
+static void
+lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
+{
+	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"2826 PCI channel disable preparing for reset\n");
+
+	/* Block any management I/Os to the device */
+	lpfc_block_mgmt_io(phba);
+
+	/* Block all SCSI devices' I/Os on the host */
+	lpfc_scsi_dev_block(phba);
+
+	/* stop all timers */
+	lpfc_stop_hba_timers(phba);
+
+	/* Disable interrupt and pci device */
+	lpfc_sli4_disable_intr(phba);
+	pci_disable_device(phba->pcidev);
+
+	/* Flush all driver's outstanding SCSI I/Os as we are to reset */
+	lpfc_sli_flush_fcp_rings(phba);
+}
+
+/**
+ * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for pci slot disable
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is called to prepare the SLI4 device for PCI slot permanently
+ * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
+ * pending I/Os.
+ **/
+static void
+lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba)
+{
+	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"2827 PCI channel permanent disable for failure\n");
+
+	/* Block all SCSI devices' I/Os on the host */
+	lpfc_scsi_dev_block(phba);
+
+	/* stop all timers */
+	lpfc_stop_hba_timers(phba);
+
+	/* Clean up all driver's outstanding SCSI I/Os */
+	lpfc_sli_flush_fcp_rings(phba);
+}
+
+/**
  * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device
  * @pdev: pointer to PCI device.
  * @state: the current PCI connection state.
@@ -8430,7 +8602,29 @@ lpfc_pci_resume_one_s4(struct pci_dev *pdev)
 static pci_ers_result_t
 lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
 {
-	return PCI_ERS_RESULT_NEED_RESET;
+	struct Scsi_Host *shost = pci_get_drvdata(pdev);
+	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+
+	switch (state) {
+	case pci_channel_io_normal:
+		/* Non-fatal error, prepare for recovery */
+		lpfc_sli4_prep_dev_for_recover(phba);
+		return PCI_ERS_RESULT_CAN_RECOVER;
+	case pci_channel_io_frozen:
+		/* Fatal error, prepare for slot reset */
+		lpfc_sli4_prep_dev_for_reset(phba);
+		return PCI_ERS_RESULT_NEED_RESET;
+	case pci_channel_io_perm_failure:
+		/* Permanent failure, prepare for device down */
+		lpfc_sli4_prep_dev_for_perm_failure(phba);
+		return PCI_ERS_RESULT_DISCONNECT;
+	default:
+		/* Unknown state, prepare and request slot reset */
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"2825 Unknown PCI error state: x%x\n", state);
+		lpfc_sli4_prep_dev_for_reset(phba);
+		return PCI_ERS_RESULT_NEED_RESET;
+	}
 }
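The three new prep routines above share one teardown order: stop new work first (management I/O, then SCSI I/O), stop timers, silence the hardware, and only then flush what is still outstanding. A sketch of that ordering with empty stand-in steps (not the lpfc routines); the comment notes why reordering would race:

#include <stdio.h>

static void block_mgmt_io(void)    { puts("1. block management I/O"); }
static void block_scsi_io(void)    { puts("2. block SCSI device I/O"); }
static void stop_timers(void)      { puts("3. stop HBA timers"); }
static void disable_intr_pci(void) { puts("4. disable interrupts + PCI device"); }
static void flush_fcp_rings(void)  { puts("5. flush outstanding FCP I/O"); }

/* Order matters: flushing before blocking would let new I/O race in,
 * and disabling the device before blocking could strand live requests. */
static void prep_dev_for_reset(void)
{
	block_mgmt_io();
	block_scsi_io();
	stop_timers();
	disable_intr_pci();
	flush_fcp_rings();
}

int main(void)
{
	prep_dev_for_reset();
	return 0;
}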
@@ -8454,6 +8648,39 @@ lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
 static pci_ers_result_t
 lpfc_io_slot_reset_s4(struct pci_dev *pdev)
 {
+	struct Scsi_Host *shost = pci_get_drvdata(pdev);
+	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+	struct lpfc_sli *psli = &phba->sli;
+	uint32_t intr_mode;
+
+	dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
+	if (pci_enable_device_mem(pdev)) {
+		printk(KERN_ERR "lpfc: Cannot re-enable "
+			"PCI device after reset.\n");
+		return PCI_ERS_RESULT_DISCONNECT;
+	}
+
+	pci_restore_state(pdev);
+	if (pdev->is_busmaster)
+		pci_set_master(pdev);
+
+	spin_lock_irq(&phba->hbalock);
+	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
+	spin_unlock_irq(&phba->hbalock);
+
+	/* Configure and enable interrupt */
+	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
+	if (intr_mode == LPFC_INTR_ERROR) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2824 Cannot re-enable interrupt after "
+				"slot reset.\n");
+		return PCI_ERS_RESULT_DISCONNECT;
+	} else
+		phba->intr_mode = intr_mode;
+
+	/* Log the current active interrupt mode */
+	lpfc_log_intr_mode(phba, phba->intr_mode);
+
 	return PCI_ERS_RESULT_RECOVERED;
 }
@@ -8470,7 +8697,27 @@ lpfc_io_slot_reset_s4(struct pci_dev *pdev)
 static void
 lpfc_io_resume_s4(struct pci_dev *pdev)
 {
-	return;
+	struct Scsi_Host *shost = pci_get_drvdata(pdev);
+	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+
+	/*
+	 * In case of slot reset, as function reset is performed through
+	 * mailbox command which needs DMA to be enabled, this operation
+	 * has to be moved to the io resume phase. Taking device offline
+	 * will perform the necessary cleanup.
+	 */
+	if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) {
+		/* Perform device reset */
+		lpfc_offline_prep(phba);
+		lpfc_offline(phba);
+		lpfc_sli_brdrestart(phba);
+		/* Bring the device back online */
+		lpfc_online(phba);
+	}
+
+	/* Clean up Advanced Error Reporting (AER) if needed */
+	if (phba->hba_flag & HBA_AER_ENABLED)
+		pci_cleanup_aer_uncorrect_error_status(pdev);
 }
@@ -8802,6 +9049,8 @@ static struct pci_device_id lpfc_id_table[] = {
 		PCI_ANY_ID, PCI_ANY_ID, },
 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FALCON,
 		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BALIUS,
+		PCI_ANY_ID, PCI_ANY_ID, },
 	{ 0 }
 };
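The slot-reset and resume handlers above split one logical reset across two EEH callbacks: lpfc_io_slot_reset_s4() clears LPFC_SLI_ACTIVE because the mailbox-driven function reset needs DMA, which may not be usable yet, and lpfc_io_resume_s4() performs the deferred offline/restart/online cycle when it sees the flag cleared. A standalone sketch of that handshake (stand-in types, not the driver's structures):

#include <stdbool.h>
#include <stdio.h>

struct hba { bool sli_active; };

static void slot_reset(struct hba *h)
{
	/* re-enable PCI device, restore state, re-enable IRQs ... */
	h->sli_active = false;		/* remember: full reset still owed */
}

static void io_resume(struct hba *h)
{
	if (!h->sli_active) {		/* reset deferred from slot_reset */
		puts("offline -> board restart -> online");
		h->sli_active = true;
	}
}

int main(void)
{
	struct hba h = { .sli_active = true };
	slot_reset(&h);
	io_resume(&h);
	return 0;
}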