Diffstat (limited to 'drivers/scsi/lpfc/lpfc_sli.c')
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c | 194
1 file changed, 170 insertions, 24 deletions
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 103a5aa4ae8..ae3cb0ab0ae 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -455,6 +455,11 @@ __lpfc_sli_get_iocbq(struct lpfc_hba *phba)
 	struct lpfc_iocbq * iocbq = NULL;
 
 	list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
+
+	if (iocbq)
+		phba->iocb_cnt++;
+	if (phba->iocb_cnt > phba->iocb_max)
+		phba->iocb_max = phba->iocb_cnt;
 	return iocbq;
 }
 
@@ -575,7 +580,8 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
 {
 	struct lpfc_sglq *sglq;
 	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
-	unsigned long iflag;
+	unsigned long iflag = 0;
+	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
 
 	if (iocbq->sli4_xritag == NO_XRI)
 		sglq = NULL;
@@ -593,6 +599,17 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
 		} else {
 			sglq->state = SGL_FREED;
 			list_add(&sglq->list, &phba->sli4_hba.lpfc_sgl_list);
+
+			/* Check if TXQ queue needs to be serviced */
+			if (pring->txq_cnt) {
+				spin_lock_irqsave(
+					&phba->pport->work_port_lock, iflag);
+				phba->pport->work_port_events |=
+					WORKER_SERVICE_TXQ;
+				lpfc_worker_wake_up(phba);
+				spin_unlock_irqrestore(
+					&phba->pport->work_port_lock, iflag);
+			}
 		}
 	}
 
@@ -605,6 +622,7 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
 	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
 }
 
+
 /**
  * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
  * @phba: Pointer to HBA context object.
@@ -642,6 +660,7 @@ static void
 __lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
 {
 	phba->__lpfc_sli_release_iocbq(phba, iocbq);
+	phba->iocb_cnt--;
 }
 
 /**
@@ -872,7 +891,11 @@ lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 			struct lpfc_iocbq *piocb)
 {
 	list_add_tail(&piocb->list, &pring->txcmplq);
+	piocb->iocb_flag |= LPFC_IO_ON_Q;
 	pring->txcmplq_cnt++;
+	if (pring->txcmplq_cnt > pring->txcmplq_max)
+		pring->txcmplq_max = pring->txcmplq_cnt;
+
 	if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
 	   (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
 	   (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
@@ -897,7 +920,7 @@ lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
  * the txq, the function returns first iocb in the list after
  * removing the iocb from the list, else it returns NULL.
  **/
-static struct lpfc_iocbq *
+struct lpfc_iocbq *
 lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
 {
 	struct lpfc_iocbq *cmd_iocb;
@@ -2150,7 +2173,10 @@ lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
 	if (iotag != 0 && iotag <= phba->sli.last_iotag) {
 		cmd_iocb = phba->sli.iocbq_lookup[iotag];
 		list_del_init(&cmd_iocb->list);
-		pring->txcmplq_cnt--;
+		if (cmd_iocb->iocb_flag & LPFC_IO_ON_Q) {
+			pring->txcmplq_cnt--;
+			cmd_iocb->iocb_flag &= ~LPFC_IO_ON_Q;
+		}
 		return cmd_iocb;
 	}
 
@@ -2183,7 +2209,10 @@ lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
 	if (iotag != 0 && iotag <= phba->sli.last_iotag) {
 		cmd_iocb = phba->sli.iocbq_lookup[iotag];
 		list_del_init(&cmd_iocb->list);
-		pring->txcmplq_cnt--;
+		if (cmd_iocb->iocb_flag & LPFC_IO_ON_Q) {
+			cmd_iocb->iocb_flag &= ~LPFC_IO_ON_Q;
+			pring->txcmplq_cnt--;
+		}
 		return cmd_iocb;
 	}
 
@@ -5578,7 +5607,7 @@ lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
  * iocb to the txq when SLI layer cannot submit the command iocb
  * to the ring.
  **/
-static void
+void
 __lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 		    struct lpfc_iocbq *piocb)
 {
@@ -6195,7 +6224,6 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
 			 struct lpfc_iocbq *piocb, uint32_t flag)
 {
 	struct lpfc_sglq *sglq;
-	uint16_t xritag;
 	union lpfc_wqe wqe;
 	struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number];
 
@@ -6204,10 +6232,26 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
 			piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
 			sglq = NULL;
 		else {
+			if (pring->txq_cnt) {
+				if (!(flag & SLI_IOCB_RET_IOCB)) {
+					__lpfc_sli_ringtx_put(phba,
+						pring, piocb);
+					return IOCB_SUCCESS;
+				} else {
+					return IOCB_BUSY;
+				}
+			} else {
 			sglq = __lpfc_sli_get_sglq(phba);
-			if (!sglq)
-				return IOCB_ERROR;
-			piocb->sli4_xritag = sglq->sli4_xritag;
+				if (!sglq) {
+					if (!(flag & SLI_IOCB_RET_IOCB)) {
+						__lpfc_sli_ringtx_put(phba,
+								pring,
+								piocb);
+						return IOCB_SUCCESS;
+					} else
+						return IOCB_BUSY;
+				}
+			}
 		}
 	} else if (piocb->iocb_flag & LPFC_IO_FCP) {
 		sglq = NULL; /* These IO's already have an XRI and
@@ -6223,8 +6267,9 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
 	}
 
 	if (sglq) {
-		xritag = lpfc_sli4_bpl2sgl(phba, piocb, sglq);
-		if (xritag != sglq->sli4_xritag)
+		piocb->sli4_xritag = sglq->sli4_xritag;
+
+		if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocb, sglq))
 			return IOCB_ERROR;
 	}
 
@@ -6264,7 +6309,7 @@
  * IOCB_SUCCESS - Success
  * IOCB_BUSY - Busy
  **/
-static inline int
+int
 __lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
 		struct lpfc_iocbq *piocb, uint32_t flag)
 {
@@ -7081,13 +7126,6 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 	 */
 	abort_iocb = phba->sli.iocbq_lookup[abort_context];
 
-	lpfc_printf_log(phba, KERN_INFO, LOG_ELS | LOG_SLI,
-			"0327 Cannot abort els iocb %p "
-			"with tag %x context %x, abort status %x, "
-			"abort code %x\n",
-			abort_iocb, abort_iotag, abort_context,
-			irsp->ulpStatus, irsp->un.ulpWord[4]);
-
 	/*
 	 * If the iocb is not found in Firmware queue the iocb
 	 * might have completed already. Do not free it again.
@@ -7106,6 +7144,13 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 		if (abort_iocb && phba->sli_rev == LPFC_SLI_REV4)
 			abort_context = abort_iocb->iocb.ulpContext;
 	}
+
+	lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI,
+			"0327 Cannot abort els iocb %p "
+			"with tag %x context %x, abort status %x, "
+			"abort code %x\n",
+			abort_iocb, abort_iotag, abort_context,
+			irsp->ulpStatus, irsp->un.ulpWord[4]);
 	/*
 	 * make sure we have the right iocbq before taking it
 	 * off the txcmplq and try to call completion routine.
@@ -7123,7 +7168,10 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 			 * following abort XRI from the HBA.
 			 */
 			list_del_init(&abort_iocb->list);
-			pring->txcmplq_cnt--;
+			if (abort_iocb->iocb_flag & LPFC_IO_ON_Q) {
+				abort_iocb->iocb_flag &= ~LPFC_IO_ON_Q;
+				pring->txcmplq_cnt--;
+			}
 
 			/* Firmware could still be in progress of DMAing
 			 * payload, so don't free data buffer till after
@@ -7255,8 +7303,9 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 	lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
 			 "0339 Abort xri x%x, original iotag x%x, "
 			 "abort cmd iotag x%x\n",
+			 iabt->un.acxri.abortIoTag,
 			 iabt->un.acxri.abortContextTag,
-			 iabt->un.acxri.abortIoTag, abtsiocbp->iotag);
+			 abtsiocbp->iotag);
 	retval = __lpfc_sli_issue_iocb(phba, pring->ringno, abtsiocbp, 0);
 
 	if (retval)
@@ -7586,7 +7635,7 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
 	long timeleft, timeout_req = 0;
 	int retval = IOCB_SUCCESS;
 	uint32_t creg_val;
-
+	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
 	/*
 	 * If the caller has provided a response iocbq buffer, then context2
 	 * is NULL or its an error.
@@ -7608,7 +7657,8 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
 		readl(phba->HCregaddr); /* flush */
 	}
 
-	retval = lpfc_sli_issue_iocb(phba, ring_number, piocb, 0);
+	retval = lpfc_sli_issue_iocb(phba, ring_number, piocb,
+				     SLI_IOCB_RET_IOCB);
 	if (retval == IOCB_SUCCESS) {
 		timeout_req = timeout * HZ;
 		timeleft = wait_event_timeout(done_q,
@@ -7630,6 +7680,11 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
 					 timeout, (timeleft / jiffies));
 			retval = IOCB_TIMEDOUT;
 		}
+	} else if (retval == IOCB_BUSY) {
+		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+			"2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n",
+			phba->iocb_cnt, pring->txq_cnt, pring->txcmplq_cnt);
+		return retval;
 	} else {
 		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
 				"0332 IOCB wait issue failed, Data x%x\n",
@@ -8775,12 +8830,17 @@ lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba,
 {
 	struct lpfc_iocbq *irspiocbq;
 	unsigned long iflags;
+	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_FCP_RING];
 
 	/* Get an irspiocbq for later ELS response processing use */
 	irspiocbq = lpfc_sli_get_iocbq(phba);
 	if (!irspiocbq) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
-				"0387 Failed to allocate an iocbq\n");
+			"0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d "
+			"fcp_txcmplq_cnt=%d, els_txcmplq_cnt=%d\n",
+			pring->txq_cnt, phba->iocb_cnt,
+			phba->sli.ring[LPFC_FCP_RING].txcmplq_cnt,
+			phba->sli.ring[LPFC_ELS_RING].txcmplq_cnt);
 		return false;
 	}
 
@@ -12695,3 +12755,89 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
 	spin_unlock_irq(&phba->hbalock);
 }
 
+/**
+ * lpfc_drain_txq - Drain the txq
+ * @phba: Pointer to HBA context object.
+ *
+ * This function attempt to submit IOCBs on the txq
+ * to the adapter.  For SLI4 adapters, the txq contains
+ * ELS IOCBs that have been deferred because the there
+ * are no SGLs.  This congestion can occur with large
+ * vport counts during node discovery.
+ **/
+
+uint32_t
+lpfc_drain_txq(struct lpfc_hba *phba)
+{
+	LIST_HEAD(completions);
+	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
+	struct lpfc_iocbq *piocbq = 0;
+	unsigned long iflags = 0;
+	char *fail_msg = NULL;
+	struct lpfc_sglq *sglq;
+	union lpfc_wqe wqe;
+
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	if (pring->txq_cnt > pring->txq_max)
+		pring->txq_max = pring->txq_cnt;
+
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+
+	while (pring->txq_cnt) {
+		spin_lock_irqsave(&phba->hbalock, iflags);
+
+		sglq = __lpfc_sli_get_sglq(phba);
+		if (!sglq) {
+			spin_unlock_irqrestore(&phba->hbalock, iflags);
+			break;
+		} else {
+			piocbq = lpfc_sli_ringtx_get(phba, pring);
+			if (!piocbq) {
+				/* The txq_cnt out of sync. This should
+				 * never happen
+				 */
+				sglq = __lpfc_clear_active_sglq(phba,
+						 sglq->sli4_xritag);
+				spin_unlock_irqrestore(&phba->hbalock, iflags);
+				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+					"2823 txq empty and txq_cnt is %d\n ",
+					pring->txq_cnt);
+				break;
+			}
+		}
+
+		/* The xri and iocb resources secured,
+		 * attempt to issue request
+		 */
+		piocbq->sli4_xritag = sglq->sli4_xritag;
+		if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq))
+			fail_msg = "to convert bpl to sgl";
+		else if (lpfc_sli4_iocb2wqe(phba, piocbq, &wqe))
+			fail_msg = "to convert iocb to wqe";
+		else if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe))
+			fail_msg = " - Wq is full";
+		else
+			lpfc_sli_ringtxcmpl_put(phba, pring, piocbq);
+
+		if (fail_msg) {
+			/* Failed means we can't issue and need to cancel */
+			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+					"2822 IOCB failed %s iotag 0x%x "
+					"xri 0x%x\n",
+					fail_msg,
+					piocbq->iotag, piocbq->sli4_xritag);
+			list_add_tail(&piocbq->list, &completions);
+		}
+		spin_unlock_irqrestore(&phba->hbalock, iflags);
+	}
+
+	spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
+	phba->pport->work_port_events &= ~WORKER_SERVICE_TXQ;
+	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
+
+	/* Cancel all the IOCBs that cannot be issued */
+	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
+				IOERR_SLI_ABORTED);
+
+	return pring->txq_cnt;
+}