author    Dave Airlie <airlied@redhat.com>    2010-03-31 14:55:14 +1000
committer Dave Airlie <airlied@redhat.com>    2010-03-31 14:55:14 +1000
commit    3595be778d8cb887f0e0575ef0a0c1a094d120bb (patch)
tree      15671ed8bd3597d2efe13aa57b755c66014acb57 /drivers/scsi/lpfc/lpfc_sli.c
parent    c414a117c6094c3f86b533f97beaf45ef9075f03 (diff)
parent    220bf991b0366cc50a94feede3d7341fa5710ee4 (diff)
Merge branch 'v2.6.34-rc2' into drm-linus
Diffstat (limited to 'drivers/scsi/lpfc/lpfc_sli.c')
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c | 654
1 file changed, 560 insertions(+), 94 deletions(-)
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 589549b2bf0..fe6660ca645 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -494,7 +494,7 @@ __lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
*
* Returns sglq pointer = success, NULL = Failure.
**/
-static struct lpfc_sglq *
+struct lpfc_sglq *
__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
uint16_t adj_xri;
@@ -526,6 +526,7 @@ __lpfc_sli_get_sglq(struct lpfc_hba *phba)
return NULL;
adj_xri = sglq->sli4_xritag - phba->sli4_hba.max_cfg_param.xri_base;
phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = sglq;
+ sglq->state = SGL_ALLOCATED;
return sglq;
}
@@ -580,18 +581,18 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
else
sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_xritag);
if (sglq) {
- if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED
- && ((iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
- && (iocbq->iocb.un.ulpWord[4]
- == IOERR_ABORT_REQUESTED))) {
+ if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
+ (sglq->state != SGL_XRI_ABORTED)) {
spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock,
iflag);
list_add(&sglq->list,
&phba->sli4_hba.lpfc_abts_els_sgl_list);
spin_unlock_irqrestore(
&phba->sli4_hba.abts_sgl_list_lock, iflag);
- } else
+ } else {
+ sglq->state = SGL_FREED;
list_add(&sglq->list, &phba->sli4_hba.lpfc_sgl_list);
+ }
}
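
The hunk above replaces the old abort-status test with an exchange-busy check plus an explicit sglq state. A rough lifecycle summary (editorial, not part of the patch; the state names come from this patch series, the transition notes are inferred from the hunks shown here):

	enum {
		SGL_FREED,	/* on lpfc_sgl_list; __lpfc_sli_get_sglq() may hand it out */
		SGL_ALLOCATED,	/* set in __lpfc_sli_get_sglq() when given to an IOCB */
		SGL_XRI_ABORTED,/* XRI-aborted event seen; HBA has released the exchange */
	};

	/*
	 * Release rule from the hunk: if LPFC_EXCHANGE_BUSY is set and the
	 * XRI-aborted event has not yet arrived (state != SGL_XRI_ABORTED),
	 * the sglq is parked on lpfc_abts_els_sgl_list; otherwise it is
	 * marked SGL_FREED and returned to lpfc_sgl_list immediately.
	 */
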
@@ -764,10 +765,6 @@ lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
case DSSCMD_IWRITE64_CX:
case DSSCMD_IREAD64_CR:
case DSSCMD_IREAD64_CX:
- case DSSCMD_INVALIDATE_DEK:
- case DSSCMD_SET_KEK:
- case DSSCMD_GET_KEK_ID:
- case DSSCMD_GEN_XFER:
type = LPFC_SOL_IOCB;
break;
case CMD_ABORT_XRI_CN:
@@ -1717,6 +1714,7 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
struct lpfc_dmabuf *mp;
uint16_t rpi, vpi;
int rc;
+ struct lpfc_vport *vport = pmb->vport;
mp = (struct lpfc_dmabuf *) (pmb->context1);
@@ -1745,6 +1743,18 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
return;
}
+	/* Unreg VPI, if the REG_VPI succeeded after VLink failure */
+ if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) &&
+ !(phba->pport->load_flag & FC_UNLOADING) &&
+ !pmb->u.mb.mbxStatus) {
+ lpfc_unreg_vpi(phba, pmb->u.mb.un.varRegVpi.vpi, pmb);
+ pmb->vport = vport;
+ pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
+ if (rc != MBX_NOT_FINISHED)
+ return;
+ }
+
if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
lpfc_sli4_mbox_cmd_free(phba, pmb);
else
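
The new REG_VPI handling reuses the just-completed mailbox object for the follow-up UNREG_VPI instead of allocating a new one. A condensed sketch of that reuse pattern (editorial; names are taken from the hunk, error paths elided):

	/* rebuild the completed pmb as an UNREG_VPI request */
	lpfc_unreg_vpi(phba, pmb->u.mb.un.varRegVpi.vpi, pmb);
	pmb->vport = vport;			/* vport was saved before the rebuild */
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;/* complete through the default path */
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT) != MBX_NOT_FINISHED)
		return;		/* the mailbox layer now owns pmb; skip the free below */
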
@@ -2228,9 +2238,15 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
* All other are passed to the completion callback.
*/
if (pring->ringno == LPFC_ELS_RING) {
- if (cmdiocbp->iocb_flag & LPFC_DRIVER_ABORTED) {
+ if ((phba->sli_rev < LPFC_SLI_REV4) &&
+ (cmdiocbp->iocb_flag &
+ LPFC_DRIVER_ABORTED)) {
+ spin_lock_irqsave(&phba->hbalock,
+ iflag);
cmdiocbp->iocb_flag &=
~LPFC_DRIVER_ABORTED;
+ spin_unlock_irqrestore(&phba->hbalock,
+ iflag);
saveq->iocb.ulpStatus =
IOSTAT_LOCAL_REJECT;
saveq->iocb.un.ulpWord[4] =
@@ -2240,7 +2256,62 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
* of DMAing payload, so don't free data
* buffer till after a hbeat.
*/
+ spin_lock_irqsave(&phba->hbalock,
+ iflag);
saveq->iocb_flag |= LPFC_DELAY_MEM_FREE;
+ spin_unlock_irqrestore(&phba->hbalock,
+ iflag);
+ }
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ if (saveq->iocb_flag &
+ LPFC_EXCHANGE_BUSY) {
+ /* Set cmdiocb flag for the
+ * exchange busy so sgl (xri)
+ * will not be released until
+ * the abort xri is received
+ * from hba.
+ */
+ spin_lock_irqsave(
+ &phba->hbalock, iflag);
+ cmdiocbp->iocb_flag |=
+ LPFC_EXCHANGE_BUSY;
+ spin_unlock_irqrestore(
+ &phba->hbalock, iflag);
+ }
+ if (cmdiocbp->iocb_flag &
+ LPFC_DRIVER_ABORTED) {
+ /*
+					 * Clear the LPFC_DRIVER_ABORTED
+					 * bit in case it was a
+					 * driver-initiated abort.
+ */
+ spin_lock_irqsave(
+ &phba->hbalock, iflag);
+ cmdiocbp->iocb_flag &=
+ ~LPFC_DRIVER_ABORTED;
+ spin_unlock_irqrestore(
+ &phba->hbalock, iflag);
+ cmdiocbp->iocb.ulpStatus =
+ IOSTAT_LOCAL_REJECT;
+ cmdiocbp->iocb.un.ulpWord[4] =
+ IOERR_ABORT_REQUESTED;
+ /*
+					 * For SLI4, irspiocb contains
+					 * NO_XRI in sli_xritag, so it
+					 * does not affect the sgl (xri)
+					 * release process.
+ */
+ saveq->iocb.ulpStatus =
+ IOSTAT_LOCAL_REJECT;
+ saveq->iocb.un.ulpWord[4] =
+ IOERR_SLI_ABORTED;
+ spin_lock_irqsave(
+ &phba->hbalock, iflag);
+ saveq->iocb_flag |=
+ LPFC_DELAY_MEM_FREE;
+ spin_unlock_irqrestore(
+ &phba->hbalock, iflag);
+ }
}
}
(cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
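
On SLI4, this completion path and __lpfc_sli_release_iocbq_s4() (first hunk) cooperate through the iocb flags. A hypothetical helper (editorial, not in the patch) condensing just the flag bookkeeping the hunk performs inline:

	static void lpfc_sli4_abort_flags_sketch(struct lpfc_hba *phba,
						 struct lpfc_iocbq *cmdiocbp,
						 struct lpfc_iocbq *saveq)
	{
		unsigned long iflag;

		spin_lock_irqsave(&phba->hbalock, iflag);
		if (saveq->iocb_flag & LPFC_EXCHANGE_BUSY)
			cmdiocbp->iocb_flag |= LPFC_EXCHANGE_BUSY; /* defer sgl release */
		if (cmdiocbp->iocb_flag & LPFC_DRIVER_ABORTED) {
			cmdiocbp->iocb_flag &= ~LPFC_DRIVER_ABORTED;
			saveq->iocb_flag |= LPFC_DELAY_MEM_FREE;   /* HBA may still DMA */
		}
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		/* the real hunk also rewrites ulpStatus/ulpWord[4] on both iocbs */
	}
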
@@ -2463,14 +2534,16 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
&rspiocbq);
- if ((cmdiocbq) && (cmdiocbq->iocb_cmpl)) {
- spin_unlock_irqrestore(&phba->hbalock,
- iflag);
- (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
- &rspiocbq);
- spin_lock_irqsave(&phba->hbalock,
- iflag);
- }
+ if (unlikely(!cmdiocbq))
+ break;
+ if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED)
+ cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
+ if (cmdiocbq->iocb_cmpl) {
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+ (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
+ &rspiocbq);
+ spin_lock_irqsave(&phba->hbalock, iflag);
+ }
break;
case LPFC_UNSOL_IOCB:
spin_unlock_irqrestore(&phba->hbalock, iflag);
@@ -3039,6 +3112,12 @@ lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
/* Check to see if any errors occurred during init */
if ((status & HS_FFERM) || (i >= 20)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2751 Adapter failed to restart, "
+ "status reg x%x, FW Data: A8 x%x AC x%x\n",
+ status,
+ readl(phba->MBslimaddr + 0xa8),
+ readl(phba->MBslimaddr + 0xac));
phba->link_state = LPFC_HBA_ERROR;
retval = 1;
}
@@ -3226,6 +3305,9 @@ lpfc_sli_brdkill(struct lpfc_hba *phba)
if (retval != MBX_SUCCESS) {
if (retval != MBX_BUSY)
mempool_free(pmb, phba->mbox_mem_pool);
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "2752 KILL_BOARD command failed retval %d\n",
+ retval);
spin_lock_irq(&phba->hbalock);
phba->link_flag &= ~LS_IGNORE_ERATT;
spin_unlock_irq(&phba->hbalock);
@@ -3983,7 +4065,7 @@ lpfc_sli_hba_setup(struct lpfc_hba *phba)
lpfc_sli_hba_setup_error:
phba->link_state = LPFC_HBA_ERROR;
- lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0445 Firmware initialization failed\n");
return rc;
}
@@ -4336,7 +4418,13 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
spin_unlock_irq(&phba->hbalock);
/* Read the port's service parameters. */
- lpfc_read_sparam(phba, mboxq, vport->vpi);
+ rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
+ if (rc) {
+ phba->link_state = LPFC_HBA_ERROR;
+ rc = -ENOMEM;
+ goto out_free_vpd;
+ }
+
mboxq->vport = vport;
rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
mp = (struct lpfc_dmabuf *) mboxq->context1;
@@ -4431,6 +4519,10 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
/* Post receive buffers to the device */
lpfc_sli4_rb_setup(phba);
+ /* Reset HBA FCF states after HBA reset */
+ phba->fcf.fcf_flag = 0;
+ phba->fcf.current_rec.flag = 0;
+
/* Start the ELS watchdog timer */
mod_timer(&vport->els_tmofunc,
jiffies + HZ * (phba->fc_ratov * 2));
@@ -5687,19 +5779,19 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
for (i = 0; i < numBdes; i++) {
/* Should already be byte swapped. */
- sgl->addr_hi = bpl->addrHigh;
- sgl->addr_lo = bpl->addrLow;
- /* swap the size field back to the cpu so we
- * can assign it to the sgl.
- */
- bde.tus.w = le32_to_cpu(bpl->tus.w);
- bf_set(lpfc_sli4_sge_len, sgl, bde.tus.f.bdeSize);
+ sgl->addr_hi = bpl->addrHigh;
+ sgl->addr_lo = bpl->addrLow;
+
if ((i+1) == numBdes)
bf_set(lpfc_sli4_sge_last, sgl, 1);
else
bf_set(lpfc_sli4_sge_last, sgl, 0);
sgl->word2 = cpu_to_le32(sgl->word2);
- sgl->word3 = cpu_to_le32(sgl->word3);
+ /* swap the size field back to the cpu so we
+ * can assign it to the sgl.
+ */
+ bde.tus.w = le32_to_cpu(bpl->tus.w);
+ sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
bpl++;
sgl++;
}
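
One way to read the fix above, restated with the hunk's own names (editorial comment, not patch content): the old code set the length with bf_set() and then byte-swapped word3 a second time, corrupting what it had just written; the new code writes the length exactly once:

	/* bpl->tus.w is little-endian on the wire; bitfields are only
	 * meaningful in CPU order, so swap in, extract, swap back out. */
	bde.tus.w = le32_to_cpu(bpl->tus.w);		/* LE -> CPU order */
	sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);	/* CPU order -> LE */
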
@@ -5712,11 +5804,10 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
cpu_to_le32(icmd->un.genreq64.bdl.addrHigh);
sgl->addr_lo =
cpu_to_le32(icmd->un.genreq64.bdl.addrLow);
- bf_set(lpfc_sli4_sge_len, sgl,
- icmd->un.genreq64.bdl.bdeSize);
bf_set(lpfc_sli4_sge_last, sgl, 1);
sgl->word2 = cpu_to_le32(sgl->word2);
- sgl->word3 = cpu_to_le32(sgl->word3);
+ sgl->sge_len =
+ cpu_to_le32(icmd->un.genreq64.bdl.bdeSize);
}
return sglq->sli4_xritag;
}
@@ -5987,12 +6078,10 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
else
bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
- abort_tag = iocbq->iocb.un.acxri.abortIoTag;
wqe->words[5] = 0;
bf_set(lpfc_wqe_gen_ct, &wqe->generic,
((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
abort_tag = iocbq->iocb.un.acxri.abortIoTag;
- wqe->generic.abort_tag = abort_tag;
/*
* The abort handler will send us CMD_ABORT_XRI_CN or
* CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX
@@ -6121,15 +6210,15 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe))
return IOCB_ERROR;
- if (piocb->iocb_flag & LPFC_IO_FCP) {
+ if ((piocb->iocb_flag & LPFC_IO_FCP) ||
+ (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
/*
* For FCP command IOCB, get a new WQ index to distribute
* WQE across the WQs. On the other hand, for abort IOCB,
* it carries the same WQ index to the original command
* IOCB.
*/
- if ((piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
- (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN))
+ if (piocb->iocb_flag & LPFC_IO_FCP)
piocb->fcp_wqidx = lpfc_sli4_scmd_to_wqidx_distr(phba);
if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[piocb->fcp_wqidx],
&wqe))
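
The new LPFC_USE_FCPWQIDX flag lets non-FCP IOCBs (the ABTS requests built later in this patch) ride a pre-assigned FCP work-queue index. The routing rule the hunk encodes, restated with comments (editorial; return-value handling elided):

	if (piocb->iocb_flag & LPFC_IO_FCP)
		/* fresh FCP command: pick a WQ to spread load across fcp_wq[] */
		piocb->fcp_wqidx = lpfc_sli4_scmd_to_wqidx_distr(phba);
	/* else LPFC_USE_FCPWQIDX: fcp_wqidx was copied from the command being
	 * aborted, so the ABTS lands on the same WQ as its target WQE */
	lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[piocb->fcp_wqidx], &wqe);
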
@@ -7004,7 +7093,14 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
abort_iocb->iocb.ulpContext != abort_context ||
(abort_iocb->iocb_flag & LPFC_DRIVER_ABORTED) == 0)
spin_unlock_irq(&phba->hbalock);
- else {
+ else if (phba->sli_rev < LPFC_SLI_REV4) {
+ /*
+ * leave the SLI4 aborted command on the txcmplq
+ * list and the command complete WCQE's XB bit
+ * will tell whether the SGL (XRI) can be released
+ * immediately or to the aborted SGL list for the
+ * following abort XRI from the HBA.
+ */
list_del_init(&abort_iocb->list);
pring->txcmplq_cnt--;
spin_unlock_irq(&phba->hbalock);
@@ -7013,11 +7109,13 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
* payload, so don't free data buffer till after
* a hbeat.
*/
+ spin_lock_irq(&phba->hbalock);
abort_iocb->iocb_flag |= LPFC_DELAY_MEM_FREE;
-
abort_iocb->iocb_flag &= ~LPFC_DRIVER_ABORTED;
+ spin_unlock_irq(&phba->hbalock);
+
abort_iocb->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
- abort_iocb->iocb.un.ulpWord[4] = IOERR_SLI_ABORTED;
+ abort_iocb->iocb.un.ulpWord[4] = IOERR_ABORT_REQUESTED;
(abort_iocb->iocb_cmpl)(phba, abort_iocb, abort_iocb);
}
}
@@ -7106,7 +7204,7 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
return 0;
/* This signals the response to set the correct status
- * before calling the completion handler.
+ * before calling the completion handler
*/
cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
@@ -7124,6 +7222,8 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
/* ABTS WQE must go to the same WQ as the WQE to be aborted */
abtsiocbp->fcp_wqidx = cmdiocb->fcp_wqidx;
+ if (cmdiocb->iocb_flag & LPFC_IO_FCP)
+ abtsiocbp->iocb_flag |= LPFC_USE_FCPWQIDX;
if (phba->link_state >= LPFC_LINK_UP)
iabt->ulpCommand = CMD_ABORT_XRI_CN;
@@ -7330,6 +7430,8 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
/* ABTS WQE must go to the same WQ as the WQE to be aborted */
abtsiocb->fcp_wqidx = iocbq->fcp_wqidx;
+ if (iocbq->iocb_flag & LPFC_IO_FCP)
+ abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
if (lpfc_is_link_up(phba))
abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN;
@@ -7374,6 +7476,7 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
{
wait_queue_head_t *pdone_q;
unsigned long iflags;
+ struct lpfc_scsi_buf *lpfc_cmd;
spin_lock_irqsave(&phba->hbalock, iflags);
cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
@@ -7381,6 +7484,14 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
&rspiocbq->iocb, sizeof(IOCB_t));
+ /* Set the exchange busy flag for task management commands */
+ if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) &&
+ !(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) {
+ lpfc_cmd = container_of(cmdiocbq, struct lpfc_scsi_buf,
+ cur_iocbq);
+ lpfc_cmd->exch_busy = rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY;
+ }
+
pdone_q = cmdiocbq->context_un.wait_queue;
if (pdone_q)
wake_up(pdone_q);
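
The container_of() call above recovers the enclosing lpfc_scsi_buf from its embedded cur_iocbq member. A minimal userspace model of the same idiom, runnable standalone (struct names here are simplified stand-ins, not the driver's):

	#include <stddef.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct iocbq    { int iocb_flag; };
	struct scsi_buf { int exch_busy; struct iocbq cur_iocbq; };

	static struct scsi_buf *to_scsi_buf(struct iocbq *q)
	{
		/* step back by the member offset to reach the container */
		return container_of(q, struct scsi_buf, cur_iocbq);
	}
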
@@ -8359,11 +8470,24 @@ void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
}
}
+/**
+ * lpfc_sli4_iocb_param_transfer - Transfer pIocbOut and cmpl status to pIocbIn
+ * @phba: pointer to lpfc hba data structure.
+ * @pIocbIn: pointer to the rspiocbq.
+ * @pIocbOut: pointer to the cmdiocbq.
+ * @wcqe: pointer to the completion wcqe.
+ *
+ * This routine transfers the fields of a command iocbq to a response iocbq
+ * by copying all the IOCB fields from the command iocbq and transferring the
+ * completion status information from the completion wcqe.
+ **/
static void
-lpfc_sli4_iocb_param_transfer(struct lpfc_iocbq *pIocbIn,
+lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba,
+ struct lpfc_iocbq *pIocbIn,
struct lpfc_iocbq *pIocbOut,
struct lpfc_wcqe_complete *wcqe)
{
+ unsigned long iflags;
size_t offset = offsetof(struct lpfc_iocbq, iocb);
memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
@@ -8377,8 +8501,17 @@ lpfc_sli4_iocb_param_transfer(struct lpfc_iocbq *pIocbIn,
wcqe->total_data_placed;
else
pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
- else
+ else {
pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
+ pIocbIn->iocb.un.genreq64.bdl.bdeSize = wcqe->total_data_placed;
+ }
+
+ /* Pick up HBA exchange busy condition */
+ if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ pIocbIn->iocb_flag |= LPFC_EXCHANGE_BUSY;
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ }
}
/**
@@ -8419,7 +8552,7 @@ lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
}
/* Fake the irspiocbq and copy necessary response information */
- lpfc_sli4_iocb_param_transfer(irspiocbq, cmdiocbq, wcqe);
+ lpfc_sli4_iocb_param_transfer(phba, irspiocbq, cmdiocbq, wcqe);
return irspiocbq;
}
@@ -8849,8 +8982,7 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
int ecount = 0;
uint16_t cqid;
- if (bf_get(lpfc_eqe_major_code, eqe) != 0 ||
- bf_get(lpfc_eqe_minor_code, eqe) != 0) {
+ if (bf_get(lpfc_eqe_major_code, eqe) != 0) {
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0359 Not a valid slow-path completion "
"event: majorcode=x%x, minorcode=x%x\n",
@@ -8976,7 +9108,13 @@ lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba,
}
/* Fake the irspiocb and copy necessary response information */
- lpfc_sli4_iocb_param_transfer(&irspiocbq, cmdiocbq, wcqe);
+ lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe);
+
+ if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ }
/* Pass the cmd_iocb and the rsp state to the upper layer */
(cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq);
@@ -9082,8 +9220,7 @@ lpfc_sli4_fp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
uint16_t cqid;
int ecount = 0;
- if (unlikely(bf_get(lpfc_eqe_major_code, eqe) != 0) ||
- unlikely(bf_get(lpfc_eqe_minor_code, eqe) != 0)) {
+ if (unlikely(bf_get(lpfc_eqe_major_code, eqe) != 0)) {
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0366 Not a valid fast-path completion "
"event: majorcode=x%x, minorcode=x%x\n",
@@ -11859,24 +11996,22 @@ lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
}
/**
- * lpfc_sli4_read_fcf_record - Read the driver's default FCF Record.
+ * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan.
* @phba: pointer to lpfc hba data structure.
* @fcf_index: FCF table entry offset.
*
- * This routine is invoked to read up to @fcf_num of FCF record from the
- * device starting with the given @fcf_index.
+ * This routine is invoked to scan the entire FCF table by reading FCF
+ * records and processing them one at a time, starting from @fcf_index,
+ * for initial FCF discovery or fast FCF failover rediscovery.
+ *
+ * Return 0 if the mailbox command is submitted successfully, non-zero
+ * otherwise.
**/
int
-lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index)
+lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
{
int rc = 0, error;
LPFC_MBOXQ_t *mboxq;
- void *virt_addr;
- dma_addr_t phys_addr;
- uint8_t *bytep;
- struct lpfc_mbx_sge sge;
- uint32_t alloc_len, req_len;
- struct lpfc_mbx_read_fcf_tbl *read_fcf;
phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag;
mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
@@ -11885,51 +12020,31 @@ lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index)
"2000 Failed to allocate mbox for "
"READ_FCF cmd\n");
error = -ENOMEM;
- goto fail_fcfscan;
+ goto fail_fcf_scan;
}
-
- req_len = sizeof(struct fcf_record) +
- sizeof(union lpfc_sli4_cfg_shdr) + 2 * sizeof(uint32_t);
-
- /* Set up READ_FCF SLI4_CONFIG mailbox-ioctl command */
- alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
- LPFC_MBOX_OPCODE_FCOE_READ_FCF_TABLE, req_len,
- LPFC_SLI4_MBX_NEMBED);
-
- if (alloc_len < req_len) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0291 Allocated DMA memory size (x%x) is "
- "less than the requested DMA memory "
- "size (x%x)\n", alloc_len, req_len);
- error = -ENOMEM;
- goto fail_fcfscan;
+ /* Construct the read FCF record mailbox command */
+ rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
+ if (rc) {
+ error = -EINVAL;
+ goto fail_fcf_scan;
}
-
- /* Get the first SGE entry from the non-embedded DMA memory. This
- * routine only uses a single SGE.
- */
- lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
- phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
- virt_addr = mboxq->sge_array->addr[0];
- read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr;
-
- /* Set up command fields */
- bf_set(lpfc_mbx_read_fcf_tbl_indx, &read_fcf->u.request, fcf_index);
- /* Perform necessary endian conversion */
- bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
- lpfc_sli_pcimem_bcopy(bytep, bytep, sizeof(uint32_t));
+ /* Issue the mailbox command asynchronously */
mboxq->vport = phba->pport;
- mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_record;
+ mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec;
rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
- if (rc == MBX_NOT_FINISHED) {
+ if (rc == MBX_NOT_FINISHED)
error = -EIO;
- } else {
+ else {
spin_lock_irq(&phba->hbalock);
phba->hba_flag |= FCF_DISC_INPROGRESS;
spin_unlock_irq(&phba->hbalock);
+ /* Reset FCF round robin index bmask for new scan */
+ if (fcf_index == LPFC_FCOE_FCF_GET_FIRST)
+ memset(phba->fcf.fcf_rr_bmask, 0,
+ sizeof(*phba->fcf.fcf_rr_bmask));
error = 0;
}
-fail_fcfscan:
+fail_fcf_scan:
if (error) {
if (mboxq)
lpfc_sli4_mbox_cmd_free(phba, mboxq);
@@ -11942,6 +12057,312 @@ fail_fcfscan:
}
/**
+ * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for round robin fcf.
+ * @phba: pointer to lpfc hba data structure.
+ * @fcf_index: FCF table entry offset.
+ *
+ * This routine is invoked to read an FCF record indicated by @fcf_index
+ * and to use it for FLOGI round robin FCF failover.
+ *
+ * Return 0 if the mailbox command is submitted successfully, non-zero
+ * otherwise.
+ **/
+int
+lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
+{
+ int rc = 0, error;
+ LPFC_MBOXQ_t *mboxq;
+
+ mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mboxq) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
+ "2763 Failed to allocate mbox for "
+ "READ_FCF cmd\n");
+ error = -ENOMEM;
+ goto fail_fcf_read;
+ }
+ /* Construct the read FCF record mailbox command */
+ rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
+ if (rc) {
+ error = -EINVAL;
+ goto fail_fcf_read;
+ }
+ /* Issue the mailbox command asynchronously */
+ mboxq->vport = phba->pport;
+ mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec;
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
+ if (rc == MBX_NOT_FINISHED)
+ error = -EIO;
+ else
+ error = 0;
+
+fail_fcf_read:
+ if (error && mboxq)
+ lpfc_sli4_mbox_cmd_free(phba, mboxq);
+ return error;
+}
+
+/**
+ * lpfc_sli4_read_fcf_rec - Read hba fcf record to update the eligible fcf bmask.
+ * @phba: pointer to lpfc hba data structure.
+ * @fcf_index: FCF table entry offset.
+ *
+ * This routine is invoked to read an FCF record indicated by @fcf_index to
+ * determine whether it is eligible for the FLOGI round robin failover list.
+ *
+ * Return 0 if the mailbox command is submitted successfully, non-zero
+ * otherwise.
+ **/
+int
+lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
+{
+ int rc = 0, error;
+ LPFC_MBOXQ_t *mboxq;
+
+ mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mboxq) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
+ "2758 Failed to allocate mbox for "
+ "READ_FCF cmd\n");
+ error = -ENOMEM;
+ goto fail_fcf_read;
+ }
+ /* Construct the read FCF record mailbox command */
+ rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
+ if (rc) {
+ error = -EINVAL;
+ goto fail_fcf_read;
+ }
+ /* Issue the mailbox command asynchronously */
+ mboxq->vport = phba->pport;
+ mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec;
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
+ if (rc == MBX_NOT_FINISHED)
+ error = -EIO;
+ else
+ error = 0;
+
+fail_fcf_read:
+ if (error && mboxq)
+ lpfc_sli4_mbox_cmd_free(phba, mboxq);
+ return error;
+}
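
lpfc_sli4_fcf_scan_read_fcf_rec(), lpfc_sli4_fcf_rr_read_fcf_rec() and lpfc_sli4_read_fcf_rec() differ only in their completion handler and log text. A hypothetical common helper (editorial, not in the patch) makes the shared shape explicit:

	static int lpfc_sli4_read_fcf_common_sketch(struct lpfc_hba *phba,
			uint16_t fcf_index,
			void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *))
	{
		LPFC_MBOXQ_t *mboxq;
		int rc;

		mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!mboxq)
			return -ENOMEM;
		rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
		if (rc) {
			lpfc_sli4_mbox_cmd_free(phba, mboxq);
			return -EINVAL;
		}
		mboxq->vport = phba->pport;
		mboxq->mbox_cmpl = cmpl;	/* scan / round robin / eligibility */
		if (lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT) == MBX_NOT_FINISHED) {
			lpfc_sli4_mbox_cmd_free(phba, mboxq);
			return -EIO;
		}
		return 0;
	}
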
+
+/**
+ * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine gets the next eligible FCF record index in a round
+ * robin fashion. If the next eligible FCF record index equals the
+ * initial round robin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF)
+ * shall be returned, otherwise, the next eligible FCF record's index
+ * shall be returned.
+ **/
+uint16_t
+lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
+{
+ uint16_t next_fcf_index;
+
+ /* Search from the currently registered FCF index */
+ next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
+ LPFC_SLI4_FCF_TBL_INDX_MAX,
+ phba->fcf.current_rec.fcf_indx);
+ /* Wrap around condition on phba->fcf.fcf_rr_bmask */
+ if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX)
+ next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
+ LPFC_SLI4_FCF_TBL_INDX_MAX, 0);
+ /* Round robin failover stop condition */
+ if (next_fcf_index == phba->fcf.fcf_rr_init_indx)
+ return LPFC_FCOE_FCF_NEXT_NONE;
+
+ return next_fcf_index;
+}
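
The wrap-around search above can be modeled without the kernel bitmap API. A loose, runnable approximation (editorial; LPFC_FCOE_FCF_NEXT_NONE is modeled as -1, and the scan starts one past the current index rather than at it):

	#define TBL_MAX 64	/* stands in for LPFC_SLI4_FCF_TBL_INDX_MAX */

	/* Find the next eligible index after 'cur', wrapping at most once;
	 * return -1 when the walk comes back to the starting index 'init'. */
	static int next_fcf_index_sketch(const unsigned char eligible[TBL_MAX],
					 int cur, int init)
	{
		int i, idx;

		for (i = 1; i <= TBL_MAX; i++) {
			idx = (cur + i) % TBL_MAX;
			if (!eligible[idx])
				continue;
			return (idx == init) ? -1 : idx;
		}
		return -1;	/* no eligible FCF at all */
	}
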
+
+/**
+ * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index
+ * @phba: pointer to lpfc hba data structure.
+ * @fcf_index: FCF table entry offset.
+ *
+ * This routine sets the FCF record index into the eligible bmask for
+ * round robin failover search. It checks to make sure that the index
+ * does not go beyond the range of the driver allocated bmask dimension
+ * before setting the bit.
+ *
+ * Returns 0 if the index bit is successfully set, otherwise it returns
+ * -EINVAL.
+ **/
+int
+lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
+{
+ if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
+ "2610 HBA FCF index reached driver's "
+ "book keeping dimension: fcf_index:%d, "
+ "driver_bmask_max:%d\n",
+ fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
+ return -EINVAL;
+ }
+ /* Set the eligible FCF record index bmask */
+ set_bit(fcf_index, phba->fcf.fcf_rr_bmask);
+
+ return 0;
+}
+
+/**
+ * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index
+ * @phba: pointer to lpfc hba data structure.
+ * @fcf_index: FCF table entry offset.
+ *
+ * This routine clears the FCF record index from the eligible bmask for
+ * round robin failover search. It checks to make sure that the index
+ * does not go beyond the range of the driver allocated bmask dimension
+ * before clearing the bit.
+ **/
+void
+lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
+{
+ if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
+ "2762 HBA FCF index goes beyond driver's "
+ "book keeping dimension: fcf_index:%d, "
+ "driver_bmask_max:%d\n",
+ fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
+ return;
+ }
+ /* Clear the eligible FCF record index bmask */
+ clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);
+}
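
A hedged usage sketch for the pair above (the calling sequence is an assumption, not shown in this diff): eligibility is recorded once a read FCF record passes its checks, and withdrawn when a FLOGI attempt through that FCF fails.

	uint16_t next_index;

	/* record passed connection-list checks: mark eligible */
	if (lpfc_sli4_fcf_rr_index_set(phba, fcf_index))
		return;			/* beyond bmask dimension: ignore */

	/* FLOGI through fcf_index failed: withdraw it, try the next one */
	lpfc_sli4_fcf_rr_index_clear(phba, fcf_index);
	next_index = lpfc_sli4_fcf_rr_next_index_get(phba);
	if (next_index == LPFC_FCOE_FCF_NEXT_NONE) {
		/* round robin exhausted: fall back to rediscovery */
	}
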
+
+/**
+ * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table
+ * @phba: pointer to lpfc hba data structure.
+ * @mbox: pointer to the rediscover FCF table mailbox object.
+ *
+ * This routine is the completion routine for the rediscover FCF table mailbox
+ * command. If the mailbox command returned failure, it will try to stop the
+ * FCF rediscover wait timer.
+ **/
+void
+lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
+{
+ struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
+ uint32_t shdr_status, shdr_add_status;
+
+ redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
+
+ shdr_status = bf_get(lpfc_mbox_hdr_status,
+ &redisc_fcf->header.cfg_shdr.response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
+ &redisc_fcf->header.cfg_shdr.response);
+ if (shdr_status || shdr_add_status) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
+ "2746 Requesting for FCF rediscovery failed "
+ "status x%x add_status x%x\n",
+ shdr_status, shdr_add_status);
+ if (phba->fcf.fcf_flag & FCF_ACVL_DISC) {
+ spin_lock_irq(&phba->hbalock);
+ phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
+ spin_unlock_irq(&phba->hbalock);
+ /*
+ * CVL event triggered FCF rediscover request failed,
+ * last resort to re-try current registered FCF entry.
+ */
+ lpfc_retry_pport_discovery(phba);
+ } else {
+ spin_lock_irq(&phba->hbalock);
+ phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
+ spin_unlock_irq(&phba->hbalock);
+ /*
+ * DEAD FCF event triggered FCF rediscover request
+ * failed, last resort to fail over as a link down
+ * to FCF registration.
+ */
+ lpfc_sli4_fcf_dead_failthrough(phba);
+ }
+ } else {
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+ "2775 Start FCF rediscovery quiescent period "
+ "wait timer before scaning FCF table\n");
+ /*
+ * Start FCF rediscovery wait timer for pending FCF
+ * before rescan FCF record table.
+ */
+ lpfc_fcf_redisc_wait_start_timer(phba);
+ }
+
+ mempool_free(mbox, phba->mbox_mem_pool);
+}
+
+/**
+ * lpfc_sli4_redisc_fcf_table - Request to rediscover entire FCF table by port.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to request rediscovery of the entire FCF table
+ * by the port.
+ **/
+int
+lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba)
+{
+ LPFC_MBOXQ_t *mbox;
+ struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
+ int rc, length;
+
+ /* Cancel retry delay timers to all vports before FCF rediscover */
+ lpfc_cancel_all_vport_retry_delay_timer(phba);
+
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "2745 Failed to allocate mbox for "
+ "requesting FCF rediscover.\n");
+ return -ENOMEM;
+ }
+
+ length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) -
+ sizeof(struct lpfc_sli4_cfg_mhdr));
+ lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
+ LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF,
+ length, LPFC_SLI4_MBX_EMBED);
+
+ redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
+ /* Set count to 0 for invalidating the entire FCF database */
+ bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0);
+
+ /* Issue the mailbox command asynchronously */
+ mbox->vport = phba->pport;
+ mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table;
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
+
+ if (rc == MBX_NOT_FINISHED) {
+ mempool_free(mbox, phba->mbox_mem_pool);
+ return -EIO;
+ }
+ return 0;
+}
+
+/**
+ * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This function is the failover routine of last resort for the FCF DEAD
+ * event, used when the driver has failed to perform fast FCF failover.
+ **/
+void
+lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba)
+{
+ uint32_t link_state;
+
+ /*
+ * Last resort as FCF DEAD event failover will treat this as
+ * a link down, but save the link state because we don't want
+ * it to be changed to Link Down unless it is already down.
+ */
+ link_state = phba->link_state;
+ lpfc_linkdown(phba);
+ phba->link_state = link_state;
+
+ /* Unregister FCF if no devices connected to it */
+ lpfc_unregister_unused_fcf(phba);
+}
+
+/**
* lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled.
* @phba: pointer to lpfc hba data structure.
*
@@ -12069,3 +12490,48 @@ out:
kfree(rgn23_data);
return;
}
+
+/**
+ * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands.
+ * @vport: pointer to vport data structure.
+ *
+ * This function iterates through the mailbox queue and cleans up all
+ * REG_LOGIN and REG_VPI mailbox commands associated with the vport. It
+ * is called when the driver wants to restart discovery of the vport due
+ * to a Clear Virtual Link event.
+ **/
+void
+lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
+{
+ struct lpfc_hba *phba = vport->phba;
+ LPFC_MBOXQ_t *mb, *nextmb;
+ struct lpfc_dmabuf *mp;
+
+ spin_lock_irq(&phba->hbalock);
+ list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
+ if (mb->vport != vport)
+ continue;
+
+ if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
+ (mb->u.mb.mbxCommand != MBX_REG_VPI))
+ continue;
+
+ if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
+ mp = (struct lpfc_dmabuf *) (mb->context1);
+ if (mp) {
+ __lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ }
+ }
+ list_del(&mb->list);
+ mempool_free(mb, phba->mbox_mem_pool);
+ }
+ mb = phba->sli.mbox_active;
+ if (mb && (mb->vport == vport)) {
+ if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) ||
+ (mb->u.mb.mbxCommand == MBX_REG_VPI))
+ mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ }
+ spin_unlock_irq(&phba->hbalock);
+}
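
The list_for_each_entry_safe() above is required because the loop body unlinks and frees the current entry. A minimal userspace model of the same pattern on a singly linked list, runnable standalone:

	#include <stdlib.h>

	struct node { struct node *next; int vport; };

	/* Unlink and free every node owned by 'vport'. The next pointer is
	 * captured before the current node may be freed, exactly what the
	 * _safe list iterator does with its spare cursor. */
	static void purge_vport(struct node **head, int vport)
	{
		struct node **link = head, *cur, *next;

		while ((cur = *link) != NULL) {
			next = cur->next;	/* save before free() */
			if (cur->vport == vport) {
				*link = next;	/* unlink, like list_del() */
				free(cur);
			} else {
				link = &cur->next;
			}
		}
	}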
+