Diffstat (limited to 'drivers/scsi/lpfc/lpfc_sli.c')
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c | 539
1 file changed, 369 insertions(+), 170 deletions(-)
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 2ee0374a990..dacabbe0a58 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2009 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2011 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
@@ -2817,7 +2817,7 @@ void lpfc_poll_eratt(unsigned long ptr)
  * This function is called from the interrupt context when there is a ring
  * event for the fcp ring. The caller does not hold any lock.
  * The function processes each response iocb in the response ring until it
- * finds an iocb with LE bit set and chains all the iocbs upto the iocb with
+ * finds an iocb with LE bit set and chains all the iocbs up to the iocb with
  * LE bit set. The function will call the completion handler of the command iocb
  * if the response iocb indicates a completion for a command iocb or it is
  * an abort completion. The function will call lpfc_sli_process_unsol_iocb
@@ -3477,7 +3477,8 @@ lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
    int retval = 0;

    /* Read the HBA Host Status Register */
-   status = readl(phba->HSregaddr);
+   if (lpfc_readl(phba->HSregaddr, &status))
+       return 1;

    /*
     * Check status register every 100ms for 5 retries, then every
@@ -3502,7 +3503,10 @@ lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
            lpfc_sli_brdrestart(phba);
        }
        /* Read the HBA Host Status Register */
-       status = readl(phba->HSregaddr);
+       if (lpfc_readl(phba->HSregaddr, &status)) {
+           retval = 1;
+           break;
+       }
    }

    /* Check to see if any errors occurred during init */
@@ -3584,7 +3588,7 @@ void lpfc_reset_barrier(struct lpfc_hba *phba)
    uint32_t __iomem *resp_buf;
    uint32_t __iomem *mbox_buf;
    volatile uint32_t mbox;
-   uint32_t hc_copy;
+   uint32_t hc_copy, ha_copy, resp_data;
    int  i;
    uint8_t hdrtype;
@@ -3601,12 +3605,15 @@ void lpfc_reset_barrier(struct lpfc_hba *phba)
    resp_buf = phba->MBslimaddr;

    /* Disable the error attention */
-   hc_copy = readl(phba->HCregaddr);
+   if (lpfc_readl(phba->HCregaddr, &hc_copy))
+       return;
    writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr);
    readl(phba->HCregaddr); /* flush */
    phba->link_flag |= LS_IGNORE_ERATT;

-   if (readl(phba->HAregaddr) & HA_ERATT) {
+   if (lpfc_readl(phba->HAregaddr, &ha_copy))
+       return;
+   if (ha_copy & HA_ERATT) {
        /* Clear Chip error bit */
        writel(HA_ERATT, phba->HAregaddr);
        phba->pport->stopped = 1;
@@ -3620,11 +3627,18 @@ void lpfc_reset_barrier(struct lpfc_hba *phba)
    mbox_buf = phba->MBslimaddr;
    writel(mbox, mbox_buf);

-   for (i = 0;
-        readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN) && i < 50; i++)
-       mdelay(1);
-
-   if (readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN)) {
+   for (i = 0; i < 50; i++) {
+       if (lpfc_readl((resp_buf + 1), &resp_data))
+           return;
+       if (resp_data != ~(BARRIER_TEST_PATTERN))
+           mdelay(1);
+       else
+           break;
+   }
+   resp_data = 0;
+   if (lpfc_readl((resp_buf + 1), &resp_data))
+       return;
+   if (resp_data != ~(BARRIER_TEST_PATTERN)) {
        if (phba->sli.sli_flag & LPFC_SLI_ACTIVE ||
            phba->pport->stopped)
            goto restore_hc;
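The hunks above all hang off one new helper: lpfc_readl(), which replaces bare readl() calls so a read from an unplugged or hung adapter can be detected instead of acted on. The diff does not show the helper's body; a minimal userspace sketch of the likely pattern, assuming (per common PCI convention, not confirmed here) that a dead device reads back as all ones:

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_mmio_read(const volatile uint32_t *addr)
{
    return *addr;               /* stand-in for the kernel's readl() */
}

/* Returns 0 and stores the value on success, nonzero on a dead device. */
static int hedged_readl(const volatile uint32_t *addr, uint32_t *data)
{
    uint32_t val = fake_mmio_read(addr);

    if (val == 0xFFFFFFFFU)     /* assumed surprise-removal signature */
        return 1;
    *data = val;
    return 0;
}

int main(void)
{
    uint32_t live = 0x00000040, dead = 0xFFFFFFFFU, status;

    if (!hedged_readl(&live, &status))
        printf("status = 0x%08x\n", status);
    if (hedged_readl(&dead, &status))
        printf("device gone, abort the operation\n");
    return 0;
}

Every call site in the patch then branches on the return value rather than trusting the register contents.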
@@ -3633,13 +3647,26 @@ void lpfc_reset_barrier(struct lpfc_hba *phba)
    }

    ((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST;
-   for (i = 0; readl(resp_buf) != mbox &&  i < 500; i++)
-       mdelay(1);
+   resp_data = 0;
+   for (i = 0; i < 500; i++) {
+       if (lpfc_readl(resp_buf, &resp_data))
+           return;
+       if (resp_data != mbox)
+           mdelay(1);
+       else
+           break;
+   }

 clear_errat:

-   while (!(readl(phba->HAregaddr) & HA_ERATT) && ++i < 500)
-       mdelay(1);
+   while (++i < 500) {
+       if (lpfc_readl(phba->HAregaddr, &ha_copy))
+           return;
+       if (!(ha_copy & HA_ERATT))
+           mdelay(1);
+       else
+           break;
+   }

    if (readl(phba->HAregaddr) & HA_ERATT) {
        writel(HA_ERATT, phba->HAregaddr);
@@ -3686,7 +3713,11 @@ lpfc_sli_brdkill(struct lpfc_hba *phba)

    /* Disable the error attention */
    spin_lock_irq(&phba->hbalock);
-   status = readl(phba->HCregaddr);
+   if (lpfc_readl(phba->HCregaddr, &status)) {
+       spin_unlock_irq(&phba->hbalock);
+       mempool_free(pmb, phba->mbox_mem_pool);
+       return 1;
+   }
    status &= ~HC_ERINT_ENA;
    writel(status, phba->HCregaddr);
    readl(phba->HCregaddr); /* flush */
@@ -3720,11 +3751,12 @@ lpfc_sli_brdkill(struct lpfc_hba *phba)
     * 3 seconds we still set HBA_ERROR state because the status of the
     * board is now undefined.
     */
-   ha_copy = readl(phba->HAregaddr);
-
+   if (lpfc_readl(phba->HAregaddr, &ha_copy))
+       return 1;
    while ((i++ < 30) && !(ha_copy & HA_ERATT)) {
        mdelay(100);
-       ha_copy = readl(phba->HAregaddr);
+       if (lpfc_readl(phba->HAregaddr, &ha_copy))
+           return 1;
    }

    del_timer_sync(&psli->mbox_tmo);
@@ -4018,7 +4050,8 @@ lpfc_sli_chipset_init(struct lpfc_hba *phba)
    uint32_t status, i = 0;

    /* Read the HBA Host Status Register */
-   status = readl(phba->HSregaddr);
+   if (lpfc_readl(phba->HSregaddr, &status))
+       return -EIO;

    /* Check status register to see what current state is */
    i = 0;
@@ -4073,7 +4106,8 @@ lpfc_sli_chipset_init(struct lpfc_hba *phba)
            lpfc_sli_brdrestart(phba);
        }
        /* Read the HBA Host Status Register */
-       status = readl(phba->HSregaddr);
+       if (lpfc_readl(phba->HSregaddr, &status))
+           return -EIO;
    }

    /* Check to see if any errors occurred during init */
@@ -5083,7 +5117,7 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba)

    /* Setting state unknown so lpfc_sli_abort_iocb_ring
     * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing
-    * it to fail all oustanding SCSI IO.
+    * it to fail all outstanding SCSI IO.
     */
    spin_lock_irq(&phba->pport->work_port_lock);
    phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
@@ -5136,7 +5170,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
    MAILBOX_t *mb;
    struct lpfc_sli *psli = &phba->sli;
    uint32_t status, evtctr;
-   uint32_t ha_copy;
+   uint32_t ha_copy, hc_copy;
    int i;
    unsigned long timeout;
    unsigned long drvr_flag = 0;
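A recurring shape in the hunks above: open-coded `readl(...) != X && i < N` spin loops become bounded loops that can also bail out when the read itself fails. A generic, compilable sketch of that loop shape (names hypothetical; usleep() stands in for mdelay()):

#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

/* 0 on success, nonzero when the device has dropped off the bus. */
typedef int (*read_fn)(uint32_t *out);

enum poll_result { POLL_OK, POLL_TIMEOUT, POLL_DEAD };

/* Retry up to max_tries, bailing out early when the read itself fails
 * instead of spinning on garbage. */
static enum poll_result poll_until(read_fn rd, uint32_t want, int max_tries)
{
    uint32_t val;
    int i;

    for (i = 0; i < max_tries; i++) {
        if (rd(&val))
            return POLL_DEAD;
        if (val == want)
            return POLL_OK;
        usleep(1000);           /* userspace stand-in for mdelay(1) */
    }
    return POLL_TIMEOUT;
}

static int ready_on_third_try(uint32_t *out)
{
    static int calls;
    *out = (++calls >= 3) ? 0xCAFE : 0;
    return 0;
}

int main(void)
{
    printf("result=%d\n", poll_until(ready_on_third_try, 0xCAFE, 50));
    return 0;
}

The three-way exit (done / keep waiting / device dead) is what lets callers such as lpfc_reset_barrier() return immediately on unplug instead of timing out.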
@@ -5202,15 +5236,17 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
        goto out_not_finished;
    }

-   if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT &&
-       !(readl(phba->HCregaddr) & HC_MBINT_ENA)) {
-       spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
-       lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+   if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) {
+       if (lpfc_readl(phba->HCregaddr, &hc_copy) ||
+           !(hc_copy & HC_MBINT_ENA)) {
+           spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
+           lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
            "(%d):2528 Mailbox command x%x cannot "
            "issue Data: x%x x%x\n",
            pmbox->vport ? pmbox->vport->vpi : 0,
            pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
-       goto out_not_finished;
+           goto out_not_finished;
+       }
    }

    if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
@@ -5408,11 +5444,19 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
            word0 = le32_to_cpu(word0);
        } else {
            /* First read mbox status word */
-           word0 = readl(phba->MBslimaddr);
+           if (lpfc_readl(phba->MBslimaddr, &word0)) {
+               spin_unlock_irqrestore(&phba->hbalock,
+                                      drvr_flag);
+               goto out_not_finished;
+           }
        }

        /* Read the HBA Host Attention Register */
-       ha_copy = readl(phba->HAregaddr);
+       if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
+           spin_unlock_irqrestore(&phba->hbalock,
+                                  drvr_flag);
+           goto out_not_finished;
+       }
        timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
                                                     mb->mbxCommand) *
                                   1000) + jiffies;
@@ -5463,7 +5507,11 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
                word0 = readl(phba->MBslimaddr);
            }
            /* Read the HBA Host Attention Register */
-           ha_copy = readl(phba->HAregaddr);
+           if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
+               spin_unlock_irqrestore(&phba->hbalock,
+                                      drvr_flag);
+               goto out_not_finished;
+           }
        }

        if (psli->sli_flag & LPFC_SLI_ACTIVE) {
@@ -5983,7 +6031,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
 }

 /**
- * lpfc_mbox_api_table_setup - Set up mbox api fucntion jump table
+ * lpfc_mbox_api_table_setup - Set up mbox api function jump table
  * @phba: The hba struct for which this call is being executed.
  * @dev_grp: The HBA PCI-Device group number.
  *
@@ -6263,7 +6311,6 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
            bf_set(lpfc_sli4_sge_last, sgl, 1);
        else
            bf_set(lpfc_sli4_sge_last, sgl, 0);
-       sgl->word2 = cpu_to_le32(sgl->word2);
        /* swap the size field back to the cpu so we
         * can assign it to the sgl.
         */
@@ -6283,6 +6330,7 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
                bf_set(lpfc_sli4_sge_offset, sgl, offset);
                offset += bde.tus.f.bdeSize;
            }
+           sgl->word2 = cpu_to_le32(sgl->word2);
            bpl++;
            sgl++;
        }
@@ -6528,9 +6576,9 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
        numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
                sizeof(struct ulp_bde64);
        for (i = 0; i < numBdes; i++) {
-           if (bpl[i].tus.f.bdeFlags != BUFF_TYPE_BDE_64)
-               break;
            bde.tus.w = le32_to_cpu(bpl[i].tus.w);
+           if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
+               break;
            xmit_len += bde.tus.f.bdeSize;
        }
        /* word3 iocb=IO_TAG wqe=request_payload_len */
@@ -6620,15 +6668,15 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
        xritag = 0;
        break;
    case CMD_XMIT_BLS_RSP64_CX:
-       /* As BLS ABTS-ACC WQE is very different from other WQEs,
+       /* As BLS ABTS RSP WQE is very different from other WQEs,
         * we re-construct this WQE here based on information in
         * iocbq from scratch.
         */
        memset(wqe, 0, sizeof(union lpfc_wqe));
        /* OX_ID is invariable to who sent ABTS to CT exchange */
        bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp,
-              bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_acc));
-       if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_acc) ==
+              bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_rsp));
+       if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_rsp) ==
            LPFC_ABTS_UNSOL_INT) {
            /* ABTS sent by initiator to CT exchange, the
             * RX_ID field will be filled with the newly
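The lpfc_sli4_iocb2wqe hunk above fixes an endianness bug: the old code tested bpl[i].tus.f.bdeFlags in wire (little-endian) order, which is wrong on big-endian hosts; the fix byte-swaps the word first, then inspects it. A self-contained sketch of the same rule, using a hypothetical descriptor layout (endian.h/le32toh are glibc conveniences standing in for le32_to_cpu()):

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

#define DESC_TYPE_64  0x00u     /* hypothetical flag value */

struct desc {                   /* hypothetical on-wire, little-endian */
    uint32_t word;              /* [31:24] flags, [23:0] size */
};

static int desc_size(const struct desc *d)
{
    /* Convert to CPU order *first*; only then pick fields apart. */
    uint32_t w = le32toh(d->word);
    uint8_t flags = w >> 24;

    if (flags != DESC_TYPE_64)
        return -1;              /* unexpected descriptor type */
    return w & 0xffffff;
}

int main(void)
{
    struct desc d = { .word = htole32((DESC_TYPE_64 << 24) | 512) };
    printf("size = %d\n", desc_size(&d));
    return 0;
}

On a little-endian host both orderings happen to work, which is how bugs like the one fixed above survive testing.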
@@ -6642,7 +6690,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
             * RX_ID from ABTS.
             */
            bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
-                  bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_acc));
+                  bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_rsp));
        }
        bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff);
        bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);
@@ -6653,6 +6701,15 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
               LPFC_WQE_LENLOC_NONE);
        /* Overwrite the pre-set comnd type with OTHER_COMMAND */
        command_type = OTHER_COMMAND;
+       if (iocbq->iocb.un.xseq64.w5.hcsw.Rctl == FC_RCTL_BA_RJT) {
+           bf_set(xmit_bls_rsp64_rjt_vspec, &wqe->xmit_bls_rsp,
+                  bf_get(lpfc_vndr_code, &iocbq->iocb.un.bls_rsp));
+           bf_set(xmit_bls_rsp64_rjt_expc, &wqe->xmit_bls_rsp,
+                  bf_get(lpfc_rsn_expln, &iocbq->iocb.un.bls_rsp));
+           bf_set(xmit_bls_rsp64_rjt_rsnc, &wqe->xmit_bls_rsp,
+                  bf_get(lpfc_rsn_code, &iocbq->iocb.un.bls_rsp));
+       }
+
        break;
    case CMD_XRI_ABORTED_CX:
    case CMD_CREATE_XRI_CR: /* Do we expect to use this? */
@@ -6701,7 +6758,8 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,

    if (piocb->sli4_xritag == NO_XRI) {
        if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
-           piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
+           piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN ||
+           piocb->iocb.ulpCommand == CMD_XMIT_BLS_RSP64_CX)
            sglq = NULL;
        else {
            if (pring->txq_cnt) {
@@ -6789,7 +6847,7 @@ __lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
 }

 /**
- * lpfc_sli_api_table_setup - Set up sli api fucntion jump table
+ * lpfc_sli_api_table_setup - Set up sli api function jump table
  * @phba: The hba struct for which this call is being executed.
  * @dev_grp: The HBA PCI-Device group number.
  *
@@ -7463,7 +7521,7 @@ lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
    struct lpfc_dmabuf *mp, *next_mp;
    struct list_head *slp = &pring->postbufq;

-   /* Search postbufq, from the begining, looking for a match on tag */
+   /* Search postbufq, from the beginning, looking for a match on tag */
    spin_lock_irq(&phba->hbalock);
    list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
        if (mp->buffer_tag == tag) {
@@ -7507,7 +7565,7 @@ lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
    struct lpfc_dmabuf *mp, *next_mp;
    struct list_head *slp = &pring->postbufq;

-   /* Search postbufq, from the begining, looking for a match on phys */
+   /* Search postbufq, from the beginning, looking for a match on phys */
    spin_lock_irq(&phba->hbalock);
    list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
        if (mp->phys == phys) {
@@ -8194,7 +8252,8 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
    piocb->iocb_flag &= ~LPFC_IO_WAKE;

    if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
-       creg_val = readl(phba->HCregaddr);
+       if (lpfc_readl(phba->HCregaddr, &creg_val))
+           return IOCB_ERROR;
        creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
        writel(creg_val, phba->HCregaddr);
        readl(phba->HCregaddr); /* flush */
@@ -8236,7 +8295,8 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
    }

    if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
-       creg_val = readl(phba->HCregaddr);
+       if (lpfc_readl(phba->HCregaddr, &creg_val))
+           return IOCB_ERROR;
        creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
        writel(creg_val, phba->HCregaddr);
        readl(phba->HCregaddr); /* flush */
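The "api function jump table" comments fixed above refer to lpfc's per-device-generation dispatch tables: each operation gets one implementation pointer chosen at init time, so hot paths never branch on hardware type. A sketch of the idea with hypothetical names (not lpfc's actual table layout):

#include <stdio.h>

enum dev_grp { DEV_GRP_SLI3, DEV_GRP_SLI4 };

struct hba;
struct hba_ops {
    int (*issue_iocb)(struct hba *h);   /* hypothetical op */
};

struct hba {
    struct hba_ops ops;
};

static int issue_iocb_s3(struct hba *h) { (void)h; puts("SLI-3 path"); return 0; }
static int issue_iocb_s4(struct hba *h) { (void)h; puts("SLI-4 path"); return 0; }

/* Analogous to lpfc_sli_api_table_setup(): wire up once per device group. */
static int api_table_setup(struct hba *h, enum dev_grp grp)
{
    switch (grp) {
    case DEV_GRP_SLI3: h->ops.issue_iocb = issue_iocb_s3; break;
    case DEV_GRP_SLI4: h->ops.issue_iocb = issue_iocb_s4; break;
    default: return -1;         /* unknown group: refuse to attach */
    }
    return 0;
}

int main(void)
{
    struct hba h;
    if (!api_table_setup(&h, DEV_GRP_SLI4))
        h.ops.issue_iocb(&h);
    return 0;
}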
@@ -8378,7 +8438,7 @@ lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba)
  * for possible error attention events. The caller must hold the hostlock
  * with spin_lock_irq().
  *
- * This fucntion returns 1 when there is Error Attention in the Host Attention
+ * This function returns 1 when there is Error Attention in the Host Attention
  * Register and returns 0 otherwise.
  **/
 static int
@@ -8387,10 +8447,13 @@ lpfc_sli_eratt_read(struct lpfc_hba *phba)
    uint32_t ha_copy;

    /* Read chip Host Attention (HA) register */
-   ha_copy = readl(phba->HAregaddr);
+   if (lpfc_readl(phba->HAregaddr, &ha_copy))
+       goto unplug_err;
+
    if (ha_copy & HA_ERATT) {
        /* Read host status register to retrieve error event */
-       lpfc_sli_read_hs(phba);
+       if (lpfc_sli_read_hs(phba))
+           goto unplug_err;

        /* Check if there is a deferred error condition is active */
        if ((HS_FFER1 & phba->work_hs) &&
@@ -8409,6 +8472,15 @@ lpfc_sli_eratt_read(struct lpfc_hba *phba)
        return 1;
    }
    return 0;
+
+unplug_err:
+   /* Set the driver HS work bitmap */
+   phba->work_hs |= UNPLUG_ERR;
+   /* Set the driver HA work bitmap */
+   phba->work_ha |= HA_ERATT;
+   /* Indicate polling handles this ERATT */
+   phba->hba_flag |= HBA_ERATT_HANDLED;
+   return 1;
 }

 /**
@@ -8419,7 +8491,7 @@ lpfc_sli_eratt_read(struct lpfc_hba *phba)
  * for possible error attention events. The caller must hold the hostlock
  * with spin_lock_irq().
  *
- * This fucntion returns 1 when there is Error Attention in the Host Attention
+ * This function returns 1 when there is Error Attention in the Host Attention
  * Register and returns 0 otherwise.
  **/
 static int
@@ -8436,8 +8508,15 @@ lpfc_sli4_eratt_read(struct lpfc_hba *phba)
    if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
    switch (if_type) {
    case LPFC_SLI_INTF_IF_TYPE_0:
-       uerr_sta_lo = readl(phba->sli4_hba.u.if_type0.UERRLOregaddr);
-       uerr_sta_hi = readl(phba->sli4_hba.u.if_type0.UERRHIregaddr);
+       if (lpfc_readl(phba->sli4_hba.u.if_type0.UERRLOregaddr,
+           &uerr_sta_lo) ||
+           lpfc_readl(phba->sli4_hba.u.if_type0.UERRHIregaddr,
+           &uerr_sta_hi)) {
+           phba->work_hs |= UNPLUG_ERR;
+           phba->work_ha |= HA_ERATT;
+           phba->hba_flag |= HBA_ERATT_HANDLED;
+           return 1;
+       }
        if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) ||
            (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) {
            lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -8456,9 +8535,15 @@ lpfc_sli4_eratt_read(struct lpfc_hba *phba)
        }
        break;
    case LPFC_SLI_INTF_IF_TYPE_2:
-       portstat_reg.word0 =
-           readl(phba->sli4_hba.u.if_type2.STATUSregaddr);
-       portsmphr = readl(phba->sli4_hba.PSMPHRregaddr);
+       if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
+           &portstat_reg.word0) ||
+           lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
+           &portsmphr)){
+           phba->work_hs |= UNPLUG_ERR;
+           phba->work_ha |= HA_ERATT;
+           phba->hba_flag |= HBA_ERATT_HANDLED;
+           return 1;
+       }
        if (bf_get(lpfc_sliport_status_err, &portstat_reg)) {
            phba->work_status[0] =
                readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
@@ -8496,7 +8581,7 @@ lpfc_sli4_eratt_read(struct lpfc_hba *phba)
  * This function is called from timer soft interrupt context to check HBA's
  * error attention register bit for error attention events.
  *
- * This fucntion returns 1 when there is Error Attention in the Host Attention
+ * This function returns 1 when there is Error Attention in the Host Attention
  * Register and returns 0 otherwise.
  **/
 int
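The new unplug_err path above converts a failed register read into a synthetic error attention: a driver-defined UNPLUG_ERR bit goes into the host-status work bitmap, HA_ERATT into the host-attention bitmap, and the event is marked handled so the worker thread tears the port down through the existing error machinery. A compact model of that flag choreography (bit values hypothetical, not lpfc's actual encodings):

#include <stdint.h>
#include <stdio.h>

#define HS_UNPLUG_ERR  (1u << 0)    /* hypothetical: "board unplugged" */
#define HA_ERATT_BIT   (1u << 1)    /* hypothetical: error attention */
#define ERATT_HANDLED  (1u << 2)    /* hypothetical: event has an owner */

struct hba_state { uint32_t work_hs, work_ha, flags; };

/* Model of the unplug_err path: a read failure is reported exactly like
 * a chip error attention so one worker path handles both. */
static int eratt_read(struct hba_state *h, int read_failed, uint32_t ha_reg)
{
    if (read_failed) {
        h->work_hs |= HS_UNPLUG_ERR;
        h->work_ha |= HA_ERATT_BIT;
        h->flags   |= ERATT_HANDLED;
        return 1;               /* error attention to process */
    }
    return (ha_reg & HA_ERATT_BIT) ? 1 : 0;
}

int main(void)
{
    struct hba_state h = {0};
    printf("eratt=%d work_hs=0x%x\n", eratt_read(&h, 1, 0), h.work_hs);
    return 0;
}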
@@ -8639,7 +8724,8 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id)
            return IRQ_NONE;
        /* Need to read HA REG for slow-path events */
        spin_lock_irqsave(&phba->hbalock, iflag);
-       ha_copy = readl(phba->HAregaddr);
+       if (lpfc_readl(phba->HAregaddr, &ha_copy))
+           goto unplug_error;
        /* If somebody is waiting to handle an eratt don't process it
         * here. The brdkill function will do this.
         */
@@ -8665,7 +8751,9 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id)
        }

        /* Clear up only attention source related to slow-path */
-       hc_copy = readl(phba->HCregaddr);
+       if (lpfc_readl(phba->HCregaddr, &hc_copy))
+           goto unplug_error;
+
        writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA |
            HC_LAINT_ENA | HC_ERINT_ENA),
            phba->HCregaddr);
@@ -8688,7 +8776,8 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id)
                 */
                spin_lock_irqsave(&phba->hbalock, iflag);
                phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
-               control = readl(phba->HCregaddr);
+               if (lpfc_readl(phba->HCregaddr, &control))
+                   goto unplug_error;
                control &= ~HC_LAINT_ENA;
                writel(control, phba->HCregaddr);
                readl(phba->HCregaddr); /* flush */
@@ -8708,7 +8797,8 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id)
            status >>= (4*LPFC_ELS_RING);
            if (status & HA_RXMASK) {
                spin_lock_irqsave(&phba->hbalock, iflag);
-               control = readl(phba->HCregaddr);
+               if (lpfc_readl(phba->HCregaddr, &control))
+                   goto unplug_error;

                lpfc_debugfs_slow_ring_trc(phba,
                "ISR slow ring:   ctl:x%x stat:x%x isrcnt:x%x",
@@ -8741,7 +8831,8 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id)
        }
        spin_lock_irqsave(&phba->hbalock, iflag);
        if (work_ha_copy & HA_ERATT) {
-           lpfc_sli_read_hs(phba);
+           if (lpfc_sli_read_hs(phba))
+               goto unplug_error;
            /*
             * Check if there is a deferred error condition
             * is active
@@ -8872,6 +8963,9 @@ send_current_mbox:
        lpfc_worker_wake_up(phba);
    }
    return IRQ_HANDLED;
+unplug_error:
+   spin_unlock_irqrestore(&phba->hbalock, iflag);
+   return IRQ_HANDLED;

 } /* lpfc_sli_sp_intr_handler */

@@ -8919,7 +9013,8 @@ lpfc_sli_fp_intr_handler(int irq, void *dev_id)
    if (lpfc_intr_state_check(phba))
        return IRQ_NONE;
    /* Need to read HA REG for FCP ring and other ring events */
-   ha_copy = readl(phba->HAregaddr);
+   if (lpfc_readl(phba->HAregaddr, &ha_copy))
+       return IRQ_HANDLED;
    /* Clear up only attention source related to fast-path */
    spin_lock_irqsave(&phba->hbalock, iflag);
    /*
@@ -9004,7 +9099,11 @@ lpfc_sli_intr_handler(int irq, void *dev_id)
        return IRQ_NONE;

    spin_lock(&phba->hbalock);
-   phba->ha_copy = readl(phba->HAregaddr);
+   if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) {
+       spin_unlock(&phba->hbalock);
+       return IRQ_HANDLED;
+   }
+
    if (unlikely(!phba->ha_copy)) {
        spin_unlock(&phba->hbalock);
        return IRQ_NONE;
@@ -9026,7 +9125,10 @@ lpfc_sli_intr_handler(int irq, void *dev_id)
    }

    /* Clear attention sources except link and error attentions */
-   hc_copy = readl(phba->HCregaddr);
+   if (lpfc_readl(phba->HCregaddr, &hc_copy)) {
+       spin_unlock(&phba->hbalock);
+       return IRQ_HANDLED;
+   }
    writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA
        | HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA),
        phba->HCregaddr);
@@ -9582,7 +9684,7 @@ out:
  * @cq: Pointer to the completion queue.
  * @wcqe: Pointer to a completion queue entry.
  *
- * This routine process a slow-path work-queue or recieve queue completion queue
+ * This routine process a slow-path work-queue or receive queue completion queue
  * entry.
  *
  * Return: true if work posted to worker thread, otherwise false.
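One detail worth calling out in the interrupt-handler hunks above: when the HA register read fails, the handlers return IRQ_HANDLED (after dropping their lock) rather than IRQ_NONE, so the kernel does not treat a dead adapter's line as a spurious unhandled interrupt. A userspace caricature of that control flow, with locking and register access stubbed out:

#include <stdint.h>
#include <stdio.h>

enum irqreturn { IRQ_NONE_, IRQ_HANDLED_ };  /* stand-ins for the kernel enum */

static int read_ha(uint32_t *out, int device_alive)
{
    if (!device_alive)
        return 1;               /* model of lpfc_readl() failing */
    *out = 0x4;                 /* pretend one attention bit is set */
    return 0;
}

static enum irqreturn intr_handler(int device_alive)
{
    uint32_t ha;

    /* lock(); */
    if (read_ha(&ha, device_alive)) {
        /* unlock(); */
        return IRQ_HANDLED_;    /* dead device: claim it, don't escalate */
    }
    if (!ha) {
        /* unlock(); */
        return IRQ_NONE_;       /* truly not ours */
    }
    /* ... process attention bits, then unlock() ... */
    return IRQ_HANDLED_;
}

int main(void)
{
    printf("alive: %d, unplugged: %d\n", intr_handler(1), intr_handler(0));
    return 0;
}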
@@ -10403,7 +10505,6 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
    if (!phba->sli4_hba.pc_sli4_params.supported)
        hw_page_size = SLI4_PAGE_SIZE;

-
    mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
    if (!mbox)
        return -ENOMEM;
@@ -10413,11 +10514,22 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
             LPFC_MBOX_OPCODE_CQ_CREATE,
             length, LPFC_SLI4_MBX_EMBED);
    cq_create = &mbox->u.mqe.un.cq_create;
+   shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr;
    bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request,
            cq->page_count);
    bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1);
    bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1);
-   bf_set(lpfc_cq_eq_id, &cq_create->u.request.context, eq->queue_id);
+   bf_set(lpfc_mbox_hdr_version, &shdr->request,
+          phba->sli4_hba.pc_sli4_params.cqv);
+   if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) {
+       bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request,
+              (PAGE_SIZE/SLI4_PAGE_SIZE));
+       bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context,
+              eq->queue_id);
+   } else {
+       bf_set(lpfc_cq_eq_id, &cq_create->u.request.context,
+              eq->queue_id);
+   }
    switch (cq->entry_count) {
    default:
        lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
@@ -10449,7 +10561,6 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
    rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);

    /* The IOCTL status is embedded in the mailbox subheader. */
-   shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr;
    shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
    shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
    if (shdr_status || shdr_add_status || rc) {
@@ -10515,20 +10626,20 @@ lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq,
    bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
    switch (mq->entry_count) {
    case 16:
-       bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
-              LPFC_MQ_CNT_16);
+       bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
+              LPFC_MQ_RING_SIZE_16);
        break;
    case 32:
-       bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
-              LPFC_MQ_CNT_32);
+       bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
+              LPFC_MQ_RING_SIZE_32);
        break;
    case 64:
-       bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
-              LPFC_MQ_CNT_64);
+       bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
+              LPFC_MQ_RING_SIZE_64);
        break;
    case 128:
-       bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
-              LPFC_MQ_CNT_128);
+       bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
+              LPFC_MQ_RING_SIZE_128);
        break;
    }
    list_for_each_entry(dmabuf, &mq->page_list, list) {
@@ -10586,6 +10697,7 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
             length, LPFC_SLI4_MBX_EMBED);

    mq_create_ext = &mbox->u.mqe.un.mq_create_ext;
+   shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr;
    bf_set(lpfc_mbx_mq_create_ext_num_pages,
           &mq_create_ext->u.request, mq->page_count);
    bf_set(lpfc_mbx_mq_create_ext_async_evt_link,
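The queue-create hunks around here all follow one pattern: capture the config subheader pointer up front, write the interface version the firmware reported (cqv/mqv/wqv/rqv) into the request header, then populate version-specific fields. A schematic of that request-building logic with invented field names (not the real SLI4 layout):

#include <stdint.h>
#include <stdio.h>

enum { Q_CREATE_V0, Q_CREATE_V1, Q_CREATE_V2 };  /* reported by firmware */

struct cq_request {
    uint32_t version;       /* header version field */
    uint32_t eq_id;         /* v0/v1 location of the EQ binding */
    uint32_t eq_id_2;       /* v2 moved the field (hypothetical layout) */
    uint32_t page_size;     /* only meaningful from v2 on */
};

/* Fill the request in the layout the firmware actually implements. */
static void build_cq_create(struct cq_request *req, int cqv, uint32_t eq_id)
{
    req->version = cqv;
    if (cqv == Q_CREATE_V2) {
        req->page_size = 4096 / 4096;   /* PAGE_SIZE / SLI4_PAGE_SIZE */
        req->eq_id_2 = eq_id;
    } else {
        req->eq_id = eq_id;             /* legacy field location */
    }
}

int main(void)
{
    struct cq_request req = {0};
    build_cq_create(&req, Q_CREATE_V2, 7);
    printf("v%u eq_id_2=%u\n", req.version, req.eq_id_2);
    return 0;
}

Moving the shdr assignment before lpfc_sli_issue_mbox() is what makes this possible: the same pointer now serves both the request (version) and the response (status) sides.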
@@ -10598,9 +10710,15 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
           &mq_create_ext->u.request, 1);
    bf_set(lpfc_mbx_mq_create_ext_async_evt_sli,
           &mq_create_ext->u.request, 1);
-   bf_set(lpfc_mq_context_cq_id,
-          &mq_create_ext->u.request.context, cq->queue_id);
    bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1);
+   bf_set(lpfc_mbox_hdr_version, &shdr->request,
+          phba->sli4_hba.pc_sli4_params.mqv);
+   if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1)
+       bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request,
+              cq->queue_id);
+   else
+       bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context,
+              cq->queue_id);
    switch (mq->entry_count) {
    default:
        lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
@@ -10610,20 +10728,24 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
            return -EINVAL;
        /* otherwise default to smallest count (drop through) */
    case 16:
-       bf_set(lpfc_mq_context_count, &mq_create_ext->u.request.context,
-              LPFC_MQ_CNT_16);
+       bf_set(lpfc_mq_context_ring_size,
+              &mq_create_ext->u.request.context,
+              LPFC_MQ_RING_SIZE_16);
        break;
    case 32:
-       bf_set(lpfc_mq_context_count, &mq_create_ext->u.request.context,
-              LPFC_MQ_CNT_32);
+       bf_set(lpfc_mq_context_ring_size,
+              &mq_create_ext->u.request.context,
+              LPFC_MQ_RING_SIZE_32);
        break;
    case 64:
-       bf_set(lpfc_mq_context_count, &mq_create_ext->u.request.context,
-              LPFC_MQ_CNT_64);
+       bf_set(lpfc_mq_context_ring_size,
+              &mq_create_ext->u.request.context,
+              LPFC_MQ_RING_SIZE_64);
        break;
    case 128:
-       bf_set(lpfc_mq_context_count, &mq_create_ext->u.request.context,
-              LPFC_MQ_CNT_128);
+       bf_set(lpfc_mq_context_ring_size,
+              &mq_create_ext->u.request.context,
+              LPFC_MQ_RING_SIZE_128);
        break;
    }
    list_for_each_entry(dmabuf, &mq->page_list, list) {
@@ -10634,7 +10756,6 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
                   putPaddrHigh(dmabuf->phys);
    }
    rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
-   shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr;
    mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
                          &mq_create_ext->u.response);
    if (rc != MBX_SUCCESS) {
@@ -10711,6 +10832,7 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
    uint32_t shdr_status, shdr_add_status;
    union lpfc_sli4_cfg_shdr *shdr;
    uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
+   struct dma_address *page;

    if (!phba->sli4_hba.pc_sli4_params.supported)
        hw_page_size = SLI4_PAGE_SIZE;
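The WQ-create hunk below applies the same versioning idea to the page list: version-1 requests carry the page array at a different offset (request_1), so the new `page` pointer is chosen once and a single loop fills whichever array applies. A sketch with hypothetical structures:

#include <stdint.h>
#include <stdio.h>

struct dma_addr { uint32_t lo, hi; };

struct wq_request   { struct dma_addr page[4]; };
struct wq_request_1 { uint32_t wqe_count, wqe_size; struct dma_addr page[4]; };

union wq_create {
    struct wq_request   request;    /* legacy layout */
    struct wq_request_1 request_1;  /* version-1 layout */
};

static void fill_pages(union wq_create *cmd, int v1, const uint64_t *phys, int n)
{
    /* Choose the page array once, then share the fill loop. */
    struct dma_addr *page = v1 ? cmd->request_1.page : cmd->request.page;
    int i;

    for (i = 0; i < n; i++) {
        page[i].lo = (uint32_t)phys[i];
        page[i].hi = (uint32_t)(phys[i] >> 32);
    }
}

int main(void)
{
    union wq_create cmd = {0};
    uint64_t phys[2] = { 0x100000000ULL, 0x100001000ULL };

    fill_pages(&cmd, 1, phys, 2);
    printf("page0 hi=0x%x lo=0x%x\n", cmd.request_1.page[0].hi,
           cmd.request_1.page[0].lo);
    return 0;
}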
@@ -10724,20 +10846,42 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
             LPFC_MBOX_OPCODE_FCOE_WQ_CREATE,
             length, LPFC_SLI4_MBX_EMBED);
    wq_create = &mbox->u.mqe.un.wq_create;
+   shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr;
    bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request,
            wq->page_count);
    bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
            cq->queue_id);
+   bf_set(lpfc_mbox_hdr_version, &shdr->request,
+          phba->sli4_hba.pc_sli4_params.wqv);
+   if (phba->sli4_hba.pc_sli4_params.wqv == LPFC_Q_CREATE_VERSION_1) {
+       bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1,
+              wq->entry_count);
+       switch (wq->entry_size) {
+       default:
+       case 64:
+           bf_set(lpfc_mbx_wq_create_wqe_size,
+                  &wq_create->u.request_1,
+                  LPFC_WQ_WQE_SIZE_64);
+           break;
+       case 128:
+           bf_set(lpfc_mbx_wq_create_wqe_size,
+                  &wq_create->u.request_1,
+                  LPFC_WQ_WQE_SIZE_128);
+           break;
+       }
+       bf_set(lpfc_mbx_wq_create_page_size, &wq_create->u.request_1,
+              (PAGE_SIZE/SLI4_PAGE_SIZE));
+       page = wq_create->u.request_1.page;
+   } else {
+       page = wq_create->u.request.page;
+   }
    list_for_each_entry(dmabuf, &wq->page_list, list) {
        memset(dmabuf->virt, 0, hw_page_size);
-       wq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
-                   putPaddrLow(dmabuf->phys);
-       wq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
-                   putPaddrHigh(dmabuf->phys);
+       page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys);
+       page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys);
    }
    rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);

    /* The IOCTL status is embedded in the mailbox subheader. */
-   shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr;
    shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
    shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
    if (shdr_status || shdr_add_status || rc) {
@@ -10815,37 +10959,51 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
             LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
             length, LPFC_SLI4_MBX_EMBED);
    rq_create = &mbox->u.mqe.un.rq_create;
-   switch (hrq->entry_count) {
-   default:
-       lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
-               "2535 Unsupported RQ count. (%d)\n",
-               hrq->entry_count);
-       if (hrq->entry_count < 512)
-           return -EINVAL;
-       /* otherwise default to smallest count (drop through) */
-   case 512:
-       bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
-              LPFC_RQ_RING_SIZE_512);
-       break;
-   case 1024:
-       bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
-              LPFC_RQ_RING_SIZE_1024);
-       break;
-   case 2048:
-       bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
-              LPFC_RQ_RING_SIZE_2048);
-       break;
-   case 4096:
-       bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
-              LPFC_RQ_RING_SIZE_4096);
-       break;
+   shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
+   bf_set(lpfc_mbox_hdr_version, &shdr->request,
+          phba->sli4_hba.pc_sli4_params.rqv);
+   if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
+       bf_set(lpfc_rq_context_rqe_count_1,
+              &rq_create->u.request.context,
+              hrq->entry_count);
+       rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE;
+   } else {
+       switch (hrq->entry_count) {
+       default:
+           lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+                   "2535 Unsupported RQ count. (%d)\n",
+                   hrq->entry_count);
+           if (hrq->entry_count < 512)
+               return -EINVAL;
+           /* otherwise default to smallest count (drop through) */
+       case 512:
+           bf_set(lpfc_rq_context_rqe_count,
+                  &rq_create->u.request.context,
+                  LPFC_RQ_RING_SIZE_512);
+           break;
+       case 1024:
+           bf_set(lpfc_rq_context_rqe_count,
+                  &rq_create->u.request.context,
+                  LPFC_RQ_RING_SIZE_1024);
+           break;
+       case 2048:
+           bf_set(lpfc_rq_context_rqe_count,
+                  &rq_create->u.request.context,
+                  LPFC_RQ_RING_SIZE_2048);
+           break;
+       case 4096:
+           bf_set(lpfc_rq_context_rqe_count,
+                  &rq_create->u.request.context,
+                  LPFC_RQ_RING_SIZE_4096);
+           break;
+       }
+       bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
+              LPFC_HDR_BUF_SIZE);
    }
    bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
           cq->queue_id);
    bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
           hrq->page_count);
-   bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
-          LPFC_HDR_BUF_SIZE);
    list_for_each_entry(dmabuf, &hrq->page_list, list) {
        memset(dmabuf->virt, 0, hw_page_size);
        rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
@@ -10855,7 +11013,6 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
    }
    rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
    /* The IOCTL status is embedded in the mailbox subheader. */
-   shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
    shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
    shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
    if (shdr_status || shdr_add_status || rc) {
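The legacy (pre-version-1) RQ layout above does not take a raw entry count; it takes an encoded ring size, with unsupported counts clamped to the smallest legal ring via the deliberate switch fall-through. A standalone version of that mapping (encoding values hypothetical):

#include <stdio.h>

enum rq_ring_size {     /* hypothetical encodings of the legal counts */
    RQ_RING_512 = 9, RQ_RING_1024 = 10, RQ_RING_2048 = 11, RQ_RING_4096 = 12,
};

/* Returns the encoded size, or -1 for counts below the minimum. */
static int encode_rq_size(int entry_count)
{
    switch (entry_count) {
    default:
        fprintf(stderr, "unsupported RQ count %d\n", entry_count);
        if (entry_count < 512)
            return -1;
        /* otherwise fall through: clamp to the smallest ring */
    case 512:  return RQ_RING_512;
    case 1024: return RQ_RING_1024;
    case 2048: return RQ_RING_2048;
    case 4096: return RQ_RING_4096;
    }
}

int main(void)
{
    printf("700 -> %d, 4096 -> %d, 100 -> %d\n",
           encode_rq_size(700), encode_rq_size(4096), encode_rq_size(100));
    return 0;
}

Version-1 firmware drops the encoding entirely and accepts the entry count directly, which is why the new branch just writes rqe_count_1.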
(%d)\n", - drq->entry_count); - if (drq->entry_count < 512) - return -EINVAL; - /* otherwise default to smallest count (drop through) */ - case 512: - bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, - LPFC_RQ_RING_SIZE_512); - break; - case 1024: - bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, - LPFC_RQ_RING_SIZE_1024); - break; - case 2048: - bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, - LPFC_RQ_RING_SIZE_2048); - break; - case 4096: - bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, - LPFC_RQ_RING_SIZE_4096); - break; + bf_set(lpfc_mbox_hdr_version, &shdr->request, + phba->sli4_hba.pc_sli4_params.rqv); + if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) { + bf_set(lpfc_rq_context_rqe_count_1, + &rq_create->u.request.context, + hrq->entry_count); + rq_create->u.request.context.buffer_size = LPFC_DATA_BUF_SIZE; + } else { + switch (drq->entry_count) { + default: + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "2536 Unsupported RQ count. (%d)\n", + drq->entry_count); + if (drq->entry_count < 512) + return -EINVAL; + /* otherwise default to smallest count (drop through) */ + case 512: + bf_set(lpfc_rq_context_rqe_count, + &rq_create->u.request.context, + LPFC_RQ_RING_SIZE_512); + break; + case 1024: + bf_set(lpfc_rq_context_rqe_count, + &rq_create->u.request.context, + LPFC_RQ_RING_SIZE_1024); + break; + case 2048: + bf_set(lpfc_rq_context_rqe_count, + &rq_create->u.request.context, + LPFC_RQ_RING_SIZE_2048); + break; + case 4096: + bf_set(lpfc_rq_context_rqe_count, + &rq_create->u.request.context, + LPFC_RQ_RING_SIZE_4096); + break; + } + bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context, + LPFC_DATA_BUF_SIZE); } bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context, cq->queue_id); bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request, drq->page_count); - bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context, - LPFC_DATA_BUF_SIZE); list_for_each_entry(dmabuf, &drq->page_list, list) { rq_create->u.request.page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys); @@ -11580,6 +11750,7 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr) static char *rctl_names[] = FC_RCTL_NAMES_INIT; char *type_names[] = FC_TYPE_NAMES_INIT; struct fc_vft_header *fc_vft_hdr; + uint32_t *header = (uint32_t *) fc_hdr; switch (fc_hdr->fh_r_ctl) { case FC_RCTL_DD_UNCAT: /* uncategorized information */ @@ -11628,10 +11799,15 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr) default: goto drop; } + lpfc_printf_log(phba, KERN_INFO, LOG_ELS, - "2538 Received frame rctl:%s type:%s\n", + "2538 Received frame rctl:%s type:%s " + "Frame Data:%08x %08x %08x %08x %08x %08x\n", rctl_names[fc_hdr->fh_r_ctl], - type_names[fc_hdr->fh_type]); + type_names[fc_hdr->fh_type], + be32_to_cpu(header[0]), be32_to_cpu(header[1]), + be32_to_cpu(header[2]), be32_to_cpu(header[3]), + be32_to_cpu(header[4]), be32_to_cpu(header[5])); return 0; drop: lpfc_printf_log(phba, KERN_WARNING, LOG_ELS, @@ -11928,17 +12104,17 @@ lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport, } /** - * lpfc_sli4_seq_abort_acc_cmpl - Accept seq abort iocb complete handler + * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler * @phba: Pointer to HBA context object. * @cmd_iocbq: pointer to the command iocbq structure. * @rsp_iocbq: pointer to the response iocbq structure. 
@@ -11928,17 +12104,17 @@ lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport,
 }

 /**
- * lpfc_sli4_seq_abort_acc_cmpl - Accept seq abort iocb complete handler
+ * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler
  * @phba: Pointer to HBA context object.
  * @cmd_iocbq: pointer to the command iocbq structure.
  * @rsp_iocbq: pointer to the response iocbq structure.
  *
- * This function handles the sequence abort accept iocb command complete
+ * This function handles the sequence abort response iocb command complete
  * event. It properly releases the memory allocated to the sequence abort
  * accept iocb.
  **/
 static void
-lpfc_sli4_seq_abort_acc_cmpl(struct lpfc_hba *phba,
+lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
                 struct lpfc_iocbq *cmd_iocbq,
                 struct lpfc_iocbq *rsp_iocbq)
 {
@@ -11947,15 +12123,15 @@ lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
 }

 /**
- * lpfc_sli4_seq_abort_acc - Accept sequence abort
+ * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort
  * @phba: Pointer to HBA context object.
  * @fc_hdr: pointer to a FC frame header.
  *
- * This function sends a basic accept to a previous unsol sequence abort
+ * This function sends a basic response to a previous unsol sequence abort
  * event after aborting the sequence handling.
  **/
 static void
-lpfc_sli4_seq_abort_acc(struct lpfc_hba *phba,
+lpfc_sli4_seq_abort_rsp(struct lpfc_hba *phba,
            struct fc_frame_header *fc_hdr)
@@ -11963,6 +12139,7 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_hba *phba,
    uint16_t oxid, rxid;
    uint32_t sid, fctl;
    IOCB_t *icmd;
+   int rc;

    if (!lpfc_is_link_up(phba))
        return;
@@ -11983,7 +12160,7 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_hba *phba,
            + phba->sli4_hba.max_cfg_param.xri_base))
        lpfc_set_rrq_active(phba, ndlp, rxid, oxid, 0);

-   /* Allocate buffer for acc iocb */
+   /* Allocate buffer for rsp iocb */
    ctiocb = lpfc_sli_get_iocbq(phba);
    if (!ctiocb)
        return;
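The renames above from "abort accept" to "abort response" reflect the behavioral change in the hunk below: before building the reply, the driver now checks whether the ABTS's OX_ID refers to an exchange this port can still own, and if not it overrides the reply into a BA_RJT with reason "unable to perform" and explanation "invalid OX_ID/RX_ID". A distilled decision function (reason/explanation values per my reading of the FC-FS codes; range check simplified, structure hypothetical):

#include <stdint.h>
#include <stdio.h>

enum bls_reply { BLS_BA_ACC, BLS_BA_RJT };

struct bls_rsp {
    enum bls_reply kind;
    uint8_t reason;         /* FC BA_RJT reason code */
    uint8_t explanation;    /* FC BA_RJT reason explanation */
};

#define BA_RJT_UNABLE   0x09    /* "unable to perform command request" */
#define BA_RJT_INV_XID  0x03    /* "invalid OX_ID-RX_ID combination" */

/* Accept the abort only if the OX_ID falls inside the XRI range this
 * port actually owns; otherwise reject it. */
static struct bls_rsp build_abts_reply(uint16_t oxid,
                                       uint16_t xri_base, uint16_t max_xri)
{
    struct bls_rsp rsp = { BLS_BA_ACC, 0, 0 };

    if (oxid < xri_base || oxid >= xri_base + max_xri) {
        rsp.kind = BLS_BA_RJT;
        rsp.reason = BA_RJT_UNABLE;
        rsp.explanation = BA_RJT_INV_XID;
    }
    return rsp;
}

int main(void)
{
    struct bls_rsp r = build_abts_reply(0x5000, 0x100, 0x400);
    printf("%s\n", r.kind == BLS_BA_RJT ? "BA_RJT" : "BA_ACC");
    return 0;
}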
@@ -12008,32 +12185,54 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_hba *phba,

    ctiocb->iocb_cmpl = NULL;
    ctiocb->vport = phba->pport;
-   ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_acc_cmpl;
+   ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
+   ctiocb->sli4_xritag = NO_XRI;
+
+   /* If the oxid maps to the FCP XRI range or if it is out of range,
+    * send a BLS_RJT.  The driver no longer has that exchange.
+    * Override the IOCB for a BA_RJT.
+    */
+   if (oxid > (phba->sli4_hba.max_cfg_param.max_xri +
+           phba->sli4_hba.max_cfg_param.xri_base) ||
+       oxid > (lpfc_sli4_get_els_iocb_cnt(phba) +
+           phba->sli4_hba.max_cfg_param.xri_base)) {
+       icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
+       bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
+       bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
+       bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
+   }

    if (fctl & FC_FC_EX_CTX) {
        /* ABTS sent by responder to CT exchange, construction
         * of BA_ACC will use OX_ID from ABTS for the XRI_TAG
         * field and RX_ID from ABTS for RX_ID field.
         */
-       bf_set(lpfc_abts_orig, &icmd->un.bls_acc, LPFC_ABTS_UNSOL_RSP);
-       bf_set(lpfc_abts_rxid, &icmd->un.bls_acc, rxid);
-       ctiocb->sli4_xritag = oxid;
+       bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_RSP);
+       bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid);
    } else {
        /* ABTS sent by initiator to CT exchange, construction
         * of BA_ACC will need to allocate a new XRI as for the
         * XRI_TAG and RX_ID fields.
         */
-       bf_set(lpfc_abts_orig, &icmd->un.bls_acc, LPFC_ABTS_UNSOL_INT);
-       bf_set(lpfc_abts_rxid, &icmd->un.bls_acc, NO_XRI);
-       ctiocb->sli4_xritag = NO_XRI;
+       bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_INT);
+       bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, NO_XRI);
    }
-   bf_set(lpfc_abts_oxid, &icmd->un.bls_acc, oxid);
+   bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid);

-   /* Xmit CT abts accept on exchange <xid> */
+   /* Xmit CT abts response on exchange <xid> */
    lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
-           "1200 Xmit CT ABTS ACC on exchange x%x Data: x%x\n",
-           CMD_XMIT_BLS_RSP64_CX, phba->link_state);
-   lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
+           "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n",
+           icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state);
+
+   rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
+   if (rc == IOCB_ERROR) {
+       lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
+               "2925 Failed to issue CT ABTS RSP x%x on "
+               "xri x%x, Data x%x\n",
+               icmd->un.xseq64.w5.hcsw.Rctl, oxid,
+               phba->link_state);
+       lpfc_sli_release_iocbq(phba, ctiocb);
+   }
 }

@@ -12081,7 +12280,7 @@ lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
        lpfc_in_buf_free(phba, &dmabuf->dbuf);
    }
    /* Send basic accept (BA_ACC) to the abort requester */
-   lpfc_sli4_seq_abort_acc(phba, &fc_hdr);
+   lpfc_sli4_seq_abort_rsp(phba, &fc_hdr);
 }

@@ -12772,7 +12971,7 @@ lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
  * record and processing it one at a time starting from the @fcf_index
  * for initial FCF discovery or fast FCF failover rediscovery.
  *
- * Return 0 if the mailbox command is submitted sucessfully, none 0
+ * Return 0 if the mailbox command is submitted successfully, none 0
  * otherwise.
  **/
 int
@@ -12833,7 +13032,7 @@ fail_fcf_scan:
  * This routine is invoked to read an FCF record indicated by @fcf_index
  * and to use it for FLOGI roundrobin FCF failover.
  *
- * Return 0 if the mailbox command is submitted sucessfully, none 0
+ * Return 0 if the mailbox command is submitted successfully, none 0
  * otherwise.
  **/
 int
@@ -12879,7 +13078,7 @@ fail_fcf_read:
  * This routine is invoked to read an FCF record indicated by @fcf_index to
  * determine whether it's eligible for FLOGI roundrobin failover list.
  *
- * Return 0 if the mailbox command is submitted sucessfully, none 0
+ * Return 0 if the mailbox command is submitted successfully, none 0
  * otherwise.
  **/
 int