Diffstat (limited to 'drivers/scsi/lpfc')
 drivers/scsi/lpfc/lpfc.h           |   9
 drivers/scsi/lpfc/lpfc_attr.c      | 190
 drivers/scsi/lpfc/lpfc_crtn.h      |   2
 drivers/scsi/lpfc/lpfc_ct.c        |  13
 drivers/scsi/lpfc/lpfc_els.c       |  86
 drivers/scsi/lpfc/lpfc_hbadisc.c   |  19
 drivers/scsi/lpfc/lpfc_init.c      |  72
 drivers/scsi/lpfc/lpfc_mbox.c      |  16
 drivers/scsi/lpfc/lpfc_mem.c       |   5
 drivers/scsi/lpfc/lpfc_nportdisc.c |  35
 drivers/scsi/lpfc/lpfc_scsi.c      |  85
 drivers/scsi/lpfc/lpfc_sli.c       | 112
 drivers/scsi/lpfc/lpfc_sli.h       |  22
 drivers/scsi/lpfc/lpfc_version.h   |   2
 14 files changed, 460 insertions(+), 208 deletions(-)
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index f81691fcf17..d44f9aac6b8 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h @@ -21,10 +21,12 @@ struct lpfc_sli2_slim; -#define LPFC_MAX_TARGET 256 /* max targets supported */ -#define LPFC_MAX_DISC_THREADS 64 /* max outstanding discovery els req */ -#define LPFC_MAX_NS_RETRY 3 /* max NameServer retries */ +#define LPFC_MAX_TARGET 256 /* max number of targets supported */ +#define LPFC_MAX_DISC_THREADS 64 /* max outstanding discovery els + requests */ +#define LPFC_MAX_NS_RETRY 3 /* Number of retry attempts to contact + the NameServer before giving up. */ #define LPFC_DFT_HBA_Q_DEPTH 2048 /* max cmds per hba */ #define LPFC_LC_HBA_Q_DEPTH 1024 /* max cmds per low cost hba */ #define LPFC_LP101_HBA_Q_DEPTH 128 /* max cmds per low cost hba */ @@ -41,7 +43,6 @@ struct lpfc_sli2_slim; (( (u64)(high)<<16 ) << 16)|( (u64)(low)))) /* Provide maximum configuration definitions. */ #define LPFC_DRVR_TIMEOUT 16 /* driver iocb timeout value in sec */ -#define MAX_FCP_TARGET 256 /* max num of FCP targets supported */ #define FC_MAX_ADPTMSG 64 #define MAX_HBAEVT 32 diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index b62a72dfab2..d384c16f4a8 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -219,8 +219,18 @@ lpfc_issue_lip(struct Scsi_Host *host) return -ENOMEM; memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t)); - lpfc_init_link(phba, pmboxq, phba->cfg_topology, phba->cfg_link_speed); - mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); + pmboxq->mb.mbxCommand = MBX_DOWN_LINK; + pmboxq->mb.mbxOwner = OWN_HOST; + + mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO * 2); + + if ((mbxstatus == MBX_SUCCESS) && (pmboxq->mb.mbxStatus == 0)) { + memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t)); + lpfc_init_link(phba, pmboxq, phba->cfg_topology, + phba->cfg_link_speed); + mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, + phba->fc_ratov * 2); + } if (mbxstatus == MBX_TIMEOUT) pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl; @@ -233,51 +243,53 @@ lpfc_issue_lip(struct Scsi_Host *host) return 0; } -static ssize_t -lpfc_nport_evt_cnt_show(struct class_device *cdev, char *buf) +static int +lpfc_selective_reset(struct lpfc_hba *phba) { - struct Scsi_Host *host = class_to_shost(cdev); - struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata; - return snprintf(buf, PAGE_SIZE, "%d\n", phba->nport_event_cnt); + struct completion online_compl; + int status = 0; + + init_completion(&online_compl); + lpfc_workq_post_event(phba, &status, &online_compl, + LPFC_EVT_OFFLINE); + wait_for_completion(&online_compl); + + if (status != 0) + return -EIO; + + init_completion(&online_compl); + lpfc_workq_post_event(phba, &status, &online_compl, + LPFC_EVT_ONLINE); + wait_for_completion(&online_compl); + + if (status != 0) + return -EIO; + + return 0; } static ssize_t -lpfc_board_online_show(struct class_device *cdev, char *buf) +lpfc_issue_reset(struct class_device *cdev, const char *buf, size_t count) { struct Scsi_Host *host = class_to_shost(cdev); struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata; + int status = -EINVAL; - if (phba->fc_flag & FC_OFFLINE_MODE) - return snprintf(buf, PAGE_SIZE, "0\n"); + if (strncmp(buf, "selective", sizeof("selective") - 1) == 0) + status = lpfc_selective_reset(phba); + + if (status == 0) + return strlen(buf); else - return snprintf(buf, PAGE_SIZE, "1\n"); + return status; } static ssize_t 
-lpfc_board_online_store(struct class_device *cdev, const char *buf, - size_t count) +lpfc_nport_evt_cnt_show(struct class_device *cdev, char *buf) { struct Scsi_Host *host = class_to_shost(cdev); struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata; - struct completion online_compl; - int val=0, status=0; - - if (sscanf(buf, "%d", &val) != 1) - return -EINVAL; - - init_completion(&online_compl); - - if (val) - lpfc_workq_post_event(phba, &status, &online_compl, - LPFC_EVT_ONLINE); - else - lpfc_workq_post_event(phba, &status, &online_compl, - LPFC_EVT_OFFLINE); - wait_for_completion(&online_compl); - if (!status) - return strlen(buf); - else - return -EIO; + return snprintf(buf, PAGE_SIZE, "%d\n", phba->nport_event_cnt); } static ssize_t @@ -532,10 +544,9 @@ static CLASS_DEVICE_ATTR(lpfc_drvr_version, S_IRUGO, lpfc_drvr_version_show, NULL); static CLASS_DEVICE_ATTR(management_version, S_IRUGO, management_version_show, NULL); -static CLASS_DEVICE_ATTR(board_online, S_IRUGO | S_IWUSR, - lpfc_board_online_show, lpfc_board_online_store); static CLASS_DEVICE_ATTR(board_mode, S_IRUGO | S_IWUSR, lpfc_board_mode_show, lpfc_board_mode_store); +static CLASS_DEVICE_ATTR(issue_reset, S_IWUSR, NULL, lpfc_issue_reset); static int lpfc_poll = 0; module_param(lpfc_poll, int, 0); @@ -695,12 +706,12 @@ LPFC_ATTR(discovery_threads, 32, 1, 64, "Maximum number of ELS commands " "during discovery"); /* -# lpfc_max_luns: maximum number of LUNs per target driver will support -# Value range is [1,32768]. Default value is 256. -# NOTE: The SCSI layer will scan each target for this many luns +# lpfc_max_luns: maximum allowed LUN. +# Value range is [0,65535]. Default value is 255. +# NOTE: The SCSI layer might probe all allowed LUN on some old targets. */ -LPFC_ATTR_R(max_luns, 256, 1, 32768, - "Maximum number of LUNs per target driver will support"); +LPFC_ATTR_R(max_luns, 255, 0, 65535, + "Maximum allowed LUN"); /* # lpfc_poll_tmo: .Milliseconds driver will wait between polling FCP ring. @@ -739,8 +750,8 @@ struct class_device_attribute *lpfc_host_attrs[] = { &class_device_attr_lpfc_max_luns, &class_device_attr_nport_evt_cnt, &class_device_attr_management_version, - &class_device_attr_board_online, &class_device_attr_board_mode, + &class_device_attr_issue_reset, &class_device_attr_lpfc_poll, &class_device_attr_lpfc_poll_tmo, NULL, @@ -873,7 +884,7 @@ sysfs_mbox_write(struct kobject *kobj, char *buf, loff_t off, size_t count) phba->sysfs_mbox.mbox == NULL ) { sysfs_mbox_idle(phba); spin_unlock_irq(host->host_lock); - return -EINVAL; + return -EAGAIN; } } @@ -989,14 +1000,15 @@ sysfs_mbox_read(struct kobject *kobj, char *buf, loff_t off, size_t count) spin_unlock_irq(phba->host->host_lock); rc = lpfc_sli_issue_mbox_wait (phba, phba->sysfs_mbox.mbox, - phba->fc_ratov * 2); + lpfc_mbox_tmo_val(phba, + phba->sysfs_mbox.mbox->mb.mbxCommand) * HZ); spin_lock_irq(phba->host->host_lock); } if (rc != MBX_SUCCESS) { sysfs_mbox_idle(phba); spin_unlock_irq(host->host_lock); - return -ENODEV; + return (rc == MBX_TIMEOUT) ? 
-ETIME : -ENODEV; } phba->sysfs_mbox.state = SMBOX_READING; } @@ -1005,7 +1017,7 @@ sysfs_mbox_read(struct kobject *kobj, char *buf, loff_t off, size_t count) printk(KERN_WARNING "mbox_read: Bad State\n"); sysfs_mbox_idle(phba); spin_unlock_irq(host->host_lock); - return -EINVAL; + return -EAGAIN; } memcpy(buf, (uint8_t *) & phba->sysfs_mbox.mbox->mb + off, count); @@ -1199,8 +1211,10 @@ lpfc_get_stats(struct Scsi_Host *shost) struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata; struct lpfc_sli *psli = &phba->sli; struct fc_host_statistics *hs = &phba->link_stats; + struct lpfc_lnk_stat * lso = &psli->lnk_stat_offsets; LPFC_MBOXQ_t *pmboxq; MAILBOX_t *pmb; + unsigned long seconds; int rc = 0; pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); @@ -1261,22 +1275,103 @@ lpfc_get_stats(struct Scsi_Host *shost) hs->invalid_crc_count = pmb->un.varRdLnk.crcCnt; hs->error_frames = pmb->un.varRdLnk.crcCnt; + hs->link_failure_count -= lso->link_failure_count; + hs->loss_of_sync_count -= lso->loss_of_sync_count; + hs->loss_of_signal_count -= lso->loss_of_signal_count; + hs->prim_seq_protocol_err_count -= lso->prim_seq_protocol_err_count; + hs->invalid_tx_word_count -= lso->invalid_tx_word_count; + hs->invalid_crc_count -= lso->invalid_crc_count; + hs->error_frames -= lso->error_frames; + if (phba->fc_topology == TOPOLOGY_LOOP) { hs->lip_count = (phba->fc_eventTag >> 1); + hs->lip_count -= lso->link_events; hs->nos_count = -1; } else { hs->lip_count = -1; hs->nos_count = (phba->fc_eventTag >> 1); + hs->nos_count -= lso->link_events; } hs->dumped_frames = -1; -/* FIX ME */ - /*hs->SecondsSinceLastReset = (jiffies - lpfc_loadtime) / HZ;*/ + seconds = get_seconds(); + if (seconds < psli->stats_start) + hs->seconds_since_last_reset = seconds + + ((unsigned long)-1 - psli->stats_start); + else + hs->seconds_since_last_reset = seconds - psli->stats_start; return hs; } +static void +lpfc_reset_stats(struct Scsi_Host *shost) +{ + struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata; + struct lpfc_sli *psli = &phba->sli; + struct lpfc_lnk_stat * lso = &psli->lnk_stat_offsets; + LPFC_MBOXQ_t *pmboxq; + MAILBOX_t *pmb; + int rc = 0; + + pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!pmboxq) + return; + memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t)); + + pmb = &pmboxq->mb; + pmb->mbxCommand = MBX_READ_STATUS; + pmb->mbxOwner = OWN_HOST; + pmb->un.varWords[0] = 0x1; /* reset request */ + pmboxq->context1 = NULL; + + if ((phba->fc_flag & FC_OFFLINE_MODE) || + (!(psli->sli_flag & LPFC_SLI2_ACTIVE))) + rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); + else + rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); + + if (rc != MBX_SUCCESS) { + if (rc == MBX_TIMEOUT) + pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl; + else + mempool_free(pmboxq, phba->mbox_mem_pool); + return; + } + + memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t)); + pmb->mbxCommand = MBX_READ_LNK_STAT; + pmb->mbxOwner = OWN_HOST; + pmboxq->context1 = NULL; + + if ((phba->fc_flag & FC_OFFLINE_MODE) || + (!(psli->sli_flag & LPFC_SLI2_ACTIVE))) + rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); + else + rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); + + if (rc != MBX_SUCCESS) { + if (rc == MBX_TIMEOUT) + pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl; + else + mempool_free( pmboxq, phba->mbox_mem_pool); + return; + } + + lso->link_failure_count = pmb->un.varRdLnk.linkFailureCnt; + lso->loss_of_sync_count = pmb->un.varRdLnk.lossSyncCnt; + lso->loss_of_signal_count = pmb->un.varRdLnk.lossSignalCnt; + 
lso->prim_seq_protocol_err_count = pmb->un.varRdLnk.primSeqErrCnt; + lso->invalid_tx_word_count = pmb->un.varRdLnk.invalidXmitWord; + lso->invalid_crc_count = pmb->un.varRdLnk.crcCnt; + lso->error_frames = pmb->un.varRdLnk.crcCnt; + lso->link_events = (phba->fc_eventTag >> 1); + + psli->stats_start = get_seconds(); + + return; +} /* * The LPFC driver treats linkdown handling as target loss events so there @@ -1420,8 +1515,7 @@ struct fc_function_template lpfc_transport_functions = { */ .get_fc_host_stats = lpfc_get_stats, - - /* the LPFC driver doesn't support resetting stats yet */ + .reset_fc_host_stats = lpfc_reset_stats, .dd_fcrport_size = sizeof(struct lpfc_rport_data), .show_rport_maxframe_size = 1, diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h index ee22173fce4..2a176467f71 100644 --- a/drivers/scsi/lpfc/lpfc_crtn.h +++ b/drivers/scsi/lpfc/lpfc_crtn.h @@ -127,6 +127,7 @@ void lpfc_config_port(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_kill_board(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_mbox_put(struct lpfc_hba *, LPFC_MBOXQ_t *); LPFC_MBOXQ_t *lpfc_mbox_get(struct lpfc_hba *); +int lpfc_mbox_tmo_val(struct lpfc_hba *, int); int lpfc_mem_alloc(struct lpfc_hba *); void lpfc_mem_free(struct lpfc_hba *); @@ -147,6 +148,7 @@ int lpfc_sli_hba_setup(struct lpfc_hba *); int lpfc_sli_hba_down(struct lpfc_hba *); int lpfc_sli_issue_mbox(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t); int lpfc_sli_handle_mb_event(struct lpfc_hba *); +int lpfc_sli_flush_mbox_queue(struct lpfc_hba *); int lpfc_sli_handle_slow_ring_event(struct lpfc_hba *, struct lpfc_sli_ring *, uint32_t); void lpfc_sli_def_mbox_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *); diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c index b65ee57af53..bbb7310210b 100644 --- a/drivers/scsi/lpfc/lpfc_ct.c +++ b/drivers/scsi/lpfc/lpfc_ct.c @@ -131,6 +131,7 @@ lpfc_ct_unsol_event(struct lpfc_hba * phba, } ct_unsol_event_exit_piocbq: + list_del(&head); if (pmbuf) { list_for_each_entry_safe(matp, next_matp, &pmbuf->list, list) { lpfc_mbuf_free(phba, matp->virt, matp->phys); @@ -481,7 +482,7 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, if (CTrsp->CommandResponse.bits.CmdRsp == be16_to_cpu(SLI_CT_RESPONSE_FS_ACC)) { lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, - "%d:0239 NameServer Rsp " + "%d:0208 NameServer Rsp " "Data: x%x\n", phba->brd_no, phba->fc_flag); @@ -588,13 +589,9 @@ lpfc_get_hba_sym_node_name(struct lpfc_hba * phba, uint8_t * symbp) lpfc_decode_firmware_rev(phba, fwrev, 0); - if (phba->Port[0]) { - sprintf(symbp, "Emulex %s Port %s FV%s DV%s", phba->ModelName, - phba->Port, fwrev, lpfc_release_version); - } else { - sprintf(symbp, "Emulex %s FV%s DV%s", phba->ModelName, - fwrev, lpfc_release_version); - } + sprintf(symbp, "Emulex %s FV%s DV%s", phba->ModelName, + fwrev, lpfc_release_version); + return; } /* diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index 4126fd87956..3567de61316 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -648,33 +648,32 @@ lpfc_more_plogi(struct lpfc_hba * phba) } static struct lpfc_nodelist * -lpfc_plogi_confirm_nport(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, +lpfc_plogi_confirm_nport(struct lpfc_hba * phba, struct lpfc_dmabuf *prsp, struct lpfc_nodelist *ndlp) { struct lpfc_nodelist *new_ndlp; - struct lpfc_dmabuf *pcmd, *prsp; uint32_t *lp; struct serv_parm *sp; uint8_t name[sizeof (struct lpfc_name)]; uint32_t rc; - pcmd = (struct lpfc_dmabuf *) 
cmdiocb->context2; - prsp = (struct lpfc_dmabuf *) pcmd->list.next; lp = (uint32_t *) prsp->virt; sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t)); + memset(name, 0, sizeof (struct lpfc_name)); /* Now we to find out if the NPort we are logging into, matches the WWPN * we have for that ndlp. If not, we have some work to do. */ new_ndlp = lpfc_findnode_wwpn(phba, NLP_SEARCH_ALL, &sp->portName); - memset(name, 0, sizeof (struct lpfc_name)); - rc = memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name)); - if (!rc || (new_ndlp == ndlp)) { + if (new_ndlp == ndlp) return ndlp; - } if (!new_ndlp) { + rc = + memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name)); + if (!rc) + return ndlp; new_ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_ATOMIC); if (!new_ndlp) return ndlp; @@ -683,17 +682,21 @@ lpfc_plogi_confirm_nport(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, } lpfc_unreg_rpi(phba, new_ndlp); - new_ndlp->nlp_prev_state = ndlp->nlp_state; new_ndlp->nlp_DID = ndlp->nlp_DID; - new_ndlp->nlp_state = NLP_STE_PLOGI_ISSUE; - lpfc_nlp_list(phba, new_ndlp, NLP_PLOGI_LIST); + new_ndlp->nlp_prev_state = ndlp->nlp_prev_state; + new_ndlp->nlp_state = ndlp->nlp_state; + lpfc_nlp_list(phba, new_ndlp, ndlp->nlp_flag & NLP_LIST_MASK); /* Move this back to NPR list */ - lpfc_unreg_rpi(phba, ndlp); - ndlp->nlp_DID = 0; /* Two ndlps cannot have the same did */ - ndlp->nlp_state = NLP_STE_NPR_NODE; - lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST); - + if (memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name)) == 0) { + lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); + } + else { + lpfc_unreg_rpi(phba, ndlp); + ndlp->nlp_DID = 0; /* Two ndlps cannot have the same did */ + ndlp->nlp_state = NLP_STE_NPR_NODE; + lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST); + } return new_ndlp; } @@ -703,6 +706,7 @@ lpfc_cmpl_els_plogi(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, { IOCB_t *irsp; struct lpfc_nodelist *ndlp; + struct lpfc_dmabuf *prsp; int disc, rc, did, type; @@ -769,7 +773,10 @@ lpfc_cmpl_els_plogi(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, } } else { /* Good status, call state machine */ - ndlp = lpfc_plogi_confirm_nport(phba, cmdiocb, ndlp); + prsp = list_entry(((struct lpfc_dmabuf *) + cmdiocb->context2)->list.next, + struct lpfc_dmabuf, list); + ndlp = lpfc_plogi_confirm_nport(phba, prsp, ndlp); rc = lpfc_disc_state_machine(phba, ndlp, cmdiocb, NLP_EVT_CMPL_PLOGI); } @@ -1841,9 +1848,12 @@ static void lpfc_cmpl_els_acc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, struct lpfc_iocbq * rspiocb) { + IOCB_t *irsp; struct lpfc_nodelist *ndlp; LPFC_MBOXQ_t *mbox = NULL; + irsp = &rspiocb->iocb; + ndlp = (struct lpfc_nodelist *) cmdiocb->context1; if (cmdiocb->context_un.mbox) mbox = cmdiocb->context_un.mbox; @@ -1886,9 +1896,15 @@ lpfc_cmpl_els_acc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, mempool_free( mbox, phba->mbox_mem_pool); } else { mempool_free( mbox, phba->mbox_mem_pool); - if (ndlp->nlp_flag & NLP_ACC_REGLOGIN) { - lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); - ndlp = NULL; + /* Do not call NO_LIST for lpfc_els_abort'ed ELS cmds */ + if (!((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && + ((irsp->un.ulpWord[4] == IOERR_SLI_ABORTED) || + (irsp->un.ulpWord[4] == IOERR_LINK_DOWN) || + (irsp->un.ulpWord[4] == IOERR_SLI_DOWN)))) { + if (ndlp->nlp_flag & NLP_ACC_REGLOGIN) { + lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); + ndlp = NULL; + } } } } @@ -2832,7 +2848,7 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) /* Xmit ELS RPS ACC response tag <ulpIoTag> */ 
lpfc_printf_log(phba, KERN_INFO, LOG_ELS, - "%d:0128 Xmit ELS RPS ACC response tag x%x " + "%d:0118 Xmit ELS RPS ACC response tag x%x " "Data: x%x x%x x%x x%x x%x\n", phba->brd_no, elsiocb->iocb.ulpIoTag, @@ -2941,7 +2957,7 @@ lpfc_els_rsp_rpl_acc(struct lpfc_hba * phba, uint16_t cmdsize, /* Xmit ELS RPL ACC response tag <ulpIoTag> */ lpfc_printf_log(phba, KERN_INFO, LOG_ELS, - "%d:0128 Xmit ELS RPL ACC response tag x%x " + "%d:0120 Xmit ELS RPL ACC response tag x%x " "Data: x%x x%x x%x x%x x%x\n", phba->brd_no, elsiocb->iocb.ulpIoTag, @@ -3102,7 +3118,7 @@ lpfc_els_rcv_fan(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, struct lpfc_nodelist *ndlp, *next_ndlp; /* FAN received */ - lpfc_printf_log(phba, KERN_INFO, LOG_ELS, "%d:265 FAN received\n", + lpfc_printf_log(phba, KERN_INFO, LOG_ELS, "%d:0265 FAN received\n", phba->brd_no); icmd = &cmdiocb->iocb; @@ -3282,10 +3298,9 @@ lpfc_els_timeout_handler(struct lpfc_hba *phba) } else lpfc_sli_release_iocbq(phba, piocb); } - if (phba->sli.ring[LPFC_ELS_RING].txcmplq_cnt) { - phba->els_tmofunc.expires = jiffies + HZ * timeout; - add_timer(&phba->els_tmofunc); - } + if (phba->sli.ring[LPFC_ELS_RING].txcmplq_cnt) + mod_timer(&phba->els_tmofunc, jiffies + HZ * timeout); + spin_unlock_irq(phba->host->host_lock); } @@ -3442,6 +3457,8 @@ lpfc_els_unsol_event(struct lpfc_hba * phba, if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) { ndlp->nlp_type |= NLP_FABRIC; } + ndlp->nlp_state = NLP_STE_UNUSED_NODE; + lpfc_nlp_list(phba, ndlp, NLP_UNUSED_LIST); } phba->fc_stat.elsRcvFrame++; @@ -3463,13 +3480,14 @@ lpfc_els_unsol_event(struct lpfc_hba * phba, rjt_err = 1; break; } + ndlp = lpfc_plogi_confirm_nport(phba, mp, ndlp); lpfc_disc_state_machine(phba, ndlp, elsiocb, NLP_EVT_RCV_PLOGI); break; case ELS_CMD_FLOGI: phba->fc_stat.elsRcvFLOGI++; lpfc_els_rcv_flogi(phba, elsiocb, ndlp, newnode); if (newnode) { - mempool_free( ndlp, phba->nlp_mem_pool); + lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); } break; case ELS_CMD_LOGO: @@ -3492,7 +3510,7 @@ lpfc_els_unsol_event(struct lpfc_hba * phba, phba->fc_stat.elsRcvRSCN++; lpfc_els_rcv_rscn(phba, elsiocb, ndlp, newnode); if (newnode) { - mempool_free( ndlp, phba->nlp_mem_pool); + lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); } break; case ELS_CMD_ADISC: @@ -3535,28 +3553,28 @@ lpfc_els_unsol_event(struct lpfc_hba * phba, phba->fc_stat.elsRcvLIRR++; lpfc_els_rcv_lirr(phba, elsiocb, ndlp); if (newnode) { - mempool_free( ndlp, phba->nlp_mem_pool); + lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); } break; case ELS_CMD_RPS: phba->fc_stat.elsRcvRPS++; lpfc_els_rcv_rps(phba, elsiocb, ndlp); if (newnode) { - mempool_free( ndlp, phba->nlp_mem_pool); + lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); } break; case ELS_CMD_RPL: phba->fc_stat.elsRcvRPL++; lpfc_els_rcv_rpl(phba, elsiocb, ndlp); if (newnode) { - mempool_free( ndlp, phba->nlp_mem_pool); + lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); } break; case ELS_CMD_RNID: phba->fc_stat.elsRcvRNID++; lpfc_els_rcv_rnid(phba, elsiocb, ndlp); if (newnode) { - mempool_free( ndlp, phba->nlp_mem_pool); + lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); } break; default: @@ -3568,7 +3586,7 @@ lpfc_els_unsol_event(struct lpfc_hba * phba, "%d:0115 Unknown ELS command x%x received from " "NPORT x%x\n", phba->brd_no, cmd, did); if (newnode) { - mempool_free( ndlp, phba->nlp_mem_pool); + lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); } break; } diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index adb086009ae..b2f1552f184 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ 
b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -1084,7 +1084,7 @@ lpfc_register_remote_port(struct lpfc_hba * phba, fc_remote_port_rolechg(rport, rport_ids.roles); if ((rport->scsi_target_id != -1) && - (rport->scsi_target_id < MAX_FCP_TARGET)) { + (rport->scsi_target_id < LPFC_MAX_TARGET)) { ndlp->nlp_sid = rport->scsi_target_id; } @@ -1313,7 +1313,7 @@ lpfc_nlp_list(struct lpfc_hba * phba, struct lpfc_nodelist * nlp, int list) if ((rport_add == mapped) && ((!nlp->rport) || (nlp->rport->scsi_target_id == -1) || - (nlp->rport->scsi_target_id >= MAX_FCP_TARGET))) { + (nlp->rport->scsi_target_id >= LPFC_MAX_TARGET))) { nlp->nlp_state = NLP_STE_UNMAPPED_NODE; spin_lock_irq(phba->host->host_lock); nlp->nlp_flag |= NLP_TGT_NO_SCSIID; @@ -1557,6 +1557,8 @@ lpfc_freenode(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp) mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; } } + + spin_lock_irq(phba->host->host_lock); list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) { if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) && (ndlp == (struct lpfc_nodelist *) mb->context2)) { @@ -1569,6 +1571,7 @@ lpfc_freenode(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp) mempool_free(mb, phba->mbox_mem_pool); } } + spin_unlock_irq(phba->host->host_lock); lpfc_els_abort(phba,ndlp,0); spin_lock_irq(phba->host->host_lock); @@ -1782,7 +1785,7 @@ lpfc_findnode_did(struct lpfc_hba * phba, uint32_t order, uint32_t did) /* LOG change to REGLOGIN */ /* FIND node DID reglogin */ lpfc_printf_log(phba, KERN_INFO, LOG_NODE, - "%d:0931 FIND node DID reglogin" + "%d:0901 FIND node DID reglogin" " Data: x%p x%x x%x x%x\n", phba->brd_no, ndlp, ndlp->nlp_DID, @@ -1805,7 +1808,7 @@ lpfc_findnode_did(struct lpfc_hba * phba, uint32_t order, uint32_t did) /* LOG change to PRLI */ /* FIND node DID prli */ lpfc_printf_log(phba, KERN_INFO, LOG_NODE, - "%d:0931 FIND node DID prli " + "%d:0902 FIND node DID prli " "Data: x%p x%x x%x x%x\n", phba->brd_no, ndlp, ndlp->nlp_DID, @@ -1828,7 +1831,7 @@ lpfc_findnode_did(struct lpfc_hba * phba, uint32_t order, uint32_t did) /* LOG change to NPR */ /* FIND node DID npr */ lpfc_printf_log(phba, KERN_INFO, LOG_NODE, - "%d:0931 FIND node DID npr " + "%d:0903 FIND node DID npr " "Data: x%p x%x x%x x%x\n", phba->brd_no, ndlp, ndlp->nlp_DID, @@ -1851,7 +1854,7 @@ lpfc_findnode_did(struct lpfc_hba * phba, uint32_t order, uint32_t did) /* LOG change to UNUSED */ /* FIND node DID unused */ lpfc_printf_log(phba, KERN_INFO, LOG_NODE, - "%d:0931 FIND node DID unused " + "%d:0905 FIND node DID unused " "Data: x%p x%x x%x x%x\n", phba->brd_no, ndlp, ndlp->nlp_DID, @@ -2335,7 +2338,7 @@ lpfc_disc_timeout_handler(struct lpfc_hba *phba) initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!initlinkmbox) { lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, - "%d:0226 Device Discovery " + "%d:0206 Device Discovery " "completion error\n", phba->brd_no); phba->hba_state = LPFC_HBA_ERROR; @@ -2365,7 +2368,7 @@ lpfc_disc_timeout_handler(struct lpfc_hba *phba) if (!clearlambox) { clrlaerr = 1; lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, - "%d:0226 Device Discovery " + "%d:0207 Device Discovery " "completion error\n", phba->brd_no); phba->hba_state = LPFC_HBA_ERROR; diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 81755a3f7c6..f6948ffe689 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -71,6 +71,7 @@ lpfc_config_port_prep(struct lpfc_hba * phba) uint16_t offset = 0; static char licensed[56] = "key unlock for use with gnu public licensed code only\0"; + 
static int init_key = 1; pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!pmb) { @@ -82,10 +83,13 @@ lpfc_config_port_prep(struct lpfc_hba * phba) phba->hba_state = LPFC_INIT_MBX_CMDS; if (lpfc_is_LC_HBA(phba->pcidev->device)) { - uint32_t *ptext = (uint32_t *) licensed; + if (init_key) { + uint32_t *ptext = (uint32_t *) licensed; - for (i = 0; i < 56; i += sizeof (uint32_t), ptext++) - *ptext = cpu_to_be32(*ptext); + for (i = 0; i < 56; i += sizeof (uint32_t), ptext++) + *ptext = cpu_to_be32(*ptext); + init_key = 0; + } lpfc_read_nv(phba, pmb); memset((char*)mb->un.varRDnvp.rsvd3, 0, @@ -405,19 +409,26 @@ lpfc_config_port_post(struct lpfc_hba * phba) } /* MBOX buffer will be freed in mbox compl */ - i = 0; + return (0); +} + +static int +lpfc_discovery_wait(struct lpfc_hba *phba) +{ + int i = 0; + while ((phba->hba_state != LPFC_HBA_READY) || (phba->num_disc_nodes) || (phba->fc_prli_sent) || ((phba->fc_map_cnt == 0) && (i<2)) || - (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) { + (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE)) { /* Check every second for 30 retries. */ i++; if (i > 30) { - break; + return -ETIMEDOUT; } if ((i >= 15) && (phba->hba_state <= LPFC_LINK_DOWN)) { /* The link is down. Set linkdown timeout */ - break; + return -ETIMEDOUT; } /* Delay for 1 second to give discovery time to complete. */ @@ -425,12 +436,7 @@ lpfc_config_port_post(struct lpfc_hba * phba) } - /* Since num_disc_nodes keys off of PLOGI, delay a bit to let - * any potential PRLIs to flush thru the SLI sub-system. - */ - msleep(50); - - return (0); + return 0; } /************************************************************************/ @@ -1339,7 +1345,8 @@ lpfc_offline(struct lpfc_hba * phba) struct lpfc_sli_ring *pring; struct lpfc_sli *psli; unsigned long iflag; - int i = 0; + int i; + int cnt = 0; if (!phba) return 0; @@ -1348,20 +1355,31 @@ lpfc_offline(struct lpfc_hba * phba) return 0; psli = &phba->sli; - pring = &psli->ring[psli->fcp_ring]; lpfc_linkdown(phba); + lpfc_sli_flush_mbox_queue(phba); - /* The linkdown event takes 30 seconds to timeout. */ - while (pring->txcmplq_cnt) { - mdelay(10); - if (i++ > 3000) - break; + for (i = 0; i < psli->num_rings; i++) { + pring = &psli->ring[i]; + /* The linkdown event takes 30 seconds to timeout. */ + while (pring->txcmplq_cnt) { + mdelay(10); + if (cnt++ > 3000) { + lpfc_printf_log(phba, + KERN_WARNING, LOG_INIT, + "%d:0466 Outstanding IO when " + "bringing Adapter offline\n", + phba->brd_no); + break; + } + } } + /* stop all timers associated with this hba */ lpfc_stop_timer(phba); phba->work_hba_events = 0; + phba->work_ha = 0; lpfc_printf_log(phba, KERN_WARNING, @@ -1599,7 +1617,11 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) goto out_free_iocbq; } - /* We can rely on a queue depth attribute only after SLI HBA setup */ + /* + * Set initial can_queue value since 0 is no longer supported and + * scsi_add_host will fail. This will be adjusted later based on the + * max xri value determined in hba setup. + */ host->can_queue = phba->cfg_hba_queue_depth - 10; /* Tell the midlayer we support 16 byte commands */ @@ -1639,6 +1661,14 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) goto out_free_irq; } + /* + * hba setup may have changed the hba_queue_depth so we need to adjust + * the value of can_queue. 
+ */ + host->can_queue = phba->cfg_hba_queue_depth - 10; + + lpfc_discovery_wait(phba); + if (phba->cfg_poll & DISABLE_FCP_RING_INT) { spin_lock_irq(phba->host->host_lock); lpfc_poll_start_timer(phba); diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c index e42f22aaf71..4d016c2a1b2 100644 --- a/drivers/scsi/lpfc/lpfc_mbox.c +++ b/drivers/scsi/lpfc/lpfc_mbox.c @@ -651,3 +651,19 @@ lpfc_mbox_get(struct lpfc_hba * phba) return mbq; } + +int +lpfc_mbox_tmo_val(struct lpfc_hba *phba, int cmd) +{ + switch (cmd) { + case MBX_WRITE_NV: /* 0x03 */ + case MBX_UPDATE_CFG: /* 0x1B */ + case MBX_DOWN_LOAD: /* 0x1C */ + case MBX_DEL_LD_ENTRY: /* 0x1D */ + case MBX_LOAD_AREA: /* 0x81 */ + case MBX_FLASH_WR_ULA: /* 0x98 */ + case MBX_LOAD_EXP_ROM: /* 0x9C */ + return LPFC_MBOX_TMO_FLASH_CMD; + } + return LPFC_MBOX_TMO; +} diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c index 07017658ac5..066292d3995 100644 --- a/drivers/scsi/lpfc/lpfc_mem.c +++ b/drivers/scsi/lpfc/lpfc_mem.c @@ -133,6 +133,11 @@ lpfc_mem_free(struct lpfc_hba * phba) pci_pool_destroy(phba->lpfc_scsi_dma_buf_pool); pci_pool_destroy(phba->lpfc_mbuf_pool); + + /* Free the iocb lookup array */ + kfree(psli->iocbq_lookup); + psli->iocbq_lookup = NULL; + } void * diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c index 27d60ad897c..20449a8dd53 100644 --- a/drivers/scsi/lpfc/lpfc_nportdisc.c +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c @@ -179,7 +179,7 @@ lpfc_els_abort(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, /* Abort outstanding I/O on NPort <nlp_DID> */ lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, - "%d:0201 Abort outstanding I/O on NPort x%x " + "%d:0205 Abort outstanding I/O on NPort x%x " "Data: x%x x%x x%x\n", phba->brd_no, ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); @@ -393,6 +393,20 @@ lpfc_rcv_plogi(struct lpfc_hba * phba, mbox->context2 = ndlp; ndlp->nlp_flag |= (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI); + /* + * If there is an outstanding PLOGI issued, abort it before + * sending ACC rsp for received PLOGI. If pending plogi + * is not canceled here, the plogi will be rejected by + * remote port and will be retried. On a configuration with + * single discovery thread, this will cause a huge delay in + * discovery. Also this will cause multiple state machines + * running in parallel for this node. + */ + if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) { + /* software abort outstanding PLOGI */ + lpfc_els_abort(phba, ndlp, 1); + } + lpfc_els_rsp_acc(phba, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox, 0); return 1; @@ -1110,6 +1124,17 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_hba * phba, phba->brd_no, did, mb->mbxStatus, phba->hba_state); + /* + * If RegLogin failed due to lack of HBA resources do not + * retry discovery. + */ + if (mb->mbxStatus == MBXERR_RPI_FULL) { + ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE; + ndlp->nlp_state = NLP_STE_UNUSED_NODE; + lpfc_nlp_list(phba, ndlp, NLP_UNUSED_LIST); + return ndlp->nlp_state; + } + /* Put ndlp in npr list set plogi timer for 1 sec */ mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1); spin_lock_irq(phba->host->host_lock); @@ -1590,7 +1615,13 @@ lpfc_rcv_padisc_npr_node(struct lpfc_hba * phba, lpfc_rcv_padisc(phba, ndlp, cmdiocb); - if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) { + /* + * Do not start discovery if discovery is about to start + * or discovery in progress for this node. Starting discovery + * here will affect the counting of discovery threads. 
+ */ + if ((!(ndlp->nlp_flag & NLP_DELAY_TMO)) && + (ndlp->nlp_flag & NLP_NPR_2B_DISC)){ if (ndlp->nlp_flag & NLP_NPR_ADISC) { ndlp->nlp_prev_state = NLP_STE_NPR_NODE; ndlp->nlp_state = NLP_STE_ADISC_ISSUE; diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index aea1ee472f3..a8816a8738f 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -21,6 +21,7 @@ #include <linux/pci.h> #include <linux/interrupt.h> +#include <linux/delay.h> #include <scsi/scsi.h> #include <scsi/scsi_device.h> @@ -153,22 +154,6 @@ static void lpfc_release_scsi_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb) { unsigned long iflag = 0; - /* - * There are only two special cases to consider. (1) the scsi command - * requested scatter-gather usage or (2) the scsi command allocated - * a request buffer, but did not request use_sg. There is a third - * case, but it does not require resource deallocation. - */ - if ((psb->seg_cnt > 0) && (psb->pCmd->use_sg)) { - dma_unmap_sg(&phba->pcidev->dev, psb->pCmd->request_buffer, - psb->seg_cnt, psb->pCmd->sc_data_direction); - } else { - if ((psb->nonsg_phys) && (psb->pCmd->request_bufflen)) { - dma_unmap_single(&phba->pcidev->dev, psb->nonsg_phys, - psb->pCmd->request_bufflen, - psb->pCmd->sc_data_direction); - } - } spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag); psb->pCmd = NULL; @@ -282,6 +267,27 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd) } static void +lpfc_scsi_unprep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb) +{ + /* + * There are only two special cases to consider. (1) the scsi command + * requested scatter-gather usage or (2) the scsi command allocated + * a request buffer, but did not request use_sg. There is a third + * case, but it does not require resource deallocation. 
+ */ + if ((psb->seg_cnt > 0) && (psb->pCmd->use_sg)) { + dma_unmap_sg(&phba->pcidev->dev, psb->pCmd->request_buffer, + psb->seg_cnt, psb->pCmd->sc_data_direction); + } else { + if ((psb->nonsg_phys) && (psb->pCmd->request_bufflen)) { + dma_unmap_single(&phba->pcidev->dev, psb->nonsg_phys, + psb->pCmd->request_bufflen, + psb->pCmd->sc_data_direction); + } + } +} + +static void lpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd) { struct scsi_cmnd *cmnd = lpfc_cmd->pCmd; @@ -454,6 +460,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, cmd->scsi_done(cmd); if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { + lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd); lpfc_release_scsi_buf(phba, lpfc_cmd); return; } @@ -511,6 +518,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, } } + lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd); lpfc_release_scsi_buf(phba, lpfc_cmd); } @@ -609,6 +617,7 @@ lpfc_scsi_prep_cmnd(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd, static int lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd, + unsigned int lun, uint8_t task_mgmt_cmd) { struct lpfc_sli *psli; @@ -627,8 +636,7 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_hba *phba, piocb = &piocbq->iocb; fcp_cmnd = lpfc_cmd->fcp_cmnd; - int_to_scsilun(lpfc_cmd->pCmd->device->lun, - &lpfc_cmd->fcp_cmnd->fcp_lun); + int_to_scsilun(lun, &lpfc_cmd->fcp_cmnd->fcp_lun); fcp_cmnd->fcpCntl2 = task_mgmt_cmd; piocb->ulpCommand = CMD_FCP_ICMND64_CR; @@ -655,14 +663,16 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_hba *phba, static int lpfc_scsi_tgt_reset(struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_hba * phba, - unsigned tgt_id, struct lpfc_rport_data *rdata) + unsigned tgt_id, unsigned int lun, + struct lpfc_rport_data *rdata) { struct lpfc_iocbq *iocbq; struct lpfc_iocbq *iocbqrsp; int ret; lpfc_cmd->rdata = rdata; - ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, FCP_TARGET_RESET); + ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, lun, + FCP_TARGET_RESET); if (!ret) return FAILED; @@ -822,6 +832,7 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) return 0; out_host_busy_free_buf: + lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd); lpfc_release_scsi_buf(phba, lpfc_cmd); out_host_busy: return SCSI_MLQUEUE_HOST_BUSY; @@ -831,6 +842,21 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) return 0; } +static void +lpfc_block_error_handler(struct scsi_cmnd *cmnd) +{ + struct Scsi_Host *shost = cmnd->device->host; + struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device)); + + spin_lock_irq(shost->host_lock); + while (rport->port_state == FC_PORTSTATE_BLOCKED) { + spin_unlock_irq(shost->host_lock); + msleep(1000); + spin_lock_irq(shost->host_lock); + } + spin_unlock_irq(shost->host_lock); + return; +} static int lpfc_abort_handler(struct scsi_cmnd *cmnd) @@ -845,6 +871,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) unsigned int loop_count = 0; int ret = SUCCESS; + lpfc_block_error_handler(cmnd); spin_lock_irq(shost->host_lock); lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble; @@ -947,6 +974,7 @@ lpfc_reset_lun_handler(struct scsi_cmnd *cmnd) int ret = FAILED; int cnt, loopcnt; + lpfc_block_error_handler(cmnd); spin_lock_irq(shost->host_lock); /* * If target is not in a MAPPED state, delay the reset until @@ -969,12 +997,12 @@ lpfc_reset_lun_handler(struct scsi_cmnd *cmnd) if (lpfc_cmd == NULL) goto out; - lpfc_cmd->pCmd = cmnd; lpfc_cmd->timeout = 60; lpfc_cmd->scsi_hba = phba; 
lpfc_cmd->rdata = rdata; - ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, FCP_LUN_RESET); + ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, cmnd->device->lun, + FCP_LUN_RESET); if (!ret) goto out_free_scsi_buf; @@ -1001,7 +1029,6 @@ lpfc_reset_lun_handler(struct scsi_cmnd *cmnd) cmd_status = iocbqrsp->iocb.ulpStatus; lpfc_sli_release_iocbq(phba, iocbqrsp); - lpfc_release_scsi_buf(phba, lpfc_cmd); /* * All outstanding txcmplq I/Os should have been aborted by the device. @@ -1040,6 +1067,8 @@ lpfc_reset_lun_handler(struct scsi_cmnd *cmnd) } out_free_scsi_buf: + lpfc_release_scsi_buf(phba, lpfc_cmd); + lpfc_printf_log(phba, KERN_ERR, LOG_FCP, "%d:0713 SCSI layer issued LUN reset (%d, %d) " "Data: x%x x%x x%x\n", @@ -1062,6 +1091,7 @@ lpfc_reset_bus_handler(struct scsi_cmnd *cmnd) int cnt, loopcnt; struct lpfc_scsi_buf * lpfc_cmd; + lpfc_block_error_handler(cmnd); spin_lock_irq(shost->host_lock); lpfc_cmd = lpfc_get_scsi_buf(phba); @@ -1070,7 +1100,6 @@ lpfc_reset_bus_handler(struct scsi_cmnd *cmnd) /* The lpfc_cmd storage is reused. Set all loop invariants. */ lpfc_cmd->timeout = 60; - lpfc_cmd->pCmd = cmnd; lpfc_cmd->scsi_hba = phba; /* @@ -1078,7 +1107,7 @@ lpfc_reset_bus_handler(struct scsi_cmnd *cmnd) * targets known to the driver. Should any target reset * fail, this routine returns failure to the midlayer. */ - for (i = 0; i < MAX_FCP_TARGET; i++) { + for (i = 0; i < LPFC_MAX_TARGET; i++) { /* Search the mapped list for this target ID */ match = 0; list_for_each_entry(ndlp, &phba->fc_nlpmap_list, nlp_listp) { @@ -1090,11 +1119,11 @@ lpfc_reset_bus_handler(struct scsi_cmnd *cmnd) if (!match) continue; - ret = lpfc_scsi_tgt_reset(lpfc_cmd, phba, - i, ndlp->rport->dd_data); + ret = lpfc_scsi_tgt_reset(lpfc_cmd, phba, i, cmnd->device->lun, + ndlp->rport->dd_data); if (ret != SUCCESS) { lpfc_printf_log(phba, KERN_ERR, LOG_FCP, - "%d:0713 Bus Reset on target %d failed\n", + "%d:0700 Bus Reset on target %d failed\n", phba->brd_no, i); err_count++; } diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index bb69a7a1ec5..70f4d5a1348 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -191,35 +191,12 @@ static int lpfc_sli_ringtxcmpl_put(struct lpfc_hba * phba, struct lpfc_sli_ring * pring, struct lpfc_iocbq * piocb) { - uint16_t iotag; - list_add_tail(&piocb->list, &pring->txcmplq); pring->txcmplq_cnt++; if (unlikely(pring->ringno == LPFC_ELS_RING)) mod_timer(&phba->els_tmofunc, jiffies + HZ * (phba->fc_ratov << 1)); - if (pring->fast_lookup) { - /* Setup fast lookup based on iotag for completion */ - iotag = piocb->iocb.ulpIoTag; - if (iotag && (iotag < pring->fast_iotag)) - *(pring->fast_lookup + iotag) = piocb; - else { - - /* Cmd ring <ringno> put: iotag <iotag> greater then - configured max <fast_iotag> wd0 <icmd> */ - lpfc_printf_log(phba, - KERN_ERR, - LOG_SLI, - "%d:0316 Cmd ring %d put: iotag x%x " - "greater then configured max x%x " - "wd0 x%x\n", - phba->brd_no, - pring->ringno, iotag, - pring->fast_iotag, - *(((uint32_t *)(&piocb->iocb)) + 7)); - } - } return (0); } @@ -343,7 +320,8 @@ lpfc_sli_next_iotag(struct lpfc_hba * phba, struct lpfc_iocbq * iocbq) kfree(old_arr); return iotag; } - } + } else + spin_unlock_irq(phba->host->host_lock); lpfc_printf_log(phba, KERN_ERR,LOG_SLI, "%d:0318 Failed to allocate IOTAG.last IOTAG is %d\n", @@ -601,7 +579,7 @@ lpfc_sli_handle_mb_event(struct lpfc_hba * phba) /* Stray Mailbox Interrupt, mbxCommand <cmd> mbxStatus <status> */ lpfc_printf_log(phba, - KERN_ERR, + KERN_WARNING, LOG_MBOX | 
LOG_SLI, "%d:0304 Stray Mailbox Interrupt " "mbxCommand x%x mbxStatus x%x\n", @@ -992,9 +970,11 @@ void lpfc_sli_poll_fcp_ring(struct lpfc_hba * phba) * resources need to be recovered. */ if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) { - printk(KERN_INFO "%s: IOCB cmd 0x%x processed." - " Skipping completion\n", __FUNCTION__, - irsp->ulpCommand); + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "%d:0314 IOCB cmd 0x%x" + " processed. Skipping" + " completion", phba->brd_no, + irsp->ulpCommand); break; } @@ -1127,7 +1107,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba, if (unlikely(irsp->ulpStatus)) { /* Rsp ring <ringno> error: IOCB */ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, - "%d:0326 Rsp Ring %d error: IOCB Data: " + "%d:0336 Rsp Ring %d error: IOCB Data: " "x%x x%x x%x x%x x%x x%x x%x x%x\n", phba->brd_no, pring->ringno, irsp->un.ulpWord[0], irsp->un.ulpWord[1], @@ -1145,9 +1125,11 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba, * resources need to be recovered. */ if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) { - printk(KERN_INFO "%s: IOCB cmd 0x%x processed. " - "Skipping completion\n", __FUNCTION__, - irsp->ulpCommand); + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "%d:0333 IOCB cmd 0x%x" + " processed. Skipping" + " completion\n", phba->brd_no, + irsp->ulpCommand); break; } @@ -1178,7 +1160,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba, } else { /* Unknown IOCB command */ lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "%d:0321 Unknown IOCB command " + "%d:0334 Unknown IOCB command " "Data: x%x, x%x x%x x%x x%x\n", phba->brd_no, type, irsp->ulpCommand, irsp->ulpStatus, irsp->ulpIoTag, @@ -1261,7 +1243,7 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba, lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "%d:0312 Ring %d handler: portRspPut %d " + "%d:0303 Ring %d handler: portRspPut %d " "is bigger then rsp ring %d\n", phba->brd_no, pring->ringno, portRspPut, portRspMax); @@ -1406,7 +1388,7 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba, lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "%d:0321 Unknown IOCB command " + "%d:0335 Unknown IOCB command " "Data: x%x x%x x%x x%x\n", phba->brd_no, irsp->ulpCommand, @@ -1422,11 +1404,11 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba, next_iocb, &saveq->list, list) { + list_del(&rspiocbp->list); lpfc_sli_release_iocbq(phba, rspiocbp); } } - lpfc_sli_release_iocbq(phba, saveq); } } @@ -1570,8 +1552,8 @@ lpfc_sli_brdready(struct lpfc_hba * phba, uint32_t mask) void lpfc_reset_barrier(struct lpfc_hba * phba) { - uint32_t * resp_buf; - uint32_t * mbox_buf; + uint32_t __iomem *resp_buf; + uint32_t __iomem *mbox_buf; volatile uint32_t mbox; uint32_t hc_copy; int i; @@ -1587,7 +1569,7 @@ void lpfc_reset_barrier(struct lpfc_hba * phba) * Tell the other part of the chip to suspend temporarily all * its DMA activity. 
*/ - resp_buf = (uint32_t *)phba->MBslimaddr; + resp_buf = phba->MBslimaddr; /* Disable the error attention */ hc_copy = readl(phba->HCregaddr); @@ -1605,7 +1587,7 @@ void lpfc_reset_barrier(struct lpfc_hba * phba) ((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP; writel(BARRIER_TEST_PATTERN, (resp_buf + 1)); - mbox_buf = (uint32_t *)phba->MBslimaddr; + mbox_buf = phba->MBslimaddr; writel(mbox, mbox_buf); for (i = 0; @@ -1734,15 +1716,13 @@ lpfc_sli_brdreset(struct lpfc_hba * phba) phba->fc_myDID = 0; phba->fc_prevDID = 0; - psli->sli_flag = 0; - /* Turn off parity checking and serr during the physical reset */ pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value); pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value & ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR))); - psli->sli_flag &= ~LPFC_SLI2_ACTIVE; + psli->sli_flag &= ~(LPFC_SLI2_ACTIVE | LPFC_PROCESS_LA); /* Now toggle INITFF bit in the Host Control Register */ writel(HC_INITFF, phba->HCregaddr); mdelay(1); @@ -1783,7 +1763,7 @@ lpfc_sli_brdrestart(struct lpfc_hba * phba) /* Restart HBA */ lpfc_printf_log(phba, KERN_INFO, LOG_SLI, - "%d:0328 Restart HBA Data: x%x x%x\n", phba->brd_no, + "%d:0337 Restart HBA Data: x%x x%x\n", phba->brd_no, phba->hba_state, psli->sli_flag); word0 = 0; @@ -1805,7 +1785,7 @@ lpfc_sli_brdrestart(struct lpfc_hba * phba) skip_post = 0; word0 = 0; /* This is really setting up word1 */ } - to_slim = (uint8_t *) phba->MBslimaddr + sizeof (uint32_t); + to_slim = phba->MBslimaddr + sizeof (uint32_t); writel(*(uint32_t *) mb, to_slim); readl(to_slim); /* flush */ @@ -1815,6 +1795,9 @@ lpfc_sli_brdrestart(struct lpfc_hba * phba) spin_unlock_irq(phba->host->host_lock); + memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets)); + psli->stats_start = get_seconds(); + if (skip_post) mdelay(100); else @@ -1925,6 +1908,9 @@ lpfc_sli_hba_setup(struct lpfc_hba * phba) } while (resetcount < 2 && !done) { + spin_lock_irq(phba->host->host_lock); + phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE; + spin_unlock_irq(phba->host->host_lock); phba->hba_state = LPFC_STATE_UNKNOWN; lpfc_sli_brdrestart(phba); msleep(2500); @@ -1932,6 +1918,9 @@ lpfc_sli_hba_setup(struct lpfc_hba * phba) if (rc) break; + spin_lock_irq(phba->host->host_lock); + phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; + spin_unlock_irq(phba->host->host_lock); resetcount++; /* Call pre CONFIG_PORT mailbox command initialization. 
A value of 0 @@ -2217,7 +2206,8 @@ lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag) return (MBX_NOT_FINISHED); } /* timeout active mbox command */ - mod_timer(&psli->mbox_tmo, jiffies + HZ * LPFC_MBOX_TMO); + mod_timer(&psli->mbox_tmo, (jiffies + + (HZ * lpfc_mbox_tmo_val(phba, mb->mbxCommand)))); } /* Mailbox cmd <cmd> issue */ @@ -2277,7 +2267,6 @@ lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag) break; case MBX_POLL: - i = 0; psli->mbox_active = NULL; if (psli->sli_flag & LPFC_SLI2_ACTIVE) { /* First read mbox status word */ @@ -2291,11 +2280,14 @@ lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag) /* Read the HBA Host Attention Register */ ha_copy = readl(phba->HAregaddr); + i = lpfc_mbox_tmo_val(phba, mb->mbxCommand); + i *= 1000; /* Convert to ms */ + /* Wait for command to complete */ while (((word0 & OWN_CHIP) == OWN_CHIP) || (!(ha_copy & HA_MBATT) && (phba->hba_state > LPFC_WARM_START))) { - if (i++ >= 100) { + if (i-- <= 0) { psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; spin_unlock_irqrestore(phba->host->host_lock, drvr_flag); @@ -2313,7 +2305,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag) /* Can be in interrupt context, do not sleep */ /* (or might be called with interrupts disabled) */ - mdelay(i); + mdelay(1); spin_lock_irqsave(phba->host->host_lock, drvr_flag); @@ -2659,8 +2651,6 @@ lpfc_sli_hba_down(struct lpfc_hba * phba) INIT_LIST_HEAD(&(pring->txq)); - kfree(pring->fast_lookup); - pring->fast_lookup = NULL; } spin_unlock_irqrestore(phba->host->host_lock, flags); @@ -3030,7 +3020,7 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba * phba, if (timeleft == 0) { lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "%d:0329 IOCB wait timeout error - no " + "%d:0338 IOCB wait timeout error - no " "wake response Data x%x\n", phba->brd_no, timeout); retval = IOCB_TIMEDOUT; @@ -3110,6 +3100,24 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq, return retval; } +int +lpfc_sli_flush_mbox_queue(struct lpfc_hba * phba) +{ + int i = 0; + + while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE && !phba->stopped) { + if (i++ > LPFC_MBOX_TMO * 1000) + return 1; + + if (lpfc_sli_handle_mb_event(phba) == 0) + i = 0; + + msleep(1); + } + + return (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) ? 
1 : 0; +} + irqreturn_t lpfc_intr_handler(int irq, void *dev_id, struct pt_regs * regs) { diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h index a52d6c6cf08..e26de680935 100644 --- a/drivers/scsi/lpfc/lpfc_sli.h +++ b/drivers/scsi/lpfc/lpfc_sli.h @@ -135,8 +135,6 @@ struct lpfc_sli_ring { uint32_t fast_iotag; /* max fastlookup based iotag */ uint32_t iotag_ctr; /* keeps track of the next iotag to use */ uint32_t iotag_max; /* max iotag value to use */ - struct lpfc_iocbq ** fast_lookup; /* array of IOCB ptrs indexed by - iotag */ struct list_head txq; uint16_t txq_cnt; /* current length of queue */ uint16_t txq_max; /* max length */ @@ -174,6 +172,18 @@ struct lpfc_sli_stat { uint32_t mbox_busy; /* Mailbox cmd busy */ }; +/* Structure to store link status values when port stats are reset */ +struct lpfc_lnk_stat { + uint32_t link_failure_count; + uint32_t loss_of_sync_count; + uint32_t loss_of_signal_count; + uint32_t prim_seq_protocol_err_count; + uint32_t invalid_tx_word_count; + uint32_t invalid_crc_count; + uint32_t error_frames; + uint32_t link_events; +}; + /* Structure used to hold SLI information */ struct lpfc_sli { uint32_t num_rings; @@ -203,6 +213,8 @@ struct lpfc_sli { struct lpfc_iocbq ** iocbq_lookup; /* array to lookup IOCB by IOTAG */ size_t iocbq_lookup_len; /* current lengs of the array */ uint16_t last_iotag; /* last allocated IOTAG */ + unsigned long stats_start; /* in seconds */ + struct lpfc_lnk_stat lnk_stat_offsets; }; /* Given a pointer to the start of the ring, and the slot number of @@ -213,3 +225,9 @@ struct lpfc_sli { #define LPFC_MBOX_TMO 30 /* Sec tmo for outstanding mbox command */ +#define LPFC_MBOX_TMO_FLASH_CMD 300 /* Sec tmo for outstanding FLASH write + * or erase cmds. This is especially + * long because of the potential of + * multiple flash erases that can be + * spawned. + */ diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h index 6b737568b83..c7091ea29f3 100644 --- a/drivers/scsi/lpfc/lpfc_version.h +++ b/drivers/scsi/lpfc/lpfc_version.h @@ -18,7 +18,7 @@ * included with this package. * *******************************************************************/ -#define LPFC_DRIVER_VERSION "8.1.6" +#define LPFC_DRIVER_VERSION "8.1.9" #define LPFC_DRIVER_NAME "lpfc" |
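
One technique in this patch that is easy to miss in the raw hunks is how `reset_fc_host_stats` is implemented: the adapter's link counters (read via MBX_READ_LNK_STAT) cannot be cleared in hardware, so `lpfc_reset_stats` snapshots the current readings into `psli->lnk_stat_offsets` and records `stats_start`, and `lpfc_get_stats` later subtracts those offsets (and handles wraparound of the seconds counter). Below is a minimal, standalone C sketch of that offset-snapshot approach. It is not lpfc code: the names `hw_counters`, `stats_snapshot`, and `read_hw_counters` are invented for illustration, and the fake counter source simply increments on each call.

```c
/*
 * Standalone sketch of the "offset snapshot" statistics-reset technique:
 * the hardware counters are monotonic and cannot be zeroed, so a reset
 * records the current readings and later queries report the difference.
 */
#include <stdio.h>
#include <time.h>

struct hw_counters {            /* what the adapter reports (monotonic) */
	unsigned int link_failure_count;
	unsigned int invalid_crc_count;
};

struct stats_snapshot {         /* per-host offsets saved at reset time */
	struct hw_counters offsets;
	unsigned long stats_start;  /* seconds, like psli->stats_start */
};

/* Stand-in for the mailbox query: returns ever-increasing raw counters. */
static void read_hw_counters(struct hw_counters *c)
{
	static unsigned int tick;

	tick += 3;
	c->link_failure_count = 10 + tick;
	c->invalid_crc_count  = 100 + tick;
}

/* Analogue of lpfc_reset_stats(): remember the current readings. */
static void reset_stats(struct stats_snapshot *s)
{
	read_hw_counters(&s->offsets);
	s->stats_start = (unsigned long)time(NULL);
}

/*
 * Analogue of lpfc_get_stats(): report counters relative to the snapshot,
 * with the same defensive wraparound handling for the seconds value.
 */
static void get_stats(const struct stats_snapshot *s)
{
	struct hw_counters now;
	unsigned long seconds = (unsigned long)time(NULL), since;

	read_hw_counters(&now);
	if (seconds < s->stats_start)
		since = seconds + ((unsigned long)-1 - s->stats_start);
	else
		since = seconds - s->stats_start;

	printf("link failures since reset: %u\n",
	       now.link_failure_count - s->offsets.link_failure_count);
	printf("CRC errors since reset:    %u\n",
	       now.invalid_crc_count - s->offsets.invalid_crc_count);
	printf("seconds since reset:       %lu\n", since);
}

int main(void)
{
	struct stats_snapshot snap;

	reset_stats(&snap);   /* what the reset_fc_host_stats hook does */
	get_stats(&snap);     /* a subsequent fc_host_statistics query */
	return 0;
}
```

The design keeps the adapter's counters untouched (so other consumers still see monotonic values) while letting the FC transport's statistics reset appear to zero everything for this host.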