Diffstat (limited to 'drivers/scsi/qla2xxx/qla_os.c')
-rw-r--r-- | drivers/scsi/qla2xxx/qla_os.c | 228
1 files changed, 178 insertions, 50 deletions
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index b79fca7d461..209f50e788a 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -11,6 +11,7 @@
 #include <linux/delay.h>
 #include <linux/kthread.h>
 #include <linux/mutex.h>
+#include <linux/kobject.h>
 
 #include <scsi/scsi_tcq.h>
 #include <scsi/scsicam.h>
@@ -77,21 +78,6 @@ module_param(ql2xmaxqdepth, int, S_IRUGO|S_IWUSR);
 MODULE_PARM_DESC(ql2xmaxqdepth,
 		"Maximum queue depth to report for target devices.");
 
-int ql2xqfulltracking = 1;
-module_param(ql2xqfulltracking, int, S_IRUGO|S_IWUSR);
-MODULE_PARM_DESC(ql2xqfulltracking,
-		"Controls whether the driver tracks queue full status "
-		"returns and dynamically adjusts a scsi device's queue "
-		"depth. Default is 1, perform tracking. Set to 0 to "
-		"disable dynamic tracking and adjustment of queue depth.");
-
-int ql2xqfullrampup = 120;
-module_param(ql2xqfullrampup, int, S_IRUGO|S_IWUSR);
-MODULE_PARM_DESC(ql2xqfullrampup,
-		"Number of seconds to wait to begin to ramp-up the queue "
-		"depth for a device after a queue-full condition has been "
-		"detected. Default is 120 seconds.");
-
 int ql2xiidmaenable=1;
 module_param(ql2xiidmaenable, int, S_IRUGO|S_IRUSR);
 MODULE_PARM_DESC(ql2xiidmaenable,
@@ -137,7 +123,7 @@ static int qla2xxx_eh_target_reset(struct scsi_cmnd *);
 static int qla2xxx_eh_bus_reset(struct scsi_cmnd *);
 static int qla2xxx_eh_host_reset(struct scsi_cmnd *);
 
-static int qla2x00_change_queue_depth(struct scsi_device *, int);
+static int qla2x00_change_queue_depth(struct scsi_device *, int, int);
 static int qla2x00_change_queue_type(struct scsi_device *, int);
 
 struct scsi_host_template qla2xxx_driver_template = {
@@ -489,11 +475,11 @@ qla2xxx_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
 	srb_t *sp;
 	int rval;
 
-	if (unlikely(pci_channel_offline(ha->pdev))) {
-		if (ha->pdev->error_state == pci_channel_io_frozen)
-			cmd->result = DID_REQUEUE << 16;
-		else
+	if (ha->flags.eeh_busy) {
+		if (ha->flags.pci_channel_io_perm_failure)
 			cmd->result = DID_NO_CONNECT << 16;
+		else
+			cmd->result = DID_REQUEUE << 16;
 		goto qc24_fail_command;
 	}
 
@@ -566,8 +552,15 @@ qla2x00_eh_wait_on_command(struct scsi_cmnd *cmd)
 #define ABORT_POLLING_PERIOD	1000
 #define ABORT_WAIT_ITER		((10 * 1000) / (ABORT_POLLING_PERIOD))
 	unsigned long wait_iter = ABORT_WAIT_ITER;
+	scsi_qla_host_t *vha = shost_priv(cmd->device->host);
+	struct qla_hw_data *ha = vha->hw;
 	int ret = QLA_SUCCESS;
 
+	if (unlikely(pci_channel_offline(ha->pdev)) || ha->flags.eeh_busy) {
+		DEBUG17(qla_printk(KERN_WARNING, ha, "return:eh_wait\n"));
+		return ret;
+	}
+
 	while (CMD_SP(cmd) && wait_iter--) {
 		msleep(ABORT_POLLING_PERIOD);
 	}
@@ -727,23 +720,6 @@ qla2x00_abort_fcport_cmds(fc_port_t *fcport)
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 }
 
-static void
-qla2x00_block_error_handler(struct scsi_cmnd *cmnd)
-{
-	struct Scsi_Host *shost = cmnd->device->host;
-	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
-	unsigned long flags;
-
-	spin_lock_irqsave(shost->host_lock, flags);
-	while (rport->port_state == FC_PORTSTATE_BLOCKED) {
-		spin_unlock_irqrestore(shost->host_lock, flags);
-		msleep(1000);
-		spin_lock_irqsave(shost->host_lock, flags);
-	}
-	spin_unlock_irqrestore(shost->host_lock, flags);
-	return;
-}
-
 /**************************************************************************
 * qla2xxx_eh_abort
 *
@@ -773,7 +749,7 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
 	struct req_que *req = vha->req;
 	srb_t *spt;
 
-	qla2x00_block_error_handler(cmd);
+	fc_block_scsi_eh(cmd);
 
 	if (!CMD_SP(cmd))
 		return SUCCESS;
@@ -904,7 +880,7 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
 	fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
 	int err;
 
-	qla2x00_block_error_handler(cmd);
+	fc_block_scsi_eh(cmd);
 
 	if (!fcport)
 		return FAILED;
@@ -984,7 +960,7 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
 	unsigned long serial;
 	srb_t *sp = (srb_t *) CMD_SP(cmd);
 
-	qla2x00_block_error_handler(cmd);
+	fc_block_scsi_eh(cmd);
 
 	id = cmd->device->id;
 	lun = cmd->device->lun;
@@ -1047,7 +1023,7 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
 	srb_t *sp = (srb_t *) CMD_SP(cmd);
 	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
 
-	qla2x00_block_error_handler(cmd);
+	fc_block_scsi_eh(cmd);
 
 	id = cmd->device->id;
 	lun = cmd->device->lun;
@@ -1233,10 +1209,61 @@ qla2xxx_slave_destroy(struct scsi_device *sdev)
 	sdev->hostdata = NULL;
 }
 
+static void qla2x00_handle_queue_full(struct scsi_device *sdev, int qdepth)
+{
+	fc_port_t *fcport = (struct fc_port *) sdev->hostdata;
+
+	if (!scsi_track_queue_full(sdev, qdepth))
+		return;
+
+	DEBUG2(qla_printk(KERN_INFO, fcport->vha->hw,
+		"scsi(%ld:%d:%d:%d): Queue depth adjusted-down to %d.\n",
+		fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun,
+		sdev->queue_depth));
+}
+
+static void qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, int qdepth)
+{
+	fc_port_t *fcport = sdev->hostdata;
+	struct scsi_qla_host *vha = fcport->vha;
+	struct qla_hw_data *ha = vha->hw;
+	struct req_que *req = NULL;
+
+	req = vha->req;
+	if (!req)
+		return;
+
+	if (req->max_q_depth <= sdev->queue_depth || req->max_q_depth < qdepth)
+		return;
+
+	if (sdev->ordered_tags)
+		scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, qdepth);
+	else
+		scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, qdepth);
+
+	DEBUG2(qla_printk(KERN_INFO, ha,
+		"scsi(%ld:%d:%d:%d): Queue depth adjusted-up to %d.\n",
+		fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun,
+		sdev->queue_depth));
+}
+
 static int
-qla2x00_change_queue_depth(struct scsi_device *sdev, int qdepth)
+qla2x00_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
 {
-	scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
+	switch (reason) {
+	case SCSI_QDEPTH_DEFAULT:
+		scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
+		break;
+	case SCSI_QDEPTH_QFULL:
+		qla2x00_handle_queue_full(sdev, qdepth);
+		break;
+	case SCSI_QDEPTH_RAMP_UP:
+		qla2x00_adjust_sdev_qdepth_up(sdev, qdepth);
+		break;
+	default:
+		return EOPNOTSUPP;
+	}
+
 	return sdev->queue_depth;
 }
 
@@ -1790,6 +1817,13 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 
 	/* Set ISP-type information. */
 	qla2x00_set_isp_flags(ha);
+
+	/* Set EEH reset type to fundamental if required by hba */
+	if ( IS_QLA24XX(ha) || IS_QLA25XX(ha) || IS_QLA81XX(ha)) {
+		pdev->needs_freset = 1;
+		pci_save_state(pdev);
+	}
+
 	/* Configure PCI I/O space */
 	ret = qla2x00_iospace_config(ha);
 	if (ret)
@@ -2016,13 +2050,13 @@ skip_dpc:
 	DEBUG2(printk("DEBUG: detect hba %ld at address = %p\n",
 	    base_vha->host_no, ha));
 
-	base_vha->flags.init_done = 1;
-	base_vha->flags.online = 1;
-
 	ret = scsi_add_host(host, &pdev->dev);
 	if (ret)
 		goto probe_failed;
 
+	base_vha->flags.init_done = 1;
+	base_vha->flags.online = 1;
+
 	ha->isp_ops->enable_intrs(ha);
 
 	scsi_scan_host(host);
@@ -2154,6 +2188,24 @@ qla2x00_free_device(scsi_qla_host_t *vha)
 {
 	struct qla_hw_data *ha = vha->hw;
 
+	qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
+
+	/* Disable timer */
+	if (vha->timer_active)
+		qla2x00_stop_timer(vha);
+
+	/* Kill the kernel thread for this host */
+	if (ha->dpc_thread) {
+		struct task_struct *t = ha->dpc_thread;
+
+		/*
+		 * qla2xxx_wake_dpc checks for ->dpc_thread
+		 * so we need to zero it out.
+		 */
+		ha->dpc_thread = NULL;
+		kthread_stop(t);
+	}
+
 	qla25xx_delete_queues(vha);
 
 	if (ha->flags.fce_enabled)
@@ -2165,6 +2217,8 @@ qla2x00_free_device(scsi_qla_host_t *vha)
 	/* Stop currently executing firmware. */
 	qla2x00_try_to_stop_firmware(vha);
 
+	vha->flags.online = 0;
+
 	/* turn-off interrupts on the card */
 	if (ha->interrupts_on)
 		ha->isp_ops->disable_intrs(ha);
@@ -2653,6 +2707,37 @@ qla2x00_post_async_work(login_done, QLA_EVT_ASYNC_LOGIN_DONE);
 qla2x00_post_async_work(logout, QLA_EVT_ASYNC_LOGOUT);
 qla2x00_post_async_work(logout_done, QLA_EVT_ASYNC_LOGOUT_DONE);
 
+int
+qla2x00_post_uevent_work(struct scsi_qla_host *vha, u32 code)
+{
+	struct qla_work_evt *e;
+
+	e = qla2x00_alloc_work(vha, QLA_EVT_UEVENT);
+	if (!e)
+		return QLA_FUNCTION_FAILED;
+
+	e->u.uevent.code = code;
+	return qla2x00_post_work(vha, e);
+}
+
+static void
+qla2x00_uevent_emit(struct scsi_qla_host *vha, u32 code)
+{
+	char event_string[40];
+	char *envp[] = { event_string, NULL };
+
+	switch (code) {
+	case QLA_UEVENT_CODE_FW_DUMP:
+		snprintf(event_string, sizeof(event_string), "FW_DUMP=%ld",
+		    vha->host_no);
+		break;
+	default:
+		/* do nothing */
+		break;
+	}
+	kobject_uevent_env(&vha->hw->pdev->dev.kobj, KOBJ_CHANGE, envp);
+}
+
 void
 qla2x00_do_work(struct scsi_qla_host *vha)
 {
@@ -2690,6 +2775,9 @@ qla2x00_do_work(struct scsi_qla_host *vha)
 			qla2x00_async_logout_done(vha, e->u.logio.fcport,
 			    e->u.logio.data);
 			break;
+		case QLA_EVT_UEVENT:
+			qla2x00_uevent_emit(vha, e->u.uevent.code);
+			break;
 		}
 		if (e->flags & QLA_EVT_FLAG_FREE)
 			kfree(e);
@@ -2805,6 +2893,13 @@ qla2x00_do_dpc(void *data)
 		if (!base_vha->flags.init_done)
 			continue;
 
+		if (ha->flags.eeh_busy) {
+			DEBUG17(qla_printk(KERN_WARNING, ha,
+			    "qla2x00_do_dpc: dpc_flags: %lx\n",
+			    base_vha->dpc_flags));
+			continue;
+		}
+
 		DEBUG3(printk("scsi(%ld): DPC handler\n", base_vha->host_no));
 
 		ha->dpc_active = 1;
@@ -2995,8 +3090,13 @@ qla2x00_timer(scsi_qla_host_t *vha)
 	int		index;
 	srb_t		*sp;
 	int		t;
+	uint16_t	w;
 	struct qla_hw_data *ha = vha->hw;
 	struct req_que *req;
+
+	/* Hardware read to raise pending EEH errors during mailbox waits. */
+	if (!pci_channel_offline(ha->pdev))
+		pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
 	/*
 	 * Ports - Port down timer.
 	 *
@@ -3198,16 +3298,23 @@ qla2x00_release_firmware(void)
 static pci_ers_result_t
 qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
 {
-	scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
+	scsi_qla_host_t *vha = pci_get_drvdata(pdev);
+	struct qla_hw_data *ha = vha->hw;
+
+	DEBUG2(qla_printk(KERN_WARNING, ha, "error_detected:state %x\n",
+	    state));
 
 	switch (state) {
 	case pci_channel_io_normal:
+		ha->flags.eeh_busy = 0;
 		return PCI_ERS_RESULT_CAN_RECOVER;
 	case pci_channel_io_frozen:
+		ha->flags.eeh_busy = 1;
 		pci_disable_device(pdev);
 		return PCI_ERS_RESULT_NEED_RESET;
 	case pci_channel_io_perm_failure:
-		qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16);
+		ha->flags.pci_channel_io_perm_failure = 1;
+		qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
 		return PCI_ERS_RESULT_DISCONNECT;
 	}
 	return PCI_ERS_RESULT_NEED_RESET;
@@ -3258,6 +3365,8 @@ qla2xxx_pci_slot_reset(struct pci_dev *pdev)
 	struct qla_hw_data *ha = base_vha->hw;
 	int rc;
 
+	DEBUG17(qla_printk(KERN_WARNING, ha, "slot_reset\n"));
+
 	if (ha->mem_only)
 		rc = pci_enable_device_mem(pdev);
 	else
@@ -3266,19 +3375,33 @@ qla2xxx_pci_slot_reset(struct pci_dev *pdev)
 	if (rc) {
 		qla_printk(KERN_WARNING, ha,
 		    "Can't re-enable PCI device after reset.\n");
-
 		return ret;
 	}
-	pci_set_master(pdev);
 
 	if (ha->isp_ops->pci_config(base_vha))
 		return ret;
 
+#ifdef QL_DEBUG_LEVEL_17
+	{
+		uint8_t b;
+		uint32_t i;
+
+		printk("slot_reset_1: ");
+		for (i = 0; i < 256; i++) {
+			pci_read_config_byte(ha->pdev, i, &b);
+			printk("%s%02x", (i%16) ? " " : "\n", b);
+		}
+		printk("\n");
+	}
+#endif
 	set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
 	if (qla2x00_abort_isp(base_vha) == QLA_SUCCESS)
 		ret = PCI_ERS_RESULT_RECOVERED;
 	clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
 
+	DEBUG17(qla_printk(KERN_WARNING, ha,
+	    "slot_reset-return:ret=%x\n", ret));
+
 	return ret;
 }
 
@@ -3289,12 +3412,17 @@ qla2xxx_pci_resume(struct pci_dev *pdev)
 	struct qla_hw_data *ha = base_vha->hw;
 	int ret;
 
+	DEBUG17(qla_printk(KERN_WARNING, ha, "pci_resume\n"));
+
 	ret = qla2x00_wait_for_hba_online(base_vha);
 	if (ret != QLA_SUCCESS) {
 		qla_printk(KERN_ERR, ha,
 		    "the device failed to resume I/O "
 		    "from slot/link_reset");
 	}
+
+	ha->flags.eeh_busy = 0;
+
 	pci_cleanup_aer_uncorrect_error_status(pdev);
 }
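
The ql2xqfulltracking and ql2xqfullrampup module parameters can go away because queue-full tracking and ramp-up now live in the SCSI midlayer: ->change_queue_depth() is called with a reason code telling the LLD why the depth should change. A minimal sketch of that contract follows; example_change_queue_depth is a hypothetical name, while SCSI_QDEPTH_DEFAULT/SCSI_QDEPTH_QFULL/SCSI_QDEPTH_RAMP_UP, scsi_adjust_queue_depth(), scsi_get_tag_type() and scsi_track_queue_full() are the midlayer interfaces of this kernel generation.

#include <linux/errno.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>

/* Illustrative ->change_queue_depth() implementation for the new
 * three-argument contract; a sketch, not the driver's code. */
static int example_change_queue_depth(struct scsi_device *sdev, int qdepth,
	int reason)
{
	switch (reason) {
	case SCSI_QDEPTH_DEFAULT:	/* user/sysfs requested a new depth */
		scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
		break;
	case SCSI_QDEPTH_QFULL:		/* target returned QUEUE FULL */
		scsi_track_queue_full(sdev, qdepth);
		break;
	case SCSI_QDEPTH_RAMP_UP:	/* midlayer wants the depth restored */
		scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
		break;
	default:
		return -EOPNOTSUPP;
	}
	return sdev->queue_depth;
}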
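
The new eeh_busy and pci_channel_io_perm_failure flags are driven by the PCI error-recovery (AER/EEH) callbacks at the bottom of the patch, which the PCI core invokes in the order error_detected -> slot_reset -> resume. Below is a sketch of how such callbacks are registered through struct pci_error_handlers; the struct members are the real kernel interface, but the example_* names and stub bodies are illustrative rather than the driver's actual code.

#include <linux/pci.h>

static pci_ers_result_t example_error_detected(struct pci_dev *pdev,
	pci_channel_state_t state)
{
	/* Fail or requeue outstanding I/O, then ask for a slot reset. */
	return state == pci_channel_io_perm_failure ?
	    PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t example_slot_reset(struct pci_dev *pdev)
{
	/* Re-enable the device and re-initialize the adapter here. */
	return PCI_ERS_RESULT_RECOVERED;
}

static void example_resume(struct pci_dev *pdev)
{
	/* Clear the driver's "EEH in progress" state and restart I/O. */
}

static struct pci_error_handlers example_err_handler = {
	.error_detected	= example_error_detected,
	.slot_reset	= example_slot_reset,
	.resume		= example_resume,
};

static struct pci_driver example_pci_driver = {
	.name		= "example",
	/* .id_table, .probe, .remove omitted */
	.err_handler	= &example_err_handler,
};

Gating qla2xxx_queuecommand() and the DPC thread on eeh_busy keeps new I/O and recovery work off the adapter while these callbacks run.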
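
qla2x00_uevent_emit() raises a KOBJ_CHANGE uevent on the HBA's PCI device carrying an FW_DUMP=<host_no> environment string, so userspace can react when the firmware dumps (for instance via a udev rule matching ENV{FW_DUMP}). A minimal, illustrative userspace listener on the kobject-uevent netlink socket could look like this:

/* Sketch only: print any uevent that carries the FW_DUMP= key. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <linux/netlink.h>

int main(void)
{
	struct sockaddr_nl addr = {
		.nl_family = AF_NETLINK,
		.nl_pid = getpid(),
		.nl_groups = 1,		/* kernel uevent multicast group */
	};
	char buf[4096];
	int fd = socket(PF_NETLINK, SOCK_DGRAM, NETLINK_KOBJECT_UEVENT);

	if (fd < 0 || bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		perror("uevent socket");
		return 1;
	}

	for (;;) {
		ssize_t len = recv(fd, buf, sizeof(buf) - 1, 0);
		ssize_t i;

		if (len <= 0)
			continue;
		buf[len] = '\0';
		/* Payload is "ACTION@DEVPATH\0KEY=VALUE\0..." */
		for (i = 0; i < len; i += strlen(buf + i) + 1)
			if (!strncmp(buf + i, "FW_DUMP=", 8))
				printf("firmware dump event: %s\n", buf + i);
	}
	return 0;
}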