author    | David S. Miller <davem@davemloft.net> | 2009-06-15 03:02:23 -0700
committer | David S. Miller <davem@davemloft.net> | 2009-06-15 03:02:23 -0700
commit    | 9cbc1cb8cd46ce1f7645b9de249b2ce8460129bb (patch)
tree      | 8d104ec2a459346b99413b0b77421ca7b9936c1a /drivers/scsi/ibmvscsi
parent    | ca44d6e60f9de26281fda203f58b570e1748c015 (diff)
parent    | 45e3e1935e2857c54783291107d33323b3ef33c8 (diff)
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/torvalds/linux-2.6
Conflicts:
	Documentation/feature-removal-schedule.txt
	drivers/scsi/fcoe/fcoe.c
	net/core/drop_monitor.c
	net/core/net-traces.c
Diffstat (limited to 'drivers/scsi/ibmvscsi')
-rw-r--r-- | drivers/scsi/ibmvscsi/ibmvfc.c   | 434
-rw-r--r-- | drivers/scsi/ibmvscsi/ibmvfc.h   |  40
-rw-r--r-- | drivers/scsi/ibmvscsi/ibmvscsi.c | 463
-rw-r--r-- | drivers/scsi/ibmvscsi/ibmvscsi.h |   4
-rw-r--r-- | drivers/scsi/ibmvscsi/viosrp.h   |  68
5 files changed, 754 insertions, 255 deletions
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c index ea4abee7a2a..b4b805e8d7d 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.c +++ b/drivers/scsi/ibmvscsi/ibmvfc.c @@ -110,7 +110,7 @@ static const struct { { IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_DEAD, DID_ERROR, 0, 1, "transport dead" }, { IBMVFC_FABRIC_MAPPED, IBMVFC_CONFIG_ERROR, DID_ERROR, 1, 1, "configuration error" }, { IBMVFC_FABRIC_MAPPED, IBMVFC_NAME_SERVER_FAIL, DID_ERROR, 1, 1, "name server failure" }, - { IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_HALTED, DID_REQUEUE, 0, 0, "link halted" }, + { IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_HALTED, DID_REQUEUE, 1, 0, "link halted" }, { IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_GENERAL, DID_OK, 1, 0, "general transport error" }, { IBMVFC_VIOS_FAILURE, IBMVFC_CRQ_FAILURE, DID_REQUEUE, 1, 1, "CRQ failure" }, @@ -143,6 +143,7 @@ static void ibmvfc_npiv_login(struct ibmvfc_host *); static void ibmvfc_tgt_send_prli(struct ibmvfc_target *); static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *); static void ibmvfc_tgt_query_target(struct ibmvfc_target *); +static void ibmvfc_npiv_logout(struct ibmvfc_host *); static const char *unknown_error = "unknown error"; @@ -275,7 +276,7 @@ static int ibmvfc_get_err_result(struct ibmvfc_cmd *vfc_cmd) int fc_rsp_len = rsp->fcp_rsp_len; if ((rsp->flags & FCP_RSP_LEN_VALID) && - ((!fc_rsp_len && fc_rsp_len != 4 && fc_rsp_len != 8) || + ((fc_rsp_len && fc_rsp_len != 4 && fc_rsp_len != 8) || rsp->data.info.rsp_code)) return DID_ERROR << 16; @@ -431,6 +432,8 @@ static void ibmvfc_set_tgt_action(struct ibmvfc_target *tgt, case IBMVFC_TGT_ACTION_DEL_RPORT: break; default: + if (action == IBMVFC_TGT_ACTION_DEL_RPORT) + tgt->add_rport = 0; tgt->action = action; break; } @@ -475,6 +478,10 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost, if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) vhost->action = action; break; + case IBMVFC_HOST_ACTION_LOGO_WAIT: + if (vhost->action == IBMVFC_HOST_ACTION_LOGO) + vhost->action = action; + break; case IBMVFC_HOST_ACTION_INIT_WAIT: if (vhost->action == IBMVFC_HOST_ACTION_INIT) vhost->action = action; @@ -483,7 +490,7 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost, switch (vhost->action) { case IBMVFC_HOST_ACTION_INIT_WAIT: case IBMVFC_HOST_ACTION_NONE: - case IBMVFC_HOST_ACTION_TGT_ADD: + case IBMVFC_HOST_ACTION_TGT_DEL_FAILED: vhost->action = action; break; default: @@ -494,11 +501,11 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost, if (vhost->action == IBMVFC_HOST_ACTION_ALLOC_TGTS) vhost->action = action; break; + case IBMVFC_HOST_ACTION_LOGO: case IBMVFC_HOST_ACTION_INIT: case IBMVFC_HOST_ACTION_TGT_DEL: case IBMVFC_HOST_ACTION_QUERY_TGTS: case IBMVFC_HOST_ACTION_TGT_DEL_FAILED: - case IBMVFC_HOST_ACTION_TGT_ADD: case IBMVFC_HOST_ACTION_NONE: default: vhost->action = action; @@ -576,7 +583,7 @@ static void ibmvfc_init_host(struct ibmvfc_host *vhost, int relogin) } list_for_each_entry(tgt, &vhost->targets, queue) - tgt->need_login = 1; + ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); scsi_block_requests(vhost->host); ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT); vhost->job_step = ibmvfc_npiv_login; @@ -646,6 +653,7 @@ static void ibmvfc_release_crq_queue(struct ibmvfc_host *vhost) } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); vhost->state = IBMVFC_NO_CRQ; + vhost->logged_in = 0; dma_unmap_single(vhost->dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL); free_page((unsigned long)crq->msgs); } @@ -692,6 +700,7 @@ static int ibmvfc_reset_crq(struct 
ibmvfc_host *vhost) } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); vhost->state = IBMVFC_NO_CRQ; + vhost->logged_in = 0; ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE); /* Clean out the queue */ @@ -807,10 +816,10 @@ static void ibmvfc_purge_requests(struct ibmvfc_host *vhost, int error_code) } /** - * __ibmvfc_reset_host - Reset the connection to the server (no locking) + * ibmvfc_hard_reset_host - Reset the connection to the server by breaking the CRQ * @vhost: struct ibmvfc host to reset **/ -static void __ibmvfc_reset_host(struct ibmvfc_host *vhost) +static void ibmvfc_hard_reset_host(struct ibmvfc_host *vhost) { int rc; @@ -826,9 +835,25 @@ static void __ibmvfc_reset_host(struct ibmvfc_host *vhost) } /** - * ibmvfc_reset_host - Reset the connection to the server + * __ibmvfc_reset_host - Reset the connection to the server (no locking) * @vhost: struct ibmvfc host to reset **/ +static void __ibmvfc_reset_host(struct ibmvfc_host *vhost) +{ + if (vhost->logged_in && vhost->action != IBMVFC_HOST_ACTION_LOGO_WAIT && + !ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) { + scsi_block_requests(vhost->host); + ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_LOGO); + vhost->job_step = ibmvfc_npiv_logout; + wake_up(&vhost->work_wait_q); + } else + ibmvfc_hard_reset_host(vhost); +} + +/** + * ibmvfc_reset_host - Reset the connection to the server + * @vhost: ibmvfc host struct + **/ static void ibmvfc_reset_host(struct ibmvfc_host *vhost) { unsigned long flags; @@ -842,9 +867,13 @@ static void ibmvfc_reset_host(struct ibmvfc_host *vhost) * ibmvfc_retry_host_init - Retry host initialization if allowed * @vhost: ibmvfc host struct * + * Returns: 1 if init will be retried / 0 if not + * **/ -static void ibmvfc_retry_host_init(struct ibmvfc_host *vhost) +static int ibmvfc_retry_host_init(struct ibmvfc_host *vhost) { + int retry = 0; + if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) { vhost->delay_init = 1; if (++vhost->init_retries > IBMVFC_MAX_HOST_INIT_RETRIES) { @@ -853,11 +882,14 @@ static void ibmvfc_retry_host_init(struct ibmvfc_host *vhost) ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE); } else if (vhost->init_retries == IBMVFC_MAX_HOST_INIT_RETRIES) __ibmvfc_reset_host(vhost); - else + else { ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT); + retry = 1; + } } wake_up(&vhost->work_wait_q); + return retry; } /** @@ -1137,8 +1169,9 @@ static void ibmvfc_set_login_info(struct ibmvfc_host *vhost) login_info->partition_num = vhost->partition_number; login_info->vfc_frame_version = 1; login_info->fcp_version = 3; + login_info->flags = IBMVFC_FLUSH_ON_HALT; if (vhost->client_migrated) - login_info->flags = IBMVFC_CLIENT_MIGRATED; + login_info->flags |= IBMVFC_CLIENT_MIGRATED; login_info->max_cmds = max_requests + IBMVFC_NUM_INTERNAL_REQ; login_info->capabilities = IBMVFC_CAN_MIGRATE; @@ -1452,6 +1485,27 @@ static void ibmvfc_log_error(struct ibmvfc_event *evt) } /** + * ibmvfc_relogin - Log back into the specified device + * @sdev: scsi device struct + * + **/ +static void ibmvfc_relogin(struct scsi_device *sdev) +{ + struct ibmvfc_host *vhost = shost_priv(sdev->host); + struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); + struct ibmvfc_target *tgt; + + list_for_each_entry(tgt, &vhost->targets, queue) { + if (rport == tgt->rport) { + ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); + break; + } + } + + ibmvfc_reinit_host(vhost); +} + +/** * ibmvfc_scsi_done - Handle responses from commands * @evt: ibmvfc event to be handled * @@ -1483,7 +1537,7 @@ static void 
ibmvfc_scsi_done(struct ibmvfc_event *evt) if ((rsp->flags & FCP_SNS_LEN_VALID) && rsp->fcp_sense_len && rsp_len <= 8) memcpy(cmnd->sense_buffer, rsp->data.sense + rsp_len, sense_len); if ((vfc_cmd->status & IBMVFC_VIOS_FAILURE) && (vfc_cmd->error == IBMVFC_PLOGI_REQUIRED)) - ibmvfc_reinit_host(evt->vhost); + ibmvfc_relogin(cmnd->device); if (!cmnd->result && (!scsi_get_resid(cmnd) || (rsp->flags & FCP_RESID_OVER))) cmnd->result = (DID_ERROR << 16); @@ -2148,13 +2202,31 @@ static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq, struct ibmvfc_host *vhost) { const char *desc = ibmvfc_get_ae_desc(crq->event); + struct ibmvfc_target *tgt; ibmvfc_log(vhost, 3, "%s event received. scsi_id: %llx, wwpn: %llx," " node_name: %llx\n", desc, crq->scsi_id, crq->wwpn, crq->node_name); switch (crq->event) { - case IBMVFC_AE_LINK_UP: case IBMVFC_AE_RESUME: + switch (crq->link_state) { + case IBMVFC_AE_LS_LINK_DOWN: + ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN); + break; + case IBMVFC_AE_LS_LINK_DEAD: + ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD); + break; + case IBMVFC_AE_LS_LINK_UP: + case IBMVFC_AE_LS_LINK_BOUNCED: + default: + vhost->events_to_log |= IBMVFC_AE_LINKUP; + vhost->delay_init = 1; + __ibmvfc_reset_host(vhost); + break; + }; + + break; + case IBMVFC_AE_LINK_UP: vhost->events_to_log |= IBMVFC_AE_LINKUP; vhost->delay_init = 1; __ibmvfc_reset_host(vhost); @@ -2168,9 +2240,23 @@ static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq, case IBMVFC_AE_SCN_NPORT: case IBMVFC_AE_SCN_GROUP: vhost->events_to_log |= IBMVFC_AE_RSCN; + ibmvfc_reinit_host(vhost); + break; case IBMVFC_AE_ELS_LOGO: case IBMVFC_AE_ELS_PRLO: case IBMVFC_AE_ELS_PLOGI: + list_for_each_entry(tgt, &vhost->targets, queue) { + if (!crq->scsi_id && !crq->wwpn && !crq->node_name) + break; + if (crq->scsi_id && tgt->scsi_id != crq->scsi_id) + continue; + if (crq->wwpn && tgt->ids.port_name != crq->wwpn) + continue; + if (crq->node_name && tgt->ids.node_name != crq->node_name) + continue; + ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); + } + ibmvfc_reinit_host(vhost); break; case IBMVFC_AE_LINK_DOWN: @@ -2222,6 +2308,7 @@ static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost) return; case IBMVFC_CRQ_XPORT_EVENT: vhost->state = IBMVFC_NO_CRQ; + vhost->logged_in = 0; ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE); if (crq->format == IBMVFC_PARTITION_MIGRATED) { /* We need to re-setup the interpartition connection */ @@ -2299,7 +2386,7 @@ static int ibmvfc_scan_finished(struct Scsi_Host *shost, unsigned long time) done = 1; } - if (vhost->state != IBMVFC_NO_CRQ && vhost->action == IBMVFC_HOST_ACTION_NONE) + if (vhost->scan_complete) done = 1; spin_unlock_irqrestore(shost->host_lock, flags); return done; @@ -2434,14 +2521,6 @@ static ssize_t ibmvfc_show_host_partition_name(struct device *dev, vhost->login_buf->resp.partition_name); } -static struct device_attribute ibmvfc_host_partition_name = { - .attr = { - .name = "partition_name", - .mode = S_IRUGO, - }, - .show = ibmvfc_show_host_partition_name, -}; - static ssize_t ibmvfc_show_host_device_name(struct device *dev, struct device_attribute *attr, char *buf) { @@ -2452,14 +2531,6 @@ static ssize_t ibmvfc_show_host_device_name(struct device *dev, vhost->login_buf->resp.device_name); } -static struct device_attribute ibmvfc_host_device_name = { - .attr = { - .name = "device_name", - .mode = S_IRUGO, - }, - .show = ibmvfc_show_host_device_name, -}; - static ssize_t ibmvfc_show_host_loc_code(struct device *dev, struct device_attribute 
*attr, char *buf) { @@ -2470,14 +2541,6 @@ static ssize_t ibmvfc_show_host_loc_code(struct device *dev, vhost->login_buf->resp.port_loc_code); } -static struct device_attribute ibmvfc_host_loc_code = { - .attr = { - .name = "port_loc_code", - .mode = S_IRUGO, - }, - .show = ibmvfc_show_host_loc_code, -}; - static ssize_t ibmvfc_show_host_drc_name(struct device *dev, struct device_attribute *attr, char *buf) { @@ -2488,14 +2551,6 @@ static ssize_t ibmvfc_show_host_drc_name(struct device *dev, vhost->login_buf->resp.drc_name); } -static struct device_attribute ibmvfc_host_drc_name = { - .attr = { - .name = "drc_name", - .mode = S_IRUGO, - }, - .show = ibmvfc_show_host_drc_name, -}; - static ssize_t ibmvfc_show_host_npiv_version(struct device *dev, struct device_attribute *attr, char *buf) { @@ -2504,13 +2559,13 @@ static ssize_t ibmvfc_show_host_npiv_version(struct device *dev, return snprintf(buf, PAGE_SIZE, "%d\n", vhost->login_buf->resp.version); } -static struct device_attribute ibmvfc_host_npiv_version = { - .attr = { - .name = "npiv_version", - .mode = S_IRUGO, - }, - .show = ibmvfc_show_host_npiv_version, -}; +static ssize_t ibmvfc_show_host_capabilities(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct ibmvfc_host *vhost = shost_priv(shost); + return snprintf(buf, PAGE_SIZE, "%llx\n", vhost->login_buf->resp.capabilities); +} /** * ibmvfc_show_log_level - Show the adapter's error logging level @@ -2556,14 +2611,14 @@ static ssize_t ibmvfc_store_log_level(struct device *dev, return strlen(buf); } -static struct device_attribute ibmvfc_log_level_attr = { - .attr = { - .name = "log_level", - .mode = S_IRUGO | S_IWUSR, - }, - .show = ibmvfc_show_log_level, - .store = ibmvfc_store_log_level -}; +static DEVICE_ATTR(partition_name, S_IRUGO, ibmvfc_show_host_partition_name, NULL); +static DEVICE_ATTR(device_name, S_IRUGO, ibmvfc_show_host_device_name, NULL); +static DEVICE_ATTR(port_loc_code, S_IRUGO, ibmvfc_show_host_loc_code, NULL); +static DEVICE_ATTR(drc_name, S_IRUGO, ibmvfc_show_host_drc_name, NULL); +static DEVICE_ATTR(npiv_version, S_IRUGO, ibmvfc_show_host_npiv_version, NULL); +static DEVICE_ATTR(capabilities, S_IRUGO, ibmvfc_show_host_capabilities, NULL); +static DEVICE_ATTR(log_level, S_IRUGO | S_IWUSR, + ibmvfc_show_log_level, ibmvfc_store_log_level); #ifdef CONFIG_SCSI_IBMVFC_TRACE /** @@ -2612,12 +2667,13 @@ static struct bin_attribute ibmvfc_trace_attr = { #endif static struct device_attribute *ibmvfc_attrs[] = { - &ibmvfc_host_partition_name, - &ibmvfc_host_device_name, - &ibmvfc_host_loc_code, - &ibmvfc_host_drc_name, - &ibmvfc_host_npiv_version, - &ibmvfc_log_level_attr, + &dev_attr_partition_name, + &dev_attr_device_name, + &dev_attr_port_loc_code, + &dev_attr_drc_name, + &dev_attr_npiv_version, + &dev_attr_capabilities, + &dev_attr_log_level, NULL }; @@ -2774,15 +2830,19 @@ static void ibmvfc_init_tgt(struct ibmvfc_target *tgt, * @tgt: ibmvfc target struct * @job_step: initialization job step * + * Returns: 1 if step will be retried / 0 if not + * **/ -static void ibmvfc_retry_tgt_init(struct ibmvfc_target *tgt, +static int ibmvfc_retry_tgt_init(struct ibmvfc_target *tgt, void (*job_step) (struct ibmvfc_target *)) { if (++tgt->init_retries > IBMVFC_MAX_TGT_INIT_RETRIES) { ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); wake_up(&tgt->vhost->work_wait_q); + return 0; } else ibmvfc_init_tgt(tgt, job_step); + return 1; } /* Defined in FC-LS */ @@ -2831,7 +2891,7 @@ static void 
ibmvfc_tgt_prli_done(struct ibmvfc_event *evt) struct ibmvfc_process_login *rsp = &evt->xfer_iu->prli; struct ibmvfc_prli_svc_parms *parms = &rsp->parms; u32 status = rsp->common.status; - int index; + int index, level = IBMVFC_DEFAULT_LOG_LEVEL; vhost->discovery_threads--; ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE); @@ -2850,7 +2910,7 @@ static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt) tgt->ids.roles |= FC_PORT_ROLE_FCP_TARGET; if (parms->service_parms & IBMVFC_PRLI_INITIATOR_FUNC) tgt->ids.roles |= FC_PORT_ROLE_FCP_INITIATOR; - ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_ADD_RPORT); + tgt->add_rport = 1; } else ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); } else if (prli_rsp[index].retry) @@ -2867,13 +2927,14 @@ static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt) break; case IBMVFC_MAD_FAILED: default: - tgt_err(tgt, "Process Login failed: %s (%x:%x) rc=0x%02X\n", - ibmvfc_get_cmd_error(rsp->status, rsp->error), - rsp->status, rsp->error, status); if (ibmvfc_retry_cmd(rsp->status, rsp->error)) - ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli); + level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli); else ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); + + tgt_log(tgt, level, "Process Login failed: %s (%x:%x) rc=0x%02X\n", + ibmvfc_get_cmd_error(rsp->status, rsp->error), + rsp->status, rsp->error, status); break; }; @@ -2932,6 +2993,7 @@ static void ibmvfc_tgt_plogi_done(struct ibmvfc_event *evt) struct ibmvfc_host *vhost = evt->vhost; struct ibmvfc_port_login *rsp = &evt->xfer_iu->plogi; u32 status = rsp->common.status; + int level = IBMVFC_DEFAULT_LOG_LEVEL; vhost->discovery_threads--; ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE); @@ -2960,15 +3022,15 @@ static void ibmvfc_tgt_plogi_done(struct ibmvfc_event *evt) break; case IBMVFC_MAD_FAILED: default: - tgt_err(tgt, "Port Login failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n", - ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error, - ibmvfc_get_fc_type(rsp->fc_type), rsp->fc_type, - ibmvfc_get_ls_explain(rsp->fc_explain), rsp->fc_explain, status); - if (ibmvfc_retry_cmd(rsp->status, rsp->error)) - ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi); + level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi); else ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); + + tgt_log(tgt, level, "Port Login failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n", + ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error, + ibmvfc_get_fc_type(rsp->fc_type), rsp->fc_type, + ibmvfc_get_ls_explain(rsp->fc_explain), rsp->fc_explain, status); break; }; @@ -3129,13 +3191,13 @@ static void ibmvfc_tgt_adisc_done(struct ibmvfc_event *evt) case IBMVFC_MAD_SUCCESS: tgt_dbg(tgt, "ADISC succeeded\n"); if (ibmvfc_adisc_needs_plogi(mad, tgt)) - tgt->need_login = 1; + ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); break; case IBMVFC_MAD_DRIVER_FAILED: break; case IBMVFC_MAD_FAILED: default: - tgt->need_login = 1; + ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); fc_reason = (mad->fc_iu.response[1] & 0x00ff0000) >> 16; fc_explain = (mad->fc_iu.response[1] & 0x0000ff00) >> 8; tgt_info(tgt, "ADISC failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n", @@ -3322,6 +3384,7 @@ static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt) struct ibmvfc_host *vhost = evt->vhost; struct ibmvfc_query_tgt *rsp = &evt->xfer_iu->query_tgt; u32 status = rsp->common.status; + int level = IBMVFC_DEFAULT_LOG_LEVEL; vhost->discovery_threads--; ibmvfc_set_tgt_action(tgt, 
IBMVFC_TGT_ACTION_NONE); @@ -3341,19 +3404,19 @@ static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt) break; case IBMVFC_MAD_FAILED: default: - tgt_err(tgt, "Query Target failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n", - ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error, - ibmvfc_get_fc_type(rsp->fc_type), rsp->fc_type, - ibmvfc_get_gs_explain(rsp->fc_explain), rsp->fc_explain, status); - if ((rsp->status & IBMVFC_FABRIC_MAPPED) == IBMVFC_FABRIC_MAPPED && rsp->error == IBMVFC_UNABLE_TO_PERFORM_REQ && rsp->fc_explain == IBMVFC_PORT_NAME_NOT_REG) ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); else if (ibmvfc_retry_cmd(rsp->status, rsp->error)) - ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target); + level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target); else ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); + + tgt_log(tgt, level, "Query Target failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n", + ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error, + ibmvfc_get_fc_type(rsp->fc_type), rsp->fc_type, + ibmvfc_get_gs_explain(rsp->fc_explain), rsp->fc_explain, status); break; }; @@ -3420,7 +3483,7 @@ static int ibmvfc_alloc_target(struct ibmvfc_host *vhost, u64 scsi_id) } spin_unlock_irqrestore(vhost->host->host_lock, flags); - tgt = mempool_alloc(vhost->tgt_pool, GFP_KERNEL); + tgt = mempool_alloc(vhost->tgt_pool, GFP_NOIO); if (!tgt) { dev_err(vhost->dev, "Target allocation failure for scsi id %08llx\n", scsi_id); @@ -3472,6 +3535,7 @@ static void ibmvfc_discover_targets_done(struct ibmvfc_event *evt) struct ibmvfc_host *vhost = evt->vhost; struct ibmvfc_discover_targets *rsp = &evt->xfer_iu->discover_targets; u32 mad_status = rsp->common.status; + int level = IBMVFC_DEFAULT_LOG_LEVEL; switch (mad_status) { case IBMVFC_MAD_SUCCESS: @@ -3480,9 +3544,9 @@ static void ibmvfc_discover_targets_done(struct ibmvfc_event *evt) ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_ALLOC_TGTS); break; case IBMVFC_MAD_FAILED: - dev_err(vhost->dev, "Discover Targets failed: %s (%x:%x)\n", - ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error); - ibmvfc_retry_host_init(vhost); + level += ibmvfc_retry_host_init(vhost); + ibmvfc_log(vhost, level, "Discover Targets failed: %s (%x:%x)\n", + ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error); break; case IBMVFC_MAD_DRIVER_FAILED: break; @@ -3534,18 +3598,19 @@ static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt) u32 mad_status = evt->xfer_iu->npiv_login.common.status; struct ibmvfc_npiv_login_resp *rsp = &vhost->login_buf->resp; unsigned int npiv_max_sectors; + int level = IBMVFC_DEFAULT_LOG_LEVEL; switch (mad_status) { case IBMVFC_MAD_SUCCESS: ibmvfc_free_event(evt); break; case IBMVFC_MAD_FAILED: - dev_err(vhost->dev, "NPIV Login failed: %s (%x:%x)\n", - ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error); if (ibmvfc_retry_cmd(rsp->status, rsp->error)) - ibmvfc_retry_host_init(vhost); + level += ibmvfc_retry_host_init(vhost); else ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD); + ibmvfc_log(vhost, level, "NPIV Login failed: %s (%x:%x)\n", + ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error); ibmvfc_free_event(evt); return; case IBMVFC_MAD_CRQ_ERROR: @@ -3578,6 +3643,7 @@ static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt) return; } + vhost->logged_in = 1; npiv_max_sectors = min((uint)(rsp->max_dma_len >> 9), IBMVFC_MAX_SECTORS); dev_info(vhost->dev, "Host partition: %s, device: %s %s %s max sectors 
%u\n", rsp->partition_name, rsp->device_name, rsp->port_loc_code, @@ -3636,6 +3702,65 @@ static void ibmvfc_npiv_login(struct ibmvfc_host *vhost) }; /** + * ibmvfc_npiv_logout_done - Completion handler for NPIV Logout + * @vhost: ibmvfc host struct + * + **/ +static void ibmvfc_npiv_logout_done(struct ibmvfc_event *evt) +{ + struct ibmvfc_host *vhost = evt->vhost; + u32 mad_status = evt->xfer_iu->npiv_logout.common.status; + + ibmvfc_free_event(evt); + + switch (mad_status) { + case IBMVFC_MAD_SUCCESS: + if (list_empty(&vhost->sent) && + vhost->action == IBMVFC_HOST_ACTION_LOGO_WAIT) { + ibmvfc_init_host(vhost, 0); + return; + } + break; + case IBMVFC_MAD_FAILED: + case IBMVFC_MAD_NOT_SUPPORTED: + case IBMVFC_MAD_CRQ_ERROR: + case IBMVFC_MAD_DRIVER_FAILED: + default: + ibmvfc_dbg(vhost, "NPIV Logout failed. 0x%X\n", mad_status); + break; + } + + ibmvfc_hard_reset_host(vhost); +} + +/** + * ibmvfc_npiv_logout - Issue an NPIV Logout + * @vhost: ibmvfc host struct + * + **/ +static void ibmvfc_npiv_logout(struct ibmvfc_host *vhost) +{ + struct ibmvfc_npiv_logout_mad *mad; + struct ibmvfc_event *evt; + + evt = ibmvfc_get_event(vhost); + ibmvfc_init_event(evt, ibmvfc_npiv_logout_done, IBMVFC_MAD_FORMAT); + + mad = &evt->iu.npiv_logout; + memset(mad, 0, sizeof(*mad)); + mad->common.version = 1; + mad->common.opcode = IBMVFC_NPIV_LOGOUT; + mad->common.length = sizeof(struct ibmvfc_npiv_logout_mad); + + ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_LOGO_WAIT); + + if (!ibmvfc_send_event(evt, vhost, default_timeout)) + ibmvfc_dbg(vhost, "Sent NPIV logout\n"); + else + ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD); +} + +/** * ibmvfc_dev_init_to_do - Is there target initialization work to do? * @vhost: ibmvfc host struct * @@ -3671,6 +3796,7 @@ static int __ibmvfc_work_to_do(struct ibmvfc_host *vhost) switch (vhost->action) { case IBMVFC_HOST_ACTION_NONE: case IBMVFC_HOST_ACTION_INIT_WAIT: + case IBMVFC_HOST_ACTION_LOGO_WAIT: return 0; case IBMVFC_HOST_ACTION_TGT_INIT: case IBMVFC_HOST_ACTION_QUERY_TGTS: @@ -3683,9 +3809,9 @@ static int __ibmvfc_work_to_do(struct ibmvfc_host *vhost) if (tgt->action == IBMVFC_TGT_ACTION_INIT_WAIT) return 0; return 1; + case IBMVFC_HOST_ACTION_LOGO: case IBMVFC_HOST_ACTION_INIT: case IBMVFC_HOST_ACTION_ALLOC_TGTS: - case IBMVFC_HOST_ACTION_TGT_ADD: case IBMVFC_HOST_ACTION_TGT_DEL: case IBMVFC_HOST_ACTION_TGT_DEL_FAILED: case IBMVFC_HOST_ACTION_QUERY: @@ -3740,25 +3866,26 @@ static void ibmvfc_log_ae(struct ibmvfc_host *vhost, int events) static void ibmvfc_tgt_add_rport(struct ibmvfc_target *tgt) { struct ibmvfc_host *vhost = tgt->vhost; - struct fc_rport *rport = tgt->rport; + struct fc_rport *rport; unsigned long flags; - if (rport) { - tgt_dbg(tgt, "Setting rport roles\n"); - fc_remote_port_rolechg(rport, tgt->ids.roles); - spin_lock_irqsave(vhost->host->host_lock, flags); - ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE); + tgt_dbg(tgt, "Adding rport\n"); + rport = fc_remote_port_add(vhost->host, 0, &tgt->ids); + spin_lock_irqsave(vhost->host->host_lock, flags); + + if (rport && tgt->action == IBMVFC_TGT_ACTION_DEL_RPORT) { + tgt_dbg(tgt, "Deleting rport\n"); + list_del(&tgt->queue); spin_unlock_irqrestore(vhost->host->host_lock, flags); + fc_remote_port_delete(rport); + del_timer_sync(&tgt->timer); + kref_put(&tgt->kref, ibmvfc_release_tgt); return; } - tgt_dbg(tgt, "Adding rport\n"); - rport = fc_remote_port_add(vhost->host, 0, &tgt->ids); - spin_lock_irqsave(vhost->host->host_lock, flags); - tgt->rport = rport; - ibmvfc_set_tgt_action(tgt, 
IBMVFC_TGT_ACTION_NONE); if (rport) { tgt_dbg(tgt, "rport add succeeded\n"); + tgt->rport = rport; rport->maxframe_size = tgt->service_parms.common.bb_rcv_sz & 0x0fff; rport->supported_classes = 0; tgt->target_id = rport->scsi_target_id; @@ -3789,8 +3916,12 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost) vhost->events_to_log = 0; switch (vhost->action) { case IBMVFC_HOST_ACTION_NONE: + case IBMVFC_HOST_ACTION_LOGO_WAIT: case IBMVFC_HOST_ACTION_INIT_WAIT: break; + case IBMVFC_HOST_ACTION_LOGO: + vhost->job_step(vhost); + break; case IBMVFC_HOST_ACTION_INIT: BUG_ON(vhost->state != IBMVFC_INITIALIZING); if (vhost->delay_init) { @@ -3836,11 +3967,21 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost) if (vhost->state == IBMVFC_INITIALIZING) { if (vhost->action == IBMVFC_HOST_ACTION_TGT_DEL_FAILED) { - ibmvfc_set_host_state(vhost, IBMVFC_ACTIVE); - ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_ADD); - vhost->init_retries = 0; - spin_unlock_irqrestore(vhost->host->host_lock, flags); - scsi_unblock_requests(vhost->host); + if (vhost->reinit) { + vhost->reinit = 0; + scsi_block_requests(vhost->host); + ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY); + spin_unlock_irqrestore(vhost->host->host_lock, flags); + } else { + ibmvfc_set_host_state(vhost, IBMVFC_ACTIVE); + ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE); + wake_up(&vhost->init_wait_q); + schedule_work(&vhost->rport_add_work_q); + vhost->init_retries = 0; + spin_unlock_irqrestore(vhost->host->host_lock, flags); + scsi_unblock_requests(vhost->host); + } + return; } else { ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT); @@ -3871,24 +4012,6 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost) if (!ibmvfc_dev_init_to_do(vhost)) ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL_FAILED); break; - case IBMVFC_HOST_ACTION_TGT_ADD: - list_for_each_entry(tgt, &vhost->targets, queue) { - if (tgt->action == IBMVFC_TGT_ACTION_ADD_RPORT) { - spin_unlock_irqrestore(vhost->host->host_lock, flags); - ibmvfc_tgt_add_rport(tgt); - return; - } - } - - if (vhost->reinit && !ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) { - vhost->reinit = 0; - scsi_block_requests(vhost->host); - ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY); - } else { - ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE); - wake_up(&vhost->init_wait_q); - } - break; default: break; }; @@ -4118,6 +4241,56 @@ nomem: } /** + * ibmvfc_rport_add_thread - Worker thread for rport adds + * @work: work struct + * + **/ +static void ibmvfc_rport_add_thread(struct work_struct *work) +{ + struct ibmvfc_host *vhost = container_of(work, struct ibmvfc_host, + rport_add_work_q); + struct ibmvfc_target *tgt; + struct fc_rport *rport; + unsigned long flags; + int did_work; + + ENTER; + spin_lock_irqsave(vhost->host->host_lock, flags); + do { + did_work = 0; + if (vhost->state != IBMVFC_ACTIVE) + break; + + list_for_each_entry(tgt, &vhost->targets, queue) { + if (tgt->add_rport) { + did_work = 1; + tgt->add_rport = 0; + kref_get(&tgt->kref); + rport = tgt->rport; + if (!rport) { + spin_unlock_irqrestore(vhost->host->host_lock, flags); + ibmvfc_tgt_add_rport(tgt); + } else if (get_device(&rport->dev)) { + spin_unlock_irqrestore(vhost->host->host_lock, flags); + tgt_dbg(tgt, "Setting rport roles\n"); + fc_remote_port_rolechg(rport, tgt->ids.roles); + put_device(&rport->dev); + } + + kref_put(&tgt->kref, ibmvfc_release_tgt); + spin_lock_irqsave(vhost->host->host_lock, flags); + break; + } + } + } while(did_work); + + if (vhost->state == 
IBMVFC_ACTIVE) + vhost->scan_complete = 1; + spin_unlock_irqrestore(vhost->host->host_lock, flags); + LEAVE; +} + +/** * ibmvfc_probe - Adapter hot plug add entry point * @vdev: vio device struct * @id: vio device id struct @@ -4160,6 +4333,7 @@ static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id) strcpy(vhost->partition_name, "UNKNOWN"); init_waitqueue_head(&vhost->work_wait_q); init_waitqueue_head(&vhost->init_wait_q); + INIT_WORK(&vhost->rport_add_work_q, ibmvfc_rport_add_thread); if ((rc = ibmvfc_alloc_mem(vhost))) goto free_scsi_host; diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h index ca1dcf7a756..c2668d7d67f 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.h +++ b/drivers/scsi/ibmvscsi/ibmvfc.h @@ -29,8 +29,8 @@ #include "viosrp.h" #define IBMVFC_NAME "ibmvfc" -#define IBMVFC_DRIVER_VERSION "1.0.5" -#define IBMVFC_DRIVER_DATE "(March 19, 2009)" +#define IBMVFC_DRIVER_VERSION "1.0.6" +#define IBMVFC_DRIVER_DATE "(May 28, 2009)" #define IBMVFC_DEFAULT_TIMEOUT 60 #define IBMVFC_ADISC_CANCEL_TIMEOUT 45 @@ -57,9 +57,10 @@ * Ensure we have resources for ERP and initialization: * 1 for ERP * 1 for initialization + * 1 for NPIV Logout * 2 for each discovery thread */ -#define IBMVFC_NUM_INTERNAL_REQ (1 + 1 + (disc_threads * 2)) +#define IBMVFC_NUM_INTERNAL_REQ (1 + 1 + 1 + (disc_threads * 2)) #define IBMVFC_MAD_SUCCESS 0x00 #define IBMVFC_MAD_NOT_SUPPORTED 0xF1 @@ -127,6 +128,7 @@ enum ibmvfc_mad_types { IBMVFC_IMPLICIT_LOGOUT = 0x0040, IBMVFC_PASSTHRU = 0x0200, IBMVFC_TMF_MAD = 0x0100, + IBMVFC_NPIV_LOGOUT = 0x0800, }; struct ibmvfc_mad_common { @@ -143,6 +145,10 @@ struct ibmvfc_npiv_login_mad { struct srp_direct_buf buffer; }__attribute__((packed, aligned (8))); +struct ibmvfc_npiv_logout_mad { + struct ibmvfc_mad_common common; +}__attribute__((packed, aligned (8))); + #define IBMVFC_MAX_NAME 256 struct ibmvfc_npiv_login { @@ -201,7 +207,8 @@ struct ibmvfc_npiv_login_resp { #define IBMVFC_NATIVE_FC 0x01 #define IBMVFC_CAN_FLUSH_ON_HALT 0x08 u32 reserved; - u64 capabilites; + u64 capabilities; +#define IBMVFC_CAN_FLUSH_ON_HALT 0x08 u32 max_cmds; u32 scsi_id_sz; u64 max_dma_len; @@ -541,9 +548,17 @@ struct ibmvfc_crq_queue { dma_addr_t msg_token; }; +enum ibmvfc_ae_link_state { + IBMVFC_AE_LS_LINK_UP = 0x01, + IBMVFC_AE_LS_LINK_BOUNCED = 0x02, + IBMVFC_AE_LS_LINK_DOWN = 0x04, + IBMVFC_AE_LS_LINK_DEAD = 0x08, +}; + struct ibmvfc_async_crq { volatile u8 valid; - u8 pad[3]; + u8 link_state; + u8 pad[2]; u32 pad2; volatile u64 event; volatile u64 scsi_id; @@ -561,6 +576,7 @@ struct ibmvfc_async_crq_queue { union ibmvfc_iu { struct ibmvfc_mad_common mad_common; struct ibmvfc_npiv_login_mad npiv_login; + struct ibmvfc_npiv_logout_mad npiv_logout; struct ibmvfc_discover_targets discover_targets; struct ibmvfc_port_login plogi; struct ibmvfc_process_login prli; @@ -575,7 +591,6 @@ enum ibmvfc_target_action { IBMVFC_TGT_ACTION_NONE = 0, IBMVFC_TGT_ACTION_INIT, IBMVFC_TGT_ACTION_INIT_WAIT, - IBMVFC_TGT_ACTION_ADD_RPORT, IBMVFC_TGT_ACTION_DEL_RPORT, }; @@ -588,6 +603,7 @@ struct ibmvfc_target { int target_id; enum ibmvfc_target_action action; int need_login; + int add_rport; int init_retries; u32 cancel_key; struct ibmvfc_service_parms service_parms; @@ -627,6 +643,8 @@ struct ibmvfc_event_pool { enum ibmvfc_host_action { IBMVFC_HOST_ACTION_NONE = 0, + IBMVFC_HOST_ACTION_LOGO, + IBMVFC_HOST_ACTION_LOGO_WAIT, IBMVFC_HOST_ACTION_INIT, IBMVFC_HOST_ACTION_INIT_WAIT, IBMVFC_HOST_ACTION_QUERY, @@ -635,7 +653,6 @@ enum ibmvfc_host_action { 
IBMVFC_HOST_ACTION_ALLOC_TGTS, IBMVFC_HOST_ACTION_TGT_INIT, IBMVFC_HOST_ACTION_TGT_DEL_FAILED, - IBMVFC_HOST_ACTION_TGT_ADD, }; enum ibmvfc_host_state { @@ -682,6 +699,8 @@ struct ibmvfc_host { int client_migrated; int reinit; int delay_init; + int scan_complete; + int logged_in; int events_to_log; #define IBMVFC_AE_LINKUP 0x0001 #define IBMVFC_AE_LINKDOWN 0x0002 @@ -692,6 +711,7 @@ struct ibmvfc_host { void (*job_step) (struct ibmvfc_host *); struct task_struct *work_thread; struct tasklet_struct tasklet; + struct work_struct rport_add_work_q; wait_queue_head_t init_wait_q; wait_queue_head_t work_wait_q; }; @@ -707,6 +727,12 @@ struct ibmvfc_host { #define tgt_err(t, fmt, ...) \ dev_err((t)->vhost->dev, "%llX: " fmt, (t)->scsi_id, ##__VA_ARGS__) +#define tgt_log(t, level, fmt, ...) \ + do { \ + if ((t)->vhost->log_level >= level) \ + tgt_err(t, fmt, ##__VA_ARGS__); \ + } while (0) + #define ibmvfc_dbg(vhost, ...) \ DBG_CMD(dev_info((vhost)->dev, ##__VA_ARGS__)) diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c index c9aa7611e40..11d2602ae88 100644 --- a/drivers/scsi/ibmvscsi/ibmvscsi.c +++ b/drivers/scsi/ibmvscsi/ibmvscsi.c @@ -70,6 +70,7 @@ #include <linux/moduleparam.h> #include <linux/dma-mapping.h> #include <linux/delay.h> +#include <linux/of.h> #include <asm/firmware.h> #include <asm/vio.h> #include <asm/firmware.h> @@ -87,9 +88,15 @@ */ static int max_id = 64; static int max_channel = 3; -static int init_timeout = 5; +static int init_timeout = 300; +static int login_timeout = 60; +static int info_timeout = 30; +static int abort_timeout = 60; +static int reset_timeout = 60; static int max_requests = IBMVSCSI_MAX_REQUESTS_DEFAULT; static int max_events = IBMVSCSI_MAX_REQUESTS_DEFAULT + 2; +static int fast_fail = 1; +static int client_reserve = 1; static struct scsi_transport_template *ibmvscsi_transport_template; @@ -110,6 +117,10 @@ module_param_named(init_timeout, init_timeout, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(init_timeout, "Initialization timeout in seconds"); module_param_named(max_requests, max_requests, int, S_IRUGO); MODULE_PARM_DESC(max_requests, "Maximum requests for this adapter"); +module_param_named(fast_fail, fast_fail, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(fast_fail, "Enable fast fail. [Default=1]"); +module_param_named(client_reserve, client_reserve, int, S_IRUGO ); +MODULE_PARM_DESC(client_reserve, "Attempt client managed reserve/release"); /* ------------------------------------------------------------ * Routines for the event pool and event structs @@ -781,105 +792,53 @@ static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd, /* ------------------------------------------------------------ * Routines for driver initialization */ + /** - * adapter_info_rsp: - Handle response to MAD adapter info request - * @evt_struct: srp_event_struct with the response + * map_persist_bufs: - Pre-map persistent data for adapter logins + * @hostdata: ibmvscsi_host_data of host * - * Used as a "done" callback by when sending adapter_info. Gets called - * by ibmvscsi_handle_crq() -*/ -static void adapter_info_rsp(struct srp_event_struct *evt_struct) + * Map the capabilities and adapter info DMA buffers to avoid runtime failures. + * Return 1 on error, 0 on success. 
+ */ +static int map_persist_bufs(struct ibmvscsi_host_data *hostdata) { - struct ibmvscsi_host_data *hostdata = evt_struct->hostdata; - dma_unmap_single(hostdata->dev, - evt_struct->iu.mad.adapter_info.buffer, - evt_struct->iu.mad.adapter_info.common.length, - DMA_BIDIRECTIONAL); - if (evt_struct->xfer_iu->mad.adapter_info.common.status) { - dev_err(hostdata->dev, "error %d getting adapter info\n", - evt_struct->xfer_iu->mad.adapter_info.common.status); - } else { - dev_info(hostdata->dev, "host srp version: %s, " - "host partition %s (%d), OS %d, max io %u\n", - hostdata->madapter_info.srp_version, - hostdata->madapter_info.partition_name, - hostdata->madapter_info.partition_number, - hostdata->madapter_info.os_type, - hostdata->madapter_info.port_max_txu[0]); - - if (hostdata->madapter_info.port_max_txu[0]) - hostdata->host->max_sectors = - hostdata->madapter_info.port_max_txu[0] >> 9; - - if (hostdata->madapter_info.os_type == 3 && - strcmp(hostdata->madapter_info.srp_version, "1.6a") <= 0) { - dev_err(hostdata->dev, "host (Ver. %s) doesn't support large transfers\n", - hostdata->madapter_info.srp_version); - dev_err(hostdata->dev, "limiting scatterlists to %d\n", - MAX_INDIRECT_BUFS); - hostdata->host->sg_tablesize = MAX_INDIRECT_BUFS; - } + hostdata->caps_addr = dma_map_single(hostdata->dev, &hostdata->caps, + sizeof(hostdata->caps), DMA_BIDIRECTIONAL); + + if (dma_mapping_error(hostdata->dev, hostdata->caps_addr)) { + dev_err(hostdata->dev, "Unable to map capabilities buffer!\n"); + return 1; } + + hostdata->adapter_info_addr = dma_map_single(hostdata->dev, + &hostdata->madapter_info, + sizeof(hostdata->madapter_info), + DMA_BIDIRECTIONAL); + if (dma_mapping_error(hostdata->dev, hostdata->adapter_info_addr)) { + dev_err(hostdata->dev, "Unable to map adapter info buffer!\n"); + dma_unmap_single(hostdata->dev, hostdata->caps_addr, + sizeof(hostdata->caps), DMA_BIDIRECTIONAL); + return 1; + } + + return 0; } /** - * send_mad_adapter_info: - Sends the mad adapter info request - * and stores the result so it can be retrieved with - * sysfs. We COULD consider causing a failure if the - * returned SRP version doesn't match ours. - * @hostdata: ibmvscsi_host_data of host - * - * Returns zero if successful. 
-*/ -static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata) + * unmap_persist_bufs: - Unmap persistent data needed for adapter logins + * @hostdata: ibmvscsi_host_data of host + * + * Unmap the capabilities and adapter info DMA buffers + */ +static void unmap_persist_bufs(struct ibmvscsi_host_data *hostdata) { - struct viosrp_adapter_info *req; - struct srp_event_struct *evt_struct; - unsigned long flags; - dma_addr_t addr; - - evt_struct = get_event_struct(&hostdata->pool); - if (!evt_struct) { - dev_err(hostdata->dev, - "couldn't allocate an event for ADAPTER_INFO_REQ!\n"); - return; - } - - init_event_struct(evt_struct, - adapter_info_rsp, - VIOSRP_MAD_FORMAT, - init_timeout); - - req = &evt_struct->iu.mad.adapter_info; - memset(req, 0x00, sizeof(*req)); - - req->common.type = VIOSRP_ADAPTER_INFO_TYPE; - req->common.length = sizeof(hostdata->madapter_info); - req->buffer = addr = dma_map_single(hostdata->dev, - &hostdata->madapter_info, - sizeof(hostdata->madapter_info), - DMA_BIDIRECTIONAL); + dma_unmap_single(hostdata->dev, hostdata->caps_addr, + sizeof(hostdata->caps), DMA_BIDIRECTIONAL); - if (dma_mapping_error(hostdata->dev, req->buffer)) { - if (!firmware_has_feature(FW_FEATURE_CMO)) - dev_err(hostdata->dev, - "Unable to map request_buffer for " - "adapter_info!\n"); - free_event_struct(&hostdata->pool, evt_struct); - return; - } - - spin_lock_irqsave(hostdata->host->host_lock, flags); - if (ibmvscsi_send_srp_event(evt_struct, hostdata, init_timeout * 2)) { - dev_err(hostdata->dev, "couldn't send ADAPTER_INFO_REQ!\n"); - dma_unmap_single(hostdata->dev, - addr, - sizeof(hostdata->madapter_info), - DMA_BIDIRECTIONAL); - } - spin_unlock_irqrestore(hostdata->host->host_lock, flags); -}; + dma_unmap_single(hostdata->dev, hostdata->adapter_info_addr, + sizeof(hostdata->madapter_info), DMA_BIDIRECTIONAL); +} /** * login_rsp: - Handle response to SRP login request @@ -909,9 +868,7 @@ static void login_rsp(struct srp_event_struct *evt_struct) } dev_info(hostdata->dev, "SRP_LOGIN succeeded\n"); - - if (evt_struct->xfer_iu->srp.login_rsp.req_lim_delta < 0) - dev_err(hostdata->dev, "Invalid request_limit.\n"); + hostdata->client_migrated = 0; /* Now we know what the real request-limit is. * This value is set rather than added to request_limit because @@ -922,15 +879,12 @@ static void login_rsp(struct srp_event_struct *evt_struct) /* If we had any pending I/Os, kick them */ scsi_unblock_requests(hostdata->host); - - send_mad_adapter_info(hostdata); - return; } /** * send_srp_login: - Sends the srp login * @hostdata: ibmvscsi_host_data of host - * + * * Returns zero if successful. 
*/ static int send_srp_login(struct ibmvscsi_host_data *hostdata) @@ -939,22 +893,17 @@ static int send_srp_login(struct ibmvscsi_host_data *hostdata) unsigned long flags; struct srp_login_req *login; struct srp_event_struct *evt_struct = get_event_struct(&hostdata->pool); - if (!evt_struct) { - dev_err(hostdata->dev, "couldn't allocate an event for login req!\n"); - return FAILED; - } - init_event_struct(evt_struct, - login_rsp, - VIOSRP_SRP_FORMAT, - init_timeout); + BUG_ON(!evt_struct); + init_event_struct(evt_struct, login_rsp, + VIOSRP_SRP_FORMAT, login_timeout); login = &evt_struct->iu.srp.login_req; - memset(login, 0x00, sizeof(struct srp_login_req)); + memset(login, 0, sizeof(*login)); login->opcode = SRP_LOGIN_REQ; login->req_it_iu_len = sizeof(union srp_iu); login->req_buf_fmt = SRP_BUF_FORMAT_DIRECT | SRP_BUF_FORMAT_INDIRECT; - + spin_lock_irqsave(hostdata->host->host_lock, flags); /* Start out with a request limit of 0, since this is negotiated in * the login request we are just sending and login requests always @@ -962,13 +911,241 @@ static int send_srp_login(struct ibmvscsi_host_data *hostdata) */ atomic_set(&hostdata->request_limit, 0); - rc = ibmvscsi_send_srp_event(evt_struct, hostdata, init_timeout * 2); + rc = ibmvscsi_send_srp_event(evt_struct, hostdata, login_timeout * 2); spin_unlock_irqrestore(hostdata->host->host_lock, flags); dev_info(hostdata->dev, "sent SRP login\n"); return rc; }; /** + * capabilities_rsp: - Handle response to MAD adapter capabilities request + * @evt_struct: srp_event_struct with the response + * + * Used as a "done" callback by when sending adapter_info. + */ +static void capabilities_rsp(struct srp_event_struct *evt_struct) +{ + struct ibmvscsi_host_data *hostdata = evt_struct->hostdata; + + if (evt_struct->xfer_iu->mad.capabilities.common.status) { + dev_err(hostdata->dev, "error 0x%X getting capabilities info\n", + evt_struct->xfer_iu->mad.capabilities.common.status); + } else { + if (hostdata->caps.migration.common.server_support != SERVER_SUPPORTS_CAP) + dev_info(hostdata->dev, "Partition migration not supported\n"); + + if (client_reserve) { + if (hostdata->caps.reserve.common.server_support == + SERVER_SUPPORTS_CAP) + dev_info(hostdata->dev, "Client reserve enabled\n"); + else + dev_info(hostdata->dev, "Client reserve not supported\n"); + } + } + + send_srp_login(hostdata); +} + +/** + * send_mad_capabilities: - Sends the mad capabilities request + * and stores the result so it can be retrieved with + * @hostdata: ibmvscsi_host_data of host + */ +static void send_mad_capabilities(struct ibmvscsi_host_data *hostdata) +{ + struct viosrp_capabilities *req; + struct srp_event_struct *evt_struct; + unsigned long flags; + struct device_node *of_node = hostdata->dev->archdata.of_node; + const char *location; + + evt_struct = get_event_struct(&hostdata->pool); + BUG_ON(!evt_struct); + + init_event_struct(evt_struct, capabilities_rsp, + VIOSRP_MAD_FORMAT, info_timeout); + + req = &evt_struct->iu.mad.capabilities; + memset(req, 0, sizeof(*req)); + + hostdata->caps.flags = CAP_LIST_SUPPORTED; + if (hostdata->client_migrated) + hostdata->caps.flags |= CLIENT_MIGRATED; + + strncpy(hostdata->caps.name, dev_name(&hostdata->host->shost_gendev), + sizeof(hostdata->caps.name)); + hostdata->caps.name[sizeof(hostdata->caps.name) - 1] = '\0'; + + location = of_get_property(of_node, "ibm,loc-code", NULL); + location = location ? 
location : dev_name(hostdata->dev); + strncpy(hostdata->caps.loc, location, sizeof(hostdata->caps.loc)); + hostdata->caps.loc[sizeof(hostdata->caps.loc) - 1] = '\0'; + + req->common.type = VIOSRP_CAPABILITIES_TYPE; + req->buffer = hostdata->caps_addr; + + hostdata->caps.migration.common.cap_type = MIGRATION_CAPABILITIES; + hostdata->caps.migration.common.length = sizeof(hostdata->caps.migration); + hostdata->caps.migration.common.server_support = SERVER_SUPPORTS_CAP; + hostdata->caps.migration.ecl = 1; + + if (client_reserve) { + hostdata->caps.reserve.common.cap_type = RESERVATION_CAPABILITIES; + hostdata->caps.reserve.common.length = sizeof(hostdata->caps.reserve); + hostdata->caps.reserve.common.server_support = SERVER_SUPPORTS_CAP; + hostdata->caps.reserve.type = CLIENT_RESERVE_SCSI_2; + req->common.length = sizeof(hostdata->caps); + } else + req->common.length = sizeof(hostdata->caps) - sizeof(hostdata->caps.reserve); + + spin_lock_irqsave(hostdata->host->host_lock, flags); + if (ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2)) + dev_err(hostdata->dev, "couldn't send CAPABILITIES_REQ!\n"); + spin_unlock_irqrestore(hostdata->host->host_lock, flags); +}; + +/** + * fast_fail_rsp: - Handle response to MAD enable fast fail + * @evt_struct: srp_event_struct with the response + * + * Used as a "done" callback by when sending enable fast fail. Gets called + * by ibmvscsi_handle_crq() + */ +static void fast_fail_rsp(struct srp_event_struct *evt_struct) +{ + struct ibmvscsi_host_data *hostdata = evt_struct->hostdata; + u8 status = evt_struct->xfer_iu->mad.fast_fail.common.status; + + if (status == VIOSRP_MAD_NOT_SUPPORTED) + dev_err(hostdata->dev, "fast_fail not supported in server\n"); + else if (status == VIOSRP_MAD_FAILED) + dev_err(hostdata->dev, "fast_fail request failed\n"); + else if (status != VIOSRP_MAD_SUCCESS) + dev_err(hostdata->dev, "error 0x%X enabling fast_fail\n", status); + + send_mad_capabilities(hostdata); +} + +/** + * init_host - Start host initialization + * @hostdata: ibmvscsi_host_data of host + * + * Returns zero if successful. + */ +static int enable_fast_fail(struct ibmvscsi_host_data *hostdata) +{ + int rc; + unsigned long flags; + struct viosrp_fast_fail *fast_fail_mad; + struct srp_event_struct *evt_struct; + + if (!fast_fail) { + send_mad_capabilities(hostdata); + return 0; + } + + evt_struct = get_event_struct(&hostdata->pool); + BUG_ON(!evt_struct); + + init_event_struct(evt_struct, fast_fail_rsp, VIOSRP_MAD_FORMAT, info_timeout); + + fast_fail_mad = &evt_struct->iu.mad.fast_fail; + memset(fast_fail_mad, 0, sizeof(*fast_fail_mad)); + fast_fail_mad->common.type = VIOSRP_ENABLE_FAST_FAIL; + fast_fail_mad->common.length = sizeof(*fast_fail_mad); + + spin_lock_irqsave(hostdata->host->host_lock, flags); + rc = ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2); + spin_unlock_irqrestore(hostdata->host->host_lock, flags); + return rc; +} + +/** + * adapter_info_rsp: - Handle response to MAD adapter info request + * @evt_struct: srp_event_struct with the response + * + * Used as a "done" callback by when sending adapter_info. 
Gets called + * by ibmvscsi_handle_crq() +*/ +static void adapter_info_rsp(struct srp_event_struct *evt_struct) +{ + struct ibmvscsi_host_data *hostdata = evt_struct->hostdata; + + if (evt_struct->xfer_iu->mad.adapter_info.common.status) { + dev_err(hostdata->dev, "error %d getting adapter info\n", + evt_struct->xfer_iu->mad.adapter_info.common.status); + } else { + dev_info(hostdata->dev, "host srp version: %s, " + "host partition %s (%d), OS %d, max io %u\n", + hostdata->madapter_info.srp_version, + hostdata->madapter_info.partition_name, + hostdata->madapter_info.partition_number, + hostdata->madapter_info.os_type, + hostdata->madapter_info.port_max_txu[0]); + + if (hostdata->madapter_info.port_max_txu[0]) + hostdata->host->max_sectors = + hostdata->madapter_info.port_max_txu[0] >> 9; + + if (hostdata->madapter_info.os_type == 3 && + strcmp(hostdata->madapter_info.srp_version, "1.6a") <= 0) { + dev_err(hostdata->dev, "host (Ver. %s) doesn't support large transfers\n", + hostdata->madapter_info.srp_version); + dev_err(hostdata->dev, "limiting scatterlists to %d\n", + MAX_INDIRECT_BUFS); + hostdata->host->sg_tablesize = MAX_INDIRECT_BUFS; + } + } + + enable_fast_fail(hostdata); +} + +/** + * send_mad_adapter_info: - Sends the mad adapter info request + * and stores the result so it can be retrieved with + * sysfs. We COULD consider causing a failure if the + * returned SRP version doesn't match ours. + * @hostdata: ibmvscsi_host_data of host + * + * Returns zero if successful. +*/ +static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata) +{ + struct viosrp_adapter_info *req; + struct srp_event_struct *evt_struct; + unsigned long flags; + + evt_struct = get_event_struct(&hostdata->pool); + BUG_ON(!evt_struct); + + init_event_struct(evt_struct, + adapter_info_rsp, + VIOSRP_MAD_FORMAT, + info_timeout); + + req = &evt_struct->iu.mad.adapter_info; + memset(req, 0x00, sizeof(*req)); + + req->common.type = VIOSRP_ADAPTER_INFO_TYPE; + req->common.length = sizeof(hostdata->madapter_info); + req->buffer = hostdata->adapter_info_addr; + + spin_lock_irqsave(hostdata->host->host_lock, flags); + if (ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2)) + dev_err(hostdata->dev, "couldn't send ADAPTER_INFO_REQ!\n"); + spin_unlock_irqrestore(hostdata->host->host_lock, flags); +}; + +/** + * init_adapter: Start virtual adapter initialization sequence + * + */ +static void init_adapter(struct ibmvscsi_host_data *hostdata) +{ + send_mad_adapter_info(hostdata); +} + +/** * sync_completion: Signal that a synchronous command has completed * Note that after returning from this call, the evt_struct is freed. 
* the caller waiting on this completion shouldn't touch the evt_struct @@ -1029,7 +1206,7 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd) init_event_struct(evt, sync_completion, VIOSRP_SRP_FORMAT, - init_timeout); + abort_timeout); tsk_mgmt = &evt->iu.srp.tsk_mgmt; @@ -1043,7 +1220,7 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd) evt->sync_srp = &srp_rsp; init_completion(&evt->comp); - rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, init_timeout * 2); + rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, abort_timeout * 2); if (rsp_rc != SCSI_MLQUEUE_HOST_BUSY) break; @@ -1152,7 +1329,7 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd) init_event_struct(evt, sync_completion, VIOSRP_SRP_FORMAT, - init_timeout); + reset_timeout); tsk_mgmt = &evt->iu.srp.tsk_mgmt; @@ -1165,7 +1342,7 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd) evt->sync_srp = &srp_rsp; init_completion(&evt->comp); - rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, init_timeout * 2); + rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, reset_timeout * 2); if (rsp_rc != SCSI_MLQUEUE_HOST_BUSY) break; @@ -1281,7 +1458,7 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq, if ((rc = ibmvscsi_ops->send_crq(hostdata, 0xC002000000000000LL, 0)) == 0) { /* Now login */ - send_srp_login(hostdata); + init_adapter(hostdata); } else { dev_err(hostdata->dev, "Unable to send init rsp. rc=%ld\n", rc); } @@ -1291,7 +1468,7 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq, dev_info(hostdata->dev, "partner initialization complete\n"); /* Now login */ - send_srp_login(hostdata); + init_adapter(hostdata); break; default: dev_err(hostdata->dev, "unknown crq message type: %d\n", crq->format); @@ -1303,6 +1480,7 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq, if (crq->format == 0x06) { /* We need to re-setup the interpartition connection */ dev_info(hostdata->dev, "Re-enabling adapter!\n"); + hostdata->client_migrated = 1; purge_requests(hostdata, DID_REQUEUE); if ((ibmvscsi_ops->reenable_crq_queue(&hostdata->queue, hostdata)) || @@ -1397,7 +1575,7 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata, init_event_struct(evt_struct, sync_completion, VIOSRP_MAD_FORMAT, - init_timeout); + info_timeout); host_config = &evt_struct->iu.mad.host_config; @@ -1419,7 +1597,7 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata, init_completion(&evt_struct->comp); spin_lock_irqsave(hostdata->host->host_lock, flags); - rc = ibmvscsi_send_srp_event(evt_struct, hostdata, init_timeout * 2); + rc = ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2); spin_unlock_irqrestore(hostdata->host->host_lock, flags); if (rc == 0) wait_for_completion(&evt_struct->comp); @@ -1444,7 +1622,7 @@ static int ibmvscsi_slave_configure(struct scsi_device *sdev) spin_lock_irqsave(shost->host_lock, lock_flags); if (sdev->type == TYPE_DISK) { sdev->allow_restart = 1; - blk_queue_rq_timeout(sdev->request_queue, 60 * HZ); + blk_queue_rq_timeout(sdev->request_queue, 120 * HZ); } scsi_adjust_queue_depth(sdev, 0, shost->cmd_per_lun); spin_unlock_irqrestore(shost->host_lock, lock_flags); @@ -1471,6 +1649,46 @@ static int ibmvscsi_change_queue_depth(struct scsi_device *sdev, int qdepth) /* ------------------------------------------------------------ * sysfs attributes */ +static ssize_t show_host_vhost_loc(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct ibmvscsi_host_data *hostdata = 
shost_priv(shost); + int len; + + len = snprintf(buf, sizeof(hostdata->caps.loc), "%s\n", + hostdata->caps.loc); + return len; +} + +static struct device_attribute ibmvscsi_host_vhost_loc = { + .attr = { + .name = "vhost_loc", + .mode = S_IRUGO, + }, + .show = show_host_vhost_loc, +}; + +static ssize_t show_host_vhost_name(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct ibmvscsi_host_data *hostdata = shost_priv(shost); + int len; + + len = snprintf(buf, sizeof(hostdata->caps.name), "%s\n", + hostdata->caps.name); + return len; +} + +static struct device_attribute ibmvscsi_host_vhost_name = { + .attr = { + .name = "vhost_name", + .mode = S_IRUGO, + }, + .show = show_host_vhost_name, +}; + static ssize_t show_host_srp_version(struct device *dev, struct device_attribute *attr, char *buf) { @@ -1594,6 +1812,8 @@ static struct device_attribute ibmvscsi_host_config = { }; static struct device_attribute *ibmvscsi_attrs[] = { + &ibmvscsi_host_vhost_loc, + &ibmvscsi_host_vhost_name, &ibmvscsi_host_srp_version, &ibmvscsi_host_partition_name, &ibmvscsi_host_partition_number, @@ -1674,6 +1894,11 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id) atomic_set(&hostdata->request_limit, -1); hostdata->host->max_sectors = IBMVSCSI_MAX_SECTORS_DEFAULT; + if (map_persist_bufs(hostdata)) { + dev_err(&vdev->dev, "couldn't map persistent buffers\n"); + goto persist_bufs_failed; + } + rc = ibmvscsi_ops->init_crq_queue(&hostdata->queue, hostdata, max_events); if (rc != 0 && rc != H_RESOURCE) { dev_err(&vdev->dev, "couldn't initialize crq. rc=%d\n", rc); @@ -1687,6 +1912,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id) host->max_lun = 8; host->max_id = max_id; host->max_channel = max_channel; + host->max_cmd_len = 16; if (scsi_add_host(hostdata->host, hostdata->dev)) goto add_host_failed; @@ -1733,6 +1959,8 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id) init_pool_failed: ibmvscsi_ops->release_crq_queue(&hostdata->queue, hostdata, max_events); init_crq_failed: + unmap_persist_bufs(hostdata); + persist_bufs_failed: scsi_host_put(host); scsi_host_alloc_failed: return -1; @@ -1741,6 +1969,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id) static int ibmvscsi_remove(struct vio_dev *vdev) { struct ibmvscsi_host_data *hostdata = vdev->dev.driver_data; + unmap_persist_bufs(hostdata); release_event_pool(&hostdata->pool, hostdata); ibmvscsi_ops->release_crq_queue(&hostdata->queue, hostdata, max_events); diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.h b/drivers/scsi/ibmvscsi/ibmvscsi.h index 2d4339d5e16..76425303def 100644 --- a/drivers/scsi/ibmvscsi/ibmvscsi.h +++ b/drivers/scsi/ibmvscsi/ibmvscsi.h @@ -90,6 +90,7 @@ struct event_pool { /* all driver data associated with a host adapter */ struct ibmvscsi_host_data { atomic_t request_limit; + int client_migrated; struct device *dev; struct event_pool pool; struct crq_queue queue; @@ -97,6 +98,9 @@ struct ibmvscsi_host_data { struct list_head sent; struct Scsi_Host *host; struct mad_adapter_info_data madapter_info; + struct capabilities caps; + dma_addr_t caps_addr; + dma_addr_t adapter_info_addr; }; /* routines for managing a command/response queue */ diff --git a/drivers/scsi/ibmvscsi/viosrp.h b/drivers/scsi/ibmvscsi/viosrp.h index 204604501ad..2cd735d1d19 100644 --- a/drivers/scsi/ibmvscsi/viosrp.h +++ b/drivers/scsi/ibmvscsi/viosrp.h @@ -37,6 +37,7 @@ #define 
SRP_VERSION "16.a" #define SRP_MAX_IU_LEN 256 +#define SRP_MAX_LOC_LEN 32 union srp_iu { struct srp_login_req login_req; @@ -86,7 +87,37 @@ enum viosrp_mad_types { VIOSRP_EMPTY_IU_TYPE = 0x01, VIOSRP_ERROR_LOG_TYPE = 0x02, VIOSRP_ADAPTER_INFO_TYPE = 0x03, - VIOSRP_HOST_CONFIG_TYPE = 0x04 + VIOSRP_HOST_CONFIG_TYPE = 0x04, + VIOSRP_CAPABILITIES_TYPE = 0x05, + VIOSRP_ENABLE_FAST_FAIL = 0x08, +}; + +enum viosrp_mad_status { + VIOSRP_MAD_SUCCESS = 0x00, + VIOSRP_MAD_NOT_SUPPORTED = 0xF1, + VIOSRP_MAD_FAILED = 0xF7, +}; + +enum viosrp_capability_type { + MIGRATION_CAPABILITIES = 0x01, + RESERVATION_CAPABILITIES = 0x02, +}; + +enum viosrp_capability_support { + SERVER_DOES_NOT_SUPPORTS_CAP = 0x0, + SERVER_SUPPORTS_CAP = 0x01, + SERVER_CAP_DATA = 0x02, +}; + +enum viosrp_reserve_type { + CLIENT_RESERVE_SCSI_2 = 0x01, +}; + +enum viosrp_capability_flag { + CLIENT_MIGRATED = 0x01, + CLIENT_RECONNECT = 0x02, + CAP_LIST_SUPPORTED = 0x04, + CAP_LIST_DATA = 0x08, }; /* @@ -127,11 +158,46 @@ struct viosrp_host_config { u64 buffer; }; +struct viosrp_fast_fail { + struct mad_common common; +}; + +struct viosrp_capabilities { + struct mad_common common; + u64 buffer; +}; + +struct mad_capability_common { + u32 cap_type; + u16 length; + u16 server_support; +}; + +struct mad_reserve_cap { + struct mad_capability_common common; + u32 type; +}; + +struct mad_migration_cap { + struct mad_capability_common common; + u32 ecl; +}; + +struct capabilities{ + u32 flags; + char name[SRP_MAX_LOC_LEN]; + char loc[SRP_MAX_LOC_LEN]; + struct mad_migration_cap migration; + struct mad_reserve_cap reserve; +}; + union mad_iu { struct viosrp_empty_iu empty_iu; struct viosrp_error_log error_log; struct viosrp_adapter_info adapter_info; struct viosrp_host_config host_config; + struct viosrp_fast_fail fast_fail; + struct viosrp_capabilities capabilities; }; union viosrp_iu { |