path: root/drivers/scsi/lpfc/lpfc_scsi.c
author    Len Brown <len.brown@intel.com>    2008-10-22 23:57:26 -0400
committer Len Brown <len.brown@intel.com>    2008-10-23 00:11:07 -0400
commit    057316cc6a5b521b332a1d7ccc871cd60c904c74 (patch)
tree      4333e608da237c73ff69b10878025cca96dcb4c8 /drivers/scsi/lpfc/lpfc_scsi.c
parent    3e2dab9a1c2deb03c311eb3f83466009147ed4d3 (diff)
parent    2515ddc6db8eb49a79f0fe5e67ff09ac7c81eab4 (diff)
Merge branch 'linus' into test
Conflicts:
	MAINTAINERS
	arch/x86/kernel/acpi/boot.c
	arch/x86/kernel/acpi/sleep.c
	drivers/acpi/Kconfig
	drivers/pnp/Makefile
	drivers/pnp/quirks.c

Signed-off-by: Len Brown <len.brown@intel.com>
Diffstat (limited to 'drivers/scsi/lpfc/lpfc_scsi.c')
-rw-r--r--	drivers/scsi/lpfc/lpfc_scsi.c	514
1 file changed, 463 insertions(+), 51 deletions(-)
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 1bcebbd3dfa..bd186741182 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -32,6 +32,7 @@
#include "lpfc_version.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
+#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
@@ -42,6 +43,111 @@
#define LPFC_RESET_WAIT 2
#define LPFC_ABORT_WAIT 2
+/**
+ * lpfc_update_stats: Update statistical data for the command completion.
+ * @phba: Pointer to HBA object.
+ * @lpfc_cmd: lpfc scsi command object pointer.
+ *
+ * This function is called on command completion and updates the
+ * latency statistics for the completed command.
+ **/
+static void
+lpfc_update_stats(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
+{
+ struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
+ struct lpfc_nodelist *pnode = rdata->pnode;
+ struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
+ unsigned long flags;
+ struct Scsi_Host *shost = cmd->device->host;
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ unsigned long latency;
+ int i;
+
+ if (cmd->result)
+ return;
+
+ spin_lock_irqsave(shost->host_lock, flags);
+ if (!vport->stat_data_enabled ||
+ vport->stat_data_blocked ||
+ !pnode->lat_data ||
+ (phba->bucket_type == LPFC_NO_BUCKET)) {
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ return;
+ }
+ latency = jiffies_to_msecs(jiffies - lpfc_cmd->start_time);
+
+ if (phba->bucket_type == LPFC_LINEAR_BUCKET) {
+ i = (latency + phba->bucket_step - 1 - phba->bucket_base)/
+ phba->bucket_step;
+ if (i >= LPFC_MAX_BUCKET_COUNT)
+ i = LPFC_MAX_BUCKET_COUNT - 1;
+ } else {
+ for (i = 0; i < LPFC_MAX_BUCKET_COUNT-1; i++)
+ if (latency <= (phba->bucket_base +
+ ((1<<i)*phba->bucket_step)))
+ break;
+ }
+
+ pnode->lat_data[i].cmd_count++;
+ spin_unlock_irqrestore(shost->host_lock, flags);
+}
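As a worked illustration of the bucket selection above, the sketch below reproduces the two indexing schemes in user space. The LPFC_MAX_BUCKET_COUNT value is a stand-in for the real constant in the lpfc headers; only the arithmetic is taken from the patch.

/* Minimal user-space sketch of the latency bucketing above; the
 * constant is illustrative, not the driver's. */
#include <stdio.h>

#define LPFC_MAX_BUCKET_COUNT 10	/* assumed value for illustration */

/* Linear buckets: i = ceil((latency - base) / step), clamped. */
static int linear_bucket(unsigned long latency, unsigned long base,
			 unsigned long step)
{
	unsigned long i = (latency + step - 1 - base) / step;

	return i < LPFC_MAX_BUCKET_COUNT ? (int)i : LPFC_MAX_BUCKET_COUNT - 1;
}

/* Power-of-2 buckets: bucket i ends at base + (1 << i) * step. */
static int pow2_bucket(unsigned long latency, unsigned long base,
		       unsigned long step)
{
	int i;

	for (i = 0; i < LPFC_MAX_BUCKET_COUNT - 1; i++)
		if (latency <= base + ((1UL << i) * step))
			break;
	return i;
}

int main(void)
{
	printf("linear(25ms): bucket %d\n", linear_bucket(25, 0, 10)); /* 3 */
	printf("pow2(25ms):   bucket %d\n", pow2_bucket(25, 0, 10));   /* 2 */
	return 0;
}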
+
+
+/**
+ * lpfc_send_sdev_queuedepth_change_event: Posts a queuedepth change event.
+ * @phba: Pointer to HBA context object.
+ * @vport: Pointer to vport object.
+ * @ndlp: Pointer to FC node associated with the target.
+ * @lun: Lun number of the scsi device.
+ * @old_val: Old value of the queue depth.
+ * @new_val: New value of the queue depth.
+ *
+ * This function sends an event to the mgmt application indicating
+ * there is a change in the scsi device queue depth.
+ **/
+static void
+lpfc_send_sdev_queuedepth_change_event(struct lpfc_hba *phba,
+ struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp,
+ uint32_t lun,
+ uint32_t old_val,
+ uint32_t new_val)
+{
+ struct lpfc_fast_path_event *fast_path_evt;
+ unsigned long flags;
+
+ fast_path_evt = lpfc_alloc_fast_evt(phba);
+ if (!fast_path_evt)
+ return;
+
+ fast_path_evt->un.queue_depth_evt.scsi_event.event_type =
+ FC_REG_SCSI_EVENT;
+ fast_path_evt->un.queue_depth_evt.scsi_event.subcategory =
+ LPFC_EVENT_VARQUEDEPTH;
+
+ /* Report all luns with change in queue depth */
+ fast_path_evt->un.queue_depth_evt.scsi_event.lun = lun;
+ if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
+ memcpy(&fast_path_evt->un.queue_depth_evt.scsi_event.wwpn,
+ &ndlp->nlp_portname, sizeof(struct lpfc_name));
+ memcpy(&fast_path_evt->un.queue_depth_evt.scsi_event.wwnn,
+ &ndlp->nlp_nodename, sizeof(struct lpfc_name));
+ }
+
+ fast_path_evt->un.queue_depth_evt.oldval = old_val;
+ fast_path_evt->un.queue_depth_evt.newval = new_val;
+ fast_path_evt->vport = vport;
+
+ fast_path_evt->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
+ spin_lock_irqsave(&phba->hbalock, flags);
+ list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+ lpfc_worker_wake_up(phba);
+
+ return;
+}
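The function above follows a post-and-wake pattern: allocate an event, fill it in, append it to the worker's list under the hbalock, and wake the worker thread. Below is a user-space analogue with pthreads, purely illustrative; the names and the singly linked list are inventions, not driver structures.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct evt {
	struct evt *next;
	int old_depth, new_depth;
};

static struct evt *work_list;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wake = PTHREAD_COND_INITIALIZER;

/* Producer: mirrors alloc -> fill -> add to work list -> wake worker. */
static void post_event(int old_depth, int new_depth)
{
	struct evt *e = calloc(1, sizeof(*e));

	if (!e)
		return;			/* best effort, as in the driver */
	e->old_depth = old_depth;
	e->new_depth = new_depth;

	pthread_mutex_lock(&lock);
	e->next = work_list;
	work_list = e;
	pthread_cond_signal(&wake);	/* analogue of lpfc_worker_wake_up() */
	pthread_mutex_unlock(&lock);
}

/* Consumer: drains the list, as the lpfc worker thread would. */
static void *worker(void *arg)
{
	pthread_mutex_lock(&lock);
	while (!work_list)
		pthread_cond_wait(&wake, &lock);
	while (work_list) {
		struct evt *e = work_list;

		work_list = e->next;
		printf("queue depth %d -> %d\n", e->old_depth, e->new_depth);
		free(e);
	}
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, worker, NULL);
	post_event(32, 16);
	pthread_join(t, NULL);
	return 0;
}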
+
/*
* This function is called with no lock held when there is a resource
* error in driver or in firmware.
@@ -117,9 +223,10 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
struct lpfc_vport **vports;
struct Scsi_Host *shost;
struct scsi_device *sdev;
- unsigned long new_queue_depth;
+ unsigned long new_queue_depth, old_queue_depth;
unsigned long num_rsrc_err, num_cmd_success;
int i;
+ struct lpfc_rport_data *rdata;
num_rsrc_err = atomic_read(&phba->num_rsrc_err);
num_cmd_success = atomic_read(&phba->num_cmd_success);
@@ -137,6 +244,7 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
else
new_queue_depth = sdev->queue_depth -
new_queue_depth;
+ old_queue_depth = sdev->queue_depth;
if (sdev->ordered_tags)
scsi_adjust_queue_depth(sdev,
MSG_ORDERED_TAG,
@@ -145,6 +253,13 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
scsi_adjust_queue_depth(sdev,
MSG_SIMPLE_TAG,
new_queue_depth);
+ rdata = sdev->hostdata;
+ if (rdata)
+ lpfc_send_sdev_queuedepth_change_event(
+ phba, vports[i],
+ rdata->pnode,
+ sdev->lun, old_queue_depth,
+ new_queue_depth);
}
}
lpfc_destroy_vport_work_array(phba, vports);
@@ -159,6 +274,7 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
struct Scsi_Host *shost;
struct scsi_device *sdev;
int i;
+ struct lpfc_rport_data *rdata;
vports = lpfc_create_vport_work_array(phba);
if (vports != NULL)
@@ -176,6 +292,14 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
scsi_adjust_queue_depth(sdev,
MSG_SIMPLE_TAG,
sdev->queue_depth+1);
+ rdata = sdev->hostdata;
+ if (rdata)
+ lpfc_send_sdev_queuedepth_change_event(
+ phba, vports[i],
+ rdata->pnode,
+ sdev->lun,
+ sdev->queue_depth - 1,
+ sdev->queue_depth);
}
}
lpfc_destroy_vport_work_array(phba, vports);
@@ -183,6 +307,35 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
atomic_set(&phba->num_cmd_success, 0);
}
+/**
+ * lpfc_scsi_dev_block: Set all SCSI hosts to block state.
+ * @phba: Pointer to HBA context object.
+ *
+ * This function walks the vport list and sets each SCSI host to the block
+ * state by invoking fc_remote_port_delete(). This function is invoked by
+ * EEH when the device's PCI slot has been permanently disabled.
+ **/
+void
+lpfc_scsi_dev_block(struct lpfc_hba *phba)
+{
+ struct lpfc_vport **vports;
+ struct Scsi_Host *shost;
+ struct scsi_device *sdev;
+ struct fc_rport *rport;
+ int i;
+
+ vports = lpfc_create_vport_work_array(phba);
+ if (vports != NULL)
+ for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
+ shost = lpfc_shost_from_vport(vports[i]);
+ shost_for_each_device(sdev, shost) {
+ rport = starget_to_rport(scsi_target(sdev));
+ fc_remote_port_delete(rport);
+ }
+ }
+ lpfc_destroy_vport_work_array(phba, vports);
+}
+
/*
* This routine allocates a scsi buffer, which contains all the necessary
* information needed to initiate a SCSI I/O. The non-DMAable buffer region
@@ -198,7 +351,9 @@ lpfc_new_scsi_buf(struct lpfc_vport *vport)
struct lpfc_scsi_buf *psb;
struct ulp_bde64 *bpl;
IOCB_t *iocb;
- dma_addr_t pdma_phys;
+ dma_addr_t pdma_phys_fcp_cmd;
+ dma_addr_t pdma_phys_fcp_rsp;
+ dma_addr_t pdma_phys_bpl;
uint16_t iotag;
psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
@@ -238,40 +393,60 @@ lpfc_new_scsi_buf(struct lpfc_vport *vport)
/* Initialize local short-hand pointers. */
bpl = psb->fcp_bpl;
- pdma_phys = psb->dma_handle;
+ pdma_phys_fcp_cmd = psb->dma_handle;
+ pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd);
+ pdma_phys_bpl = psb->dma_handle + sizeof(struct fcp_cmnd) +
+ sizeof(struct fcp_rsp);
/*
* The first two bdes are the FCP_CMD and FCP_RSP. The balance are sg
* list bdes. Initialize the first two and leave the rest for
* queuecommand.
*/
- bpl->addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys));
- bpl->addrLow = le32_to_cpu(putPaddrLow(pdma_phys));
- bpl->tus.f.bdeSize = sizeof (struct fcp_cmnd);
- bpl->tus.f.bdeFlags = BUFF_USE_CMND;
- bpl->tus.w = le32_to_cpu(bpl->tus.w);
- bpl++;
+ bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd));
+ bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd));
+ bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd);
+ bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+ bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w);
/* Setup the physical region for the FCP RSP */
- pdma_phys += sizeof (struct fcp_cmnd);
- bpl->addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys));
- bpl->addrLow = le32_to_cpu(putPaddrLow(pdma_phys));
- bpl->tus.f.bdeSize = sizeof (struct fcp_rsp);
- bpl->tus.f.bdeFlags = (BUFF_USE_CMND | BUFF_USE_RCV);
- bpl->tus.w = le32_to_cpu(bpl->tus.w);
+ bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp));
+ bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp));
+ bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp);
+ bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+ bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w);
/*
* Since the IOCB for the FCP I/O is built into this lpfc_scsi_buf,
* initialize it with all known data now.
*/
- pdma_phys += (sizeof (struct fcp_rsp));
iocb = &psb->cur_iocbq.iocb;
iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
- iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys);
- iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys);
- iocb->un.fcpi64.bdl.bdeSize = (2 * sizeof (struct ulp_bde64));
- iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDL;
- iocb->ulpBdeCount = 1;
+ if (phba->sli_rev == 3) {
+ /* fill in immediate fcp command BDE */
+ iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED;
+ iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
+ iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t,
+ unsli3.fcp_ext.icd);
+ iocb->un.fcpi64.bdl.addrHigh = 0;
+ iocb->ulpBdeCount = 0;
+ iocb->ulpLe = 0;
+ /* fill in response BDE */
+ iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+ iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize =
+ sizeof(struct fcp_rsp);
+ iocb->unsli3.fcp_ext.rbde.addrLow =
+ putPaddrLow(pdma_phys_fcp_rsp);
+ iocb->unsli3.fcp_ext.rbde.addrHigh =
+ putPaddrHigh(pdma_phys_fcp_rsp);
+ } else {
+ iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
+ iocb->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
+ iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_bpl);
+ iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_bpl);
+ iocb->ulpBdeCount = 1;
+ iocb->ulpLe = 1;
+ }
iocb->ulpClass = CLASS3;
return psb;
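The rework above splits the single pdma_phys cursor into three named offsets within one DMA allocation: the FCP command first, then the FCP response, then the BPL. A sketch of the offset arithmetic, with stand-in struct sizes (the real fcp_cmnd/fcp_rsp layouts live in the lpfc headers):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

struct fcp_cmnd { uint8_t bytes[32]; };	/* stand-in size only */
struct fcp_rsp  { uint8_t bytes[24]; };	/* stand-in size only */

int main(void)
{
	uint64_t dma_handle = 0x10000;	/* pretend bus address */
	uint64_t pdma_phys_fcp_cmd = dma_handle;
	uint64_t pdma_phys_fcp_rsp = dma_handle + sizeof(struct fcp_cmnd);
	uint64_t pdma_phys_bpl = dma_handle + sizeof(struct fcp_cmnd) +
				 sizeof(struct fcp_rsp);

	printf("cmd=0x%" PRIx64 " rsp=0x%" PRIx64 " bpl=0x%" PRIx64 "\n",
	       pdma_phys_fcp_cmd, pdma_phys_fcp_rsp, pdma_phys_bpl);
	return 0;
}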
@@ -313,8 +488,9 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
+ struct ulp_bde64 *data_bde = iocb_cmd->unsli3.fcp_ext.dbde;
dma_addr_t physaddr;
- uint32_t i, num_bde = 0;
+ uint32_t num_bde = 0;
int nseg, datadir = scsi_cmnd->sc_data_direction;
/*
@@ -352,37 +528,159 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
* during probe that limits the number of sg elements in any
* single scsi command. Just run through the seg_cnt and format
* the bde's.
+ * When using SLI-3 the driver will try to fit all the BDEs into
+ * the IOCB. If it can't then the BDEs get added to a BPL as it
+ * does for SLI-2 mode.
*/
- scsi_for_each_sg(scsi_cmnd, sgel, nseg, i) {
+ scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
physaddr = sg_dma_address(sgel);
- bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
- bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
- bpl->tus.f.bdeSize = sg_dma_len(sgel);
- if (datadir == DMA_TO_DEVICE)
- bpl->tus.f.bdeFlags = 0;
- else
- bpl->tus.f.bdeFlags = BUFF_USE_RCV;
- bpl->tus.w = le32_to_cpu(bpl->tus.w);
- bpl++;
- num_bde++;
+ if (phba->sli_rev == 3 &&
+ nseg <= LPFC_EXT_DATA_BDE_COUNT) {
+ data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+ data_bde->tus.f.bdeSize = sg_dma_len(sgel);
+ data_bde->addrLow = putPaddrLow(physaddr);
+ data_bde->addrHigh = putPaddrHigh(physaddr);
+ data_bde++;
+ } else {
+ bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+ bpl->tus.f.bdeSize = sg_dma_len(sgel);
+ bpl->tus.w = le32_to_cpu(bpl->tus.w);
+ bpl->addrLow =
+ le32_to_cpu(putPaddrLow(physaddr));
+ bpl->addrHigh =
+ le32_to_cpu(putPaddrHigh(physaddr));
+ bpl++;
+ }
}
}
/*
* Finish initializing those IOCB fields that are dependent on the
- * scsi_cmnd request_buffer. Note that the bdeSize is explicitly
- * reinitialized since all iocb memory resources are used many times
- * for transmit, receive, and continuation bpl's.
+ * scsi_cmnd request_buffer. Note that for SLI-2 the bdeSize is
+ * explicitly reinitialized and for SLI-3 the extended bde count is
+ * explicitly reinitialized since all iocb memory resources are reused.
*/
- iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof (struct ulp_bde64));
- iocb_cmd->un.fcpi64.bdl.bdeSize +=
- (num_bde * sizeof (struct ulp_bde64));
- iocb_cmd->ulpBdeCount = 1;
- iocb_cmd->ulpLe = 1;
+ if (phba->sli_rev == 3) {
+ if (num_bde > LPFC_EXT_DATA_BDE_COUNT) {
+ /*
+ * The extended IOCB format can only fit 3 BDEs or a BPL.
+ * This I/O has more than 3 BDEs, so the first data bde will
+ * be a BPL that is filled in here.
+ */
+ physaddr = lpfc_cmd->dma_handle;
+ data_bde->tus.f.bdeFlags = BUFF_TYPE_BLP_64;
+ data_bde->tus.f.bdeSize = (num_bde *
+ sizeof(struct ulp_bde64));
+ physaddr += (sizeof(struct fcp_cmnd) +
+ sizeof(struct fcp_rsp) +
+ (2 * sizeof(struct ulp_bde64)));
+ data_bde->addrHigh = putPaddrHigh(physaddr);
+ data_bde->addrLow = putPaddrLow(physaddr);
+ /* ebde count includes the response bde and data bpl */
+ iocb_cmd->unsli3.fcp_ext.ebde_count = 2;
+ } else {
+ /* ebde count includes the response bde and data bdes */
+ iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
+ }
+ } else {
+ iocb_cmd->un.fcpi64.bdl.bdeSize =
+ ((num_bde + 2) * sizeof(struct ulp_bde64));
+ }
fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
return 0;
}
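The branch structure above boils down to one placement decision, summarized in the sketch below. Per the comment in the patch, the extended IOCB holds at most 3 data BDEs; the helper name and the standalone form are inventions for illustration.

#include <stdio.h>

#define LPFC_EXT_DATA_BDE_COUNT 3	/* per the patch comment: 3 BDEs fit */

static const char *sli3_data_layout(int sli_rev, int nseg)
{
	if (sli_rev == 3 && nseg <= LPFC_EXT_DATA_BDE_COUNT)
		return "embedded BDEs in the IOCB";
	return "external BPL (BUFF_TYPE_BLP_64)";
}

int main(void)
{
	printf("sli3, 2 segs: %s\n", sli3_data_layout(3, 2));
	printf("sli3, 8 segs: %s\n", sli3_data_layout(3, 8));
	printf("sli2, 2 segs: %s\n", sli3_data_layout(2, 2));
	return 0;
}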
+/**
+ * lpfc_send_scsi_error_event: Posts an event when there is a SCSI error.
+ * @phba: Pointer to hba context object.
+ * @vport: Pointer to vport object.
+ * @lpfc_cmd: Pointer to lpfc scsi command which reported the error.
+ * @rsp_iocb: Pointer to response iocb object which reported error.
+ *
+ * This function posts an event when a SCSI command reports an
+ * error from the scsi device.
+ **/
+static void
+lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
+ struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb) {
+ struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
+ struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
+ uint32_t resp_info = fcprsp->rspStatus2;
+ uint32_t scsi_status = fcprsp->rspStatus3;
+ uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
+ struct lpfc_fast_path_event *fast_path_evt = NULL;
+ struct lpfc_nodelist *pnode = lpfc_cmd->rdata->pnode;
+ unsigned long flags;
+
+ /* If there is a queue full or busy condition, send a scsi event */
+ if ((cmnd->result == SAM_STAT_TASK_SET_FULL) ||
+ (cmnd->result == SAM_STAT_BUSY)) {
+ fast_path_evt = lpfc_alloc_fast_evt(phba);
+ if (!fast_path_evt)
+ return;
+ fast_path_evt->un.scsi_evt.event_type =
+ FC_REG_SCSI_EVENT;
+ fast_path_evt->un.scsi_evt.subcategory =
+ (cmnd->result == SAM_STAT_TASK_SET_FULL) ?
+ LPFC_EVENT_QFULL : LPFC_EVENT_DEVBSY;
+ fast_path_evt->un.scsi_evt.lun = cmnd->device->lun;
+ memcpy(&fast_path_evt->un.scsi_evt.wwpn,
+ &pnode->nlp_portname, sizeof(struct lpfc_name));
+ memcpy(&fast_path_evt->un.scsi_evt.wwnn,
+ &pnode->nlp_nodename, sizeof(struct lpfc_name));
+ } else if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen &&
+ ((cmnd->cmnd[0] == READ_10) || (cmnd->cmnd[0] == WRITE_10))) {
+ fast_path_evt = lpfc_alloc_fast_evt(phba);
+ if (!fast_path_evt)
+ return;
+ fast_path_evt->un.check_cond_evt.scsi_event.event_type =
+ FC_REG_SCSI_EVENT;
+ fast_path_evt->un.check_cond_evt.scsi_event.subcategory =
+ LPFC_EVENT_CHECK_COND;
+ fast_path_evt->un.check_cond_evt.scsi_event.lun =
+ cmnd->device->lun;
+ memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwpn,
+ &pnode->nlp_portname, sizeof(struct lpfc_name));
+ memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwnn,
+ &pnode->nlp_nodename, sizeof(struct lpfc_name));
+ fast_path_evt->un.check_cond_evt.sense_key =
+ cmnd->sense_buffer[2] & 0xf;
+ fast_path_evt->un.check_cond_evt.asc = cmnd->sense_buffer[12];
+ fast_path_evt->un.check_cond_evt.ascq = cmnd->sense_buffer[13];
+ } else if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
+ fcpi_parm &&
+ ((be32_to_cpu(fcprsp->rspResId) != fcpi_parm) ||
+ ((scsi_status == SAM_STAT_GOOD) &&
+ !(resp_info & (RESID_UNDER | RESID_OVER))))) {
+ /*
+ * If the status is good or the resid does not match fcpi_parm, and
+ * fcpi_parm is valid, then there is a read_check error
+ */
+ fast_path_evt = lpfc_alloc_fast_evt(phba);
+ if (!fast_path_evt)
+ return;
+ fast_path_evt->un.read_check_error.header.event_type =
+ FC_REG_FABRIC_EVENT;
+ fast_path_evt->un.read_check_error.header.subcategory =
+ LPFC_EVENT_FCPRDCHKERR;
+ memcpy(&fast_path_evt->un.read_check_error.header.wwpn,
+ &pnode->nlp_portname, sizeof(struct lpfc_name));
+ memcpy(&fast_path_evt->un.read_check_error.header.wwnn,
+ &pnode->nlp_nodename, sizeof(struct lpfc_name));
+ fast_path_evt->un.read_check_error.lun = cmnd->device->lun;
+ fast_path_evt->un.read_check_error.opcode = cmnd->cmnd[0];
+ fast_path_evt->un.read_check_error.fcpiparam =
+ fcpi_parm;
+ } else
+ return;
+
+ fast_path_evt->vport = vport;
+ spin_lock_irqsave(&phba->hbalock, flags);
+ list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+ lpfc_worker_wake_up(phba);
+ return;
+}
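The check-condition branch above pulls the sense key from byte 2 (low nibble) and the ASC/ASCQ pair from bytes 12 and 13, which is the fixed-format sense layout from the SCSI spec. A minimal demo on a canned sense buffer:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Fixed-format sense: MEDIUM ERROR (key 3), ASC/ASCQ 0x11/0x00
	 * (unrecovered read error). */
	uint8_t sense[18] = { 0x70, 0, 0x03, 0, 0, 0, 0, 10,
			      0, 0, 0, 0, 0x11, 0x00 };

	printf("key=%#x asc=%#x ascq=%#x\n",
	       sense[2] & 0xf, sense[12], sense[13]);
	return 0;
}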
static void
lpfc_scsi_unprep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
{
@@ -411,6 +709,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
uint32_t rsplen = 0;
uint32_t logit = LOG_FCP | LOG_FCP_ERROR;
+
/*
* If this is a task management command, there is no
* scsi packet associated with this lpfc_cmd. The driver
@@ -526,6 +825,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
out:
cmnd->result = ScsiResult(host_status, scsi_status);
+ lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, rsp_iocb);
}
static void
@@ -542,9 +842,11 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
struct scsi_device *sdev, *tmp_sdev;
int depth = 0;
unsigned long flags;
+ struct lpfc_fast_path_event *fast_path_evt;
lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4];
lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
+ atomic_dec(&pnode->cmd_pending);
if (lpfc_cmd->status) {
if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
@@ -570,12 +872,36 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
break;
case IOSTAT_NPORT_BSY:
case IOSTAT_FABRIC_BSY:
- cmd->result = ScsiResult(DID_BUS_BUSY, 0);
+ cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0);
+ fast_path_evt = lpfc_alloc_fast_evt(phba);
+ if (!fast_path_evt)
+ break;
+ fast_path_evt->un.fabric_evt.event_type =
+ FC_REG_FABRIC_EVENT;
+ fast_path_evt->un.fabric_evt.subcategory =
+ (lpfc_cmd->status == IOSTAT_NPORT_BSY) ?
+ LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY;
+ if (pnode && NLP_CHK_NODE_ACT(pnode)) {
+ memcpy(&fast_path_evt->un.fabric_evt.wwpn,
+ &pnode->nlp_portname,
+ sizeof(struct lpfc_name));
+ memcpy(&fast_path_evt->un.fabric_evt.wwnn,
+ &pnode->nlp_nodename,
+ sizeof(struct lpfc_name));
+ }
+ fast_path_evt->vport = vport;
+ fast_path_evt->work_evt.evt =
+ LPFC_EVT_FASTPATH_MGMT_EVT;
+ spin_lock_irqsave(&phba->hbalock, flags);
+ list_add_tail(&fast_path_evt->work_evt.evt_listp,
+ &phba->work_list);
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+ lpfc_worker_wake_up(phba);
break;
case IOSTAT_LOCAL_REJECT:
- if (lpfc_cmd->result == RJT_UNAVAIL_PERM ||
+ if (lpfc_cmd->result == IOERR_INVALID_RPI ||
lpfc_cmd->result == IOERR_NO_RESOURCES ||
- lpfc_cmd->result == RJT_LOGIN_REQUIRED) {
+ lpfc_cmd->result == IOERR_ABORT_REQUESTED) {
cmd->result = ScsiResult(DID_REQUEUE, 0);
break;
} /* else: fall through */
@@ -586,7 +912,8 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
if (!pnode || !NLP_CHK_NODE_ACT(pnode)
|| (pnode->nlp_state != NLP_STE_MAPPED_NODE))
- cmd->result = ScsiResult(DID_BUS_BUSY, SAM_STAT_BUSY);
+ cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED,
+ SAM_STAT_BUSY);
} else {
cmd->result = ScsiResult(DID_OK, 0);
}
@@ -602,8 +929,32 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
scsi_get_resid(cmd));
}
+ lpfc_update_stats(phba, lpfc_cmd);
result = cmd->result;
sdev = cmd->device;
+ if (vport->cfg_max_scsicmpl_time &&
+ time_after(jiffies, lpfc_cmd->start_time +
+ msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
+ spin_lock_irqsave(sdev->host->host_lock, flags);
+ if ((pnode->cmd_qdepth > atomic_read(&pnode->cmd_pending) &&
+ (atomic_read(&pnode->cmd_pending) > LPFC_MIN_TGT_QDEPTH) &&
+ ((cmd->cmnd[0] == READ_10) || (cmd->cmnd[0] == WRITE_10))))
+ pnode->cmd_qdepth = atomic_read(&pnode->cmd_pending);
+
+ pnode->last_change_time = jiffies;
+ spin_unlock_irqrestore(sdev->host->host_lock, flags);
+ } else if ((pnode->cmd_qdepth < LPFC_MAX_TGT_QDEPTH) &&
+ time_after(jiffies, pnode->last_change_time +
+ msecs_to_jiffies(LPFC_TGTQ_INTERVAL))) {
+ spin_lock_irqsave(sdev->host->host_lock, flags);
+ pnode->cmd_qdepth += pnode->cmd_qdepth *
+ LPFC_TGTQ_RAMPUP_PCENT / 100;
+ if (pnode->cmd_qdepth > LPFC_MAX_TGT_QDEPTH)
+ pnode->cmd_qdepth = LPFC_MAX_TGT_QDEPTH;
+ pnode->last_change_time = jiffies;
+ spin_unlock_irqrestore(sdev->host->host_lock, flags);
+ }
+
lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
cmd->scsi_done(cmd);
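The second branch in the hunk above grows the target queue depth by a fixed percentage per interval and clamps it at the maximum. A sketch of that arithmetic; both constant values here are assumptions, not the driver's:

#include <stdio.h>

#define LPFC_MAX_TGT_QDEPTH	0xFFFF	/* assumed value */
#define LPFC_TGTQ_RAMPUP_PCENT	5	/* assumed: grow 5% per interval */

static unsigned int ramp_up(unsigned int qdepth)
{
	qdepth += qdepth * LPFC_TGTQ_RAMPUP_PCENT / 100;
	if (qdepth > LPFC_MAX_TGT_QDEPTH)
		qdepth = LPFC_MAX_TGT_QDEPTH;
	return qdepth;
}

int main(void)
{
	unsigned int q = 100;
	int i;

	for (i = 0; i < 5; i++) {
		q = ramp_up(q);
		printf("interval %d: qdepth %u\n", i + 1, q);
	}
	return 0;
}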
@@ -647,6 +998,9 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
pnode->last_ramp_up_time = jiffies;
}
}
+ lpfc_send_sdev_queuedepth_change_event(phba, vport, pnode,
+ 0xFFFFFFFF,
+ sdev->queue_depth - 1, sdev->queue_depth);
}
/*
@@ -676,6 +1030,9 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
"0711 detected queue full - lun queue "
"depth adjusted to %d.\n", depth);
+ lpfc_send_sdev_queuedepth_change_event(phba, vport,
+ pnode, 0xFFFFFFFF,
+ depth+1, depth);
}
}
@@ -692,6 +1049,24 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
lpfc_release_scsi_buf(phba, lpfc_cmd);
}
+/**
+ * lpfc_fcpcmd_to_iocb: Copy the fcp_cmnd data into the IOCB.
+ * @data: A pointer to the immediate command data portion of the IOCB.
+ * @fcp_cmnd: The FCP Command that is provided by the SCSI layer.
+ *
+ * The routine copies the entire FCP command from @fcp_cmnd to @data while
+ * byte swapping the data to big endian format for transmission on the wire.
+ **/
+static void
+lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd)
+{
+ int i, j;
+ for (i = 0, j = 0; i < sizeof(struct fcp_cmnd);
+ i += sizeof(uint32_t), j++) {
+ ((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]);
+ }
+}
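lpfc_fcpcmd_to_iocb() above is a word-wise endian-converting copy. The user-space analogue below uses htonl() in place of cpu_to_be32(); on a little-endian host it prints the bytes in wire order:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

/* Copy 'words' 32-bit words from src to dst, converting to big endian. */
static void copy_to_wire(uint32_t *dst, const uint32_t *src, size_t words)
{
	size_t j;

	for (j = 0; j < words; j++)
		dst[j] = htonl(src[j]);	/* stands in for cpu_to_be32() */
}

int main(void)
{
	uint32_t cmd[2] = { 0x01020304, 0xa0b0c0d0 };
	uint32_t wire[2];
	const uint8_t *b = (const uint8_t *)wire;

	copy_to_wire(wire, cmd, 2);
	printf("%02x %02x %02x %02x\n", b[0], b[1], b[2], b[3]);
	return 0;
}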
+
static void
lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
struct lpfc_nodelist *pnode)
@@ -758,7 +1133,8 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
fcp_cmnd->fcpCntl3 = 0;
phba->fc4ControlRequests++;
}
-
+ if (phba->sli_rev == 3)
+ lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd);
/*
* Finish initializing those IOCB fields that are independent
* of the scsi_cmnd request_buffer
@@ -798,11 +1174,13 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
piocb = &piocbq->iocb;
fcp_cmnd = lpfc_cmd->fcp_cmnd;
- int_to_scsilun(lun, &lpfc_cmd->fcp_cmnd->fcp_lun);
+ /* Clear out any old data in the FCP command area */
+ memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));
+ int_to_scsilun(lun, &fcp_cmnd->fcp_lun);
fcp_cmnd->fcpCntl2 = task_mgmt_cmd;
-
+ if (vport->phba->sli_rev == 3)
+ lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd);
piocb->ulpCommand = CMD_FCP_ICMND64_CR;
-
piocb->ulpContext = ndlp->nlp_rpi;
if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
piocb->ulpFCP2Rcvy = 1;
@@ -967,9 +1345,12 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
* transport is still transitioning.
*/
if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
- cmnd->result = ScsiResult(DID_BUS_BUSY, 0);
+ cmnd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0);
goto out_fail_command;
}
+ if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth)
+ goto out_host_busy;
+
lpfc_cmd = lpfc_get_scsi_buf(phba);
if (lpfc_cmd == NULL) {
lpfc_adjust_queue_depth(phba);
@@ -980,6 +1361,7 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
goto out_host_busy;
}
+ lpfc_cmd->start_time = jiffies;
/*
* Store the midlayer's command structure for the completion phase
* and complete the command initialization.
@@ -987,6 +1369,7 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
lpfc_cmd->pCmd = cmnd;
lpfc_cmd->rdata = rdata;
lpfc_cmd->timeout = 0;
+ lpfc_cmd->start_time = jiffies;
cmnd->host_scribble = (unsigned char *)lpfc_cmd;
cmnd->scsi_done = done;
@@ -996,6 +1379,7 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);
+ atomic_inc(&ndlp->cmd_pending);
err = lpfc_sli_issue_iocb(phba, &phba->sli.ring[psli->fcp_ring],
&lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
if (err)
@@ -1010,6 +1394,7 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
return 0;
out_host_busy_free_buf:
+ atomic_dec(&ndlp->cmd_pending);
lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
lpfc_release_scsi_buf(phba, lpfc_cmd);
out_host_busy:
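The cmd_pending changes in the hunks above add a per-target admission gate: queuecommand refuses new I/O once the in-flight count reaches cmd_qdepth, increments the counter on issue, and the completion path decrements it. A C11-atomics sketch of the same shape; the names and the single global target are inventions:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int cmd_pending;		/* in-flight commands to the target */
static int cmd_qdepth = 2;		/* current target queue depth */

static bool try_issue(void)
{
	if (atomic_load(&cmd_pending) >= cmd_qdepth)
		return false;		/* driver returns host-busy here */
	atomic_fetch_add(&cmd_pending, 1);
	return true;
}

static void complete_cmd(void)
{
	atomic_fetch_sub(&cmd_pending, 1);
}

int main(void)
{
	printf("%d %d %d\n", try_issue(), try_issue(), try_issue()); /* 1 1 0 */
	complete_cmd();
	printf("%d\n", try_issue());				     /* 1 */
	return 0;
}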
@@ -1145,6 +1530,7 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
int ret = SUCCESS;
int status;
int cnt;
+ struct lpfc_scsi_event_header scsi_event;
lpfc_block_error_handler(cmnd);
/*
@@ -1163,6 +1549,19 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
break;
pnode = rdata->pnode;
}
+
+ scsi_event.event_type = FC_REG_SCSI_EVENT;
+ scsi_event.subcategory = LPFC_EVENT_TGTRESET;
+ scsi_event.lun = 0;
+ memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
+ memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));
+
+ fc_host_post_vendor_event(shost,
+ fc_get_event_number(),
+ sizeof(scsi_event),
+ (char *)&scsi_event,
+ SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
+
if (!rdata || pnode->nlp_state != NLP_STE_MAPPED_NODE) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
"0721 LUN Reset rport "
@@ -1242,10 +1641,23 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
struct lpfc_hba *phba = vport->phba;
struct lpfc_nodelist *ndlp = NULL;
int match;
- int ret = SUCCESS, status, i;
+ int ret = SUCCESS, status = SUCCESS, i;
int cnt;
struct lpfc_scsi_buf * lpfc_cmd;
unsigned long later;
+ struct lpfc_scsi_event_header scsi_event;
+
+ scsi_event.event_type = FC_REG_SCSI_EVENT;
+ scsi_event.subcategory = LPFC_EVENT_BUSRESET;
+ scsi_event.lun = 0;
+ memcpy(scsi_event.wwpn, &vport->fc_portname, sizeof(struct lpfc_name));
+ memcpy(scsi_event.wwnn, &vport->fc_nodename, sizeof(struct lpfc_name));
+
+ fc_host_post_vendor_event(shost,
+ fc_get_event_number(),
+ sizeof(scsi_event),
+ (char *)&scsi_event,
+ SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
lpfc_block_error_handler(cmnd);
/*