Diffstat (limited to 'drivers/scsi')
103 files changed, 2545 insertions, 1099 deletions
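Editor's note: the first two hunks below (Kconfig and Makefile) delete the scsi_wait_scan module whose purpose the removed help text describes. For context, the module itself was only a stub whose init routine blocked until driver probing and all async SCSI scans had finished; a from-memory sketch of what it looked like (not the verbatim deleted file):

#include <linux/module.h>
#include <linux/device.h>
#include <scsi/scsi_scan.h>

static int __init wait_scan_init(void)
{
	/* wait for driver probing, then for the async scans it kicked off */
	wait_for_device_probe();
	scsi_complete_async_scans();
	return 0;
}

static void __exit wait_scan_exit(void)
{
}

MODULE_DESCRIPTION("SCSI wait for scans");
MODULE_LICENSE("GPL");

late_initcall(wait_scan_init);
module_exit(wait_scan_exit);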
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index e9559782d3e..74bf1aa7af4 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig @@ -263,23 +263,6 @@ config SCSI_SCAN_ASYNC You can override this choice by specifying "scsi_mod.scan=sync" or async on the kernel's command line. -config SCSI_WAIT_SCAN - tristate # No prompt here, this is an invisible symbol. - default m - depends on SCSI - depends on MODULES -# scsi_wait_scan is a loadable module which waits until all the async scans are -# complete. The idea is to use it in initrd/ initramfs scripts. You modprobe -# it after all the modprobes of the root SCSI drivers and it will wait until -# they have all finished scanning their buses before allowing the boot to -# proceed. (This method is not applicable if targets boot independently in -# parallel with the initiator, or with transports with non-deterministic target -# discovery schemes, or if a transport driver does not support scsi_wait_scan.) -# -# This symbol is not exposed as a prompt because little is to be gained by -# disabling it, whereas people who accidentally switch it off may wonder why -# their mkinitrd gets into trouble. - menu "SCSI Transports" depends on SCSI @@ -461,7 +444,7 @@ config SCSI_ACARD config SCSI_AHA152X tristate "Adaptec AHA152X/2825 support" - depends on ISA && SCSI && !64BIT + depends on ISA && SCSI select SCSI_SPI_ATTRS select CHECK_SIGNATURE ---help--- diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile index 1a3368b0861..888f73a4aae 100644 --- a/drivers/scsi/Makefile +++ b/drivers/scsi/Makefile @@ -159,8 +159,6 @@ obj-$(CONFIG_SCSI_OSD_INITIATOR) += osd/ # This goes last, so that "real" scsi devices probe earlier obj-$(CONFIG_SCSI_DEBUG) += scsi_debug.o -obj-$(CONFIG_SCSI_WAIT_SCAN) += scsi_wait_scan.o - scsi_mod-y += scsi.o hosts.o scsi_ioctl.o constants.o \ scsicam.o scsi_error.o scsi_lib.o scsi_mod-$(CONFIG_SCSI_DMA) += scsi_lib_dma.o diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c index 52551662d10..d79457ac8be 100644 --- a/drivers/scsi/aacraid/aachba.c +++ b/drivers/scsi/aacraid/aachba.c @@ -135,6 +135,8 @@ struct inquiry_data { static unsigned long aac_build_sg(struct scsi_cmnd* scsicmd, struct sgmap* sgmap); static unsigned long aac_build_sg64(struct scsi_cmnd* scsicmd, struct sgmap64* psg); static unsigned long aac_build_sgraw(struct scsi_cmnd* scsicmd, struct sgmapraw* psg); +static unsigned long aac_build_sgraw2(struct scsi_cmnd *scsicmd, struct aac_raw_io2 *rio2, int sg_max); +static int aac_convert_sgraw2(struct aac_raw_io2 *rio2, int pages, int nseg, int nseg_new); static int aac_send_srb_fib(struct scsi_cmnd* scsicmd); #ifdef AAC_DETAILED_STATUS_INFO static char *aac_get_status_string(u32 status); @@ -152,10 +154,14 @@ int aac_commit = -1; int startup_timeout = 180; int aif_timeout = 120; int aac_sync_mode; /* Only Sync. transfer - disabled */ +int aac_convert_sgl = 1; /* convert non-conformable s/g list - enabled */ module_param(aac_sync_mode, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(aac_sync_mode, "Force sync. transfer mode" " 0=off, 1=on"); +module_param(aac_convert_sgl, int, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(aac_convert_sgl, "Convert non-conformable s/g list" + " 0=off, 1=on"); module_param(nondasd, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(nondasd, "Control scanning of hba for nondasd devices." 
" 0=off, 1=on"); @@ -963,25 +969,44 @@ static void io_callback(void *context, struct fib * fibptr); static int aac_read_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count) { - u16 fibsize; - struct aac_raw_io *readcmd; + struct aac_dev *dev = fib->dev; + u16 fibsize, command; + aac_fib_init(fib); - readcmd = (struct aac_raw_io *) fib_data(fib); - readcmd->block[0] = cpu_to_le32((u32)(lba&0xffffffff)); - readcmd->block[1] = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32)); - readcmd->count = cpu_to_le32(count<<9); - readcmd->cid = cpu_to_le16(scmd_id(cmd)); - readcmd->flags = cpu_to_le16(IO_TYPE_READ); - readcmd->bpTotal = 0; - readcmd->bpComplete = 0; + if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2 && !dev->sync_mode) { + struct aac_raw_io2 *readcmd2; + readcmd2 = (struct aac_raw_io2 *) fib_data(fib); + memset(readcmd2, 0, sizeof(struct aac_raw_io2)); + readcmd2->blockLow = cpu_to_le32((u32)(lba&0xffffffff)); + readcmd2->blockHigh = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32)); + readcmd2->byteCount = cpu_to_le32(count<<9); + readcmd2->cid = cpu_to_le16(scmd_id(cmd)); + readcmd2->flags = cpu_to_le16(RIO2_IO_TYPE_READ); + aac_build_sgraw2(cmd, readcmd2, dev->scsi_host_ptr->sg_tablesize); + command = ContainerRawIo2; + fibsize = sizeof(struct aac_raw_io2) + + ((le32_to_cpu(readcmd2->sgeCnt)-1) * sizeof(struct sge_ieee1212)); + } else { + struct aac_raw_io *readcmd; + readcmd = (struct aac_raw_io *) fib_data(fib); + readcmd->block[0] = cpu_to_le32((u32)(lba&0xffffffff)); + readcmd->block[1] = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32)); + readcmd->count = cpu_to_le32(count<<9); + readcmd->cid = cpu_to_le16(scmd_id(cmd)); + readcmd->flags = cpu_to_le16(RIO_TYPE_READ); + readcmd->bpTotal = 0; + readcmd->bpComplete = 0; + aac_build_sgraw(cmd, &readcmd->sg); + command = ContainerRawIo; + fibsize = sizeof(struct aac_raw_io) + + ((le32_to_cpu(readcmd->sg.count)-1) * sizeof(struct sgentryraw)); + } - aac_build_sgraw(cmd, &readcmd->sg); - fibsize = sizeof(struct aac_raw_io) + ((le32_to_cpu(readcmd->sg.count) - 1) * sizeof (struct sgentryraw)); BUG_ON(fibsize > (fib->dev->max_fib_size - sizeof(struct aac_fibhdr))); /* * Now send the Fib to the adapter */ - return aac_fib_send(ContainerRawIo, + return aac_fib_send(command, fib, fibsize, FsaNormal, @@ -1052,28 +1077,50 @@ static int aac_read_block(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 static int aac_write_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count, int fua) { - u16 fibsize; - struct aac_raw_io *writecmd; + struct aac_dev *dev = fib->dev; + u16 fibsize, command; + aac_fib_init(fib); - writecmd = (struct aac_raw_io *) fib_data(fib); - writecmd->block[0] = cpu_to_le32((u32)(lba&0xffffffff)); - writecmd->block[1] = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32)); - writecmd->count = cpu_to_le32(count<<9); - writecmd->cid = cpu_to_le16(scmd_id(cmd)); - writecmd->flags = (fua && ((aac_cache & 5) != 1) && - (((aac_cache & 5) != 5) || !fib->dev->cache_protected)) ? 
- cpu_to_le16(IO_TYPE_WRITE|IO_SUREWRITE) : - cpu_to_le16(IO_TYPE_WRITE); - writecmd->bpTotal = 0; - writecmd->bpComplete = 0; - - aac_build_sgraw(cmd, &writecmd->sg); - fibsize = sizeof(struct aac_raw_io) + ((le32_to_cpu(writecmd->sg.count) - 1) * sizeof (struct sgentryraw)); + if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2 && !dev->sync_mode) { + struct aac_raw_io2 *writecmd2; + writecmd2 = (struct aac_raw_io2 *) fib_data(fib); + memset(writecmd2, 0, sizeof(struct aac_raw_io2)); + writecmd2->blockLow = cpu_to_le32((u32)(lba&0xffffffff)); + writecmd2->blockHigh = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32)); + writecmd2->byteCount = cpu_to_le32(count<<9); + writecmd2->cid = cpu_to_le16(scmd_id(cmd)); + writecmd2->flags = (fua && ((aac_cache & 5) != 1) && + (((aac_cache & 5) != 5) || !fib->dev->cache_protected)) ? + cpu_to_le16(RIO2_IO_TYPE_WRITE|RIO2_IO_SUREWRITE) : + cpu_to_le16(RIO2_IO_TYPE_WRITE); + aac_build_sgraw2(cmd, writecmd2, dev->scsi_host_ptr->sg_tablesize); + command = ContainerRawIo2; + fibsize = sizeof(struct aac_raw_io2) + + ((le32_to_cpu(writecmd2->sgeCnt)-1) * sizeof(struct sge_ieee1212)); + } else { + struct aac_raw_io *writecmd; + writecmd = (struct aac_raw_io *) fib_data(fib); + writecmd->block[0] = cpu_to_le32((u32)(lba&0xffffffff)); + writecmd->block[1] = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32)); + writecmd->count = cpu_to_le32(count<<9); + writecmd->cid = cpu_to_le16(scmd_id(cmd)); + writecmd->flags = (fua && ((aac_cache & 5) != 1) && + (((aac_cache & 5) != 5) || !fib->dev->cache_protected)) ? + cpu_to_le16(RIO_TYPE_WRITE|RIO_SUREWRITE) : + cpu_to_le16(RIO_TYPE_WRITE); + writecmd->bpTotal = 0; + writecmd->bpComplete = 0; + aac_build_sgraw(cmd, &writecmd->sg); + command = ContainerRawIo; + fibsize = sizeof(struct aac_raw_io) + + ((le32_to_cpu(writecmd->sg.count)-1) * sizeof (struct sgentryraw)); + } + BUG_ON(fibsize > (fib->dev->max_fib_size - sizeof(struct aac_fibhdr))); /* * Now send the Fib to the adapter */ - return aac_fib_send(ContainerRawIo, + return aac_fib_send(command, fib, fibsize, FsaNormal, @@ -1492,8 +1539,6 @@ int aac_get_adapter_info(struct aac_dev* dev) dev->a_ops.adapter_write = aac_write_block; } dev->scsi_host_ptr->max_sectors = AAC_MAX_32BIT_SGBCOUNT; - if (dev->adapter_info.options & AAC_OPT_NEW_COMM_TYPE1) - dev->adapter_info.options |= AAC_OPT_NEW_COMM; if (!(dev->adapter_info.options & AAC_OPT_NEW_COMM)) { /* * Worst case size that could cause sg overflow when @@ -2616,12 +2661,18 @@ static void aac_srb_callback(void *context, struct fib * fibptr) srbreply = (struct aac_srb_reply *) fib_data(fibptr); scsicmd->sense_buffer[0] = '\0'; /* Initialize sense valid flag to false */ - /* - * Calculate resid for sg - */ - scsi_set_resid(scsicmd, scsi_bufflen(scsicmd) - - le32_to_cpu(srbreply->data_xfer_length)); + if (fibptr->flags & FIB_CONTEXT_FLAG_FASTRESP) { + /* fast response */ + srbreply->srb_status = cpu_to_le32(SRB_STATUS_SUCCESS); + srbreply->scsi_status = cpu_to_le32(SAM_STAT_GOOD); + } else { + /* + * Calculate resid for sg + */ + scsi_set_resid(scsicmd, scsi_bufflen(scsicmd) + - le32_to_cpu(srbreply->data_xfer_length)); + } scsi_dma_unmap(scsicmd); @@ -2954,6 +3005,118 @@ static unsigned long aac_build_sgraw(struct scsi_cmnd* scsicmd, struct sgmapraw* return byte_count; } +static unsigned long aac_build_sgraw2(struct scsi_cmnd *scsicmd, struct aac_raw_io2 *rio2, int sg_max) +{ + unsigned long byte_count = 0; + int nseg; + + nseg = scsi_dma_map(scsicmd); + BUG_ON(nseg < 0); + if (nseg) { + struct scatterlist *sg; + int 
i, conformable = 0; + u32 min_size = PAGE_SIZE, cur_size; + + scsi_for_each_sg(scsicmd, sg, nseg, i) { + int count = sg_dma_len(sg); + u64 addr = sg_dma_address(sg); + + BUG_ON(i >= sg_max); + rio2->sge[i].addrHigh = cpu_to_le32((u32)(addr>>32)); + rio2->sge[i].addrLow = cpu_to_le32((u32)(addr & 0xffffffff)); + cur_size = cpu_to_le32(count); + rio2->sge[i].length = cur_size; + rio2->sge[i].flags = 0; + if (i == 0) { + conformable = 1; + rio2->sgeFirstSize = cur_size; + } else if (i == 1) { + rio2->sgeNominalSize = cur_size; + min_size = cur_size; + } else if ((i+1) < nseg && cur_size != rio2->sgeNominalSize) { + conformable = 0; + if (cur_size < min_size) + min_size = cur_size; + } + byte_count += count; + } + + /* hba wants the size to be exact */ + if (byte_count > scsi_bufflen(scsicmd)) { + u32 temp = le32_to_cpu(rio2->sge[i-1].length) - + (byte_count - scsi_bufflen(scsicmd)); + rio2->sge[i-1].length = cpu_to_le32(temp); + byte_count = scsi_bufflen(scsicmd); + } + + rio2->sgeCnt = cpu_to_le32(nseg); + rio2->flags |= cpu_to_le16(RIO2_SG_FORMAT_IEEE1212); + /* not conformable: evaluate required sg elements */ + if (!conformable) { + int j, nseg_new = nseg, err_found; + for (i = min_size / PAGE_SIZE; i >= 1; --i) { + err_found = 0; + nseg_new = 2; + for (j = 1; j < nseg - 1; ++j) { + if (rio2->sge[j].length % (i*PAGE_SIZE)) { + err_found = 1; + break; + } + nseg_new += (rio2->sge[j].length / (i*PAGE_SIZE)); + } + if (!err_found) + break; + } + if (i > 0 && nseg_new <= sg_max) + aac_convert_sgraw2(rio2, i, nseg, nseg_new); + } else + rio2->flags |= cpu_to_le16(RIO2_SGL_CONFORMANT); + + /* Check for command underflow */ + if (scsicmd->underflow && (byte_count < scsicmd->underflow)) { + printk(KERN_WARNING"aacraid: cmd len %08lX cmd underflow %08X\n", + byte_count, scsicmd->underflow); + } + } + + return byte_count; +} + +static int aac_convert_sgraw2(struct aac_raw_io2 *rio2, int pages, int nseg, int nseg_new) +{ + struct sge_ieee1212 *sge; + int i, j, pos; + u32 addr_low; + + if (aac_convert_sgl == 0) + return 0; + + sge = kmalloc(nseg_new * sizeof(struct sge_ieee1212), GFP_ATOMIC); + if (sge == NULL) + return -1; + + for (i = 1, pos = 1; i < nseg-1; ++i) { + for (j = 0; j < rio2->sge[i].length / (pages * PAGE_SIZE); ++j) { + addr_low = rio2->sge[i].addrLow + j * pages * PAGE_SIZE; + sge[pos].addrLow = addr_low; + sge[pos].addrHigh = rio2->sge[i].addrHigh; + if (addr_low < rio2->sge[i].addrLow) + sge[pos].addrHigh++; + sge[pos].length = pages * PAGE_SIZE; + sge[pos].flags = 0; + pos++; + } + } + sge[pos] = rio2->sge[nseg-1]; + memcpy(&rio2->sge[1], &sge[1], (nseg_new-1)*sizeof(struct sge_ieee1212)); + + kfree(sge); + rio2->sgeCnt = cpu_to_le32(nseg_new); + rio2->flags |= cpu_to_le16(RIO2_SGL_CONFORMANT); + rio2->sgeNominalSize = pages * PAGE_SIZE; + return 0; +} + #ifdef AAC_DETAILED_STATUS_INFO struct aac_srb_status_info { diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h index 3fcf62724fa..9e933a88a8b 100644 --- a/drivers/scsi/aacraid/aacraid.h +++ b/drivers/scsi/aacraid/aacraid.h @@ -12,7 +12,7 @@ *----------------------------------------------------------------------------*/ #ifndef AAC_DRIVER_BUILD -# define AAC_DRIVER_BUILD 28900 +# define AAC_DRIVER_BUILD 29800 # define AAC_DRIVER_BRANCH "-ms" #endif #define MAXIMUM_NUM_CONTAINERS 32 @@ -100,6 +100,13 @@ struct user_sgentryraw { u32 flags; /* reserved for F/W use */ }; +struct sge_ieee1212 { + u32 addrLow; + u32 addrHigh; + u32 length; + u32 flags; +}; + /* * SGMAP * @@ -270,6 +277,8 @@ enum aac_queue_types 
{ */ #define FIB_MAGIC 0x0001 +#define FIB_MAGIC2 0x0004 +#define FIB_MAGIC2_64 0x0005 /* * Define the priority levels the FSA communication routines support. */ @@ -296,22 +305,20 @@ struct aac_fibhdr { __le32 XferState; /* Current transfer state for this CCB */ __le16 Command; /* Routing information for the destination */ u8 StructType; /* Type FIB */ - u8 Flags; /* Flags for FIB */ + u8 Unused; /* Unused */ __le16 Size; /* Size of this FIB in bytes */ __le16 SenderSize; /* Size of the FIB in the sender (for response sizing) */ __le32 SenderFibAddress; /* Host defined data in the FIB */ - __le32 ReceiverFibAddress;/* Logical address of this FIB for - the adapter */ - u32 SenderData; /* Place holder for the sender to store data */ union { - struct { - __le32 _ReceiverTimeStart; /* Timestamp for - receipt of fib */ - __le32 _ReceiverTimeDone; /* Timestamp for - completion of fib */ - } _s; - } _u; + __le32 ReceiverFibAddress;/* Logical address of this FIB for + the adapter (old) */ + __le32 SenderFibAddressHigh;/* upper 32bit of phys. FIB address */ + __le32 TimeStamp; /* otherwise timestamp for FW internal use */ + } u; + u32 Handle; /* FIB handle used for MSGU communication */ + u32 Previous; /* FW internal use */ + u32 Next; /* FW internal use */ }; struct hw_fib { @@ -361,6 +368,7 @@ struct hw_fib { #define ContainerCommand 500 #define ContainerCommand64 501 #define ContainerRawIo 502 +#define ContainerRawIo2 503 /* * Scsi Port commands (scsi passthrough) */ @@ -417,6 +425,7 @@ enum fib_xfer_state { #define ADAPTER_INIT_STRUCT_REVISION 3 #define ADAPTER_INIT_STRUCT_REVISION_4 4 // rocket science #define ADAPTER_INIT_STRUCT_REVISION_6 6 /* PMC src */ +#define ADAPTER_INIT_STRUCT_REVISION_7 7 /* Denali */ struct aac_init { @@ -441,7 +450,9 @@ struct aac_init #define INITFLAGS_NEW_COMM_SUPPORTED 0x00000001 #define INITFLAGS_DRIVER_USES_UTC_TIME 0x00000010 #define INITFLAGS_DRIVER_SUPPORTS_PM 0x00000020 -#define INITFLAGS_NEW_COMM_TYPE1_SUPPORTED 0x00000041 +#define INITFLAGS_NEW_COMM_TYPE1_SUPPORTED 0x00000040 +#define INITFLAGS_FAST_JBOD_SUPPORTED 0x00000080 +#define INITFLAGS_NEW_COMM_TYPE2_SUPPORTED 0x00000100 __le32 MaxIoCommands; /* max outstanding commands */ __le32 MaxIoSize; /* largest I/O command */ __le32 MaxFibSize; /* largest FIB to adapter */ @@ -1052,10 +1063,11 @@ struct aac_dev struct adapter_ops a_ops; unsigned long fsrev; /* Main driver's revision number */ - unsigned long dbg_base; /* address of UART + resource_size_t base_start; /* main IO base */ + resource_size_t dbg_base; /* address of UART * debug buffer */ - unsigned base_size, dbg_size; /* Size of + resource_size_t base_size, dbg_size; /* Size of * mapped in region */ struct aac_init *init; /* Holds initialization info to communicate with adapter */ @@ -1123,6 +1135,7 @@ struct aac_dev # define AAC_COMM_PRODUCER 0 # define AAC_COMM_MESSAGE 1 # define AAC_COMM_MESSAGE_TYPE1 3 +# define AAC_COMM_MESSAGE_TYPE2 4 u8 raw_io_interface; u8 raw_io_64; u8 printf_enabled; @@ -1181,6 +1194,7 @@ struct aac_dev #define FIB_CONTEXT_FLAG_TIMED_OUT (0x00000001) #define FIB_CONTEXT_FLAG (0x00000002) #define FIB_CONTEXT_FLAG_WAIT (0x00000004) +#define FIB_CONTEXT_FLAG_FASTRESP (0x00000008) /* * Define the command values @@ -1287,6 +1301,22 @@ struct aac_dev #define CMDATA_SYNCH 4 #define CMUNSTABLE 5 +#define RIO_TYPE_WRITE 0x0000 +#define RIO_TYPE_READ 0x0001 +#define RIO_SUREWRITE 0x0008 + +#define RIO2_IO_TYPE 0x0003 +#define RIO2_IO_TYPE_WRITE 0x0000 +#define RIO2_IO_TYPE_READ 0x0001 +#define RIO2_IO_TYPE_VERIFY 0x0002 +#define 
RIO2_IO_ERROR 0x0004 +#define RIO2_IO_SUREWRITE 0x0008 +#define RIO2_SGL_CONFORMANT 0x0010 +#define RIO2_SG_FORMAT 0xF000 +#define RIO2_SG_FORMAT_ARC 0x0000 +#define RIO2_SG_FORMAT_SRL 0x1000 +#define RIO2_SG_FORMAT_IEEE1212 0x2000 + struct aac_read { __le32 command; @@ -1331,9 +1361,6 @@ struct aac_write64 __le32 block; __le16 pad; __le16 flags; -#define IO_TYPE_WRITE 0x00000000 -#define IO_TYPE_READ 0x00000001 -#define IO_SUREWRITE 0x00000008 struct sgmap64 sg; // Must be last in struct because it is variable }; struct aac_write_reply @@ -1354,6 +1381,22 @@ struct aac_raw_io struct sgmapraw sg; }; +struct aac_raw_io2 { + __le32 blockLow; + __le32 blockHigh; + __le32 byteCount; + __le16 cid; + __le16 flags; /* RIO2 flags */ + __le32 sgeFirstSize; /* size of first sge el. */ + __le32 sgeNominalSize; /* size of 2nd sge el. (if conformant) */ + u8 sgeCnt; /* only 8 bits required */ + u8 bpTotal; /* reserved for F/W use */ + u8 bpComplete; /* reserved for F/W use */ + u8 sgeFirstIndex; /* reserved for F/W use */ + u8 unused[4]; + struct sge_ieee1212 sge[1]; +}; + #define CT_FLUSH_CACHE 129 struct aac_synchronize { __le32 command; /* VM_ContainerConfig */ diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c index 0bd38da4ada..1ef041bc60c 100644 --- a/drivers/scsi/aacraid/commctrl.c +++ b/drivers/scsi/aacraid/commctrl.c @@ -498,6 +498,8 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg) return -ENOMEM; } aac_fib_init(srbfib); + /* raw_srb FIB is not FastResponseCapable */ + srbfib->hw_fib_va->header.XferState &= ~cpu_to_le32(FastResponseCapable); srbcmd = (struct aac_srb*) fib_data(srbfib); diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c index a35f54ebdce..8e5d3be1612 100644 --- a/drivers/scsi/aacraid/comminit.c +++ b/drivers/scsi/aacraid/comminit.c @@ -58,7 +58,8 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co dma_addr_t phys; unsigned long aac_max_hostphysmempages; - if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1) + if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1 || + dev->comm_interface == AAC_COMM_MESSAGE_TYPE2) host_rrq_size = (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB) * sizeof(u32); size = fibsize + sizeof(struct aac_init) + commsize + @@ -75,7 +76,8 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co dev->comm_phys = phys; dev->comm_size = size; - if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1) { + if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1 || + dev->comm_interface == AAC_COMM_MESSAGE_TYPE2) { dev->host_rrq = (u32 *)(base + fibsize); dev->host_rrq_pa = phys + fibsize; memset(dev->host_rrq, 0, host_rrq_size); @@ -115,26 +117,32 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co else init->HostPhysMemPages = cpu_to_le32(AAC_MAX_HOSTPHYSMEMPAGES); - init->InitFlags = 0; + init->InitFlags = cpu_to_le32(INITFLAGS_DRIVER_USES_UTC_TIME | + INITFLAGS_DRIVER_SUPPORTS_PM); + init->MaxIoCommands = cpu_to_le32(dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB); + init->MaxIoSize = cpu_to_le32(dev->scsi_host_ptr->max_sectors << 9); + init->MaxFibSize = cpu_to_le32(dev->max_fib_size); + init->MaxNumAif = cpu_to_le32(dev->max_num_aif); + if (dev->comm_interface == AAC_COMM_MESSAGE) { init->InitFlags |= cpu_to_le32(INITFLAGS_NEW_COMM_SUPPORTED); dprintk((KERN_WARNING"aacraid: New Comm Interface enabled\n")); } else if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1) { init->InitStructRevision = 
cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_6); - init->InitFlags |= cpu_to_le32(INITFLAGS_NEW_COMM_TYPE1_SUPPORTED); - dprintk((KERN_WARNING - "aacraid: New Comm Interface type1 enabled\n")); + init->InitFlags |= cpu_to_le32(INITFLAGS_NEW_COMM_SUPPORTED | + INITFLAGS_NEW_COMM_TYPE1_SUPPORTED | INITFLAGS_FAST_JBOD_SUPPORTED); + init->HostRRQ_AddrHigh = cpu_to_le32((u32)((u64)dev->host_rrq_pa >> 32)); + init->HostRRQ_AddrLow = cpu_to_le32((u32)(dev->host_rrq_pa & 0xffffffff)); + dprintk((KERN_WARNING"aacraid: New Comm Interface type1 enabled\n")); + } else if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2) { + init->InitStructRevision = cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_7); + init->InitFlags |= cpu_to_le32(INITFLAGS_NEW_COMM_SUPPORTED | + INITFLAGS_NEW_COMM_TYPE2_SUPPORTED | INITFLAGS_FAST_JBOD_SUPPORTED); + init->HostRRQ_AddrHigh = cpu_to_le32((u32)((u64)dev->host_rrq_pa >> 32)); + init->HostRRQ_AddrLow = cpu_to_le32((u32)(dev->host_rrq_pa & 0xffffffff)); + init->MiniPortRevision = cpu_to_le32(0L); /* number of MSI-X */ + dprintk((KERN_WARNING"aacraid: New Comm Interface type2 enabled\n")); } - init->InitFlags |= cpu_to_le32(INITFLAGS_DRIVER_USES_UTC_TIME | - INITFLAGS_DRIVER_SUPPORTS_PM); - init->MaxIoCommands = cpu_to_le32(dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB); - init->MaxIoSize = cpu_to_le32(dev->scsi_host_ptr->max_sectors << 9); - init->MaxFibSize = cpu_to_le32(dev->max_fib_size); - - init->MaxNumAif = cpu_to_le32(dev->max_num_aif); - init->HostRRQ_AddrHigh = (u32)((u64)dev->host_rrq_pa >> 32); - init->HostRRQ_AddrLow = (u32)(dev->host_rrq_pa & 0xffffffff); - /* * Increment the base address by the amount already used @@ -354,13 +362,15 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev) if ((status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_TYPE1))) { /* driver supports TYPE1 (Tupelo) */ dev->comm_interface = AAC_COMM_MESSAGE_TYPE1; + } else if ((status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_TYPE2))) { + /* driver supports TYPE2 (Denali) */ + dev->comm_interface = AAC_COMM_MESSAGE_TYPE2; } else if ((status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_TYPE4)) || - (status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_TYPE3)) || - (status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_TYPE2))) { - /* driver doesn't support TYPE2 (Series7), TYPE3 and TYPE4 */ - /* switch to sync. mode */ - dev->comm_interface = AAC_COMM_MESSAGE_TYPE1; - dev->sync_mode = 1; + (status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_TYPE3))) { + /* driver doesn't support TYPE3 and TYPE4 */ + /* switch to sync. 
mode */ + dev->comm_interface = AAC_COMM_MESSAGE_TYPE2; + dev->sync_mode = 1; } } if ((dev->comm_interface == AAC_COMM_MESSAGE) && diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c index 4b32ca44243..1be0776a80c 100644 --- a/drivers/scsi/aacraid/commsup.c +++ b/drivers/scsi/aacraid/commsup.c @@ -136,6 +136,7 @@ int aac_fib_setup(struct aac_dev * dev) i < (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB); i++, fibptr++) { + fibptr->flags = 0; fibptr->dev = dev; fibptr->hw_fib_va = hw_fib; fibptr->data = (void *) fibptr->hw_fib_va->data; @@ -240,11 +241,11 @@ void aac_fib_init(struct fib *fibptr) { struct hw_fib *hw_fib = fibptr->hw_fib_va; + memset(&hw_fib->header, 0, sizeof(struct aac_fibhdr)); hw_fib->header.StructType = FIB_MAGIC; hw_fib->header.Size = cpu_to_le16(fibptr->dev->max_fib_size); hw_fib->header.XferState = cpu_to_le32(HostOwned | FibInitialized | FibEmpty | FastResponseCapable); - hw_fib->header.SenderFibAddress = 0; /* Filled in later if needed */ - hw_fib->header.ReceiverFibAddress = cpu_to_le32(fibptr->hw_fib_pa); + hw_fib->header.u.ReceiverFibAddress = cpu_to_le32(fibptr->hw_fib_pa); hw_fib->header.SenderSize = cpu_to_le16(fibptr->dev->max_fib_size); } @@ -259,7 +260,6 @@ void aac_fib_init(struct fib *fibptr) static void fib_dealloc(struct fib * fibptr) { struct hw_fib *hw_fib = fibptr->hw_fib_va; - BUG_ON(hw_fib->header.StructType != FIB_MAGIC); hw_fib->header.XferState = 0; } @@ -370,7 +370,7 @@ int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size)); entry->addr = hw_fib->header.SenderFibAddress; /* Restore adapters pointer to the FIB */ - hw_fib->header.ReceiverFibAddress = hw_fib->header.SenderFibAddress; /* Let the adapter know where to find its data */ + hw_fib->header.u.ReceiverFibAddress = hw_fib->header.SenderFibAddress; /* Let the adapter know where to find its data */ map = 0; } /* @@ -450,7 +450,7 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size, */ hw_fib->header.SenderFibAddress = cpu_to_le32(((u32)(fibptr - dev->fibs)) << 2); - hw_fib->header.SenderData = (u32)(fibptr - dev->fibs); + hw_fib->header.Handle = (u32)(fibptr - dev->fibs) + 1; /* * Set FIB state to indicate where it came from and if we want a * response from the adapter. Also load the command from the @@ -460,7 +460,6 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size, */ hw_fib->header.Command = cpu_to_le16(command); hw_fib->header.XferState |= cpu_to_le32(SentFromHost); - fibptr->hw_fib_va->header.Flags = 0; /* 0 the flags field - internal only*/ /* * Set the size of the Fib we want to send to the adapter */ @@ -564,10 +563,10 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size, * functioning because an interrupt routing or other * hardware failure has occurred. */ - unsigned long count = 36000000L; /* 3 minutes */ + unsigned long timeout = jiffies + (180 * HZ); /* 3 minutes */ while (down_trylock(&fibptr->event_wait)) { int blink; - if (--count == 0) { + if (time_is_before_eq_jiffies(timeout)) { struct aac_queue * q = &dev->queues->queue[AdapNormCmdQueue]; spin_lock_irqsave(q->lock, qflags); q->numpending--; @@ -588,7 +587,10 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size, } return -EFAULT; } - udelay(5); + /* We used to udelay() here but that absorbed + * a CPU when a timeout occurred. Not very + * useful. */ + cpu_relax(); } } else if (down_interruptible(&fibptr->event_wait)) { /* Do nothing ... 
satisfy @@ -708,7 +710,8 @@ int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size) unsigned long nointr = 0; unsigned long qflags; - if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1) { + if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1 || + dev->comm_interface == AAC_COMM_MESSAGE_TYPE2) { kfree(hw_fib); return 0; } @@ -721,7 +724,9 @@ int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size) /* * If we plan to do anything check the structure type first. */ - if (hw_fib->header.StructType != FIB_MAGIC) { + if (hw_fib->header.StructType != FIB_MAGIC && + hw_fib->header.StructType != FIB_MAGIC2 && + hw_fib->header.StructType != FIB_MAGIC2_64) { if (dev->comm_interface == AAC_COMM_MESSAGE) kfree(hw_fib); return -EINVAL; @@ -783,7 +788,9 @@ int aac_fib_complete(struct fib *fibptr) * If we plan to do anything check the structure type first. */ - if (hw_fib->header.StructType != FIB_MAGIC) + if (hw_fib->header.StructType != FIB_MAGIC && + hw_fib->header.StructType != FIB_MAGIC2 && + hw_fib->header.StructType != FIB_MAGIC2_64) return -EINVAL; /* * This block completes a cdb which originated on the host and we diff --git a/drivers/scsi/aacraid/dpcsup.c b/drivers/scsi/aacraid/dpcsup.c index f0c66a80ad1..d81b2810f0f 100644 --- a/drivers/scsi/aacraid/dpcsup.c +++ b/drivers/scsi/aacraid/dpcsup.c @@ -101,6 +101,7 @@ unsigned int aac_response_normal(struct aac_queue * q) */ *(__le32 *)hwfib->data = cpu_to_le32(ST_OK); hwfib->header.XferState |= cpu_to_le32(AdapterProcessed); + fib->flags |= FIB_CONTEXT_FLAG_FASTRESP; } FIB_COUNTER_INCREMENT(aac_config.FibRecved); @@ -121,7 +122,7 @@ unsigned int aac_response_normal(struct aac_queue * q) * NOTE: we cannot touch the fib after this * call, because it may have been deallocated. */ - fib->flags = 0; + fib->flags &= FIB_CONTEXT_FLAG_FASTRESP; fib->callback(fib->callback_data, fib); } else { unsigned long flagv; @@ -367,6 +368,7 @@ unsigned int aac_intr_normal(struct aac_dev *dev, u32 index, */ *(__le32 *)hwfib->data = cpu_to_le32(ST_OK); hwfib->header.XferState |= cpu_to_le32(AdapterProcessed); + fib->flags |= FIB_CONTEXT_FLAG_FASTRESP; } FIB_COUNTER_INCREMENT(aac_config.FibRecved); @@ -387,7 +389,7 @@ unsigned int aac_intr_normal(struct aac_dev *dev, u32 index, * NOTE: we cannot touch the fib after this * call, because it may have been deallocated. 
*/ - fib->flags = 0; + fib->flags &= FIB_CONTEXT_FLAG_FASTRESP; fib->callback(fib->callback_data, fib); } else { unsigned long flagv; diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c index 0d279c445a3..7199534cd07 100644 --- a/drivers/scsi/aacraid/linit.c +++ b/drivers/scsi/aacraid/linit.c @@ -1089,8 +1089,17 @@ static struct scsi_host_template aac_driver_template = { static void __aac_shutdown(struct aac_dev * aac) { - if (aac->aif_thread) + if (aac->aif_thread) { + int i; + /* Clear out events first */ + for (i = 0; i < (aac->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB); i++) { + struct fib *fib = &aac->fibs[i]; + if (!(fib->hw_fib_va->header.XferState & cpu_to_le32(NoResponseExpected | Async)) && + (fib->hw_fib_va->header.XferState & cpu_to_le32(ResponseExpected))) + up(&fib->event_wait); + } kthread_stop(aac->thread); + } aac_send_shutdown(aac); aac_adapter_disable_int(aac); free_irq(aac->pdev->irq, aac); @@ -1145,11 +1154,11 @@ static int __devinit aac_probe_one(struct pci_dev *pdev, goto out_disable_pdev; shost->irq = pdev->irq; - shost->base = pci_resource_start(pdev, 0); shost->unique_id = unique_id; shost->max_cmd_len = 16; aac = (struct aac_dev *)shost->hostdata; + aac->base_start = pci_resource_start(pdev, 0); aac->scsi_host_ptr = shost; aac->pdev = pdev; aac->name = aac_driver_template.name; @@ -1157,7 +1166,7 @@ static int __devinit aac_probe_one(struct pci_dev *pdev, aac->cardtype = index; INIT_LIST_HEAD(&aac->entry); - aac->fibs = kmalloc(sizeof(struct fib) * (shost->can_queue + AAC_NUM_MGT_FIB), GFP_KERNEL); + aac->fibs = kzalloc(sizeof(struct fib) * (shost->can_queue + AAC_NUM_MGT_FIB), GFP_KERNEL); if (!aac->fibs) goto out_free_host; spin_lock_init(&aac->fib_lock); @@ -1191,6 +1200,7 @@ static int __devinit aac_probe_one(struct pci_dev *pdev, if (IS_ERR(aac->thread)) { printk(KERN_ERR "aacraid: Unable to create command thread.\n"); error = PTR_ERR(aac->thread); + aac->thread = NULL; goto out_deinit; } diff --git a/drivers/scsi/aacraid/nark.c b/drivers/scsi/aacraid/nark.c index f397d21a0c0..6c53b1d8b2b 100644 --- a/drivers/scsi/aacraid/nark.c +++ b/drivers/scsi/aacraid/nark.c @@ -49,14 +49,14 @@ static int aac_nark_ioremap(struct aac_dev * dev, u32 size) dev->base = NULL; return 0; } - dev->scsi_host_ptr->base = pci_resource_start(dev->pdev, 2); + dev->base_start = pci_resource_start(dev->pdev, 2); dev->regs.rx = ioremap((u64)pci_resource_start(dev->pdev, 0) | ((u64)pci_resource_start(dev->pdev, 1) << 32), sizeof(struct rx_registers) - sizeof(struct rx_inbound)); dev->base = NULL; if (dev->regs.rx == NULL) return -1; - dev->base = ioremap(dev->scsi_host_ptr->base, size); + dev->base = ioremap(dev->base_start, size); if (dev->base == NULL) { iounmap(dev->regs.rx); dev->regs.rx = NULL; diff --git a/drivers/scsi/aacraid/rkt.c b/drivers/scsi/aacraid/rkt.c index be44de92429..7d8013feedd 100644 --- a/drivers/scsi/aacraid/rkt.c +++ b/drivers/scsi/aacraid/rkt.c @@ -79,7 +79,7 @@ static int aac_rkt_ioremap(struct aac_dev * dev, u32 size) iounmap(dev->regs.rkt); return 0; } - dev->base = dev->regs.rkt = ioremap(dev->scsi_host_ptr->base, size); + dev->base = dev->regs.rkt = ioremap(dev->base_start, size); if (dev->base == NULL) return -1; dev->IndexRegs = &dev->regs.rkt->IndexRegs; diff --git a/drivers/scsi/aacraid/rx.c b/drivers/scsi/aacraid/rx.c index b029c7cc785..dada38aeacc 100644 --- a/drivers/scsi/aacraid/rx.c +++ b/drivers/scsi/aacraid/rx.c @@ -471,7 +471,7 @@ static int aac_rx_ioremap(struct aac_dev * dev, u32 size) iounmap(dev->regs.rx); return 0; } - 
dev->base = dev->regs.rx = ioremap(dev->scsi_host_ptr->base, size); + dev->base = dev->regs.rx = ioremap(dev->base_start, size); if (dev->base == NULL) return -1; dev->IndexRegs = &dev->regs.rx->IndexRegs; @@ -653,7 +653,7 @@ int _aac_rx_init(struct aac_dev *dev) name, instance); goto error_iounmap; } - dev->dbg_base = dev->scsi_host_ptr->base; + dev->dbg_base = dev->base_start; dev->dbg_base_mapped = dev->base; dev->dbg_size = dev->base_size; diff --git a/drivers/scsi/aacraid/sa.c b/drivers/scsi/aacraid/sa.c index beb533630d4..2244f315f33 100644 --- a/drivers/scsi/aacraid/sa.c +++ b/drivers/scsi/aacraid/sa.c @@ -305,7 +305,7 @@ static int aac_sa_ioremap(struct aac_dev * dev, u32 size) iounmap(dev->regs.sa); return 0; } - dev->base = dev->regs.sa = ioremap(dev->scsi_host_ptr->base, size); + dev->base = dev->regs.sa = ioremap(dev->base_start, size); return (dev->base == NULL) ? -1 : 0; } @@ -393,7 +393,7 @@ int aac_sa_init(struct aac_dev *dev) name, instance); goto error_iounmap; } - dev->dbg_base = dev->scsi_host_ptr->base; + dev->dbg_base = dev->base_start; dev->dbg_base_mapped = dev->base; dev->dbg_size = dev->base_size; diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c index 76282063630..3b021ec6325 100644 --- a/drivers/scsi/aacraid/src.c +++ b/drivers/scsi/aacraid/src.c @@ -56,25 +56,14 @@ static irqreturn_t aac_src_intr_message(int irq, void *dev_id) if (bellbits & PmDoorBellResponseSent) { bellbits = PmDoorBellResponseSent; /* handle async. status */ + src_writel(dev, MUnit.ODR_C, bellbits); + src_readl(dev, MUnit.ODR_C); our_interrupt = 1; index = dev->host_rrq_idx; - if (dev->host_rrq[index] == 0) { - u32 old_index = index; - /* adjust index */ - do { - index++; - if (index == dev->scsi_host_ptr->can_queue + - AAC_NUM_MGT_FIB) - index = 0; - if (dev->host_rrq[index] != 0) - break; - } while (index != old_index); - dev->host_rrq_idx = index; - } for (;;) { isFastResponse = 0; /* remove toggle bit (31) */ - handle = (dev->host_rrq[index] & 0x7fffffff); + handle = le32_to_cpu(dev->host_rrq[index]) & 0x7fffffff; /* check fast response bit (30) */ if (handle & 0x40000000) isFastResponse = 1; @@ -93,6 +82,8 @@ static irqreturn_t aac_src_intr_message(int irq, void *dev_id) } else { bellbits_shifted = (bellbits >> SRC_ODR_SHIFT); if (bellbits_shifted & DoorBellAifPending) { + src_writel(dev, MUnit.ODR_C, bellbits); + src_readl(dev, MUnit.ODR_C); our_interrupt = 1; /* handle AIF */ aac_intr_normal(dev, 0, 2, 0, NULL); @@ -100,6 +91,13 @@ static irqreturn_t aac_src_intr_message(int irq, void *dev_id) unsigned long sflags; struct list_head *entry; int send_it = 0; + extern int aac_sync_mode; + + if (!aac_sync_mode) { + src_writel(dev, MUnit.ODR_C, bellbits); + src_readl(dev, MUnit.ODR_C); + our_interrupt = 1; + } if (dev->sync_fib) { our_interrupt = 1; @@ -132,7 +130,6 @@ static irqreturn_t aac_src_intr_message(int irq, void *dev_id) } if (our_interrupt) { - src_writel(dev, MUnit.ODR_C, bellbits); return IRQ_HANDLED; } return IRQ_NONE; @@ -336,6 +333,9 @@ static void aac_src_start_adapter(struct aac_dev *dev) { struct aac_init *init; + /* reset host_rrq_idx first */ + dev->host_rrq_idx = 0; + init = dev->init; init->HostElapsedSeconds = cpu_to_le32(get_seconds()); @@ -389,30 +389,51 @@ static int aac_src_deliver_message(struct fib *fib) struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue]; unsigned long qflags; u32 fibsize; - u64 address; + dma_addr_t address; struct aac_fib_xporthdr *pFibX; + u16 hdr_size = le16_to_cpu(fib->hw_fib_va->header.Size); 
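/*
 * Editor's note (illustration only, not part of the commit): the
 * aac_src_deliver_message() hunk below packs the FIB size into the low
 * bits of the 64-bit inbound-queue entry; because the FIB and its
 * optional xport header are 32-byte aligned, the low address bits are
 * free to carry the size in 128-byte units, minus one. A hypothetical
 * helper showing the same packing:
 */
static inline u64 aac_iq_entry_pack(dma_addr_t fib_pa, u16 bytes)
{
	u32 units = (bytes + 127) / 128 - 1;	/* size in 128-byte units, minus one */

	return (u64)fib_pa | units;		/* low bits double as the size field */
}
/* The two src_writel() calls below then split this value across IQ_H/IQ_L. */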
spin_lock_irqsave(q->lock, qflags); q->numpending++; spin_unlock_irqrestore(q->lock, qflags); - /* Calculate the amount to the fibsize bits */ - fibsize = (sizeof(struct aac_fib_xporthdr) + - fib->hw_fib_va->header.Size + 127) / 128 - 1; - if (fibsize > (ALIGN32 - 1)) - fibsize = ALIGN32 - 1; - - /* Fill XPORT header */ - pFibX = (struct aac_fib_xporthdr *) - ((unsigned char *)fib->hw_fib_va - - sizeof(struct aac_fib_xporthdr)); - pFibX->Handle = fib->hw_fib_va->header.SenderData + 1; - pFibX->HostAddress = fib->hw_fib_pa; - pFibX->Size = fib->hw_fib_va->header.Size; - address = fib->hw_fib_pa - (u64)sizeof(struct aac_fib_xporthdr); - - src_writel(dev, MUnit.IQ_H, (u32)(address >> 32)); - src_writel(dev, MUnit.IQ_L, (u32)(address & 0xffffffff) + fibsize); + if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2) { + /* Calculate the amount to the fibsize bits */ + fibsize = (hdr_size + 127) / 128 - 1; + if (fibsize > (ALIGN32 - 1)) + return -EMSGSIZE; + /* New FIB header, 32-bit */ + address = fib->hw_fib_pa; + fib->hw_fib_va->header.StructType = FIB_MAGIC2; + fib->hw_fib_va->header.SenderFibAddress = (u32)address; + fib->hw_fib_va->header.u.TimeStamp = 0; + BUG_ON((u32)(address >> 32) != 0L); + address |= fibsize; + } else { + /* Calculate the amount to the fibsize bits */ + fibsize = (sizeof(struct aac_fib_xporthdr) + hdr_size + 127) / 128 - 1; + if (fibsize > (ALIGN32 - 1)) + return -EMSGSIZE; + + /* Fill XPORT header */ + pFibX = (void *)fib->hw_fib_va - sizeof(struct aac_fib_xporthdr); + pFibX->Handle = cpu_to_le32(fib->hw_fib_va->header.Handle); + pFibX->HostAddress = cpu_to_le64(fib->hw_fib_pa); + pFibX->Size = cpu_to_le32(hdr_size); + + /* + * The xport header has been 32-byte aligned for us so that fibsize + * can be masked out of this address by hardware. 
-- BenC + */ + address = fib->hw_fib_pa - sizeof(struct aac_fib_xporthdr); + if (address & (ALIGN32 - 1)) + return -EINVAL; + address |= fibsize; + } + + src_writel(dev, MUnit.IQ_H, (address >> 32) & 0xffffffff); + src_writel(dev, MUnit.IQ_L, address & 0xffffffff); + return 0; } @@ -435,8 +456,7 @@ static int aac_src_ioremap(struct aac_dev *dev, u32 size) dev->base = NULL; if (dev->regs.src.bar1 == NULL) return -1; - dev->base = dev->regs.src.bar0 = ioremap(dev->scsi_host_ptr->base, - size); + dev->base = dev->regs.src.bar0 = ioremap(dev->base_start, size); if (dev->base == NULL) { iounmap(dev->regs.src.bar1); dev->regs.src.bar1 = NULL; @@ -459,7 +479,7 @@ static int aac_srcv_ioremap(struct aac_dev *dev, u32 size) dev->base = dev->regs.src.bar0 = NULL; return 0; } - dev->base = dev->regs.src.bar0 = ioremap(dev->scsi_host_ptr->base, size); + dev->base = dev->regs.src.bar0 = ioremap(dev->base_start, size); if (dev->base == NULL) return -1; dev->IndexRegs = &((struct src_registers __iomem *) @@ -753,7 +773,7 @@ int aac_srcv_init(struct aac_dev *dev) if (aac_init_adapter(dev) == NULL) goto error_iounmap; - if (dev->comm_interface != AAC_COMM_MESSAGE_TYPE1) + if (dev->comm_interface != AAC_COMM_MESSAGE_TYPE2) goto error_iounmap; dev->msi = aac_msi && !pci_enable_msi(dev->pdev); if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr, @@ -764,7 +784,7 @@ int aac_srcv_init(struct aac_dev *dev) name, instance); goto error_iounmap; } - dev->dbg_base = dev->scsi_host_ptr->base; + dev->dbg_base = dev->base_start; dev->dbg_base_mapped = dev->base; dev->dbg_size = dev->base_size; diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c index 19a36945e6f..dd4547bf688 100644 --- a/drivers/scsi/aha152x.c +++ b/drivers/scsi/aha152x.c @@ -2984,8 +2984,8 @@ static int get_command(char *pos, Scsi_Cmnd * ptr) char *start = pos; int i; - SPRINTF("0x%08x: target=%d; lun=%d; cmnd=( ", - (unsigned int) ptr, ptr->device->id, ptr->device->lun); + SPRINTF("%p: target=%d; lun=%d; cmnd=( ", + ptr, ptr->device->id, ptr->device->lun); for (i = 0; i < COMMAND_SIZE(ptr->cmnd[0]); i++) SPRINTF("0x%02x ", ptr->cmnd[i]); diff --git a/drivers/scsi/aha1542.c b/drivers/scsi/aha1542.c index f79c8f9e33a..770c48ddbe5 100644 --- a/drivers/scsi/aha1542.c +++ b/drivers/scsi/aha1542.c @@ -49,7 +49,7 @@ #define SCSI_BUF_PA(address) isa_virt_to_bus(address) #define SCSI_SG_PA(sgent) (isa_page_to_bus(sg_page((sgent))) + (sgent)->offset) -#include<linux/stat.h> +#include <linux/stat.h> #ifdef DEBUG #define DEB(x) x diff --git a/drivers/scsi/aic94xx/aic94xx_task.c b/drivers/scsi/aic94xx/aic94xx_task.c index 532d212b6b2..393e7ce8e95 100644 --- a/drivers/scsi/aic94xx/aic94xx_task.c +++ b/drivers/scsi/aic94xx/aic94xx_task.c @@ -201,7 +201,7 @@ static void asd_get_response_tasklet(struct asd_ascb *ascb, if (SAS_STATUS_BUF_SIZE >= sizeof(*resp)) { resp->frame_len = le16_to_cpu(*(__le16 *)(r+6)); - memcpy(&resp->ending_fis[0], r+16, 24); + memcpy(&resp->ending_fis[0], r+16, ATA_RESP_FIS_SIZE); ts->buf_valid_size = sizeof(*resp); } } diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c index cbde1dca45a..def24a1079a 100644 --- a/drivers/scsi/arcmsr/arcmsr_hba.c +++ b/drivers/scsi/arcmsr/arcmsr_hba.c @@ -2821,7 +2821,7 @@ static void arcmsr_hardware_reset(struct AdapterControlBlock *acb) int i, count = 0; struct MessageUnit_A __iomem *pmuA = acb->pmuA; struct MessageUnit_C __iomem *pmuC = acb->pmuC; - u32 temp = 0; + /* backup pci config data */ printk(KERN_NOTICE "arcmsr%d: executing hw bus reset .....\n", 
acb->host->host_no); for (i = 0; i < 64; i++) { @@ -2839,7 +2839,7 @@ static void arcmsr_hardware_reset(struct AdapterControlBlock *acb) writel(0x2, &pmuC->write_sequence); writel(0x7, &pmuC->write_sequence); writel(0xD, &pmuC->write_sequence); - } while ((((temp = readl(&pmuC->host_diagnostic)) | ARCMSR_ARC1880_DiagWrite_ENABLE) == 0) && (count < 5)); + } while (((readl(&pmuC->host_diagnostic) & ARCMSR_ARC1880_DiagWrite_ENABLE) == 0) && (count < 5)); writel(ARCMSR_ARC1880_RESET_ADAPTER, &pmuC->host_diagnostic); } else { pci_write_config_byte(acb->pdev, 0x84, 0x20); diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c index 937000db62a..bcc4966e8ba 100644 --- a/drivers/scsi/bfa/bfa_fcs_lport.c +++ b/drivers/scsi/bfa/bfa_fcs_lport.c @@ -5722,9 +5722,7 @@ bfa_fcs_vport_free(struct bfa_fcs_vport_s *vport) * The memory for the bfad_vport_s is freed from the FC function * template vport_delete entry point. */ - if (vport_drv) - bfad_im_port_delete(vport_drv->drv_port.bfad, - &vport_drv->drv_port); + bfad_im_port_delete(vport_drv->drv_port.bfad, &vport_drv->drv_port); } /* diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c index 14e6284e48e..8cdb79c2fcd 100644 --- a/drivers/scsi/bfa/bfa_ioc.c +++ b/drivers/scsi/bfa/bfa_ioc.c @@ -2357,7 +2357,7 @@ bfa_ioc_mbox_isr(struct bfa_ioc_s *ioc) return; } - if ((mc > BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL)) + if ((mc >= BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL)) return; mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m); diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c index 2e4b0be14a2..2c8f0c71307 100644 --- a/drivers/scsi/bfa/bfad.c +++ b/drivers/scsi/bfa/bfad.c @@ -1383,6 +1383,8 @@ bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid) bfa_sm_set_state(bfad, bfad_sm_uninit); spin_lock_init(&bfad->bfad_lock); + spin_lock_init(&bfad->bfad_aen_spinlock); + pci_set_drvdata(pdev, bfad); bfad->ref_count = 0; diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c index e1f4b10df42..9c1495b321d 100644 --- a/drivers/scsi/bfa/bfad_bsg.c +++ b/drivers/scsi/bfa/bfad_bsg.c @@ -3008,12 +3008,15 @@ bfad_im_bsg_els_ct_request(struct fc_bsg_job *job) * buffer of size bsg_data->payload_len */ bsg_fcpt = kzalloc(bsg_data->payload_len, GFP_KERNEL); - if (!bsg_fcpt) + if (!bsg_fcpt) { + rc = -ENOMEM; goto out; + } if (copy_from_user((uint8_t *)bsg_fcpt, bsg_data->payload, bsg_data->payload_len)) { kfree(bsg_fcpt); + rc = -EIO; goto out; } diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c index 1ac09afe35e..2eebf8d4d58 100644 --- a/drivers/scsi/bfa/bfad_im.c +++ b/drivers/scsi/bfa/bfad_im.c @@ -687,25 +687,21 @@ bfa_status_t bfad_im_probe(struct bfad_s *bfad) { struct bfad_im_s *im; - bfa_status_t rc = BFA_STATUS_OK; im = kzalloc(sizeof(struct bfad_im_s), GFP_KERNEL); - if (im == NULL) { - rc = BFA_STATUS_ENOMEM; - goto ext; - } + if (im == NULL) + return BFA_STATUS_ENOMEM; bfad->im = im; im->bfad = bfad; if (bfad_thread_workq(bfad) != BFA_STATUS_OK) { kfree(im); - rc = BFA_STATUS_FAILED; + return BFA_STATUS_FAILED; } INIT_WORK(&im->aen_im_notify_work, bfad_aen_im_notify_handler); -ext: - return rc; + return BFA_STATUS_OK; } void diff --git a/drivers/scsi/bnx2fc/Makefile b/drivers/scsi/bnx2fc/Makefile index a92695a2517..141149e8cda 100644 --- a/drivers/scsi/bnx2fc/Makefile +++ b/drivers/scsi/bnx2fc/Makefile @@ -1,3 +1,4 @@ obj-$(CONFIG_SCSI_BNX2X_FCOE) += bnx2fc.o -bnx2fc-y := bnx2fc_els.o bnx2fc_fcoe.o bnx2fc_hwi.o bnx2fc_io.o bnx2fc_tgt.o +bnx2fc-y := 
bnx2fc_els.o bnx2fc_fcoe.o bnx2fc_hwi.o bnx2fc_io.o bnx2fc_tgt.o \ + bnx2fc_debug.o diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h index 0578fa0dc14..3486845ba30 100644 --- a/drivers/scsi/bnx2fc/bnx2fc.h +++ b/drivers/scsi/bnx2fc/bnx2fc.h @@ -11,6 +11,8 @@ * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com) */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/kernel.h> @@ -57,12 +59,12 @@ #include <scsi/fc/fc_fcp.h> #include "57xx_hsi_bnx2fc.h" -#include "bnx2fc_debug.h" #include "../../net/ethernet/broadcom/cnic_if.h" +#include "../../net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h" #include "bnx2fc_constants.h" #define BNX2FC_NAME "bnx2fc" -#define BNX2FC_VERSION "1.0.11" +#define BNX2FC_VERSION "1.0.12" #define PFX "bnx2fc: " @@ -84,6 +86,8 @@ #define BNX2FC_NUM_MAX_SESS 1024 #define BNX2FC_NUM_MAX_SESS_LOG (ilog2(BNX2FC_NUM_MAX_SESS)) +#define BNX2FC_MAX_NPIV 256 + #define BNX2FC_MAX_OUTSTANDING_CMNDS 2048 #define BNX2FC_CAN_QUEUE BNX2FC_MAX_OUTSTANDING_CMNDS #define BNX2FC_ELSTM_XIDS BNX2FC_CAN_QUEUE @@ -206,6 +210,7 @@ struct bnx2fc_hba { struct fcoe_statistics_params *stats_buffer; dma_addr_t stats_buf_dma; struct completion stat_req_done; + struct fcoe_capabilities fcoe_cap; /*destroy handling */ struct timer_list destroy_timer; @@ -274,6 +279,7 @@ struct bnx2fc_rport { #define BNX2FC_FLAG_CTX_ALLOC_FAILURE 0x6 #define BNX2FC_FLAG_UPLD_REQ_COMPL 0x7 #define BNX2FC_FLAG_EXPL_LOGO 0x8 +#define BNX2FC_FLAG_DISABLE_FAILED 0x9 u8 src_addr[ETH_ALEN]; u32 max_sqes; @@ -554,4 +560,7 @@ void bnx2fc_process_seq_cleanup_compl(struct bnx2fc_cmd *seq_clnup_req, int bnx2fc_initiate_seq_cleanup(struct bnx2fc_cmd *orig_io_req, u32 offset, enum fc_rctl r_ctl); + +#include "bnx2fc_debug.h" + #endif diff --git a/drivers/scsi/bnx2fc/bnx2fc_debug.c b/drivers/scsi/bnx2fc/bnx2fc_debug.c new file mode 100644 index 00000000000..0cbee1b23ee --- /dev/null +++ b/drivers/scsi/bnx2fc/bnx2fc_debug.c @@ -0,0 +1,70 @@ +#include "bnx2fc.h" + +void BNX2FC_IO_DBG(const struct bnx2fc_cmd *io_req, const char *fmt, ...) +{ + struct va_format vaf; + va_list args; + + if (likely(!(bnx2fc_debug_level & LOG_IO))) + return; + + va_start(args, fmt); + + vaf.fmt = fmt; + vaf.va = &args; + + if (io_req && io_req->port && io_req->port->lport && + io_req->port->lport->host) + shost_printk(KERN_INFO, io_req->port->lport->host, + PFX "xid:0x%x %pV", + io_req->xid, &vaf); + else + pr_info("NULL %pV", &vaf); + + va_end(args); +} + +void BNX2FC_TGT_DBG(const struct bnx2fc_rport *tgt, const char *fmt, ...) +{ + struct va_format vaf; + va_list args; + + if (likely(!(bnx2fc_debug_level & LOG_TGT))) + return; + + va_start(args, fmt); + + vaf.fmt = fmt; + vaf.va = &args; + + if (tgt && tgt->port && tgt->port->lport && tgt->port->lport->host && + tgt->rport) + shost_printk(KERN_INFO, tgt->port->lport->host, + PFX "port:%x %pV", + tgt->rport->port_id, &vaf); + else + pr_info("NULL %pV", &vaf); + + va_end(args); +} + +void BNX2FC_HBA_DBG(const struct fc_lport *lport, const char *fmt, ...) 
+{ + struct va_format vaf; + va_list args; + + if (likely(!(bnx2fc_debug_level & LOG_HBA))) + return; + + va_start(args, fmt); + + vaf.fmt = fmt; + vaf.va = &args; + + if (lport && lport->host) + shost_printk(KERN_INFO, lport->host, PFX "%pV", &vaf); + else + pr_info("NULL %pV", &vaf); + + va_end(args); +} diff --git a/drivers/scsi/bnx2fc/bnx2fc_debug.h b/drivers/scsi/bnx2fc/bnx2fc_debug.h index 3416d9a746c..4808ff99621 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_debug.h +++ b/drivers/scsi/bnx2fc/bnx2fc_debug.h @@ -11,60 +11,23 @@ extern unsigned int bnx2fc_debug_level; -#define BNX2FC_CHK_LOGGING(LEVEL, CMD) \ - do { \ - if (unlikely(bnx2fc_debug_level & LEVEL)) \ - do { \ - CMD; \ - } while (0); \ - } while (0) - -#define BNX2FC_ELS_DBG(fmt, arg...) \ - BNX2FC_CHK_LOGGING(LOG_ELS, \ - printk(KERN_INFO PFX fmt, ##arg)) - -#define BNX2FC_MISC_DBG(fmt, arg...) \ - BNX2FC_CHK_LOGGING(LOG_MISC, \ - printk(KERN_INFO PFX fmt, ##arg)) - -#define BNX2FC_IO_DBG(io_req, fmt, arg...) \ - do { \ - if (!io_req || !io_req->port || !io_req->port->lport || \ - !io_req->port->lport->host) \ - BNX2FC_CHK_LOGGING(LOG_IO, \ - printk(KERN_INFO PFX "NULL " fmt, ##arg)); \ - else \ - BNX2FC_CHK_LOGGING(LOG_IO, \ - shost_printk(KERN_INFO, \ - (io_req)->port->lport->host, \ - PFX "xid:0x%x " fmt, \ - (io_req)->xid, ##arg)); \ - } while (0) - -#define BNX2FC_TGT_DBG(tgt, fmt, arg...) \ - do { \ - if (!tgt || !tgt->port || !tgt->port->lport || \ - !tgt->port->lport->host || !tgt->rport) \ - BNX2FC_CHK_LOGGING(LOG_TGT, \ - printk(KERN_INFO PFX "NULL " fmt, ##arg)); \ - else \ - BNX2FC_CHK_LOGGING(LOG_TGT, \ - shost_printk(KERN_INFO, \ - (tgt)->port->lport->host, \ - PFX "port:%x " fmt, \ - (tgt)->rport->port_id, ##arg)); \ - } while (0) - - -#define BNX2FC_HBA_DBG(lport, fmt, arg...) \ - do { \ - if (!lport || !lport->host) \ - BNX2FC_CHK_LOGGING(LOG_HBA, \ - printk(KERN_INFO PFX "NULL " fmt, ##arg)); \ - else \ - BNX2FC_CHK_LOGGING(LOG_HBA, \ - shost_printk(KERN_INFO, lport->host, \ - PFX fmt, ##arg)); \ - } while (0) +#define BNX2FC_ELS_DBG(fmt, ...) \ +do { \ + if (unlikely(bnx2fc_debug_level & LOG_ELS)) \ + pr_info(fmt, ##__VA_ARGS__); \ +} while (0) + +#define BNX2FC_MISC_DBG(fmt, ...) 
\ +do { \ + if (unlikely(bnx2fc_debug_level & LOG_MISC)) \ + pr_info(fmt, ##__VA_ARGS__); \ +} while (0) + +__printf(2, 3) +void BNX2FC_IO_DBG(const struct bnx2fc_cmd *io_req, const char *fmt, ...); +__printf(2, 3) +void BNX2FC_TGT_DBG(const struct bnx2fc_rport *tgt, const char *fmt, ...); +__printf(2, 3) +void BNX2FC_HBA_DBG(const struct fc_lport *lport, const char *fmt, ...); #endif diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c index f52f668fd24..ae1cb7639d9 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c +++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c @@ -22,7 +22,7 @@ DEFINE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu); #define DRV_MODULE_NAME "bnx2fc" #define DRV_MODULE_VERSION BNX2FC_VERSION -#define DRV_MODULE_RELDATE "Apr 24, 2012" +#define DRV_MODULE_RELDATE "Jun 04, 2012" static char version[] __devinitdata = @@ -286,7 +286,7 @@ static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp) struct fcoe_port *port; struct fcoe_hdr *hp; struct bnx2fc_rport *tgt; - struct fcoe_dev_stats *stats; + struct fc_stats *stats; u8 sof, eof; u32 crc; unsigned int hlen, tlen, elen; @@ -412,7 +412,7 @@ static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp) } /*update tx stats */ - stats = per_cpu_ptr(lport->dev_stats, get_cpu()); + stats = per_cpu_ptr(lport->stats, get_cpu()); stats->TxFrames++; stats->TxWords += wlen; put_cpu(); @@ -522,7 +522,7 @@ static void bnx2fc_recv_frame(struct sk_buff *skb) u32 fr_len; struct fc_lport *lport; struct fcoe_rcv_info *fr; - struct fcoe_dev_stats *stats; + struct fc_stats *stats; struct fc_frame_header *fh; struct fcoe_crc_eof crc_eof; struct fc_frame *fp; @@ -551,7 +551,7 @@ static void bnx2fc_recv_frame(struct sk_buff *skb) skb_pull(skb, sizeof(struct fcoe_hdr)); fr_len = skb->len - sizeof(struct fcoe_crc_eof); - stats = per_cpu_ptr(lport->dev_stats, get_cpu()); + stats = per_cpu_ptr(lport->stats, get_cpu()); stats->RxFrames++; stats->RxWords += fr_len / FCOE_WORD_TO_BYTE; @@ -942,7 +942,7 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event, FC_PORTTYPE_UNKNOWN; mutex_unlock(&lport->lp_mutex); fc_host_port_type(lport->host) = FC_PORTTYPE_UNKNOWN; - per_cpu_ptr(lport->dev_stats, + per_cpu_ptr(lport->stats, get_cpu())->LinkFailureCount++; put_cpu(); fcoe_clean_pending_queue(lport); @@ -1326,6 +1326,7 @@ static void bnx2fc_hba_destroy(struct bnx2fc_hba *hba) static struct bnx2fc_hba *bnx2fc_hba_create(struct cnic_dev *cnic) { struct bnx2fc_hba *hba; + struct fcoe_capabilities *fcoe_cap; int rc; hba = kzalloc(sizeof(*hba), GFP_KERNEL); @@ -1361,6 +1362,21 @@ static struct bnx2fc_hba *bnx2fc_hba_create(struct cnic_dev *cnic) printk(KERN_ERR PFX "em_config:bnx2fc_cmd_mgr_alloc failed\n"); goto cmgr_err; } + fcoe_cap = &hba->fcoe_cap; + + fcoe_cap->capability1 = BNX2FC_TM_MAX_SQES << + FCOE_IOS_PER_CONNECTION_SHIFT; + fcoe_cap->capability1 |= BNX2FC_NUM_MAX_SESS << + FCOE_LOGINS_PER_PORT_SHIFT; + fcoe_cap->capability2 = BNX2FC_MAX_OUTSTANDING_CMNDS << + FCOE_NUMBER_OF_EXCHANGES_SHIFT; + fcoe_cap->capability2 |= BNX2FC_MAX_NPIV << + FCOE_NPIV_WWN_PER_PORT_SHIFT; + fcoe_cap->capability3 = BNX2FC_NUM_MAX_SESS << + FCOE_TARGETS_SUPPORTED_SHIFT; + fcoe_cap->capability3 |= BNX2FC_MAX_OUTSTANDING_CMNDS << + FCOE_OUTSTANDING_COMMANDS_SHIFT; + fcoe_cap->capability4 = FCOE_CAPABILITY4_STATEFUL; init_waitqueue_head(&hba->shutdown_wait); init_waitqueue_head(&hba->destroy_wait); @@ -1691,6 +1707,32 @@ static void bnx2fc_unbind_pcidev(struct bnx2fc_hba *hba) hba->pcidev = NULL; } +/** + * 
bnx2fc_ulp_get_stats - cnic callback to populate FCoE stats + * + * @handle: transport handle pointing to adapter structure + */ +static int bnx2fc_ulp_get_stats(void *handle) +{ + struct bnx2fc_hba *hba = handle; + struct cnic_dev *cnic; + struct fcoe_stats_info *stats_addr; + + if (!hba) + return -EINVAL; + + cnic = hba->cnic; + stats_addr = &cnic->stats_addr->fcoe_stat; + if (!stats_addr) + return -EINVAL; + + strncpy(stats_addr->version, BNX2FC_VERSION, + sizeof(stats_addr->version)); + stats_addr->txq_size = BNX2FC_SQ_WQES_MAX; + stats_addr->rxq_size = BNX2FC_CQ_WQES_MAX; + + return 0; +} /** @@ -1944,6 +1986,7 @@ static void bnx2fc_ulp_init(struct cnic_dev *dev) adapter_count++; mutex_unlock(&bnx2fc_dev_lock); + dev->fcoe_cap = &hba->fcoe_cap; clear_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic); rc = dev->register_device(dev, CNIC_ULP_FCOE, (void *) hba); @@ -2019,11 +2062,11 @@ static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode) struct fcoe_ctlr *ctlr; struct bnx2fc_interface *interface; struct bnx2fc_hba *hba; - struct net_device *phys_dev; + struct net_device *phys_dev = netdev; struct fc_lport *lport; struct ethtool_drvinfo drvinfo; int rc = 0; - int vlan_id; + int vlan_id = 0; BNX2FC_MISC_DBG("Entered bnx2fc_create\n"); if (fip_mode != FIP_MODE_FABRIC) { @@ -2041,14 +2084,9 @@ static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode) } /* obtain physical netdev */ - if (netdev->priv_flags & IFF_802_1Q_VLAN) { + if (netdev->priv_flags & IFF_802_1Q_VLAN) phys_dev = vlan_dev_real_dev(netdev); - vlan_id = vlan_dev_vlan_id(netdev); - } else { - printk(KERN_ERR PFX "Not a vlan device\n"); - rc = -EINVAL; - goto netdev_err; - } + /* verify if the physical device is a netxtreme2 device */ if (phys_dev->ethtool_ops && phys_dev->ethtool_ops->get_drvinfo) { memset(&drvinfo, 0, sizeof(drvinfo)); @@ -2083,9 +2121,13 @@ static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode) goto ifput_err; } + if (netdev->priv_flags & IFF_802_1Q_VLAN) { + vlan_id = vlan_dev_vlan_id(netdev); + interface->vlan_enabled = 1; + } + ctlr = bnx2fc_to_ctlr(interface); interface->vlan_id = vlan_id; - interface->vlan_enabled = 1; interface->timer_work_queue = create_singlethread_workqueue("bnx2fc_timer_wq"); @@ -2152,13 +2194,10 @@ mod_err: **/ static struct bnx2fc_hba *bnx2fc_find_hba_for_cnic(struct cnic_dev *cnic) { - struct list_head *list; - struct list_head *temp; struct bnx2fc_hba *hba; /* Called with bnx2fc_dev_lock held */ - list_for_each_safe(list, temp, &adapter_list) { - hba = (struct bnx2fc_hba *)list; + list_for_each_entry(hba, &adapter_list, list) { if (hba->cnic == cnic) return hba; } @@ -2252,15 +2291,17 @@ static int bnx2fc_fcoe_reset(struct Scsi_Host *shost) static bool bnx2fc_match(struct net_device *netdev) { + struct net_device *phys_dev = netdev; + mutex_lock(&bnx2fc_dev_lock); - if (netdev->priv_flags & IFF_802_1Q_VLAN) { - struct net_device *phys_dev = vlan_dev_real_dev(netdev); + if (netdev->priv_flags & IFF_802_1Q_VLAN) + phys_dev = vlan_dev_real_dev(netdev); - if (bnx2fc_hba_lookup(phys_dev)) { - mutex_unlock(&bnx2fc_dev_lock); - return true; - } + if (bnx2fc_hba_lookup(phys_dev)) { + mutex_unlock(&bnx2fc_dev_lock); + return true; } + mutex_unlock(&bnx2fc_dev_lock); return false; } @@ -2290,9 +2331,9 @@ static void bnx2fc_percpu_thread_create(unsigned int cpu) p = &per_cpu(bnx2fc_percpu, cpu); - thread = kthread_create(bnx2fc_percpu_io_thread, - (void *)p, - "bnx2fc_thread/%d", cpu); + thread = 
kthread_create_on_node(bnx2fc_percpu_io_thread, + (void *)p, cpu_to_node(cpu), + "bnx2fc_thread/%d", cpu); /* bind thread to the cpu */ if (likely(!IS_ERR(thread))) { kthread_bind(thread, cpu); @@ -2643,4 +2684,5 @@ static struct cnic_ulp_ops bnx2fc_cnic_cb = { .cnic_stop = bnx2fc_ulp_stop, .indicate_kcqes = bnx2fc_indicate_kcqe, .indicate_netevent = bnx2fc_indicate_netevent, + .cnic_get_stats = bnx2fc_ulp_get_stats, }; diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c index 2ca6bfe4ce5..6d6eee42ac7 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c +++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c @@ -1244,7 +1244,9 @@ static void bnx2fc_process_conn_disable_cmpl(struct bnx2fc_hba *hba, if (disable_kcqe->completion_status) { printk(KERN_ERR PFX "Disable failed with cmpl status %d\n", disable_kcqe->completion_status); - return; + set_bit(BNX2FC_FLAG_DISABLE_FAILED, &tgt->flags); + set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags); + wake_up_interruptible(&tgt->upld_wait); } else { /* disable successful */ BNX2FC_TGT_DBG(tgt, "disable successful\n"); diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c index 4f7453b9e41..73f231ccd45 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_io.c +++ b/drivers/scsi/bnx2fc/bnx2fc_io.c @@ -405,11 +405,10 @@ free_cmd_pool: goto free_cmgr; for (i = 0; i < num_possible_cpus() + 1; i++) { - struct list_head *list; - struct list_head *tmp; + struct bnx2fc_cmd *tmp, *io_req; - list_for_each_safe(list, tmp, &cmgr->free_list[i]) { - struct bnx2fc_cmd *io_req = (struct bnx2fc_cmd *)list; + list_for_each_entry_safe(io_req, tmp, + &cmgr->free_list[i], link) { list_del(&io_req->link); kfree(io_req); } @@ -1436,9 +1435,7 @@ static void bnx2fc_lun_reset_cmpl(struct bnx2fc_cmd *io_req) { struct scsi_cmnd *sc_cmd = io_req->sc_cmd; struct bnx2fc_rport *tgt = io_req->tgt; - struct list_head *list; - struct list_head *tmp; - struct bnx2fc_cmd *cmd; + struct bnx2fc_cmd *cmd, *tmp; int tm_lun = sc_cmd->device->lun; int rc = 0; int lun; @@ -1449,9 +1446,8 @@ static void bnx2fc_lun_reset_cmpl(struct bnx2fc_cmd *io_req) * Walk thru the active_ios queue and ABORT the IO * that matches with the LUN that was reset */ - list_for_each_safe(list, tmp, &tgt->active_cmd_queue) { + list_for_each_entry_safe(cmd, tmp, &tgt->active_cmd_queue, link) { BNX2FC_TGT_DBG(tgt, "LUN RST cmpl: scan for pending IOs\n"); - cmd = (struct bnx2fc_cmd *)list; lun = cmd->sc_cmd->device->lun; if (lun == tm_lun) { /* Initiate ABTS on this cmd */ @@ -1476,9 +1472,7 @@ static void bnx2fc_lun_reset_cmpl(struct bnx2fc_cmd *io_req) static void bnx2fc_tgt_reset_cmpl(struct bnx2fc_cmd *io_req) { struct bnx2fc_rport *tgt = io_req->tgt; - struct list_head *list; - struct list_head *tmp; - struct bnx2fc_cmd *cmd; + struct bnx2fc_cmd *cmd, *tmp; int rc = 0; /* called with tgt_lock held */ @@ -1487,9 +1481,8 @@ static void bnx2fc_tgt_reset_cmpl(struct bnx2fc_cmd *io_req) * Walk thru the active_ios queue and ABORT the IO * that matches with the LUN that was reset */ - list_for_each_safe(list, tmp, &tgt->active_cmd_queue) { + list_for_each_entry_safe(cmd, tmp, &tgt->active_cmd_queue, link) { BNX2FC_TGT_DBG(tgt, "TGT RST cmpl: scan for pending IOs\n"); - cmd = (struct bnx2fc_cmd *)list; /* Initiate ABTS */ if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS, &cmd->req_flags)) { @@ -1980,7 +1973,7 @@ int bnx2fc_post_io_req(struct bnx2fc_rport *tgt, struct bnx2fc_interface *interface = port->priv; struct bnx2fc_hba *hba = interface->hba; struct fc_lport *lport = port->lport; - struct fcoe_dev_stats 
*stats; + struct fc_stats *stats; int task_idx, index; u16 xid; @@ -1991,7 +1984,7 @@ int bnx2fc_post_io_req(struct bnx2fc_rport *tgt, io_req->data_xfer_len = scsi_bufflen(sc_cmd); sc_cmd->SCp.ptr = (char *)io_req; - stats = per_cpu_ptr(lport->dev_stats, get_cpu()); + stats = per_cpu_ptr(lport->stats, get_cpu()); if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) { io_req->io_req_flags = BNX2FC_READ; stats->InputRequests++; diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c index 082a25c3117..b9d0d9cb17f 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_tgt.c +++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c @@ -150,8 +150,7 @@ tgt_init_err: void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt) { struct bnx2fc_cmd *io_req; - struct list_head *list; - struct list_head *tmp; + struct bnx2fc_cmd *tmp; int rc; int i = 0; BNX2FC_TGT_DBG(tgt, "Entered flush_active_ios - %d\n", @@ -160,9 +159,8 @@ void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt) spin_lock_bh(&tgt->tgt_lock); tgt->flush_in_prog = 1; - list_for_each_safe(list, tmp, &tgt->active_cmd_queue) { + list_for_each_entry_safe(io_req, tmp, &tgt->active_cmd_queue, link) { i++; - io_req = (struct bnx2fc_cmd *)list; list_del_init(&io_req->link); io_req->on_active_queue = 0; BNX2FC_IO_DBG(io_req, "cmd_queue cleanup\n"); @@ -181,13 +179,18 @@ void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt) set_bit(BNX2FC_FLAG_IO_COMPL, &io_req->req_flags); set_bit(BNX2FC_FLAG_IO_CLEANUP, &io_req->req_flags); - rc = bnx2fc_initiate_cleanup(io_req); - BUG_ON(rc); + + /* Do not issue cleanup when disable request failed */ + if (test_bit(BNX2FC_FLAG_DISABLE_FAILED, &tgt->flags)) + bnx2fc_process_cleanup_compl(io_req, io_req->task, 0); + else { + rc = bnx2fc_initiate_cleanup(io_req); + BUG_ON(rc); + } } - list_for_each_safe(list, tmp, &tgt->active_tm_queue) { + list_for_each_entry_safe(io_req, tmp, &tgt->active_tm_queue, link) { i++; - io_req = (struct bnx2fc_cmd *)list; list_del_init(&io_req->link); io_req->on_tmf_queue = 0; BNX2FC_IO_DBG(io_req, "tm_queue cleanup\n"); @@ -195,9 +198,8 @@ void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt) complete(&io_req->tm_done); } - list_for_each_safe(list, tmp, &tgt->els_queue) { + list_for_each_entry_safe(io_req, tmp, &tgt->els_queue, link) { i++; - io_req = (struct bnx2fc_cmd *)list; list_del_init(&io_req->link); io_req->on_active_queue = 0; @@ -212,13 +214,17 @@ void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt) io_req->cb_arg = NULL; } - rc = bnx2fc_initiate_cleanup(io_req); - BUG_ON(rc); + /* Do not issue cleanup when disable request failed */ + if (test_bit(BNX2FC_FLAG_DISABLE_FAILED, &tgt->flags)) + bnx2fc_process_cleanup_compl(io_req, io_req->task, 0); + else { + rc = bnx2fc_initiate_cleanup(io_req); + BUG_ON(rc); + } } - list_for_each_safe(list, tmp, &tgt->io_retire_queue) { + list_for_each_entry_safe(io_req, tmp, &tgt->io_retire_queue, link) { i++; - io_req = (struct bnx2fc_cmd *)list; list_del_init(&io_req->link); BNX2FC_IO_DBG(io_req, "retire_queue flush\n"); @@ -321,9 +327,13 @@ static void bnx2fc_upload_session(struct fcoe_port *port, del_timer_sync(&tgt->upld_timer); - } else + } else if (test_bit(BNX2FC_FLAG_DISABLE_FAILED, &tgt->flags)) { + printk(KERN_ERR PFX "ERROR!! DISABLE req failed, destroy" + " not sent to FW\n"); + } else { printk(KERN_ERR PFX "ERROR!! 
DISABLE req timed out, destroy" " not sent to FW\n"); + } /* Free session resources */ bnx2fc_free_session_resc(hba, tgt); diff --git a/drivers/scsi/bnx2i/57xx_iscsi_hsi.h b/drivers/scsi/bnx2i/57xx_iscsi_hsi.h index dc0a08e69c8..f2db5fe7bdc 100644 --- a/drivers/scsi/bnx2i/57xx_iscsi_hsi.h +++ b/drivers/scsi/bnx2i/57xx_iscsi_hsi.h @@ -267,7 +267,13 @@ struct bnx2i_cmd_request { * task statistics for write response */ struct bnx2i_write_resp_task_stat { - u32 num_data_ins; +#if defined(__BIG_ENDIAN) + u16 num_r2ts; + u16 num_data_outs; +#elif defined(__LITTLE_ENDIAN) + u16 num_data_outs; + u16 num_r2ts; +#endif }; /* @@ -275,11 +281,11 @@ struct bnx2i_write_resp_task_stat { */ struct bnx2i_read_resp_task_stat { #if defined(__BIG_ENDIAN) - u16 num_data_outs; - u16 num_r2ts; + u16 reserved; + u16 num_data_ins; #elif defined(__LITTLE_ENDIAN) - u16 num_r2ts; - u16 num_data_outs; + u16 num_data_ins; + u16 reserved; #endif }; diff --git a/drivers/scsi/bnx2i/bnx2i.h b/drivers/scsi/bnx2i/bnx2i.h index 0c53c28dc3d..3f9e7061258 100644 --- a/drivers/scsi/bnx2i/bnx2i.h +++ b/drivers/scsi/bnx2i/bnx2i.h @@ -44,6 +44,8 @@ #include "57xx_iscsi_hsi.h" #include "57xx_iscsi_constants.h" +#include "../../net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h" + #define BNX2_ISCSI_DRIVER_NAME "bnx2i" #define BNX2I_MAX_ADAPTERS 8 @@ -126,6 +128,43 @@ #define REG_WR(__hba, offset, val) \ writel(val, __hba->regview + offset) +#ifdef CONFIG_32BIT +#define GET_STATS_64(__hba, dst, field) \ + do { \ + spin_lock_bh(&__hba->stat_lock); \ + dst->field##_lo = __hba->stats.field##_lo; \ + dst->field##_hi = __hba->stats.field##_hi; \ + spin_unlock_bh(&__hba->stat_lock); \ + } while (0) + +#define ADD_STATS_64(__hba, field, len) \ + do { \ + if (spin_trylock(&__hba->stat_lock)) { \ + if (__hba->stats.field##_lo + len < \ + __hba->stats.field##_lo) \ + __hba->stats.field##_hi++; \ + __hba->stats.field##_lo += len; \ + spin_unlock(&__hba->stat_lock); \ + } \ + } while (0) + +#else +#define GET_STATS_64(__hba, dst, field) \ + do { \ + u64 val, *out; \ + \ + val = __hba->bnx2i_stats.field; \ + out = (u64 *)&__hba->stats.field##_lo; \ + *out = cpu_to_le64(val); \ + out = (u64 *)&dst->field##_lo; \ + *out = cpu_to_le64(val); \ + } while (0) + +#define ADD_STATS_64(__hba, field, len) \ + do { \ + __hba->bnx2i_stats.field += len; \ + } while (0) +#endif /** * struct generic_pdu_resc - login pdu resource structure @@ -288,6 +327,15 @@ struct iscsi_cid_queue { struct bnx2i_conn **conn_cid_tbl; }; + +struct bnx2i_stats_info { + u64 rx_pdus; + u64 rx_bytes; + u64 tx_pdus; + u64 tx_bytes; +}; + + /** * struct bnx2i_hba - bnx2i adapter structure * @@ -341,6 +389,8 @@ struct iscsi_cid_queue { * @ctx_ccell_tasks: captures number of ccells and tasks supported by * currently offloaded connection, used to decode * context memory + * @stat_lock: spin lock used by the statistic collector (32 bit) + * @stats: local iSCSI statistic collection place holder * * Adapter Data Structure */ @@ -350,6 +400,7 @@ struct bnx2i_hba { struct pci_dev *pcidev; struct net_device *netdev; void __iomem *regview; + resource_size_t reg_base; u32 age; unsigned long cnic_dev_type; @@ -426,6 +477,12 @@ struct bnx2i_hba { u32 num_sess_opened; u32 num_conn_opened; unsigned int ctx_ccell_tasks; + +#ifdef CONFIG_32BIT + spinlock_t stat_lock; +#endif + struct bnx2i_stats_info bnx2i_stats; + struct iscsi_stats_info stats; }; @@ -749,6 +806,8 @@ extern void bnx2i_ulp_init(struct cnic_dev *dev); extern void bnx2i_ulp_exit(struct cnic_dev *dev); extern void bnx2i_start(void *handle); 
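[Review note] The CONFIG_32BIT variants of GET_STATS_64()/ADD_STATS_64() above keep each 64-bit counter as a pair of u32 halves, detect unsigned wrap-around of the low half, and serialize access with hba->stat_lock (the writer only spin_trylock()s, so the I/O fast path never blocks on statistics). A minimal sketch of the carry logic, with hypothetical names and the locking left out:

	#include <stdint.h>

	/* Hypothetical split counter; stands in for the field##_lo /
	 * field##_hi pair the macros above expand to. */
	struct split_ctr {
		uint32_t lo;
		uint32_t hi;
	};

	static void split_ctr_add(struct split_ctr *c, uint32_t len)
	{
		/* unsigned add wraps, and a wrapped sum is smaller than 'lo' */
		if (c->lo + len < c->lo)
			c->hi++;		/* carry into the high word */
		c->lo += len;
	}

	static uint64_t split_ctr_read(const struct split_ctr *c)
	{
		return ((uint64_t)c->hi << 32) | c->lo;
	}

On 64-bit builds the macros bypass all of this and store a native u64, which is why stat_lock itself is only defined under CONFIG_32BIT.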
extern void bnx2i_stop(void *handle); +extern int bnx2i_get_stats(void *handle); + extern struct bnx2i_hba *get_adapter_list_head(void); struct bnx2i_conn *bnx2i_get_conn_from_id(struct bnx2i_hba *hba, diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c index ece47e50228..33d6630529d 100644 --- a/drivers/scsi/bnx2i/bnx2i_hwi.c +++ b/drivers/scsi/bnx2i/bnx2i_hwi.c @@ -1350,6 +1350,7 @@ int bnx2i_process_scsi_cmd_resp(struct iscsi_session *session, struct cqe *cqe) { struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data; + struct bnx2i_hba *hba = bnx2i_conn->hba; struct bnx2i_cmd_response *resp_cqe; struct bnx2i_cmd *bnx2i_cmd; struct iscsi_task *task; @@ -1367,16 +1368,26 @@ int bnx2i_process_scsi_cmd_resp(struct iscsi_session *session, if (bnx2i_cmd->req.op_attr & ISCSI_CMD_REQUEST_READ) { conn->datain_pdus_cnt += - resp_cqe->task_stat.read_stat.num_data_outs; + resp_cqe->task_stat.read_stat.num_data_ins; conn->rxdata_octets += bnx2i_cmd->req.total_data_transfer_length; + ADD_STATS_64(hba, rx_pdus, + resp_cqe->task_stat.read_stat.num_data_ins); + ADD_STATS_64(hba, rx_bytes, + bnx2i_cmd->req.total_data_transfer_length); } else { conn->dataout_pdus_cnt += - resp_cqe->task_stat.read_stat.num_data_outs; + resp_cqe->task_stat.write_stat.num_data_outs; conn->r2t_pdus_cnt += - resp_cqe->task_stat.read_stat.num_r2ts; + resp_cqe->task_stat.write_stat.num_r2ts; conn->txdata_octets += bnx2i_cmd->req.total_data_transfer_length; + ADD_STATS_64(hba, tx_pdus, + resp_cqe->task_stat.write_stat.num_data_outs); + ADD_STATS_64(hba, tx_bytes, + bnx2i_cmd->req.total_data_transfer_length); + ADD_STATS_64(hba, rx_pdus, + resp_cqe->task_stat.write_stat.num_r2ts); } bnx2i_iscsi_unmap_sg_list(bnx2i_cmd); @@ -1961,6 +1972,7 @@ static int bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn) { struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data; struct iscsi_session *session = conn->session; + struct bnx2i_hba *hba = bnx2i_conn->hba; struct qp_info *qp; struct bnx2i_nop_in_msg *nopin; int tgt_async_msg; @@ -1973,7 +1985,7 @@ static int bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn) if (!qp->cq_virt) { printk(KERN_ALERT "bnx2i (%s): cq resr freed in bh execution!", - bnx2i_conn->hba->netdev->name); + hba->netdev->name); goto out; } while (1) { @@ -1985,9 +1997,9 @@ static int bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn) if (nopin->op_code == ISCSI_OP_NOOP_IN && nopin->itt == (u16) RESERVED_ITT) { printk(KERN_ALERT "bnx2i: Unsolicited " - "NOP-In detected for suspended " - "connection dev=%s!\n", - bnx2i_conn->hba->netdev->name); + "NOP-In detected for suspended " + "connection dev=%s!\n", + hba->netdev->name); bnx2i_unsol_pdu_adjust_rq(bnx2i_conn); goto cqe_out; } @@ -2001,7 +2013,7 @@ static int bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn) /* Run the kthread engine only for data cmds All other cmds will be completed in this bh! */ bnx2i_queue_scsi_cmd_resp(session, bnx2i_conn, nopin); - break; + goto done; case ISCSI_OP_LOGIN_RSP: bnx2i_process_login_resp(session, bnx2i_conn, qp->cq_cons_qe); @@ -2044,11 +2056,15 @@ static int bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn) printk(KERN_ALERT "bnx2i: unknown opcode 0x%x\n", nopin->op_code); } + + ADD_STATS_64(hba, rx_pdus, 1); + ADD_STATS_64(hba, rx_bytes, nopin->data_length); +done: if (!tgt_async_msg) { if (!atomic_read(&bnx2i_conn->ep->num_active_cmds)) printk(KERN_ALERT "bnx2i (%s): no active cmd! 
" "op 0x%x\n", - bnx2i_conn->hba->netdev->name, + hba->netdev->name, nopin->op_code); else atomic_dec(&bnx2i_conn->ep->num_active_cmds); @@ -2692,6 +2708,7 @@ struct cnic_ulp_ops bnx2i_cnic_cb = { .cm_remote_close = bnx2i_cm_remote_close, .cm_remote_abort = bnx2i_cm_remote_abort, .iscsi_nl_send_msg = bnx2i_send_nl_mesg, + .cnic_get_stats = bnx2i_get_stats, .owner = THIS_MODULE }; @@ -2724,7 +2741,6 @@ int bnx2i_map_ep_dbell_regs(struct bnx2i_endpoint *ep) goto arm_cq; } - reg_base = ep->hba->netdev->base_addr; if ((test_bit(BNX2I_NX2_DEV_5709, &ep->hba->cnic_dev_type)) && (ep->hba->mail_queue_access == BNX2I_MQ_BIN_MODE)) { config2 = REG_RD(ep->hba, BNX2_MQ_CONFIG2); @@ -2740,7 +2756,7 @@ int bnx2i_map_ep_dbell_regs(struct bnx2i_endpoint *ep) /* 5709 device in normal node and 5706/5708 devices */ reg_off = CTX_OFFSET + (MB_KERNEL_CTX_SIZE * cid_num); - ep->qp.ctx_base = ioremap_nocache(reg_base + reg_off, + ep->qp.ctx_base = ioremap_nocache(ep->hba->reg_base + reg_off, MB_KERNEL_CTX_SIZE); if (!ep->qp.ctx_base) return -ENOMEM; diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c index 8b6816706ee..b17637aab9a 100644 --- a/drivers/scsi/bnx2i/bnx2i_init.c +++ b/drivers/scsi/bnx2i/bnx2i_init.c @@ -381,6 +381,46 @@ void bnx2i_ulp_exit(struct cnic_dev *dev) /** + * bnx2i_get_stats - Retrieve various statistic from iSCSI offload + * @handle: bnx2i_hba + * + * function callback exported via bnx2i - cnic driver interface to + * retrieve various iSCSI offload related statistics. + */ +int bnx2i_get_stats(void *handle) +{ + struct bnx2i_hba *hba = handle; + struct iscsi_stats_info *stats; + + if (!hba) + return -EINVAL; + + stats = (struct iscsi_stats_info *)hba->cnic->stats_addr; + + if (!stats) + return -ENOMEM; + + strlcpy(stats->version, DRV_MODULE_VERSION, sizeof(stats->version)); + memcpy(stats->mac_add1 + 2, hba->cnic->mac_addr, ETH_ALEN); + + stats->max_frame_size = hba->netdev->mtu; + stats->txq_size = hba->max_sqes; + stats->rxq_size = hba->max_cqes; + + stats->txq_avg_depth = 0; + stats->rxq_avg_depth = 0; + + GET_STATS_64(hba, stats, rx_pdus); + GET_STATS_64(hba, stats, rx_bytes); + + GET_STATS_64(hba, stats, tx_pdus); + GET_STATS_64(hba, stats, tx_bytes); + + return 0; +} + + +/** * bnx2i_percpu_thread_create - Create a receive thread for an * online CPU * diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c index f8d516b5316..3b34c13e2f0 100644 --- a/drivers/scsi/bnx2i/bnx2i_iscsi.c +++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c @@ -811,13 +811,13 @@ struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic) bnx2i_identify_device(hba); bnx2i_setup_host_queue_size(hba, shost); + hba->reg_base = pci_resource_start(hba->pcidev, 0); if (test_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type)) { - hba->regview = ioremap_nocache(hba->netdev->base_addr, - BNX2_MQ_CONFIG2); + hba->regview = pci_iomap(hba->pcidev, 0, BNX2_MQ_CONFIG2); if (!hba->regview) goto ioreg_map_err; } else if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) { - hba->regview = ioremap_nocache(hba->netdev->base_addr, 4096); + hba->regview = pci_iomap(hba->pcidev, 0, 4096); if (!hba->regview) goto ioreg_map_err; } @@ -874,6 +874,11 @@ struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic) hba->conn_ctx_destroy_tmo = 2 * HZ; } +#ifdef CONFIG_32BIT + spin_lock_init(&hba->stat_lock); +#endif + memset(&hba->stats, 0, sizeof(struct iscsi_stats_info)); + if (iscsi_host_add(shost, &hba->pcidev->dev)) goto free_dump_mem; return hba; @@ -884,7 +889,7 @@ cid_que_err: bnx2i_free_mp_bdt(hba); 
mp_bdt_mem_err: if (hba->regview) { - iounmap(hba->regview); + pci_iounmap(hba->pcidev, hba->regview); hba->regview = NULL; } ioreg_map_err: @@ -910,7 +915,7 @@ void bnx2i_free_hba(struct bnx2i_hba *hba) pci_dev_put(hba->pcidev); if (hba->regview) { - iounmap(hba->regview); + pci_iounmap(hba->pcidev, hba->regview); hba->regview = NULL; } bnx2i_free_mp_bdt(hba); @@ -1181,12 +1186,18 @@ static int bnx2i_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task) { struct bnx2i_conn *bnx2i_conn = conn->dd_data; + struct bnx2i_hba *hba = bnx2i_conn->hba; struct bnx2i_cmd *cmd = task->dd_data; memset(bnx2i_conn->gen_pdu.req_buf, 0, ISCSI_DEF_MAX_RECV_SEG_LEN); bnx2i_setup_cmd_wqe_template(cmd); bnx2i_conn->gen_pdu.req_buf_size = task->data_count; + + /* Tx PDU/data length count */ + ADD_STATS_64(hba, tx_pdus, 1); + ADD_STATS_64(hba, tx_bytes, task->data_count); + if (task->data_count) { memcpy(bnx2i_conn->gen_pdu.req_buf, task->data, task->data_count); diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c index 36739da8bc1..49692a1ac44 100644 --- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c +++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c @@ -966,7 +966,8 @@ static int init_act_open(struct cxgbi_sock *csk) csk->saddr.sin_addr.s_addr = chba->ipv4addr; csk->rss_qid = 0; - csk->l2t = t3_l2t_get(t3dev, dst, ndev); + csk->l2t = t3_l2t_get(t3dev, dst, ndev, + &csk->daddr.sin_addr.s_addr); if (!csk->l2t) { pr_err("NO l2t available.\n"); return -EINVAL; diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c index 5a4a3bfc60c..f924b3c3720 100644 --- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c +++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c @@ -438,8 +438,8 @@ static inline void make_tx_data_wr(struct cxgbi_sock *csk, struct sk_buff *skb, if (submode) wr_ulp_mode = FW_OFLD_TX_DATA_WR_ULPMODE(ULP2_MODE_ISCSI) | FW_OFLD_TX_DATA_WR_ULPSUBMODE(submode); - req->tunnel_to_proxy = htonl(wr_ulp_mode) | - FW_OFLD_TX_DATA_WR_SHOVE(skb_peek(&csk->write_queue) ? 0 : 1); + req->tunnel_to_proxy = htonl(wr_ulp_mode | + FW_OFLD_TX_DATA_WR_SHOVE(skb_peek(&csk->write_queue) ? 
0 : 1)); req->plen = htonl(len); if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT); @@ -1142,7 +1142,7 @@ static int init_act_open(struct cxgbi_sock *csk) cxgbi_sock_set_flag(csk, CTPF_HAS_ATID); cxgbi_sock_get(csk); - n = dst_get_neighbour_noref(csk->dst); + n = dst_neigh_lookup(csk->dst, &csk->daddr.sin_addr.s_addr); if (!n) { pr_err("%s, can't get neighbour of csk->dst.\n", ndev->name); goto rel_resource; @@ -1182,9 +1182,12 @@ static int init_act_open(struct cxgbi_sock *csk) cxgbi_sock_set_state(csk, CTP_ACTIVE_OPEN); send_act_open_req(csk, skb, csk->l2t); + neigh_release(n); return 0; rel_resource: + if (n) + neigh_release(n); if (skb) __kfree_skb(skb); return -EINVAL; diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c index d9253db1d0e..b44c1cff311 100644 --- a/drivers/scsi/cxgbi/libcxgbi.c +++ b/drivers/scsi/cxgbi/libcxgbi.c @@ -494,7 +494,7 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr) goto err_out; } dst = &rt->dst; - n = dst_get_neighbour_noref(dst); + n = dst_neigh_lookup(dst, &daddr->sin_addr.s_addr); if (!n) { err = -ENODEV; goto rel_rt; @@ -506,7 +506,7 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr) &daddr->sin_addr.s_addr, ntohs(daddr->sin_port), ndev->name); err = -ENETUNREACH; - goto rel_rt; + goto rel_neigh; } if (ndev->flags & IFF_LOOPBACK) { @@ -521,7 +521,7 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr) pr_info("dst %pI4, %s, NOT cxgbi device.\n", &daddr->sin_addr.s_addr, ndev->name); err = -ENETUNREACH; - goto rel_rt; + goto rel_neigh; } log_debug(1 << CXGBI_DBG_SOCK, "route to %pI4 :%u, ndev p#%d,%s, cdev 0x%p.\n", @@ -531,7 +531,7 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr) csk = cxgbi_sock_create(cdev); if (!csk) { err = -ENOMEM; - goto rel_rt; + goto rel_neigh; } csk->cdev = cdev; csk->port_id = port; @@ -541,9 +541,13 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr) csk->daddr.sin_port = daddr->sin_port; csk->daddr.sin_family = daddr->sin_family; csk->saddr.sin_addr.s_addr = fl4.saddr; + neigh_release(n); return csk; +rel_neigh: + neigh_release(n); + rel_rt: ip_rt_put(rt); if (csk) diff --git a/drivers/scsi/device_handler/scsi_dh.c b/drivers/scsi/device_handler/scsi_dh.c index 48e46f5b77c..33e422e7583 100644 --- a/drivers/scsi/device_handler/scsi_dh.c +++ b/drivers/scsi/device_handler/scsi_dh.c @@ -468,7 +468,8 @@ EXPORT_SYMBOL_GPL(scsi_dh_handler_exist); /* * scsi_dh_attach - Attach device handler - * @sdev - sdev the handler should be attached to + * @q - Request queue that is associated with the scsi_device + * the handler should be attached to * @name - name of the handler to attach */ int scsi_dh_attach(struct request_queue *q, const char *name) @@ -498,7 +499,8 @@ EXPORT_SYMBOL_GPL(scsi_dh_attach); /* * scsi_dh_detach - Detach device handler - * @sdev - sdev the handler should be detached from + * @q - Request queue that is associated with the scsi_device + * the handler should be detached from * * This function will detach the device handler only * if the sdev is not part of the internal list, ie @@ -527,6 +529,38 @@ void scsi_dh_detach(struct request_queue *q) } EXPORT_SYMBOL_GPL(scsi_dh_detach); +/* + * scsi_dh_attached_handler_name - Get attached device handler's name + * @q - Request queue that is associated with the scsi_device + * that may have a device handler attached + * @gfp - the GFP mask used in the kmalloc() call when allocating memory + * + * 
Returns name of attached handler, NULL if no handler is attached. + * Caller must take care to free the returned string. + */ +const char *scsi_dh_attached_handler_name(struct request_queue *q, gfp_t gfp) +{ + unsigned long flags; + struct scsi_device *sdev; + const char *handler_name = NULL; + + spin_lock_irqsave(q->queue_lock, flags); + sdev = q->queuedata; + if (!sdev || !get_device(&sdev->sdev_gendev)) + sdev = NULL; + spin_unlock_irqrestore(q->queue_lock, flags); + + if (!sdev) + return NULL; + + if (sdev->scsi_dh_data) + handler_name = kstrdup(sdev->scsi_dh_data->scsi_dh->name, gfp); + + put_device(&sdev->sdev_gendev); + return handler_name; +} +EXPORT_SYMBOL_GPL(scsi_dh_attached_handler_name); + static struct notifier_block scsi_dh_nb = { .notifier_call = scsi_dh_notifier }; diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c index fda9cdea0e6..08d80a6d272 100644 --- a/drivers/scsi/device_handler/scsi_dh_alua.c +++ b/drivers/scsi/device_handler/scsi_dh_alua.c @@ -46,13 +46,16 @@ #define TPGS_SUPPORT_OFFLINE 0x40 #define TPGS_SUPPORT_TRANSITION 0x80 +#define RTPG_FMT_MASK 0x70 +#define RTPG_FMT_EXT_HDR 0x10 + #define TPGS_MODE_UNINITIALIZED -1 #define TPGS_MODE_NONE 0x0 #define TPGS_MODE_IMPLICIT 0x1 #define TPGS_MODE_EXPLICIT 0x2 #define ALUA_INQUIRY_SIZE 36 -#define ALUA_FAILOVER_TIMEOUT (60 * HZ) +#define ALUA_FAILOVER_TIMEOUT 60 #define ALUA_FAILOVER_RETRIES 5 /* flags passed from user level */ @@ -68,6 +71,7 @@ struct alua_dh_data { unsigned char inq[ALUA_INQUIRY_SIZE]; unsigned char *buff; int bufflen; + unsigned char transition_tmo; unsigned char sense[SCSI_SENSE_BUFFERSIZE]; int senselen; struct scsi_device *sdev; @@ -128,7 +132,7 @@ static struct request *get_alua_req(struct scsi_device *sdev, rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER; rq->retries = ALUA_FAILOVER_RETRIES; - rq->timeout = ALUA_FAILOVER_TIMEOUT; + rq->timeout = ALUA_FAILOVER_TIMEOUT * HZ; return rq; } @@ -174,7 +178,8 @@ done: * submit_rtpg - Issue a REPORT TARGET GROUP STATES command * @sdev: sdev the command should be sent to */ -static unsigned submit_rtpg(struct scsi_device *sdev, struct alua_dh_data *h) +static unsigned submit_rtpg(struct scsi_device *sdev, struct alua_dh_data *h, + bool rtpg_ext_hdr_req) { struct request *rq; int err = SCSI_DH_RES_TEMP_UNAVAIL; @@ -185,7 +190,10 @@ static unsigned submit_rtpg(struct scsi_device *sdev, struct alua_dh_data *h) /* Prepare the command. 
*/ rq->cmd[0] = MAINTENANCE_IN; - rq->cmd[1] = MI_REPORT_TARGET_PGS; + if (rtpg_ext_hdr_req) + rq->cmd[1] = MI_REPORT_TARGET_PGS | MI_EXT_HDR_PARAM_FMT; + else + rq->cmd[1] = MI_REPORT_TARGET_PGS; rq->cmd[6] = (h->bufflen >> 24) & 0xff; rq->cmd[7] = (h->bufflen >> 16) & 0xff; rq->cmd[8] = (h->bufflen >> 8) & 0xff; @@ -518,11 +526,18 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_dh_data *h) int len, k, off, valid_states = 0; unsigned char *ucp; unsigned err; - unsigned long expiry, interval = 1000; + bool rtpg_ext_hdr_req = 1; + unsigned long expiry, interval = 0; + unsigned int tpg_desc_tbl_off; + unsigned char orig_transition_tmo; + + if (!h->transition_tmo) + expiry = round_jiffies_up(jiffies + ALUA_FAILOVER_TIMEOUT * HZ); + else + expiry = round_jiffies_up(jiffies + h->transition_tmo * HZ); - expiry = round_jiffies_up(jiffies + ALUA_FAILOVER_TIMEOUT); retry: - err = submit_rtpg(sdev, h); + err = submit_rtpg(sdev, h, rtpg_ext_hdr_req); if (err == SCSI_DH_IO && h->senselen > 0) { err = scsi_normalize_sense(h->sense, SCSI_SENSE_BUFFERSIZE, @@ -530,6 +545,21 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_dh_data *h) if (!err) return SCSI_DH_IO; + /* + * submit_rtpg() has failed on existing arrays + * when requesting extended header info, and + * the array doesn't support extended headers, + * even though it shouldn't according to T10. + * The retry without rtpg_ext_hdr_req set + * handles this. + */ + if (rtpg_ext_hdr_req == 1 && + sense_hdr.sense_key == ILLEGAL_REQUEST && + sense_hdr.asc == 0x24 && sense_hdr.ascq == 0) { + rtpg_ext_hdr_req = 0; + goto retry; + } + err = alua_check_sense(sdev, &sense_hdr); if (err == ADD_TO_MLQUEUE && time_before(jiffies, expiry)) goto retry; @@ -556,7 +586,28 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_dh_data *h) goto retry; } - for (k = 4, ucp = h->buff + 4; k < len; k += off, ucp += off) { + orig_transition_tmo = h->transition_tmo; + if ((h->buff[4] & RTPG_FMT_MASK) == RTPG_FMT_EXT_HDR && h->buff[5] != 0) + h->transition_tmo = h->buff[5]; + else + h->transition_tmo = ALUA_FAILOVER_TIMEOUT; + + if (orig_transition_tmo != h->transition_tmo) { + sdev_printk(KERN_INFO, sdev, + "%s: transition timeout set to %d seconds\n", + ALUA_DH_NAME, h->transition_tmo); + expiry = jiffies + h->transition_tmo * HZ; + } + + if ((h->buff[4] & RTPG_FMT_MASK) == RTPG_FMT_EXT_HDR) + tpg_desc_tbl_off = 8; + else + tpg_desc_tbl_off = 4; + + for (k = tpg_desc_tbl_off, ucp = h->buff + tpg_desc_tbl_off; + k < len; + k += off, ucp += off) { + if (h->group_id == (ucp[2] << 8) + ucp[3]) { h->state = ucp[0] & 0x0f; h->pref = ucp[0] >> 7; @@ -581,7 +632,7 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_dh_data *h) case TPGS_STATE_TRANSITIONING: if (time_before(jiffies, expiry)) { /* State transition, retry */ - interval *= 2; + interval += 2000; msleep(interval); goto retry; } @@ -691,9 +742,9 @@ static int alua_activate(struct scsi_device *sdev, stpg = 0; break; case TPGS_STATE_STANDBY: + case TPGS_STATE_UNAVAILABLE: stpg = 1; break; - case TPGS_STATE_UNAVAILABLE: case TPGS_STATE_OFFLINE: err = SCSI_DH_IO; break; diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index fe30b1b65e1..078d262ac7c 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -1529,7 +1529,7 @@ static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev, return 0; err: - per_cpu_ptr(lport->dev_stats, get_cpu())->ErrorFrames++; + per_cpu_ptr(lport->stats, get_cpu())->ErrorFrames++; put_cpu(); err2: kfree_skb(skb); 
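[Review note] The alua_rtpg() rework above asks for REPORT TARGET PORT GROUPS with the extended-header format first, and falls back to the plain format when the array rejects it with ILLEGAL REQUEST / INVALID FIELD IN CDB (ASC 24h, ASCQ 00h), as the in-line comment explains. A condensed sketch of just that decision (hypothetical helper; constants written out):

	/* Hypothetical condensation of the fallback test in alua_rtpg(). */
	static bool rtpg_retry_without_ext_hdr(bool ext_hdr_requested,
					       u8 sense_key, u8 asc, u8 ascq)
	{
		return ext_hdr_requested &&
		       sense_key == 0x05 &&		/* ILLEGAL REQUEST */
		       asc == 0x24 && ascq == 0x00;	/* INVALID FIELD IN CDB */
	}

When the extended header does come back (RTPG_FMT_EXT_HDR set in byte 4), byte 5 carries the implicit transition timeout that replaces the fixed ALUA_FAILOVER_TIMEOUT as the retry deadline.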
@@ -1569,7 +1569,7 @@ static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp) struct ethhdr *eh; struct fcoe_crc_eof *cp; struct sk_buff *skb; - struct fcoe_dev_stats *stats; + struct fc_stats *stats; struct fc_frame_header *fh; unsigned int hlen; /* header length implies the version */ unsigned int tlen; /* trailer length */ @@ -1680,7 +1680,7 @@ static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp) skb_shinfo(skb)->gso_size = 0; } /* update tx stats: regardless if LLD fails */ - stats = per_cpu_ptr(lport->dev_stats, get_cpu()); + stats = per_cpu_ptr(lport->stats, get_cpu()); stats->TxFrames++; stats->TxWords += wlen; put_cpu(); @@ -1714,7 +1714,7 @@ static inline int fcoe_filter_frames(struct fc_lport *lport, struct fcoe_interface *fcoe; struct fc_frame_header *fh; struct sk_buff *skb = (struct sk_buff *)fp; - struct fcoe_dev_stats *stats; + struct fc_stats *stats; /* * We only check CRC if no offload is available and if it is @@ -1745,7 +1745,7 @@ static inline int fcoe_filter_frames(struct fc_lport *lport, return 0; } - stats = per_cpu_ptr(lport->dev_stats, get_cpu()); + stats = per_cpu_ptr(lport->stats, get_cpu()); stats->InvalidCRCCount++; if (stats->InvalidCRCCount < 5) printk(KERN_WARNING "fcoe: dropping frame with CRC error\n"); @@ -1762,7 +1762,7 @@ static void fcoe_recv_frame(struct sk_buff *skb) u32 fr_len; struct fc_lport *lport; struct fcoe_rcv_info *fr; - struct fcoe_dev_stats *stats; + struct fc_stats *stats; struct fcoe_crc_eof crc_eof; struct fc_frame *fp; struct fcoe_port *port; @@ -1793,7 +1793,7 @@ static void fcoe_recv_frame(struct sk_buff *skb) */ hp = (struct fcoe_hdr *) skb_network_header(skb); - stats = per_cpu_ptr(lport->dev_stats, get_cpu()); + stats = per_cpu_ptr(lport->stats, get_cpu()); if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) { if (stats->ErrorFrames < 5) printk(KERN_WARNING "fcoe: FCoE version " @@ -1851,23 +1851,25 @@ static int fcoe_percpu_receive_thread(void *arg) set_user_nice(current, -20); +retry: while (!kthread_should_stop()) { spin_lock_bh(&p->fcoe_rx_list.lock); skb_queue_splice_init(&p->fcoe_rx_list, &tmp); - spin_unlock_bh(&p->fcoe_rx_list.lock); - - while ((skb = __skb_dequeue(&tmp)) != NULL) - fcoe_recv_frame(skb); - spin_lock_bh(&p->fcoe_rx_list.lock); - if (!skb_queue_len(&p->fcoe_rx_list)) { + if (!skb_queue_len(&tmp)) { set_current_state(TASK_INTERRUPTIBLE); spin_unlock_bh(&p->fcoe_rx_list.lock); schedule(); set_current_state(TASK_RUNNING); - } else - spin_unlock_bh(&p->fcoe_rx_list.lock); + goto retry; + } + + spin_unlock_bh(&p->fcoe_rx_list.lock); + + while ((skb = __skb_dequeue(&tmp)) != NULL) + fcoe_recv_frame(skb); + } return 0; } @@ -1970,7 +1972,7 @@ static int fcoe_device_notification(struct notifier_block *notifier, struct fcoe_ctlr *ctlr; struct fcoe_interface *fcoe; struct fcoe_port *port; - struct fcoe_dev_stats *stats; + struct fc_stats *stats; u32 link_possible = 1; u32 mfs; int rc = NOTIFY_OK; @@ -2024,7 +2026,7 @@ static int fcoe_device_notification(struct notifier_block *notifier, if (link_possible && !fcoe_link_ok(lport)) fcoe_ctlr_link_up(ctlr); else if (fcoe_ctlr_link_down(ctlr)) { - stats = per_cpu_ptr(lport->dev_stats, get_cpu()); + stats = per_cpu_ptr(lport->stats, get_cpu()); stats->LinkFailureCount++; put_cpu(); fcoe_clean_pending_queue(lport); diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c index d68d57241ee..2ebe03a4b51 100644 --- a/drivers/scsi/fcoe/fcoe_ctlr.c +++ b/drivers/scsi/fcoe/fcoe_ctlr.c @@ -788,11 +788,11 @@ static unsigned long 
fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip) unsigned long deadline; unsigned long sel_time = 0; struct list_head del_list; - struct fcoe_dev_stats *stats; + struct fc_stats *stats; INIT_LIST_HEAD(&del_list); - stats = per_cpu_ptr(fip->lp->dev_stats, get_cpu()); + stats = per_cpu_ptr(fip->lp->stats, get_cpu()); list_for_each_entry_safe(fcf, next, &fip->fcfs, list) { deadline = fcf->time + fcf->fka_period + fcf->fka_period / 2; @@ -1104,8 +1104,8 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb) struct fc_frame_header *fh = NULL; struct fip_desc *desc; struct fip_encaps *els; - struct fcoe_dev_stats *stats; struct fcoe_fcf *sel; + struct fc_stats *stats; enum fip_desc_type els_dtype = 0; u8 els_op; u8 sub; @@ -1249,7 +1249,7 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb) fr_dev(fp) = lport; fr_encaps(fp) = els_dtype; - stats = per_cpu_ptr(lport->dev_stats, get_cpu()); + stats = per_cpu_ptr(lport->stats, get_cpu()); stats->RxFrames++; stats->RxWords += skb->len / FIP_BPW; put_cpu(); @@ -1353,7 +1353,7 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip, ntoh24(vp->fd_fc_id)); if (vn_port && (vn_port == lport)) { mutex_lock(&fip->ctlr_mutex); - per_cpu_ptr(lport->dev_stats, + per_cpu_ptr(lport->stats, get_cpu())->VLinkFailureCount++; put_cpu(); fcoe_ctlr_reset(fip); @@ -1383,8 +1383,7 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip, * followed by physical port */ mutex_lock(&fip->ctlr_mutex); - per_cpu_ptr(lport->dev_stats, - get_cpu())->VLinkFailureCount++; + per_cpu_ptr(lport->stats, get_cpu())->VLinkFailureCount++; put_cpu(); fcoe_ctlr_reset(fip); mutex_unlock(&fip->ctlr_mutex); diff --git a/drivers/scsi/fcoe/fcoe_sysfs.c b/drivers/scsi/fcoe/fcoe_sysfs.c index 2bc163198d3..5e751689a08 100644 --- a/drivers/scsi/fcoe/fcoe_sysfs.c +++ b/drivers/scsi/fcoe/fcoe_sysfs.c @@ -102,7 +102,7 @@ static int fcoe_str_to_dev_loss(const char *buf, unsigned long *val) int ret; ret = kstrtoul(buf, 0, val); - if (ret || *val < 0) + if (ret) return -EINVAL; /* * Check for overflow; dev_loss_tmo is u32 diff --git a/drivers/scsi/fcoe/fcoe_transport.c b/drivers/scsi/fcoe/fcoe_transport.c index b46f43dced7..ac76d8a042d 100644 --- a/drivers/scsi/fcoe/fcoe_transport.c +++ b/drivers/scsi/fcoe/fcoe_transport.c @@ -89,7 +89,7 @@ void __fcoe_get_lesb(struct fc_lport *lport, { unsigned int cpu; u32 lfc, vlfc, mdac; - struct fcoe_dev_stats *devst; + struct fc_stats *stats; struct fcoe_fc_els_lesb *lesb; struct rtnl_link_stats64 temp; @@ -99,10 +99,10 @@ void __fcoe_get_lesb(struct fc_lport *lport, lesb = (struct fcoe_fc_els_lesb *)fc_lesb; memset(lesb, 0, sizeof(*lesb)); for_each_possible_cpu(cpu) { - devst = per_cpu_ptr(lport->dev_stats, cpu); - lfc += devst->LinkFailureCount; - vlfc += devst->VLinkFailureCount; - mdac += devst->MissDiscAdvCount; + stats = per_cpu_ptr(lport->stats, cpu); + lfc += stats->LinkFailureCount; + vlfc += stats->VLinkFailureCount; + mdac += stats->MissDiscAdvCount; } lesb->lesb_link_fail = htonl(lfc); lesb->lesb_vlink_fail = htonl(vlfc); @@ -502,7 +502,7 @@ static int __init fcoe_transport_init(void) return 0; } -static int __exit fcoe_transport_exit(void) +static int fcoe_transport_exit(void) { struct fcoe_transport *ft; diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c index a3a056a9db6..593085a5227 100644 --- a/drivers/scsi/hosts.c +++ b/drivers/scsi/hosts.c @@ -42,7 +42,7 @@ #include "scsi_logging.h" -static atomic_t scsi_host_next_hn; /* host_no for next new host */ +static atomic_t scsi_host_next_hn = 
ATOMIC_INIT(0); /* host_no for next new host */ static void scsi_host_cls_release(struct device *dev) @@ -290,6 +290,7 @@ static void scsi_host_dev_release(struct device *dev) struct Scsi_Host *shost = dev_to_shost(dev); struct device *parent = dev->parent; struct request_queue *q; + void *queuedata; scsi_proc_hostdir_rm(shost->hostt); @@ -299,9 +300,9 @@ static void scsi_host_dev_release(struct device *dev) destroy_workqueue(shost->work_q); q = shost->uspace_req_q; if (q) { - kfree(q->queuedata); - q->queuedata = NULL; - scsi_free_queue(q); + queuedata = q->queuedata; + blk_cleanup_queue(q); + kfree(queuedata); } scsi_destroy_command_freelist(shost); diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c index 10b65556937..192724ed7a3 100644 --- a/drivers/scsi/hptiop.c +++ b/drivers/scsi/hptiop.c @@ -42,7 +42,7 @@ MODULE_DESCRIPTION("HighPoint RocketRAID 3xxx/4xxx Controller Driver"); static char driver_name[] = "hptiop"; static const char driver_name_long[] = "RocketRAID 3xxx/4xxx Controller driver"; -static const char driver_ver[] = "v1.6 (090910)"; +static const char driver_ver[] = "v1.6 (091225)"; static int iop_send_sync_msg(struct hptiop_hba *hba, u32 msg, u32 millisec); static void hptiop_finish_scsi_req(struct hptiop_hba *hba, u32 tag, @@ -958,6 +958,7 @@ static int __devinit hptiop_probe(struct pci_dev *pcidev, { struct Scsi_Host *host = NULL; struct hptiop_hba *hba; + struct hptiop_adapter_ops *iop_ops; struct hpt_iop_request_get_config iop_config; struct hpt_iop_request_set_config set_config; dma_addr_t start_phy; @@ -978,7 +979,8 @@ static int __devinit hptiop_probe(struct pci_dev *pcidev, pci_set_master(pcidev); /* Enable 64bit DMA if possible */ - if (pci_set_dma_mask(pcidev, DMA_BIT_MASK(64))) { + iop_ops = (struct hptiop_adapter_ops *)id->driver_data; + if (pci_set_dma_mask(pcidev, DMA_BIT_MASK(iop_ops->hw_dma_bit_mask))) { if (pci_set_dma_mask(pcidev, DMA_BIT_MASK(32))) { printk(KERN_ERR "hptiop: fail to set dma_mask\n"); goto disable_pci_device; @@ -998,7 +1000,7 @@ static int __devinit hptiop_probe(struct pci_dev *pcidev, hba = (struct hptiop_hba *)host->hostdata; - hba->ops = (struct hptiop_adapter_ops *)id->driver_data; + hba->ops = iop_ops; hba->pcidev = pcidev; hba->host = host; hba->initialized = 0; @@ -1239,6 +1241,7 @@ static struct hptiop_adapter_ops hptiop_itl_ops = { .iop_intr = iop_intr_itl, .post_msg = hptiop_post_msg_itl, .post_req = hptiop_post_req_itl, + .hw_dma_bit_mask = 64, }; static struct hptiop_adapter_ops hptiop_mv_ops = { @@ -1254,6 +1257,7 @@ static struct hptiop_adapter_ops hptiop_mv_ops = { .iop_intr = iop_intr_mv, .post_msg = hptiop_post_msg_mv, .post_req = hptiop_post_req_mv, + .hw_dma_bit_mask = 33, }; static struct pci_device_id hptiop_id_table[] = { diff --git a/drivers/scsi/hptiop.h b/drivers/scsi/hptiop.h index 0b871c0ae56..baa648d87fd 100644 --- a/drivers/scsi/hptiop.h +++ b/drivers/scsi/hptiop.h @@ -297,6 +297,7 @@ struct hptiop_adapter_ops { int (*iop_intr)(struct hptiop_hba *hba); void (*post_msg)(struct hptiop_hba *hba, u32 msg); void (*post_req)(struct hptiop_hba *hba, struct hptiop_request *_req); + int hw_dma_bit_mask; }; #define HPT_IOCTL_RESULT_OK 0 diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c index 47e28b55502..92c1d86d1fc 100644 --- a/drivers/scsi/isci/init.c +++ b/drivers/scsi/isci/init.c @@ -166,6 +166,9 @@ static struct scsi_host_template isci_sht = { .sg_tablesize = SG_ALL, .max_sectors = SCSI_DEFAULT_MAX_SECTORS, .use_clustering = ENABLE_CLUSTERING, + .eh_abort_handler = sas_eh_abort_handler, + 
.eh_device_reset_handler = sas_eh_device_reset_handler, + .eh_bus_reset_handler = sas_eh_bus_reset_handler, .target_destroy = sas_target_destroy, .ioctl = sas_ioctl, .shost_attrs = isci_host_attrs, diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c index aceffadb21c..c772d8d2715 100644 --- a/drivers/scsi/libfc/fc_exch.c +++ b/drivers/scsi/libfc/fc_exch.c @@ -99,11 +99,6 @@ struct fc_exch_mgr { u16 max_xid; u16 pool_max_index; - /* - * currently exchange mgr stats are updated but not used. - * either stats can be expose via sysfs or remove them - * all together if not used XXX - */ struct { atomic_t no_free_exch; atomic_t no_free_exch_xid; @@ -124,7 +119,7 @@ struct fc_exch_mgr { * for each anchor to determine if that EM should be used. The last * anchor in the list will always match to handle any exchanges not * handled by other EMs. The non-default EMs would be added to the - * anchor list by HW that provides FCoE offloads. + * anchor list by HW that provides offloads. */ struct fc_exch_mgr_anchor { struct list_head ema_list; @@ -339,6 +334,52 @@ static void fc_exch_release(struct fc_exch *ep) } /** + * fc_exch_timer_cancel() - cancel exch timer + * @ep: The exchange whose timer to be canceled + */ +static inline void fc_exch_timer_cancel(struct fc_exch *ep) +{ + if (cancel_delayed_work(&ep->timeout_work)) { + FC_EXCH_DBG(ep, "Exchange timer canceled\n"); + atomic_dec(&ep->ex_refcnt); /* drop hold for timer */ + } +} + +/** + * fc_exch_timer_set_locked() - Start a timer for an exchange w/ the + * the exchange lock held + * @ep: The exchange whose timer will start + * @timer_msec: The timeout period + * + * Used for upper level protocols to time out the exchange. + * The timer is cancelled when it fires or when the exchange completes. + */ +static inline void fc_exch_timer_set_locked(struct fc_exch *ep, + unsigned int timer_msec) +{ + if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE)) + return; + + FC_EXCH_DBG(ep, "Exchange timer armed : %d msecs\n", timer_msec); + + if (queue_delayed_work(fc_exch_workqueue, &ep->timeout_work, + msecs_to_jiffies(timer_msec))) + fc_exch_hold(ep); /* hold for timer */ +} + +/** + * fc_exch_timer_set() - Lock the exchange and set the timer + * @ep: The exchange whose timer will start + * @timer_msec: The timeout period + */ +static void fc_exch_timer_set(struct fc_exch *ep, unsigned int timer_msec) +{ + spin_lock_bh(&ep->ex_lock); + fc_exch_timer_set_locked(ep, timer_msec); + spin_unlock_bh(&ep->ex_lock); +} + +/** * fc_exch_done_locked() - Complete an exchange with the exchange lock held * @ep: The exchange that is complete */ @@ -359,8 +400,7 @@ static int fc_exch_done_locked(struct fc_exch *ep) if (!(ep->esb_stat & ESB_ST_REC_QUAL)) { ep->state |= FC_EX_DONE; - if (cancel_delayed_work(&ep->timeout_work)) - atomic_dec(&ep->ex_refcnt); /* drop hold for timer */ + fc_exch_timer_cancel(ep); rc = 0; } return rc; @@ -424,40 +464,6 @@ static void fc_exch_delete(struct fc_exch *ep) } /** - * fc_exch_timer_set_locked() - Start a timer for an exchange w/ the - * the exchange lock held - * @ep: The exchange whose timer will start - * @timer_msec: The timeout period - * - * Used for upper level protocols to time out the exchange. - * The timer is cancelled when it fires or when the exchange completes. 
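[Review note] fc_exch_timer_cancel() and fc_exch_timer_set_locked(), consolidated above, encode a reference-per-pending-timer rule: take a hold only when queue_delayed_work() actually queues the item, and drop it only when cancel_delayed_work() actually removes a still-pending item. A generic sketch of the pairing (placeholder object and workqueue, not libfc API):

	/* Placeholder refcounted object with a delayed-work timer. */
	struct my_obj {
		struct kref		kref;
		struct delayed_work	dwork;
	};

	static struct workqueue_struct *my_wq;	/* assumed created elsewhere */

	static void my_obj_timer_arm(struct my_obj *obj, unsigned int msec)
	{
		/* queue_delayed_work() returns true only if not already queued */
		if (queue_delayed_work(my_wq, &obj->dwork,
				       msecs_to_jiffies(msec)))
			kref_get(&obj->kref);	/* hold for the pending timer */
	}

	static void my_obj_timer_cancel(struct my_obj *obj,
					void (*release)(struct kref *))
	{
		/* true only if the work was still pending and never ran */
		if (cancel_delayed_work(&obj->dwork))
			kref_put(&obj->kref, release);	/* drop the timer's hold */
	}

fc_exch_abts_resp() below keeps using cancel_delayed_work_sync() instead, since it must also wait out a timeout handler that may already be executing.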
- */ -static inline void fc_exch_timer_set_locked(struct fc_exch *ep, - unsigned int timer_msec) -{ - if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE)) - return; - - FC_EXCH_DBG(ep, "Exchange timer armed\n"); - - if (queue_delayed_work(fc_exch_workqueue, &ep->timeout_work, - msecs_to_jiffies(timer_msec))) - fc_exch_hold(ep); /* hold for timer */ -} - -/** - * fc_exch_timer_set() - Lock the exchange and set the timer - * @ep: The exchange whose timer will start - * @timer_msec: The timeout period - */ -static void fc_exch_timer_set(struct fc_exch *ep, unsigned int timer_msec) -{ - spin_lock_bh(&ep->ex_lock); - fc_exch_timer_set_locked(ep, timer_msec); - spin_unlock_bh(&ep->ex_lock); -} - -/** * fc_seq_send() - Send a frame using existing sequence/exchange pair * @lport: The local port that the exchange will be sent on * @sp: The sequence to be sent @@ -986,7 +992,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport, /* * Update sequence_id based on incoming last * frame of sequence exchange. This is needed - * for FCoE target where DDP has been used + * for FC target where DDP has been used * on target where, stack is indicated only * about last frame's (payload _header) header. * Whereas "seq_id" which is part of @@ -1549,8 +1555,10 @@ static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp) FC_EXCH_DBG(ep, "exch: BLS rctl %x - %s\n", fh->fh_r_ctl, fc_exch_rctl_name(fh->fh_r_ctl)); - if (cancel_delayed_work_sync(&ep->timeout_work)) + if (cancel_delayed_work_sync(&ep->timeout_work)) { + FC_EXCH_DBG(ep, "Exchange timer canceled\n"); fc_exch_release(ep); /* release from pending timer hold */ + } spin_lock_bh(&ep->ex_lock); switch (fh->fh_r_ctl) { @@ -1737,8 +1745,7 @@ static void fc_exch_reset(struct fc_exch *ep) spin_lock_bh(&ep->ex_lock); fc_exch_abort_locked(ep, 0); ep->state |= FC_EX_RST_CLEANUP; - if (cancel_delayed_work(&ep->timeout_work)) - atomic_dec(&ep->ex_refcnt); /* drop hold for timer */ + fc_exch_timer_cancel(ep); resp = ep->resp; ep->resp = NULL; if (ep->esb_stat & ESB_ST_REC_QUAL) @@ -2133,10 +2140,8 @@ static void fc_exch_els_rrq(struct fc_frame *fp) ep->esb_stat &= ~ESB_ST_REC_QUAL; atomic_dec(&ep->ex_refcnt); /* drop hold for rec qual */ } - if (ep->esb_stat & ESB_ST_COMPLETE) { - if (cancel_delayed_work(&ep->timeout_work)) - atomic_dec(&ep->ex_refcnt); /* drop timer hold */ - } + if (ep->esb_stat & ESB_ST_COMPLETE) + fc_exch_timer_cancel(ep); spin_unlock_bh(&ep->ex_lock); @@ -2156,6 +2161,31 @@ out: } /** + * fc_exch_update_stats() - update exches stats to lport + * @lport: The local port to update exchange manager stats + */ +void fc_exch_update_stats(struct fc_lport *lport) +{ + struct fc_host_statistics *st; + struct fc_exch_mgr_anchor *ema; + struct fc_exch_mgr *mp; + + st = &lport->host_stats; + + list_for_each_entry(ema, &lport->ema_list, ema_list) { + mp = ema->mp; + st->fc_no_free_exch += atomic_read(&mp->stats.no_free_exch); + st->fc_no_free_exch_xid += + atomic_read(&mp->stats.no_free_exch_xid); + st->fc_xid_not_found += atomic_read(&mp->stats.xid_not_found); + st->fc_xid_busy += atomic_read(&mp->stats.xid_busy); + st->fc_seq_not_found += atomic_read(&mp->stats.seq_not_found); + st->fc_non_bls_resp += atomic_read(&mp->stats.non_bls_resp); + } +} +EXPORT_SYMBOL(fc_exch_update_stats); + +/** * fc_exch_mgr_add() - Add an exchange manager to a local port's list of EMs * @lport: The local port to add the exchange manager to * @mp: The exchange manager to be added to the local port diff --git a/drivers/scsi/libfc/fc_fcp.c 
b/drivers/scsi/libfc/fc_fcp.c index f7357308655..14243fa5f8e 100644 --- a/drivers/scsi/libfc/fc_fcp.c +++ b/drivers/scsi/libfc/fc_fcp.c @@ -158,6 +158,9 @@ static struct fc_fcp_pkt *fc_fcp_pkt_alloc(struct fc_lport *lport, gfp_t gfp) fsp->timer.data = (unsigned long)fsp; INIT_LIST_HEAD(&fsp->list); spin_lock_init(&fsp->scsi_pkt_lock); + } else { + per_cpu_ptr(lport->stats, get_cpu())->FcpPktAllocFails++; + put_cpu(); } return fsp; } @@ -264,6 +267,9 @@ static int fc_fcp_send_abort(struct fc_fcp_pkt *fsp) if (!fsp->seq_ptr) return -EINVAL; + per_cpu_ptr(fsp->lp->stats, get_cpu())->FcpPktAborts++; + put_cpu(); + fsp->state |= FC_SRB_ABORT_PENDING; return fsp->lp->tt.seq_exch_abort(fsp->seq_ptr, 0); } @@ -420,6 +426,8 @@ static inline struct fc_frame *fc_fcp_frame_alloc(struct fc_lport *lport, if (likely(fp)) return fp; + per_cpu_ptr(lport->stats, get_cpu())->FcpFrameAllocFails++; + put_cpu(); /* error case */ fc_fcp_can_queue_ramp_down(lport); return NULL; @@ -434,7 +442,7 @@ static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp) { struct scsi_cmnd *sc = fsp->cmd; struct fc_lport *lport = fsp->lp; - struct fcoe_dev_stats *stats; + struct fc_stats *stats; struct fc_frame_header *fh; size_t start_offset; size_t offset; @@ -496,7 +504,7 @@ static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp) if (~crc != le32_to_cpu(fr_crc(fp))) { crc_err: - stats = per_cpu_ptr(lport->dev_stats, get_cpu()); + stats = per_cpu_ptr(lport->stats, get_cpu()); stats->ErrorFrames++; /* per cpu count, not total count, but OK for limit */ if (stats->InvalidCRCCount++ < FC_MAX_ERROR_CNT) @@ -1372,10 +1380,10 @@ static void fc_fcp_timeout(unsigned long data) fsp->state |= FC_SRB_FCP_PROCESSING_TMO; - if (rpriv->flags & FC_RP_FLAGS_REC_SUPPORTED) - fc_fcp_rec(fsp); - else if (fsp->state & FC_SRB_RCV_STATUS) + if (fsp->state & FC_SRB_RCV_STATUS) fc_fcp_complete_locked(fsp); + else if (rpriv->flags & FC_RP_FLAGS_REC_SUPPORTED) + fc_fcp_rec(fsp); else fc_fcp_recovery(fsp, FC_TIMED_OUT); fsp->state &= ~FC_SRB_FCP_PROCESSING_TMO; @@ -1786,7 +1794,7 @@ int fc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc_cmd) struct fc_rport_libfc_priv *rpriv; int rval; int rc = 0; - struct fcoe_dev_stats *stats; + struct fc_stats *stats; rval = fc_remote_port_chkready(rport); if (rval) { @@ -1835,7 +1843,7 @@ int fc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc_cmd) /* * setup the data direction */ - stats = per_cpu_ptr(lport->dev_stats, get_cpu()); + stats = per_cpu_ptr(lport->stats, get_cpu()); if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) { fsp->req_flags = FC_SRB_READ; stats->InputRequests++; diff --git a/drivers/scsi/libfc/fc_frame.c b/drivers/scsi/libfc/fc_frame.c index 981329a17c4..0382ac06906 100644 --- a/drivers/scsi/libfc/fc_frame.c +++ b/drivers/scsi/libfc/fc_frame.c @@ -49,7 +49,7 @@ u32 fc_frame_crc_check(struct fc_frame *fp) EXPORT_SYMBOL(fc_frame_crc_check); /* - * Allocate a frame intended to be sent via fcoe_xmit. + * Allocate a frame intended to be sent. * Get an sk_buff for the frame and set the length. 
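[Review note] Most of the libfc/fcoe churn in this patch is the mechanical rename of lport->dev_stats (struct fcoe_dev_stats) to lport->stats (struct fc_stats); the surrounding per-CPU idiom is unchanged. For reference, a sketch of that idiom as it reads after the rename (counter name taken from the hunks above):

	static void bump_error_frames(struct fc_lport *lport)
	{
		struct fc_stats *stats;

		/* get_cpu() disables preemption, so the per-CPU pointer
		 * stays valid until put_cpu(); a CPU-local counter needs
		 * no lock or atomic. */
		stats = per_cpu_ptr(lport->stats, get_cpu());
		stats->ErrorFrames++;
		put_cpu();
	}

fc_get_host_stats() then folds the per-CPU counters together with for_each_possible_cpu(), which is why the individual increments can stay plain non-atomic adds.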
*/ struct fc_frame *_fc_frame_alloc(size_t len) diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c index c1402fb499a..f04d15c67df 100644 --- a/drivers/scsi/libfc/fc_lport.c +++ b/drivers/scsi/libfc/fc_lport.c @@ -299,47 +299,54 @@ EXPORT_SYMBOL(fc_get_host_speed); */ struct fc_host_statistics *fc_get_host_stats(struct Scsi_Host *shost) { - struct fc_host_statistics *fcoe_stats; + struct fc_host_statistics *fc_stats; struct fc_lport *lport = shost_priv(shost); struct timespec v0, v1; unsigned int cpu; u64 fcp_in_bytes = 0; u64 fcp_out_bytes = 0; - fcoe_stats = &lport->host_stats; - memset(fcoe_stats, 0, sizeof(struct fc_host_statistics)); + fc_stats = &lport->host_stats; + memset(fc_stats, 0, sizeof(struct fc_host_statistics)); jiffies_to_timespec(jiffies, &v0); jiffies_to_timespec(lport->boot_time, &v1); - fcoe_stats->seconds_since_last_reset = (v0.tv_sec - v1.tv_sec); + fc_stats->seconds_since_last_reset = (v0.tv_sec - v1.tv_sec); for_each_possible_cpu(cpu) { - struct fcoe_dev_stats *stats; - - stats = per_cpu_ptr(lport->dev_stats, cpu); - - fcoe_stats->tx_frames += stats->TxFrames; - fcoe_stats->tx_words += stats->TxWords; - fcoe_stats->rx_frames += stats->RxFrames; - fcoe_stats->rx_words += stats->RxWords; - fcoe_stats->error_frames += stats->ErrorFrames; - fcoe_stats->invalid_crc_count += stats->InvalidCRCCount; - fcoe_stats->fcp_input_requests += stats->InputRequests; - fcoe_stats->fcp_output_requests += stats->OutputRequests; - fcoe_stats->fcp_control_requests += stats->ControlRequests; + struct fc_stats *stats; + + stats = per_cpu_ptr(lport->stats, cpu); + + fc_stats->tx_frames += stats->TxFrames; + fc_stats->tx_words += stats->TxWords; + fc_stats->rx_frames += stats->RxFrames; + fc_stats->rx_words += stats->RxWords; + fc_stats->error_frames += stats->ErrorFrames; + fc_stats->invalid_crc_count += stats->InvalidCRCCount; + fc_stats->fcp_input_requests += stats->InputRequests; + fc_stats->fcp_output_requests += stats->OutputRequests; + fc_stats->fcp_control_requests += stats->ControlRequests; fcp_in_bytes += stats->InputBytes; fcp_out_bytes += stats->OutputBytes; - fcoe_stats->link_failure_count += stats->LinkFailureCount; + fc_stats->fcp_packet_alloc_failures += stats->FcpPktAllocFails; + fc_stats->fcp_packet_aborts += stats->FcpPktAborts; + fc_stats->fcp_frame_alloc_failures += stats->FcpFrameAllocFails; + fc_stats->link_failure_count += stats->LinkFailureCount; } - fcoe_stats->fcp_input_megabytes = div_u64(fcp_in_bytes, 1000000); - fcoe_stats->fcp_output_megabytes = div_u64(fcp_out_bytes, 1000000); - fcoe_stats->lip_count = -1; - fcoe_stats->nos_count = -1; - fcoe_stats->loss_of_sync_count = -1; - fcoe_stats->loss_of_signal_count = -1; - fcoe_stats->prim_seq_protocol_err_count = -1; - fcoe_stats->dumped_frames = -1; - return fcoe_stats; + fc_stats->fcp_input_megabytes = div_u64(fcp_in_bytes, 1000000); + fc_stats->fcp_output_megabytes = div_u64(fcp_out_bytes, 1000000); + fc_stats->lip_count = -1; + fc_stats->nos_count = -1; + fc_stats->loss_of_sync_count = -1; + fc_stats->loss_of_signal_count = -1; + fc_stats->prim_seq_protocol_err_count = -1; + fc_stats->dumped_frames = -1; + + /* update exches stats */ + fc_exch_update_stats(lport); + + return fc_stats; } EXPORT_SYMBOL(fc_get_host_stats); @@ -973,7 +980,8 @@ drop: rcu_read_unlock(); FC_LPORT_DBG(lport, "dropping unexpected frame type %x\n", fh->fh_type); fc_frame_free(fp); - lport->tt.exch_done(sp); + if (sp) + lport->tt.exch_done(sp); } /** @@ -1590,8 +1598,9 @@ static void fc_lport_timeout(struct 
work_struct *work) case LPORT_ST_RPA: case LPORT_ST_DHBA: case LPORT_ST_DPRT: - fc_lport_enter_ms(lport, lport->state); - break; + FC_LPORT_DBG(lport, "Skipping lport state %s to SCR\n", + fc_lport_state(lport)); + /* fall thru */ case LPORT_ST_SCR: fc_lport_enter_scr(lport); break; diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c index 441d88ad99a..a59fcdc8fd6 100644 --- a/drivers/scsi/libsas/sas_ata.c +++ b/drivers/scsi/libsas/sas_ata.c @@ -139,12 +139,12 @@ static void sas_ata_task_done(struct sas_task *task) if (stat->stat == SAS_PROTO_RESPONSE || stat->stat == SAM_STAT_GOOD || ((stat->stat == SAM_STAT_CHECK_CONDITION && dev->sata_dev.command_set == ATAPI_COMMAND_SET))) { - ata_tf_from_fis(resp->ending_fis, &dev->sata_dev.tf); + memcpy(dev->sata_dev.fis, resp->ending_fis, ATA_RESP_FIS_SIZE); if (!link->sactive) { - qc->err_mask |= ac_err_mask(dev->sata_dev.tf.command); + qc->err_mask |= ac_err_mask(dev->sata_dev.fis[2]); } else { - link->eh_info.err_mask |= ac_err_mask(dev->sata_dev.tf.command); + link->eh_info.err_mask |= ac_err_mask(dev->sata_dev.fis[2]); if (unlikely(link->eh_info.err_mask)) qc->flags |= ATA_QCFLAG_FAILED; } @@ -161,8 +161,8 @@ static void sas_ata_task_done(struct sas_task *task) qc->flags |= ATA_QCFLAG_FAILED; } - dev->sata_dev.tf.feature = 0x04; /* status err */ - dev->sata_dev.tf.command = ATA_ERR; + dev->sata_dev.fis[3] = 0x04; /* status err */ + dev->sata_dev.fis[2] = ATA_ERR; } } @@ -269,7 +269,7 @@ static bool sas_ata_qc_fill_rtf(struct ata_queued_cmd *qc) { struct domain_device *dev = qc->ap->private_data; - memcpy(&qc->result_tf, &dev->sata_dev.tf, sizeof(qc->result_tf)); + ata_tf_from_fis(dev->sata_dev.fis, &qc->result_tf); return true; } @@ -523,6 +523,31 @@ static void sas_ata_set_dmamode(struct ata_port *ap, struct ata_device *ata_dev) i->dft->lldd_ata_set_dmamode(dev); } +static void sas_ata_sched_eh(struct ata_port *ap) +{ + struct domain_device *dev = ap->private_data; + struct sas_ha_struct *ha = dev->port->ha; + unsigned long flags; + + spin_lock_irqsave(&ha->lock, flags); + if (!test_and_set_bit(SAS_DEV_EH_PENDING, &dev->state)) + ha->eh_active++; + ata_std_sched_eh(ap); + spin_unlock_irqrestore(&ha->lock, flags); +} + +void sas_ata_end_eh(struct ata_port *ap) +{ + struct domain_device *dev = ap->private_data; + struct sas_ha_struct *ha = dev->port->ha; + unsigned long flags; + + spin_lock_irqsave(&ha->lock, flags); + if (test_and_clear_bit(SAS_DEV_EH_PENDING, &dev->state)) + ha->eh_active--; + spin_unlock_irqrestore(&ha->lock, flags); +} + static struct ata_port_operations sas_sata_ops = { .prereset = ata_std_prereset, .hardreset = sas_ata_hard_reset, @@ -536,6 +561,8 @@ static struct ata_port_operations sas_sata_ops = { .port_start = ata_sas_port_start, .port_stop = ata_sas_port_stop, .set_dmamode = sas_ata_set_dmamode, + .sched_eh = sas_ata_sched_eh, + .end_eh = sas_ata_end_eh, }; static struct ata_port_info sata_port_info = { @@ -591,7 +618,6 @@ void sas_ata_task_abort(struct sas_task *task) spin_lock_irqsave(q->queue_lock, flags); blk_abort_request(qc->scsicmd->request); spin_unlock_irqrestore(q->queue_lock, flags); - scsi_schedule_eh(qc->scsicmd->device->host); return; } @@ -708,10 +734,6 @@ static void async_sas_ata_eh(void *data, async_cookie_t cookie) struct ata_port *ap = dev->sata_dev.ap; struct sas_ha_struct *ha = dev->port->ha; - /* hold a reference over eh since we may be racing with final - * remove once all commands are completed - */ - kref_get(&dev->kref); sas_ata_printk(KERN_DEBUG, dev, "dev error 
handler\n"); ata_scsi_port_error_handler(ha->core.shost, ap); sas_put_device(dev); @@ -720,7 +742,7 @@ static void async_sas_ata_eh(void *data, async_cookie_t cookie) void sas_ata_strategy_handler(struct Scsi_Host *shost) { struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost); - LIST_HEAD(async); + ASYNC_DOMAIN_EXCLUSIVE(async); int i; /* it's ok to defer revalidation events during ata eh, these @@ -742,6 +764,13 @@ void sas_ata_strategy_handler(struct Scsi_Host *shost) list_for_each_entry(dev, &port->dev_list, dev_list_node) { if (!dev_is_sata(dev)) continue; + + /* hold a reference over eh since we may be + * racing with final remove once all commands + * are completed + */ + kref_get(&dev->kref); + async_schedule_domain(async_sas_ata_eh, dev, &async); } spin_unlock(&port->dev_list_lock); diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c index 629a0865b13..3e9dc1a8435 100644 --- a/drivers/scsi/libsas/sas_discover.c +++ b/drivers/scsi/libsas/sas_discover.c @@ -39,18 +39,13 @@ void sas_init_dev(struct domain_device *dev) { switch (dev->dev_type) { case SAS_END_DEV: + INIT_LIST_HEAD(&dev->ssp_dev.eh_list_node); break; case EDGE_DEV: case FANOUT_DEV: INIT_LIST_HEAD(&dev->ex_dev.children); mutex_init(&dev->ex_dev.cmd_mutex); break; - case SATA_DEV: - case SATA_PM: - case SATA_PM_PORT: - case SATA_PENDING: - INIT_LIST_HEAD(&dev->sata_dev.children); - break; default: break; } @@ -286,6 +281,8 @@ void sas_free_device(struct kref *kref) static void sas_unregister_common_dev(struct asd_sas_port *port, struct domain_device *dev) { + struct sas_ha_struct *ha = port->ha; + sas_notify_lldd_dev_gone(dev); if (!dev->parent) dev->port->port_dev = NULL; @@ -294,8 +291,18 @@ static void sas_unregister_common_dev(struct asd_sas_port *port, struct domain_d spin_lock_irq(&port->dev_list_lock); list_del_init(&dev->dev_list_node); + if (dev_is_sata(dev)) + sas_ata_end_eh(dev->sata_dev.ap); spin_unlock_irq(&port->dev_list_lock); + spin_lock_irq(&ha->lock); + if (dev->dev_type == SAS_END_DEV && + !list_empty(&dev->ssp_dev.eh_list_node)) { + list_del_init(&dev->ssp_dev.eh_list_node); + ha->eh_active--; + } + spin_unlock_irq(&ha->lock); + sas_put_device(dev); } @@ -488,9 +495,9 @@ static void sas_chain_event(int event, unsigned long *pending, if (!test_and_set_bit(event, pending)) { unsigned long flags; - spin_lock_irqsave(&ha->state_lock, flags); + spin_lock_irqsave(&ha->lock, flags); sas_chain_work(ha, sw); - spin_unlock_irqrestore(&ha->state_lock, flags); + spin_unlock_irqrestore(&ha->lock, flags); } } diff --git a/drivers/scsi/libsas/sas_event.c b/drivers/scsi/libsas/sas_event.c index 4e4292d210c..789c4d8bb7a 100644 --- a/drivers/scsi/libsas/sas_event.c +++ b/drivers/scsi/libsas/sas_event.c @@ -47,9 +47,9 @@ static void sas_queue_event(int event, unsigned long *pending, if (!test_and_set_bit(event, pending)) { unsigned long flags; - spin_lock_irqsave(&ha->state_lock, flags); + spin_lock_irqsave(&ha->lock, flags); sas_queue_work(ha, work); - spin_unlock_irqrestore(&ha->state_lock, flags); + spin_unlock_irqrestore(&ha->lock, flags); } } @@ -61,18 +61,18 @@ void __sas_drain_work(struct sas_ha_struct *ha) set_bit(SAS_HA_DRAINING, &ha->state); /* flush submitters */ - spin_lock_irq(&ha->state_lock); - spin_unlock_irq(&ha->state_lock); + spin_lock_irq(&ha->lock); + spin_unlock_irq(&ha->lock); drain_workqueue(wq); - spin_lock_irq(&ha->state_lock); + spin_lock_irq(&ha->lock); clear_bit(SAS_HA_DRAINING, &ha->state); list_for_each_entry_safe(sw, _sw, &ha->defer_q, drain_node) { 
list_del_init(&sw->drain_node); sas_queue_work(ha, sw); } - spin_unlock_irq(&ha->state_lock); + spin_unlock_irq(&ha->lock); } int sas_drain_work(struct sas_ha_struct *ha) diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c index caa0525d252..efc6e72f09f 100644 --- a/drivers/scsi/libsas/sas_expander.c +++ b/drivers/scsi/libsas/sas_expander.c @@ -51,14 +51,14 @@ static void smp_task_timedout(unsigned long _task) task->task_state_flags |= SAS_TASK_STATE_ABORTED; spin_unlock_irqrestore(&task->task_state_lock, flags); - complete(&task->completion); + complete(&task->slow_task->completion); } static void smp_task_done(struct sas_task *task) { - if (!del_timer(&task->timer)) + if (!del_timer(&task->slow_task->timer)) return; - complete(&task->completion); + complete(&task->slow_task->completion); } /* Give it some long enough timeout. In seconds. */ @@ -79,7 +79,7 @@ static int smp_execute_task(struct domain_device *dev, void *req, int req_size, break; } - task = sas_alloc_task(GFP_KERNEL); + task = sas_alloc_slow_task(GFP_KERNEL); if (!task) { res = -ENOMEM; break; @@ -91,20 +91,20 @@ static int smp_execute_task(struct domain_device *dev, void *req, int req_size, task->task_done = smp_task_done; - task->timer.data = (unsigned long) task; - task->timer.function = smp_task_timedout; - task->timer.expires = jiffies + SMP_TIMEOUT*HZ; - add_timer(&task->timer); + task->slow_task->timer.data = (unsigned long) task; + task->slow_task->timer.function = smp_task_timedout; + task->slow_task->timer.expires = jiffies + SMP_TIMEOUT*HZ; + add_timer(&task->slow_task->timer); res = i->dft->lldd_execute_task(task, 1, GFP_KERNEL); if (res) { - del_timer(&task->timer); + del_timer(&task->slow_task->timer); SAS_DPRINTK("executing SMP task failed:%d\n", res); break; } - wait_for_completion(&task->completion); + wait_for_completion(&task->slow_task->completion); res = -ECOMM; if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) { SAS_DPRINTK("smp task timed out or aborted\n"); @@ -868,7 +868,7 @@ static struct domain_device *sas_ex_discover_end_dev( } /* See if this phy is part of a wide port */ -static int sas_ex_join_wide_port(struct domain_device *parent, int phy_id) +static bool sas_ex_join_wide_port(struct domain_device *parent, int phy_id) { struct ex_phy *phy = &parent->ex_dev.ex_phy[phy_id]; int i; @@ -884,11 +884,11 @@ static int sas_ex_join_wide_port(struct domain_device *parent, int phy_id) sas_port_add_phy(ephy->port, phy->phy); phy->port = ephy->port; phy->phy_state = PHY_DEVICE_DISCOVERED; - return 0; + return true; } } - return -ENODEV; + return false; } static struct domain_device *sas_ex_discover_expander( @@ -1030,8 +1030,7 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id) return res; } - res = sas_ex_join_wide_port(dev, phy_id); - if (!res) { + if (sas_ex_join_wide_port(dev, phy_id)) { SAS_DPRINTK("Attaching ex phy%d to wide port %016llx\n", phy_id, SAS_ADDR(ex_phy->attached_sas_addr)); return res; @@ -1077,8 +1076,7 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id) if (SAS_ADDR(ex->ex_phy[i].attached_sas_addr) == SAS_ADDR(child->sas_addr)) { ex->ex_phy[i].phy_state= PHY_DEVICE_DISCOVERED; - res = sas_ex_join_wide_port(dev, i); - if (!res) + if (sas_ex_join_wide_port(dev, i)) SAS_DPRINTK("Attaching ex phy%d to wide port %016llx\n", i, SAS_ADDR(ex->ex_phy[i].attached_sas_addr)); @@ -1943,32 +1941,20 @@ static int sas_discover_new(struct domain_device *dev, int phy_id) { struct ex_phy *ex_phy = 
&dev->ex_dev.ex_phy[phy_id]; struct domain_device *child; - bool found = false; - int res, i; + int res; SAS_DPRINTK("ex %016llx phy%d new device attached\n", SAS_ADDR(dev->sas_addr), phy_id); res = sas_ex_phy_discover(dev, phy_id); if (res) - goto out; - /* to support the wide port inserted */ - for (i = 0; i < dev->ex_dev.num_phys; i++) { - struct ex_phy *ex_phy_temp = &dev->ex_dev.ex_phy[i]; - if (i == phy_id) - continue; - if (SAS_ADDR(ex_phy_temp->attached_sas_addr) == - SAS_ADDR(ex_phy->attached_sas_addr)) { - found = true; - break; - } - } - if (found) { - sas_ex_join_wide_port(dev, phy_id); + return res; + + if (sas_ex_join_wide_port(dev, phy_id)) return 0; - } + res = sas_ex_discover_devices(dev, phy_id); - if (!res) - goto out; + if (res) + return res; list_for_each_entry(child, &dev->ex_dev.children, siblings) { if (SAS_ADDR(child->sas_addr) == SAS_ADDR(ex_phy->attached_sas_addr)) { @@ -1978,7 +1964,6 @@ static int sas_discover_new(struct domain_device *dev, int phy_id) break; } } -out: return res; } @@ -2005,6 +1990,7 @@ static int sas_rediscover_dev(struct domain_device *dev, int phy_id, bool last) u8 sas_addr[8]; int res; + memset(sas_addr, 0, 8); res = sas_get_phy_attached_dev(dev, phy_id, sas_addr, &type); switch (res) { case SMP_RESP_NO_PHY: @@ -2017,9 +2003,13 @@ static int sas_rediscover_dev(struct domain_device *dev, int phy_id, bool last) return res; case SMP_RESP_FUNC_ACC: break; + case -ECOMM: + break; + default: + return res; } - if (SAS_ADDR(sas_addr) == 0) { + if ((SAS_ADDR(sas_addr) == 0) || (res == -ECOMM)) { phy->phy_state = PHY_EMPTY; sas_unregister_devs_sas_addr(dev, phy_id, last); return res; @@ -2109,9 +2099,7 @@ int sas_ex_revalidate_domain(struct domain_device *port_dev) struct domain_device *dev = NULL; res = sas_find_bcast_dev(port_dev, &dev); - if (res) - goto out; - if (dev) { + while (res == 0 && dev) { struct expander_device *ex = &dev->ex_dev; int i = 0, phy_id; @@ -2123,8 +2111,10 @@ int sas_ex_revalidate_domain(struct domain_device *port_dev) res = sas_rediscover(dev, phy_id); i = phy_id + 1; } while (i < ex->num_phys); + + dev = NULL; + res = sas_find_bcast_dev(port_dev, &dev); } -out: return res; } diff --git a/drivers/scsi/libsas/sas_init.c b/drivers/scsi/libsas/sas_init.c index 10cb5ae3097..014297c0588 100644 --- a/drivers/scsi/libsas/sas_init.c +++ b/drivers/scsi/libsas/sas_init.c @@ -48,18 +48,37 @@ struct sas_task *sas_alloc_task(gfp_t flags) INIT_LIST_HEAD(&task->list); spin_lock_init(&task->task_state_lock); task->task_state_flags = SAS_TASK_STATE_PENDING; - init_timer(&task->timer); - init_completion(&task->completion); } return task; } EXPORT_SYMBOL_GPL(sas_alloc_task); +struct sas_task *sas_alloc_slow_task(gfp_t flags) +{ + struct sas_task *task = sas_alloc_task(flags); + struct sas_task_slow *slow = kmalloc(sizeof(*slow), flags); + + if (!task || !slow) { + if (task) + kmem_cache_free(sas_task_cache, task); + kfree(slow); + return NULL; + } + + task->slow_task = slow; + init_timer(&slow->timer); + init_completion(&slow->completion); + + return task; +} +EXPORT_SYMBOL_GPL(sas_alloc_slow_task); + void sas_free_task(struct sas_task *task) { if (task) { BUG_ON(!list_empty(&task->list)); + kfree(task->slow_task); kmem_cache_free(sas_task_cache, task); } } @@ -114,9 +133,11 @@ int sas_register_ha(struct sas_ha_struct *sas_ha) sas_ha->lldd_queue_size = 128; /* Sanity */ set_bit(SAS_HA_REGISTERED, &sas_ha->state); - spin_lock_init(&sas_ha->state_lock); + spin_lock_init(&sas_ha->lock); mutex_init(&sas_ha->drain_mutex); + 
init_waitqueue_head(&sas_ha->eh_wait_q); INIT_LIST_HEAD(&sas_ha->defer_q); + INIT_LIST_HEAD(&sas_ha->eh_dev_q); error = sas_register_phys(sas_ha); if (error) { @@ -163,9 +184,9 @@ int sas_unregister_ha(struct sas_ha_struct *sas_ha) * events to be queued, and flush any in-progress drainers */ mutex_lock(&sas_ha->drain_mutex); - spin_lock_irq(&sas_ha->state_lock); + spin_lock_irq(&sas_ha->lock); clear_bit(SAS_HA_REGISTERED, &sas_ha->state); - spin_unlock_irq(&sas_ha->state_lock); + spin_unlock_irq(&sas_ha->lock); __sas_drain_work(sas_ha); mutex_unlock(&sas_ha->drain_mutex); @@ -411,9 +432,9 @@ static int queue_phy_reset(struct sas_phy *phy, int hard_reset) d->reset_result = 0; d->hard_reset = hard_reset; - spin_lock_irq(&ha->state_lock); + spin_lock_irq(&ha->lock); sas_queue_work(ha, &d->reset_work); - spin_unlock_irq(&ha->state_lock); + spin_unlock_irq(&ha->lock); rc = sas_drain_work(ha); if (rc == 0) @@ -438,9 +459,9 @@ static int queue_phy_enable(struct sas_phy *phy, int enable) d->enable_result = 0; d->enable = enable; - spin_lock_irq(&ha->state_lock); + spin_lock_irq(&ha->lock); sas_queue_work(ha, &d->enable_work); - spin_unlock_irq(&ha->state_lock); + spin_unlock_irq(&ha->lock); rc = sas_drain_work(ha); if (rc == 0) diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c index f0b9b7bf188..6e795a174a1 100644 --- a/drivers/scsi/libsas/sas_scsi_host.c +++ b/drivers/scsi/libsas/sas_scsi_host.c @@ -460,14 +460,109 @@ struct sas_phy *sas_get_local_phy(struct domain_device *dev) } EXPORT_SYMBOL_GPL(sas_get_local_phy); +static void sas_wait_eh(struct domain_device *dev) +{ + struct sas_ha_struct *ha = dev->port->ha; + DEFINE_WAIT(wait); + + if (dev_is_sata(dev)) { + ata_port_wait_eh(dev->sata_dev.ap); + return; + } + retry: + spin_lock_irq(&ha->lock); + + while (test_bit(SAS_DEV_EH_PENDING, &dev->state)) { + prepare_to_wait(&ha->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE); + spin_unlock_irq(&ha->lock); + schedule(); + spin_lock_irq(&ha->lock); + } + finish_wait(&ha->eh_wait_q, &wait); + + spin_unlock_irq(&ha->lock); + + /* make sure SCSI EH is complete */ + if (scsi_host_in_recovery(ha->core.shost)) { + msleep(10); + goto retry; + } +} +EXPORT_SYMBOL(sas_wait_eh); + +static int sas_queue_reset(struct domain_device *dev, int reset_type, int lun, int wait) +{ + struct sas_ha_struct *ha = dev->port->ha; + int scheduled = 0, tries = 100; + + /* ata: promote lun reset to bus reset */ + if (dev_is_sata(dev)) { + sas_ata_schedule_reset(dev); + if (wait) + sas_ata_wait_eh(dev); + return SUCCESS; + } + + while (!scheduled && tries--) { + spin_lock_irq(&ha->lock); + if (!test_bit(SAS_DEV_EH_PENDING, &dev->state) && + !test_bit(reset_type, &dev->state)) { + scheduled = 1; + ha->eh_active++; + list_add_tail(&dev->ssp_dev.eh_list_node, &ha->eh_dev_q); + set_bit(SAS_DEV_EH_PENDING, &dev->state); + set_bit(reset_type, &dev->state); + int_to_scsilun(lun, &dev->ssp_dev.reset_lun); + scsi_schedule_eh(ha->core.shost); + } + spin_unlock_irq(&ha->lock); + + if (wait) + sas_wait_eh(dev); + + if (scheduled) + return SUCCESS; + } + + SAS_DPRINTK("%s reset of %s failed\n", + reset_type == SAS_DEV_LU_RESET ? 
"LUN" : "Bus", + dev_name(&dev->rphy->dev)); + + return FAILED; +} + +int sas_eh_abort_handler(struct scsi_cmnd *cmd) +{ + int res; + struct sas_task *task = TO_SAS_TASK(cmd); + struct Scsi_Host *host = cmd->device->host; + struct sas_internal *i = to_sas_internal(host->transportt); + + if (current != host->ehandler) + return FAILED; + + if (!i->dft->lldd_abort_task) + return FAILED; + + res = i->dft->lldd_abort_task(task); + if (res == TMF_RESP_FUNC_SUCC || res == TMF_RESP_FUNC_COMPLETE) + return SUCCESS; + + return FAILED; +} +EXPORT_SYMBOL_GPL(sas_eh_abort_handler); + /* Attempt to send a LUN reset message to a device */ int sas_eh_device_reset_handler(struct scsi_cmnd *cmd) { - struct domain_device *dev = cmd_to_domain_dev(cmd); - struct sas_internal *i = - to_sas_internal(dev->port->ha->core.shost->transportt); - struct scsi_lun lun; int res; + struct scsi_lun lun; + struct Scsi_Host *host = cmd->device->host; + struct domain_device *dev = cmd_to_domain_dev(cmd); + struct sas_internal *i = to_sas_internal(host->transportt); + + if (current != host->ehandler) + return sas_queue_reset(dev, SAS_DEV_LU_RESET, cmd->device->lun, 0); int_to_scsilun(cmd->device->lun, &lun); @@ -481,21 +576,22 @@ int sas_eh_device_reset_handler(struct scsi_cmnd *cmd) return FAILED; } -/* Attempt to send a phy (bus) reset */ int sas_eh_bus_reset_handler(struct scsi_cmnd *cmd) { - struct domain_device *dev = cmd_to_domain_dev(cmd); - struct sas_phy *phy = sas_get_local_phy(dev); int res; + struct Scsi_Host *host = cmd->device->host; + struct domain_device *dev = cmd_to_domain_dev(cmd); + struct sas_internal *i = to_sas_internal(host->transportt); - res = sas_phy_reset(phy, 1); - if (res) - SAS_DPRINTK("Bus reset of %s failed 0x%x\n", - kobject_name(&phy->dev.kobj), - res); - sas_put_local_phy(phy); + if (current != host->ehandler) + return sas_queue_reset(dev, SAS_DEV_RESET, 0, 0); - if (res == TMF_RESP_FUNC_SUCC || res == TMF_RESP_FUNC_COMPLETE) + if (!i->dft->lldd_I_T_nexus_reset) + return FAILED; + + res = i->dft->lldd_I_T_nexus_reset(dev); + if (res == TMF_RESP_FUNC_SUCC || res == TMF_RESP_FUNC_COMPLETE || + res == -ENODEV) return SUCCESS; return FAILED; @@ -667,16 +763,53 @@ static void sas_eh_handle_sas_errors(struct Scsi_Host *shost, struct list_head * goto out; } +static void sas_eh_handle_resets(struct Scsi_Host *shost) +{ + struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost); + struct sas_internal *i = to_sas_internal(shost->transportt); + + /* handle directed resets to sas devices */ + spin_lock_irq(&ha->lock); + while (!list_empty(&ha->eh_dev_q)) { + struct domain_device *dev; + struct ssp_device *ssp; + + ssp = list_entry(ha->eh_dev_q.next, typeof(*ssp), eh_list_node); + list_del_init(&ssp->eh_list_node); + dev = container_of(ssp, typeof(*dev), ssp_dev); + kref_get(&dev->kref); + WARN_ONCE(dev_is_sata(dev), "ssp reset to ata device?\n"); + + spin_unlock_irq(&ha->lock); + + if (test_and_clear_bit(SAS_DEV_LU_RESET, &dev->state)) + i->dft->lldd_lu_reset(dev, ssp->reset_lun.scsi_lun); + + if (test_and_clear_bit(SAS_DEV_RESET, &dev->state)) + i->dft->lldd_I_T_nexus_reset(dev); + + sas_put_device(dev); + spin_lock_irq(&ha->lock); + clear_bit(SAS_DEV_EH_PENDING, &dev->state); + ha->eh_active--; + } + spin_unlock_irq(&ha->lock); +} + + void sas_scsi_recover_host(struct Scsi_Host *shost) { struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost); - unsigned long flags; LIST_HEAD(eh_work_q); + int tries = 0; + bool retry; - spin_lock_irqsave(shost->host_lock, flags); +retry: + tries++; + retry = true; + 
spin_lock_irq(shost->host_lock); list_splice_init(&shost->eh_cmd_q, &eh_work_q); - shost->host_eh_scheduled = 0; - spin_unlock_irqrestore(shost->host_lock, flags); + spin_unlock_irq(shost->host_lock); SAS_DPRINTK("Enter %s busy: %d failed: %d\n", __func__, shost->host_busy, shost->host_failed); @@ -705,13 +838,26 @@ out: if (ha->lldd_max_execute_num > 1) wake_up_process(ha->core.queue_thread); + sas_eh_handle_resets(shost); + /* now link into libata eh --- if we have any ata devices */ sas_ata_strategy_handler(shost); scsi_eh_flush_done_q(&ha->eh_done_q); - SAS_DPRINTK("--- Exit %s: busy: %d failed: %d\n", - __func__, shost->host_busy, shost->host_failed); + /* check if any new eh work was scheduled during the last run */ + spin_lock_irq(&ha->lock); + if (ha->eh_active == 0) { + shost->host_eh_scheduled = 0; + retry = false; + } + spin_unlock_irq(&ha->lock); + + if (retry) + goto retry; + + SAS_DPRINTK("--- Exit %s: busy: %d failed: %d tries: %d\n", + __func__, shost->host_busy, shost->host_failed, tries); } enum blk_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *cmd) @@ -988,9 +1134,13 @@ void sas_task_abort(struct sas_task *task) /* Escape for libsas internal commands */ if (!sc) { - if (!del_timer(&task->timer)) + struct sas_task_slow *slow = task->slow_task; + + if (!slow) + return; + if (!del_timer(&slow->timer)) return; - task->timer.function(task->timer.data); + slow->timer.function(slow->timer.data); return; } @@ -1003,7 +1153,6 @@ void sas_task_abort(struct sas_task *task) spin_lock_irqsave(q->queue_lock, flags); blk_abort_request(sc->request); spin_unlock_irqrestore(q->queue_lock, flags); - scsi_schedule_eh(sc->device->host); } } diff --git a/drivers/scsi/lpfc/Makefile b/drivers/scsi/lpfc/Makefile index fe5d396aca7..e2516ba8ebf 100644 --- a/drivers/scsi/lpfc/Makefile +++ b/drivers/scsi/lpfc/Makefile @@ -22,7 +22,9 @@ ccflags-$(GCOV) := -fprofile-arcs -ftest-coverage ccflags-$(GCOV) += -O0 +ifdef WARNINGS_BECOME_ERRORS ccflags-y += -Werror +endif obj-$(CONFIG_SCSI_LPFC) := lpfc.o diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index e5da6da20f8..a65c05a8d48 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h @@ -96,6 +96,10 @@ struct lpfc_sli2_slim; /* queue dump line buffer size */ #define LPFC_LBUF_SZ 128 +/* mailbox system shutdown options */ +#define LPFC_MBX_NO_WAIT 0 +#define LPFC_MBX_WAIT 1 + enum lpfc_polling_flags { ENABLE_FCP_RING_POLLING = 0x1, DISABLE_FCP_RING_INT = 0x2 diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index 5eb2bc11618..adef5bb2100 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -3617,6 +3617,91 @@ lpfc_sriov_nr_virtfn_init(struct lpfc_hba *phba, int val) static DEVICE_ATTR(lpfc_sriov_nr_virtfn, S_IRUGO | S_IWUSR, lpfc_sriov_nr_virtfn_show, lpfc_sriov_nr_virtfn_store); +/** + * lpfc_fcp_imax_store + * + * @dev: class device that is converted into a Scsi_host. + * @attr: device attribute, not used. + * @buf: string with the number of fast-path FCP interrupts per second. + * @count: unused variable. + * + * Description: + * If val is in a valid range [636,651042], then set the adapter's + * maximum number of fast-path FCP interrupts per second. + * + * Returns: + * length of the buf on success if val is in range and the intended mode + * is supported. + * -EINVAL if val out of range or intended mode is not supported. 
+ **/ +static ssize_t +lpfc_fcp_imax_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; + struct lpfc_hba *phba = vport->phba; + int val = 0, i; + + /* Sanity check on user data */ + if (!isdigit(buf[0])) + return -EINVAL; + if (sscanf(buf, "%i", &val) != 1) + return -EINVAL; + + /* Value range is [636,651042] */ + if (val < LPFC_MIM_IMAX || val > LPFC_DMULT_CONST) + return -EINVAL; + + phba->cfg_fcp_imax = (uint32_t)val; + for (i = 0; i < phba->cfg_fcp_eq_count; i += LPFC_MAX_EQ_DELAY) + lpfc_modify_fcp_eq_delay(phba, i); + + return strlen(buf); +} + +/* +# lpfc_fcp_imax: The maximum number of fast-path FCP interrupts per second +# +# Value range is [636,651042]. Default value is 10000. +*/ +static int lpfc_fcp_imax = LPFC_FP_DEF_IMAX; +module_param(lpfc_fcp_imax, int, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(lpfc_fcp_imax, + "Set the maximum number of fast-path FCP interrupts per second"); +lpfc_param_show(fcp_imax) + +/** + * lpfc_fcp_imax_init - Set the initial maximum number of fast-path FCP interrupts per second + * @phba: lpfc_hba pointer. + * @val: maximum number of fast-path FCP interrupts per second. + * + * Description: + * If val is in a valid range [636,651042], then initialize the adapter's + * maximum number of fast-path FCP interrupts per second. + * + * Returns: + * zero if val saved. + * zero if val out of range (default value used instead). + **/ +static int +lpfc_fcp_imax_init(struct lpfc_hba *phba, int val) +{ + if (val >= LPFC_MIM_IMAX && val <= LPFC_DMULT_CONST) { + phba->cfg_fcp_imax = val; + return 0; + } + + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "3016 fcp_imax: %d out of range, using default\n", val); + phba->cfg_fcp_imax = LPFC_FP_DEF_IMAX; + + return 0; +} + +static DEVICE_ATTR(lpfc_fcp_imax, S_IRUGO | S_IWUSR, + lpfc_fcp_imax_show, lpfc_fcp_imax_store); + /* # lpfc_fcp_class: Determines FC class to use for the FCP protocol. # Value range is [2,3]. Default value is 3. @@ -3758,14 +3843,6 @@ LPFC_ATTR_R(use_msi, 2, 0, 2, "Use Message Signaled Interrupts (1) or " "MSI-X (2), if possible"); /* -# lpfc_fcp_imax: Set the maximum number of fast-path FCP interrupts per second -# -# Value range is [636,651042]. Default value is 10000. -*/ -LPFC_ATTR_R(fcp_imax, LPFC_FP_DEF_IMAX, LPFC_MIM_IMAX, LPFC_DMULT_CONST, - "Set the maximum number of fast-path FCP interrupts per second"); - -/* # lpfc_fcp_wq_count: Set the number of fast-path FCP work queues # # Value range is [1,31]. Default value is 4. 
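For orientation, the two fcp_imax pieces of this patch work together: lpfc_fcp_imax_store() above validates the requested interrupt rate against [636,651042], and lpfc_modify_fcp_eq_delay() (added in the lpfc_sli.c hunk further down) turns that rate into a per-EQ delay multiplier via dmult = LPFC_DMULT_CONST / cfg_fcp_imax - 1. The sketch below is a minimal userspace model of that validation and arithmetic, not driver code; the numeric values assumed for LPFC_MIM_IMAX and LPFC_DMULT_CONST are inferred from the documented range and are defined in the real driver's headers.

#include <stdio.h>

/* Assumed values, inferred from the documented range [636,651042];
 * the lpfc driver defines the real macros in its headers. */
#define LPFC_MIM_IMAX		636
#define LPFC_DMULT_CONST	651042

/* Mirrors the range check in lpfc_fcp_imax_store()/lpfc_fcp_imax_init() */
static int fcp_imax_in_range(int val)
{
	return val >= LPFC_MIM_IMAX && val <= LPFC_DMULT_CONST;
}

int main(void)
{
	const int samples[] = { 635, 636, 10000, 651042, 651043 };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		int imax = samples[i];

		if (!fcp_imax_in_range(imax)) {
			printf("imax %6d: rejected (-EINVAL)\n", imax);
			continue;
		}
		/* Same arithmetic as lpfc_modify_fcp_eq_delay() */
		printf("imax %6d: delay multiplier %d\n",
		       imax, LPFC_DMULT_CONST / imax - 1);
	}
	return 0;
}

With the default of 10000 interrupts per second this yields a multiplier of 64; the maximum value 651042 yields 0, i.e. no coalescing delay.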
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h index 9b2a16f3bc7..8a2a514a255 100644 --- a/drivers/scsi/lpfc/lpfc_crtn.h +++ b/drivers/scsi/lpfc/lpfc_crtn.h @@ -183,7 +183,7 @@ int lpfc_post_buffer(struct lpfc_hba *, struct lpfc_sli_ring *, int); void lpfc_decode_firmware_rev(struct lpfc_hba *, char *, int); int lpfc_online(struct lpfc_hba *); void lpfc_unblock_mgmt_io(struct lpfc_hba *); -void lpfc_offline_prep(struct lpfc_hba *); +void lpfc_offline_prep(struct lpfc_hba *, int); void lpfc_offline(struct lpfc_hba *); void lpfc_reset_hba(struct lpfc_hba *); @@ -273,7 +273,7 @@ int lpfc_sli_host_down(struct lpfc_vport *); int lpfc_sli_hba_down(struct lpfc_hba *); int lpfc_sli_issue_mbox(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t); int lpfc_sli_handle_mb_event(struct lpfc_hba *); -void lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *); +void lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *, int); int lpfc_sli_check_eratt(struct lpfc_hba *); void lpfc_sli_handle_slow_ring_event(struct lpfc_hba *, struct lpfc_sli_ring *, uint32_t); diff --git a/drivers/scsi/lpfc/lpfc_debugfs.h b/drivers/scsi/lpfc/lpfc_debugfs.h index 616c400dae1..afe368fd1b9 100644 --- a/drivers/scsi/lpfc/lpfc_debugfs.h +++ b/drivers/scsi/lpfc/lpfc_debugfs.h @@ -395,8 +395,13 @@ lpfc_debug_dump_fcp_cq(struct lpfc_hba *phba, int fcp_wqidx) for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) if (phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id == fcp_cqid) break; - if (fcp_cqidx >= phba->cfg_fcp_eq_count) - return; + if (phba->intr_type == MSIX) { + if (fcp_cqidx >= phba->cfg_fcp_eq_count) + return; + } else { + if (fcp_cqidx > 0) + return; + } printk(KERN_ERR "FCP CQ: WQ[Idx:%d|Qid%d]->CQ[Idx%d|Qid%d]:\n", fcp_wqidx, phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id, @@ -426,8 +431,13 @@ lpfc_debug_dump_fcp_eq(struct lpfc_hba *phba, int fcp_wqidx) for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) if (phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id == fcp_cqid) break; - if (fcp_cqidx >= phba->cfg_fcp_eq_count) - return; + if (phba->intr_type == MSIX) { + if (fcp_cqidx >= phba->cfg_fcp_eq_count) + return; + } else { + if (fcp_cqidx > 0) + return; + } if (phba->cfg_fcp_eq_count == 0) { fcp_eqidx = -1; diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index 5bb269e224f..9b4f92941dc 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -530,7 +530,7 @@ lpfc_work_list_done(struct lpfc_hba *phba) break; case LPFC_EVT_OFFLINE_PREP: if (phba->link_state >= LPFC_LINK_DOWN) - lpfc_offline_prep(phba); + lpfc_offline_prep(phba, LPFC_MBX_WAIT); *(int *)(evtp->evt_arg1) = 0; complete((struct completion *)(evtp->evt_arg2)); break; diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h index f1946dfda5b..953603a7a43 100644 --- a/drivers/scsi/lpfc/lpfc_hw4.h +++ b/drivers/scsi/lpfc/lpfc_hw4.h @@ -874,6 +874,7 @@ struct mbox_header { #define LPFC_MBOX_OPCODE_MQ_CREATE 0x15 #define LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES 0x20 #define LPFC_MBOX_OPCODE_NOP 0x21 +#define LPFC_MBOX_OPCODE_MODIFY_EQ_DELAY 0x29 #define LPFC_MBOX_OPCODE_MQ_DESTROY 0x35 #define LPFC_MBOX_OPCODE_CQ_DESTROY 0x36 #define LPFC_MBOX_OPCODE_EQ_DESTROY 0x37 @@ -940,6 +941,13 @@ struct eq_context { uint32_t reserved3; }; +struct eq_delay_info { + uint32_t eq_id; + uint32_t phase; + uint32_t delay_multi; +}; +#define LPFC_MAX_EQ_DELAY 8 + struct sgl_page_pairs { uint32_t sgl_pg0_addr_lo; uint32_t sgl_pg0_addr_hi; @@ -1002,6 +1010,19 @@ struct lpfc_mbx_eq_create { } 
u; }; +struct lpfc_mbx_modify_eq_delay { + struct mbox_header header; + union { + struct { + uint32_t num_eq; + struct eq_delay_info eq[LPFC_MAX_EQ_DELAY]; + } request; + struct { + uint32_t word0; + } response; + } u; +}; + struct lpfc_mbx_eq_destroy { struct mbox_header header; union { @@ -2875,6 +2896,7 @@ struct lpfc_mqe { struct lpfc_mbx_mq_create mq_create; struct lpfc_mbx_mq_create_ext mq_create_ext; struct lpfc_mbx_eq_create eq_create; + struct lpfc_mbx_modify_eq_delay eq_delay; struct lpfc_mbx_cq_create cq_create; struct lpfc_mbx_wq_create wq_create; struct lpfc_mbx_rq_create rq_create; @@ -3084,6 +3106,28 @@ struct lpfc_acqe_fc_la { #define LPFC_FC_LA_EVENT_TYPE_SHARED_LINK 0x2 }; +struct lpfc_acqe_misconfigured_event { + struct { + uint32_t word0; +#define lpfc_sli_misconfigured_port0_SHIFT 0 +#define lpfc_sli_misconfigured_port0_MASK 0x000000FF +#define lpfc_sli_misconfigured_port0_WORD word0 +#define lpfc_sli_misconfigured_port1_SHIFT 8 +#define lpfc_sli_misconfigured_port1_MASK 0x000000FF +#define lpfc_sli_misconfigured_port1_WORD word0 +#define lpfc_sli_misconfigured_port2_SHIFT 16 +#define lpfc_sli_misconfigured_port2_MASK 0x000000FF +#define lpfc_sli_misconfigured_port2_WORD word0 +#define lpfc_sli_misconfigured_port3_SHIFT 24 +#define lpfc_sli_misconfigured_port3_MASK 0x000000FF +#define lpfc_sli_misconfigured_port3_WORD word0 + } theEvent; +#define LPFC_SLI_EVENT_STATUS_VALID 0x00 +#define LPFC_SLI_EVENT_STATUS_NOT_PRESENT 0x01 +#define LPFC_SLI_EVENT_STATUS_WRONG_TYPE 0x02 +#define LPFC_SLI_EVENT_STATUS_UNSUPPORTED 0x03 +}; + struct lpfc_acqe_sli { uint32_t event_data1; uint32_t event_data2; @@ -3094,6 +3138,7 @@ struct lpfc_acqe_sli { #define LPFC_SLI_EVENT_TYPE_NORM_TEMP 0x3 #define LPFC_SLI_EVENT_TYPE_NVLOG_POST 0x4 #define LPFC_SLI_EVENT_TYPE_DIAG_DUMP 0x5 +#define LPFC_SLI_EVENT_TYPE_MISCONFIGURED 0x9 }; /* diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 411ed48d79d..45c15208be9 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -73,6 +73,8 @@ static int lpfc_hba_down_post_s4(struct lpfc_hba *phba); static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *); static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *); static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *); +static void lpfc_sli4_disable_intr(struct lpfc_hba *); +static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t); static struct scsi_transport_template *lpfc_transport_template = NULL; static struct scsi_transport_template *lpfc_vport_transport_template = NULL; @@ -1169,7 +1171,7 @@ lpfc_offline_eratt(struct lpfc_hba *phba) spin_lock_irq(&phba->hbalock); psli->sli_flag &= ~LPFC_SLI_ACTIVE; spin_unlock_irq(&phba->hbalock); - lpfc_offline_prep(phba); + lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT); lpfc_offline(phba); lpfc_reset_barrier(phba); @@ -1193,7 +1195,7 @@ lpfc_offline_eratt(struct lpfc_hba *phba) static void lpfc_sli4_offline_eratt(struct lpfc_hba *phba) { - lpfc_offline_prep(phba); + lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT); lpfc_offline(phba); lpfc_sli4_brdreset(phba); lpfc_hba_down_post(phba); @@ -1251,7 +1253,7 @@ lpfc_handle_deferred_eratt(struct lpfc_hba *phba) * There was a firmware error. Take the hba offline and then * attempt to restart it. */ - lpfc_offline_prep(phba); + lpfc_offline_prep(phba, LPFC_MBX_WAIT); lpfc_offline(phba); /* Wait for the ER1 bit to clear.*/ @@ -1372,7 +1374,7 @@ lpfc_handle_eratt_s3(struct lpfc_hba *phba) * There was a firmware error. 
Take the hba offline and then * attempt to restart it. */ - lpfc_offline_prep(phba); + lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT); lpfc_offline(phba); lpfc_sli_brdrestart(phba); if (lpfc_online(phba) == 0) { /* Initialize the HBA */ @@ -1428,6 +1430,54 @@ lpfc_handle_eratt_s3(struct lpfc_hba *phba) } /** + * lpfc_sli4_port_sta_fn_reset - The SLI4 function reset due to port status reg + * @phba: pointer to lpfc hba data structure. + * @mbx_action: flag for mailbox shutdown action. + * + * This routine is invoked to perform an SLI4 port PCI function reset in + * response to port status register polling attention. It waits for port + * status register (ERR, RDY, RN) bits before proceeding with function reset. + * During this process, interrupt vectors are freed and later requested + * for handling possible port resource change. + **/ +static int +lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action) +{ + int rc; + uint32_t intr_mode; + + /* + * On error status condition, driver needs to wait for port + * ready before performing reset. + */ + rc = lpfc_sli4_pdev_status_reg_wait(phba); + if (!rc) { + /* need reset: attempt for port recovery */ + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2887 Reset Needed: Attempting Port " + "Recovery...\n"); + lpfc_offline_prep(phba, mbx_action); + lpfc_offline(phba); + /* release interrupt for possible resource change */ + lpfc_sli4_disable_intr(phba); + lpfc_sli_brdrestart(phba); + /* request and enable interrupt */ + intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode); + if (intr_mode == LPFC_INTR_ERROR) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "3175 Failed to enable interrupt\n"); + return -EIO; + } else { + phba->intr_mode = intr_mode; + } + rc = lpfc_online(phba); + if (rc == 0) + lpfc_unblock_mgmt_io(phba); + } + return rc; +} + +/** * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler * @phba: pointer to lpfc hba data structure. * @@ -1506,30 +1556,18 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba) reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON) lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "3145 Port Down: Provisioning\n"); - /* - * On error status condition, driver needs to wait for port - * ready before performing reset. - */ - rc = lpfc_sli4_pdev_status_reg_wait(phba); - if (!rc) { - /* need reset: attempt for port recovery */ - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "2887 Reset Needed: Attempting Port " - "Recovery...\n"); - lpfc_offline_prep(phba); - lpfc_offline(phba); - lpfc_sli_brdrestart(phba); - if (lpfc_online(phba) == 0) { - lpfc_unblock_mgmt_io(phba); - /* don't report event on forced debug dump */ - if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && - reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP) - return; - else - break; - } - /* fall through for not able to recover */ + + /* Check port status register for function reset */ + rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT); + if (rc == 0) { + /* don't report event on forced debug dump */ + if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && + reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP) return; + else + break; } + /* fall through for not able to recover */ lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "3152 Unrecoverable error, bring the port " "offline\n"); @@ -2494,15 +2532,19 @@ lpfc_stop_hba_timers(struct lpfc_hba *phba) * driver prepares the HBA interface for online or offline. 
**/ static void -lpfc_block_mgmt_io(struct lpfc_hba * phba) +lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action) { unsigned long iflag; uint8_t actcmd = MBX_HEARTBEAT; unsigned long timeout; - timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies; spin_lock_irqsave(&phba->hbalock, iflag); phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO; + spin_unlock_irqrestore(&phba->hbalock, iflag); + if (mbx_action == LPFC_MBX_NO_WAIT) + return; + timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies; + spin_lock_irqsave(&phba->hbalock, iflag); if (phba->sli.mbox_active) { actcmd = phba->sli.mbox_active->u.mb.mbxCommand; /* Determine how long we might wait for the active mailbox @@ -2592,7 +2634,7 @@ lpfc_online(struct lpfc_hba *phba) lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, "0458 Bring Adapter online\n"); - lpfc_block_mgmt_io(phba); + lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT); if (!lpfc_sli_queue_setup(phba)) { lpfc_unblock_mgmt_io(phba); @@ -2660,7 +2702,7 @@ lpfc_unblock_mgmt_io(struct lpfc_hba * phba) * queue to make it ready to be brought offline. **/ void -lpfc_offline_prep(struct lpfc_hba * phba) +lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action) { struct lpfc_vport *vport = phba->pport; struct lpfc_nodelist *ndlp, *next_ndlp; @@ -2671,7 +2713,7 @@ lpfc_offline_prep(struct lpfc_hba * phba) if (vport->fc_flag & FC_OFFLINE_MODE) return; - lpfc_block_mgmt_io(phba); + lpfc_block_mgmt_io(phba, mbx_action); lpfc_linkdown(phba); @@ -2718,7 +2760,7 @@ lpfc_offline_prep(struct lpfc_hba * phba) } lpfc_destroy_vport_work_array(phba, vports); - lpfc_sli_mbox_sys_shutdown(phba); + lpfc_sli_mbox_sys_shutdown(phba, mbx_action); } /** @@ -3684,12 +3726,76 @@ out_free_pmb: static void lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli) { - lpfc_printf_log(phba, KERN_INFO, LOG_SLI, - "2901 Async SLI event - Event Data1:x%08x Event Data2:" - "x%08x SLI Event Type:%d", - acqe_sli->event_data1, acqe_sli->event_data2, - bf_get(lpfc_trailer_type, acqe_sli)); - return; + char port_name; + char message[80]; + uint8_t status; + struct lpfc_acqe_misconfigured_event *misconfigured; + + /* special case misconfigured event as it contains data for all ports */ + if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) != + LPFC_SLI_INTF_IF_TYPE_2) || + (bf_get(lpfc_trailer_type, acqe_sli) != + LPFC_SLI_EVENT_TYPE_MISCONFIGURED)) { + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "2901 Async SLI event - Event Data1:x%08x Event Data2:" + "x%08x SLI Event Type:%d\n", + acqe_sli->event_data1, acqe_sli->event_data2, + bf_get(lpfc_trailer_type, acqe_sli)); + return; + } + + port_name = phba->Port[0]; + if (port_name == 0x00) + port_name = '?'; /* port name is empty */ + + misconfigured = (struct lpfc_acqe_misconfigured_event *) + &acqe_sli->event_data1; + + /* fetch the status for this port */ + switch (phba->sli4_hba.lnk_info.lnk_no) { + case LPFC_LINK_NUMBER_0: + status = bf_get(lpfc_sli_misconfigured_port0, + &misconfigured->theEvent); + break; + case LPFC_LINK_NUMBER_1: + status = bf_get(lpfc_sli_misconfigured_port1, + &misconfigured->theEvent); + break; + case LPFC_LINK_NUMBER_2: + status = bf_get(lpfc_sli_misconfigured_port2, + &misconfigured->theEvent); + break; + case LPFC_LINK_NUMBER_3: + status = bf_get(lpfc_sli_misconfigured_port3, + &misconfigured->theEvent); + break; + default: + status = ~LPFC_SLI_EVENT_STATUS_VALID; + break; + } + + switch (status) { + case LPFC_SLI_EVENT_STATUS_VALID: + return; /* no message if the sfp is okay */ + case 
LPFC_SLI_EVENT_STATUS_NOT_PRESENT: + sprintf(message, "Not installed"); + break; + case LPFC_SLI_EVENT_STATUS_WRONG_TYPE: + sprintf(message, + "Optics of two types installed"); + break; + case LPFC_SLI_EVENT_STATUS_UNSUPPORTED: + sprintf(message, "Incompatible optics"); + break; + default: + /* firmware is reporting a status we don't know about */ + sprintf(message, "Unknown event status x%02x", status); + break; + } + + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "3176 Misconfigured Physical Port - " + "Port Name %c %s\n", port_name, message); } /** @@ -4312,7 +4418,7 @@ lpfc_reset_hba(struct lpfc_hba *phba) phba->link_state = LPFC_HBA_ERROR; return; } - lpfc_offline_prep(phba); + lpfc_offline_prep(phba, LPFC_MBX_WAIT); lpfc_offline(phba); lpfc_sli_brdrestart(phba); lpfc_online(phba); @@ -5514,14 +5620,45 @@ lpfc_destroy_shost(struct lpfc_hba *phba) static void lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost) { + uint32_t old_mask; + uint32_t old_guard; + int pagecnt = 10; if (lpfc_prot_mask && lpfc_prot_guard) { lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "1478 Registering BlockGuard with the " "SCSI layer\n"); - scsi_host_set_prot(shost, lpfc_prot_mask); - scsi_host_set_guard(shost, lpfc_prot_guard); + + old_mask = lpfc_prot_mask; + old_guard = lpfc_prot_guard; + + /* Only allow supported values */ + lpfc_prot_mask &= (SHOST_DIF_TYPE1_PROTECTION | + SHOST_DIX_TYPE0_PROTECTION | + SHOST_DIX_TYPE1_PROTECTION); + lpfc_prot_guard &= (SHOST_DIX_GUARD_IP | SHOST_DIX_GUARD_CRC); + + /* DIF Type 1 protection for profiles AST1/C1 is end to end */ + if (lpfc_prot_mask == SHOST_DIX_TYPE1_PROTECTION) + lpfc_prot_mask |= SHOST_DIF_TYPE1_PROTECTION; + + if (lpfc_prot_mask && lpfc_prot_guard) { + if ((old_mask != lpfc_prot_mask) || + (old_guard != lpfc_prot_guard)) + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "1475 Registering BlockGuard with the " + "SCSI layer: mask %d guard %d\n", + lpfc_prot_mask, lpfc_prot_guard); + + scsi_host_set_prot(shost, lpfc_prot_mask); + scsi_host_set_guard(shost, lpfc_prot_guard); + } else + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "1479 Not Registering BlockGuard with the SCSI " + "layer, Bad protection parameters: %d %d\n", + old_mask, old_guard); } + if (!_dump_buf_data) { while (pagecnt) { spin_lock_init(&_dump_buf_lock); @@ -8859,7 +8996,7 @@ lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg) "0473 PCI device Power Management suspend.\n"); /* Bring down the device */ - lpfc_offline_prep(phba); + lpfc_offline_prep(phba, LPFC_MBX_WAIT); lpfc_offline(phba); kthread_stop(phba->worker_thread); @@ -8985,7 +9122,7 @@ lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba) "2710 PCI channel disable preparing for reset\n"); /* Block any management I/Os to the device */ - lpfc_block_mgmt_io(phba); + lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT); /* Block all SCSI devices' I/Os on the host */ lpfc_scsi_dev_block(phba); @@ -9129,7 +9266,7 @@ lpfc_io_slot_reset_s3(struct pci_dev *pdev) phba->intr_mode = intr_mode; /* Take device offline, it will perform cleanup */ - lpfc_offline_prep(phba); + lpfc_offline_prep(phba, LPFC_MBX_WAIT); lpfc_offline(phba); lpfc_sli_brdrestart(phba); @@ -9603,7 +9740,7 @@ lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg) "2843 PCI device Power Management suspend.\n"); /* Bring down the device */ - lpfc_offline_prep(phba); + lpfc_offline_prep(phba, LPFC_MBX_WAIT); lpfc_offline(phba); kthread_stop(phba->worker_thread); @@ -9729,7 +9866,7 @@ lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba) "2826 PCI channel disable 
preparing for reset\n"); /* Block any management I/Os to the device */ - lpfc_block_mgmt_io(phba); + lpfc_block_mgmt_io(phba, LPFC_MBX_NO_WAIT); /* Block all SCSI devices' I/Os on the host */ lpfc_scsi_dev_block(phba); @@ -9902,7 +10039,7 @@ lpfc_io_resume_s4(struct pci_dev *pdev) */ if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) { /* Perform device reset */ - lpfc_offline_prep(phba); + lpfc_offline_prep(phba, LPFC_MBX_WAIT); lpfc_offline(phba); lpfc_sli_brdrestart(phba); /* Bring the device back online */ diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index 66e09069f28..925975d2d76 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -4275,10 +4275,8 @@ lpfc_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) * Catch race where our node has transitioned, but the * transport is still transitioning. */ - if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { - cmnd->result = ScsiResult(DID_IMM_RETRY, 0); - goto out_fail_command; - } + if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) + goto out_tgt_busy; if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) goto out_tgt_busy; @@ -4412,12 +4410,12 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) struct lpfc_iocbq *abtsiocb; struct lpfc_scsi_buf *lpfc_cmd; IOCB_t *cmd, *icmd; - int ret = SUCCESS; + int ret = SUCCESS, status = 0; DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq); - ret = fc_block_scsi_eh(cmnd); - if (ret) - return ret; + status = fc_block_scsi_eh(cmnd); + if (status) + return status; spin_lock_irq(&phba->hbalock); /* driver queued commands are in process of being flushed */ @@ -4435,7 +4433,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, "2873 SCSI Layer I/O Abort Request IO CMPL Status " "x%x ID %d LUN %d\n", - ret, cmnd->device->id, cmnd->device->lun); + SUCCESS, cmnd->device->id, cmnd->device->lun); return SUCCESS; } @@ -4762,7 +4760,7 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd) unsigned tgt_id = cmnd->device->id; unsigned int lun_id = cmnd->device->lun; struct lpfc_scsi_event_header scsi_event; - int status; + int status, ret = SUCCESS; if (!rdata) { lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, @@ -4803,9 +4801,9 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd) * So, continue on. * We will report success if all the i/o aborts successfully. */ - status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id, + ret = lpfc_reset_flush_io_context(vport, tgt_id, lun_id, LPFC_CTX_LUN); - return status; + return ret; } /** @@ -4829,7 +4827,7 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd) unsigned tgt_id = cmnd->device->id; unsigned int lun_id = cmnd->device->lun; struct lpfc_scsi_event_header scsi_event; - int status; + int status, ret = SUCCESS; if (!rdata) { lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, @@ -4870,9 +4868,9 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd) * So, continue on. * We will report success if all the i/o aborts successfully. 
*/ - status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id, - LPFC_CTX_TGT); - return status; + ret = lpfc_reset_flush_io_context(vport, tgt_id, lun_id, + LPFC_CTX_TGT); + return ret; } /** @@ -4982,7 +4980,7 @@ lpfc_host_reset_handler(struct scsi_cmnd *cmnd) struct lpfc_hba *phba = vport->phba; int rc, ret = SUCCESS; - lpfc_offline_prep(phba); + lpfc_offline_prep(phba, LPFC_MBX_WAIT); lpfc_offline(phba); rc = lpfc_sli_brdrestart(phba); if (rc) diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index b4720a10981..9cbd20b1328 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -8984,7 +8984,7 @@ lpfc_sli_hba_down(struct lpfc_hba *phba) int i; /* Shutdown the mailbox command sub-system */ - lpfc_sli_mbox_sys_shutdown(phba); + lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT); lpfc_hba_down_prep(phba); @@ -9996,11 +9996,17 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq, * sub-system flush routine to gracefully bring down mailbox sub-system. **/ void -lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba) +lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action) { struct lpfc_sli *psli = &phba->sli; unsigned long timeout; + if (mbx_action == LPFC_MBX_NO_WAIT) { + /* delay 100ms for port state */ + msleep(100); + lpfc_sli_mbox_sys_flush(phba); + return; + } timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies; spin_lock_irq(&phba->hbalock); @@ -12042,6 +12048,83 @@ out_fail: } /** + * lpfc_modify_fcp_eq_delay - Modify Delay Multiplier on FCP EQs + * @phba: HBA structure that indicates port to create a queue on. + * @startq: The starting FCP EQ to modify + * + * This function sends a MODIFY_EQ_DELAY mailbox command to the HBA. + * + * The @phba struct is used to send mailbox command to HBA. The @startq + * is used to get the starting FCP EQ to change. + * This function is synchronous and will wait for the mailbox + * command to finish before continuing. + * + * On success this function will return a zero. If unable to allocate enough + * memory this function will return -ENOMEM. If the mailbox command + * fails this function will return -ENXIO. 
+ **/ +uint32_t +lpfc_modify_fcp_eq_delay(struct lpfc_hba *phba, uint16_t startq) +{ + struct lpfc_mbx_modify_eq_delay *eq_delay; + LPFC_MBOXQ_t *mbox; + struct lpfc_queue *eq; + int cnt, rc, length, status = 0; + uint32_t shdr_status, shdr_add_status; + int fcp_eqidx; + union lpfc_sli4_cfg_shdr *shdr; + uint16_t dmult; + + if (startq >= phba->cfg_fcp_eq_count) + return 0; + + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mbox) + return -ENOMEM; + length = (sizeof(struct lpfc_mbx_modify_eq_delay) - + sizeof(struct lpfc_sli4_cfg_mhdr)); + lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, + LPFC_MBOX_OPCODE_MODIFY_EQ_DELAY, + length, LPFC_SLI4_MBX_EMBED); + eq_delay = &mbox->u.mqe.un.eq_delay; + + /* Calculate delay multiplier from maximum interrupts per second */ + dmult = LPFC_DMULT_CONST/phba->cfg_fcp_imax - 1; + + cnt = 0; + for (fcp_eqidx = startq; fcp_eqidx < phba->cfg_fcp_eq_count; + fcp_eqidx++) { + eq = phba->sli4_hba.fp_eq[fcp_eqidx]; + if (!eq) + continue; + eq_delay->u.request.eq[cnt].eq_id = eq->queue_id; + eq_delay->u.request.eq[cnt].phase = 0; + eq_delay->u.request.eq[cnt].delay_multi = dmult; + cnt++; + if (cnt >= LPFC_MAX_EQ_DELAY) + break; + } + eq_delay->u.request.num_eq = cnt; + + mbox->vport = phba->pport; + mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; + mbox->context1 = NULL; + rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); + shdr = (union lpfc_sli4_cfg_shdr *) &eq_delay->header.cfg_shdr; + shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); + shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); + if (shdr_status || shdr_add_status || rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2512 MODIFY_EQ_DELAY mailbox failed with " + "status x%x add_status x%x, mbx status x%x\n", + shdr_status, shdr_add_status, rc); + status = -ENXIO; + } + mempool_free(mbox, phba->mbox_mem_pool); + return status; +} + +/** * lpfc_eq_create - Create an Event Queue on the HBA * @phba: HBA structure that indicates port to create a queue on. * @eq: The queue structure to use to create the event queue. @@ -12228,8 +12311,10 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq, lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "0361 Unsupported CQ count. (%d)\n", cq->entry_count); - if (cq->entry_count < 256) - return -EINVAL; + if (cq->entry_count < 256) { + status = -EINVAL; + goto out; + } /* otherwise default to smallest count (drop through) */ case 256: bf_set(lpfc_cq_context_count, &cq_create->u.request.context, @@ -12420,8 +12505,10 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq, lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "0362 Unsupported MQ count. (%d)\n", mq->entry_count); - if (mq->entry_count < 16) - return -EINVAL; + if (mq->entry_count < 16) { + status = -EINVAL; + goto out; + } /* otherwise default to smallest count (drop through) */ case 16: bf_set(lpfc_mq_context_ring_size, @@ -12710,8 +12797,10 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "2535 Unsupported RQ count. (%d)\n", hrq->entry_count); - if (hrq->entry_count < 512) - return -EINVAL; + if (hrq->entry_count < 512) { + status = -EINVAL; + goto out; + } /* otherwise default to smallest count (drop through) */ case 512: bf_set(lpfc_rq_context_rqe_count, @@ -12791,8 +12880,10 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "2536 Unsupported RQ count. 
(%d)\n", drq->entry_count); - if (drq->entry_count < 512) - return -EINVAL; + if (drq->entry_count < 512) { + status = -EINVAL; + goto out; + } /* otherwise default to smallest count (drop through) */ case 512: bf_set(lpfc_rq_context_rqe_count, @@ -15855,24 +15946,18 @@ lpfc_drain_txq(struct lpfc_hba *phba) spin_lock_irqsave(&phba->hbalock, iflags); piocbq = lpfc_sli_ringtx_get(phba, pring); + if (!piocbq) { + spin_unlock_irqrestore(&phba->hbalock, iflags); + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "2823 txq empty and txq_cnt is %d\n ", + pring->txq_cnt); + break; + } sglq = __lpfc_sli_get_sglq(phba, piocbq); if (!sglq) { __lpfc_sli_ringtx_put(phba, pring, piocbq); spin_unlock_irqrestore(&phba->hbalock, iflags); break; - } else { - if (!piocbq) { - /* The txq_cnt out of sync. This should - * never happen - */ - sglq = __lpfc_clear_active_sglq(phba, - sglq->sli4_lxritag); - spin_unlock_irqrestore(&phba->hbalock, iflags); - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "2823 txq empty and txq_cnt is %d\n ", - pring->txq_cnt); - break; - } } /* The xri and iocb resources secured, diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h index a4a77080091..ec756118c5c 100644 --- a/drivers/scsi/lpfc/lpfc_sli4.h +++ b/drivers/scsi/lpfc/lpfc_sli4.h @@ -598,6 +598,7 @@ struct lpfc_queue *lpfc_sli4_queue_alloc(struct lpfc_hba *, uint32_t, uint32_t); void lpfc_sli4_queue_free(struct lpfc_queue *); uint32_t lpfc_eq_create(struct lpfc_hba *, struct lpfc_queue *, uint16_t); +uint32_t lpfc_modify_fcp_eq_delay(struct lpfc_hba *, uint16_t); uint32_t lpfc_cq_create(struct lpfc_hba *, struct lpfc_queue *, struct lpfc_queue *, uint32_t, uint32_t); int32_t lpfc_mq_create(struct lpfc_hba *, struct lpfc_queue *, diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h index 59c57a40998..4704e5b5088 100644 --- a/drivers/scsi/lpfc/lpfc_version.h +++ b/drivers/scsi/lpfc/lpfc_version.h @@ -18,7 +18,7 @@ * included with this package. 
* *******************************************************************/ -#define LPFC_DRIVER_VERSION "8.3.31" +#define LPFC_DRIVER_VERSION "8.3.32" #define LPFC_DRIVER_NAME "lpfc" #define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp" #define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp" diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c index 4d39a9ffc08..97825f11695 100644 --- a/drivers/scsi/megaraid.c +++ b/drivers/scsi/megaraid.c @@ -524,7 +524,7 @@ mega_build_cmd(adapter_t *adapter, Scsi_Cmnd *cmd, int *busy) mega_passthru *pthru; scb_t *scb; mbox_t *mbox; - long seg; + u32 seg; char islogical; int max_ldrv_num; int channel = 0; @@ -858,7 +858,7 @@ mega_build_cmd(adapter_t *adapter, Scsi_Cmnd *cmd, int *busy) /* Calculate Scatter-Gather info */ mbox->m_out.numsgelements = mega_build_sglist(adapter, scb, - (u32 *)&mbox->m_out.xferaddr, (u32 *)&seg); + (u32 *)&mbox->m_out.xferaddr, &seg); return scb; diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c index 35bd13879fe..54b1c5bb310 100644 --- a/drivers/scsi/megaraid/megaraid_mbox.c +++ b/drivers/scsi/megaraid/megaraid_mbox.c @@ -2731,7 +2731,7 @@ megaraid_reset_handler(struct scsi_cmnd *scp) } out: - spin_unlock_irq(&adapter->lock); + spin_unlock(&adapter->lock); return rval; } diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h index b6dd3a5de7f..b3a1a30055d 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.h +++ b/drivers/scsi/mpt2sas/mpt2sas_base.h @@ -1158,6 +1158,7 @@ extern struct scsi_transport_template *mpt2sas_transport_template; extern int scsi_internal_device_block(struct scsi_device *sdev); extern u8 mpt2sas_stm_zero_smid_handler(struct MPT2SAS_ADAPTER *ioc, u8 msix_index, u32 reply); -extern int scsi_internal_device_unblock(struct scsi_device *sdev); +extern int scsi_internal_device_unblock(struct scsi_device *sdev, + enum scsi_device_state new_state); #endif /* MPT2SAS_BASE_H_INCLUDED */ diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c index 76973e8ca4b..b1ebd6f8dab 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c +++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c @@ -2904,7 +2904,7 @@ _scsih_ublock_io_all_device(struct MPT2SAS_ADAPTER *ioc) dewtprintk(ioc, sdev_printk(KERN_INFO, sdev, "device_running, " "handle(0x%04x)\n", sas_device_priv_data->sas_target->handle)); - scsi_internal_device_unblock(sdev); + scsi_internal_device_unblock(sdev, SDEV_RUNNING); } } /** @@ -2933,7 +2933,7 @@ _scsih_ublock_io_device(struct MPT2SAS_ADAPTER *ioc, u64 sas_address) "sas address(0x%016llx)\n", ioc->name, (unsigned long long)sas_address)); sas_device_priv_data->block = 0; - scsi_internal_device_unblock(sdev); + scsi_internal_device_unblock(sdev, SDEV_RUNNING); } } } diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c index fd3b2839843..4539d59a085 100644 --- a/drivers/scsi/mvsas/mv_sas.c +++ b/drivers/scsi/mvsas/mv_sas.c @@ -885,7 +885,6 @@ static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags, struct completion *completion, int is_tmf, struct mvs_tmf_task *tmf) { - struct domain_device *dev = task->dev; struct mvs_info *mvi = NULL; u32 rc = 0; u32 pass = 0; @@ -1365,9 +1364,9 @@ void mvs_dev_gone(struct domain_device *dev) static void mvs_task_done(struct sas_task *task) { - if (!del_timer(&task->timer)) + if (!del_timer(&task->slow_task->timer)) return; - complete(&task->completion); + complete(&task->slow_task->completion); } static void mvs_tmf_timedout(unsigned long data) @@ -1375,7 +1374,7 
@@ static void mvs_tmf_timedout(unsigned long data) struct sas_task *task = (struct sas_task *)data; task->task_state_flags |= SAS_TASK_STATE_ABORTED; - complete(&task->completion); + complete(&task->slow_task->completion); } #define MVS_TASK_TIMEOUT 20 @@ -1386,7 +1385,7 @@ static int mvs_exec_internal_tmf_task(struct domain_device *dev, struct sas_task *task = NULL; for (retry = 0; retry < 3; retry++) { - task = sas_alloc_task(GFP_KERNEL); + task = sas_alloc_slow_task(GFP_KERNEL); if (!task) return -ENOMEM; @@ -1396,20 +1395,20 @@ static int mvs_exec_internal_tmf_task(struct domain_device *dev, memcpy(&task->ssp_task, parameter, para_len); task->task_done = mvs_task_done; - task->timer.data = (unsigned long) task; - task->timer.function = mvs_tmf_timedout; - task->timer.expires = jiffies + MVS_TASK_TIMEOUT*HZ; - add_timer(&task->timer); + task->slow_task->timer.data = (unsigned long) task; + task->slow_task->timer.function = mvs_tmf_timedout; + task->slow_task->timer.expires = jiffies + MVS_TASK_TIMEOUT*HZ; + add_timer(&task->slow_task->timer); res = mvs_task_exec(task, 1, GFP_KERNEL, NULL, 1, tmf); if (res) { - del_timer(&task->timer); + del_timer(&task->slow_task->timer); mv_printk("executing internal task failed:%d\n", res); goto ex_err; } - wait_for_completion(&task->completion); + wait_for_completion(&task->slow_task->completion); res = TMF_RESP_FUNC_FAILED; /* Even TMF timed out, return direct. */ if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) { diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c index 3b11edd4a50..b961112395d 100644 --- a/drivers/scsi/pm8001/pm8001_sas.c +++ b/drivers/scsi/pm8001/pm8001_sas.c @@ -650,9 +650,9 @@ int pm8001_dev_found(struct domain_device *dev) static void pm8001_task_done(struct sas_task *task) { - if (!del_timer(&task->timer)) + if (!del_timer(&task->slow_task->timer)) return; - complete(&task->completion); + complete(&task->slow_task->completion); } static void pm8001_tmf_timedout(unsigned long data) @@ -660,7 +660,7 @@ static void pm8001_tmf_timedout(unsigned long data) struct sas_task *task = (struct sas_task *)data; task->task_state_flags |= SAS_TASK_STATE_ABORTED; - complete(&task->completion); + complete(&task->slow_task->completion); } #define PM8001_TASK_TIMEOUT 20 @@ -683,7 +683,7 @@ static int pm8001_exec_internal_tmf_task(struct domain_device *dev, struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev); for (retry = 0; retry < 3; retry++) { - task = sas_alloc_task(GFP_KERNEL); + task = sas_alloc_slow_task(GFP_KERNEL); if (!task) return -ENOMEM; @@ -691,21 +691,21 @@ static int pm8001_exec_internal_tmf_task(struct domain_device *dev, task->task_proto = dev->tproto; memcpy(&task->ssp_task, parameter, para_len); task->task_done = pm8001_task_done; - task->timer.data = (unsigned long)task; - task->timer.function = pm8001_tmf_timedout; - task->timer.expires = jiffies + PM8001_TASK_TIMEOUT*HZ; - add_timer(&task->timer); + task->slow_task->timer.data = (unsigned long)task; + task->slow_task->timer.function = pm8001_tmf_timedout; + task->slow_task->timer.expires = jiffies + PM8001_TASK_TIMEOUT*HZ; + add_timer(&task->slow_task->timer); res = pm8001_task_exec(task, 1, GFP_KERNEL, 1, tmf); if (res) { - del_timer(&task->timer); + del_timer(&task->slow_task->timer); PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("Executing internal task " "failed\n")); goto ex_err; } - wait_for_completion(&task->completion); + wait_for_completion(&task->slow_task->completion); res = -TMF_RESP_FUNC_FAILED; /* Even TMF timed out, 
return direct. */ if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) { @@ -765,17 +765,17 @@ pm8001_exec_internal_task_abort(struct pm8001_hba_info *pm8001_ha, struct sas_task *task = NULL; for (retry = 0; retry < 3; retry++) { - task = sas_alloc_task(GFP_KERNEL); + task = sas_alloc_slow_task(GFP_KERNEL); if (!task) return -ENOMEM; task->dev = dev; task->task_proto = dev->tproto; task->task_done = pm8001_task_done; - task->timer.data = (unsigned long)task; - task->timer.function = pm8001_tmf_timedout; - task->timer.expires = jiffies + PM8001_TASK_TIMEOUT * HZ; - add_timer(&task->timer); + task->slow_task->timer.data = (unsigned long)task; + task->slow_task->timer.function = pm8001_tmf_timedout; + task->slow_task->timer.expires = jiffies + PM8001_TASK_TIMEOUT * HZ; + add_timer(&task->slow_task->timer); res = pm8001_tag_alloc(pm8001_ha, &ccb_tag); if (res) @@ -789,13 +789,13 @@ pm8001_exec_internal_task_abort(struct pm8001_hba_info *pm8001_ha, pm8001_dev, flag, task_tag, ccb_tag); if (res) { - del_timer(&task->timer); + del_timer(&task->slow_task->timer); PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("Executing internal task " "failed\n")); goto ex_err; } - wait_for_completion(&task->completion); + wait_for_completion(&task->slow_task->completion); res = TMF_RESP_FUNC_FAILED; /* Even TMF timed out, return direct. */ if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) { @@ -962,8 +962,9 @@ int pm8001_I_T_nexus_reset(struct domain_device *dev) struct pm8001_device *pm8001_dev; struct pm8001_hba_info *pm8001_ha; struct sas_phy *phy; + if (!dev || !dev->lldd_dev) - return -1; + return -ENODEV; pm8001_dev = dev->lldd_dev; pm8001_ha = pm8001_find_ha_by_dev(dev); diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index ca508474313..a44653b4216 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -685,7 +685,7 @@ qla24xx_pci_config(scsi_qla_host_t *vha) pcix_set_mmrbc(ha->pdev, 2048); /* PCIe -- adjust Maximum Read Request Size (2048). */ - if (pci_find_capability(ha->pdev, PCI_CAP_ID_EXP)) + if (pci_is_pcie(ha->pdev)) pcie_set_readrq(ha->pdev, 2048); pci_disable_rom(ha->pdev); @@ -721,7 +721,7 @@ qla25xx_pci_config(scsi_qla_host_t *vha) pci_write_config_word(ha->pdev, PCI_COMMAND, w); /* PCIe -- adjust Maximum Read Request Size (2048). 
*/ - if (pci_find_capability(ha->pdev, PCI_CAP_ID_EXP)) + if (pci_is_pcie(ha->pdev)) pcie_set_readrq(ha->pdev, 2048); pci_disable_rom(ha->pdev); diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c index caf627ba7fa..9ce3a8f8754 100644 --- a/drivers/scsi/qla2xxx/qla_nx.c +++ b/drivers/scsi/qla2xxx/qla_nx.c @@ -1620,7 +1620,7 @@ qla82xx_pci_info_str(struct scsi_qla_host *vha, char *str) char lwstr[6]; uint16_t lnk; - pcie_reg = pci_find_capability(ha->pdev, PCI_CAP_ID_EXP); + pcie_reg = pci_pcie_cap(ha->pdev); pci_read_config_word(ha->pdev, pcie_reg + PCI_EXP_LNKSTA, &lnk); ha->link_width = (lnk >> 4) & 0x3f; @@ -2528,7 +2528,7 @@ qla82xx_start_firmware(scsi_qla_host_t *vha) } /* Negotiated Link width */ - pcie_cap = pci_find_capability(ha->pdev, PCI_CAP_ID_EXP); + pcie_cap = pci_pcie_cap(ha->pdev); pci_read_config_word(ha->pdev, pcie_cap + PCI_EXP_LNKSTA, &lnk); ha->link_width = (lnk >> 4) & 0x3f; diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 6d1d873a20e..fb8cd3847d4 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -482,12 +482,12 @@ qla24xx_pci_info_str(struct scsi_qla_host *vha, char *str) uint32_t pci_bus; int pcie_reg; - pcie_reg = pci_find_capability(ha->pdev, PCI_CAP_ID_EXP); + pcie_reg = pci_pcie_cap(ha->pdev); if (pcie_reg) { char lwstr[6]; uint16_t pcie_lstat, lspeed, lwidth; - pcie_reg += 0x12; + pcie_reg += PCI_EXP_LNKCAP; pci_read_config_word(ha->pdev, pcie_reg, &pcie_lstat); lspeed = pcie_lstat & (BIT_0 | BIT_1 | BIT_2 | BIT_3); lwidth = (pcie_lstat & diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index 6986552b47e..5b30132960c 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -2643,19 +2643,9 @@ static void qlt_do_work(struct work_struct *work) spin_lock_irqsave(&ha->hardware_lock, flags); sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, atio->u.isp24.fcp_hdr.s_id); - if (sess) { - if (unlikely(sess->tearing_down)) { - sess = NULL; - spin_unlock_irqrestore(&ha->hardware_lock, flags); - goto out_term; - } else { - /* - * Do the extra kref_get() before dropping - * qla_hw_data->hardware_lock. - */ - kref_get(&sess->se_sess->sess_kref); - } - } + /* Do kref_get() before dropping qla_hw_data->hardware_lock. 
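These qla2xxx hunks (and the qla4xxx one further down) swap open-coded pci_find_capability(pdev, PCI_CAP_ID_EXP) lookups for the cached helpers pci_is_pcie() and pci_pcie_cap(), and replace raw config-space offsets with named PCI_EXP_* constants. A sketch of the idiom, mirroring the link-width extraction these drivers perform:

#include <linux/pci.h>

/* Read the negotiated PCIe link width via the capability offset
 * cached at enumeration time, instead of re-walking the list. */
static int my_link_width(struct pci_dev *pdev)
{
        int pos;
        u16 lnksta;

        if (!pci_is_pcie(pdev))
                return -ENODEV;

        pos = pci_pcie_cap(pdev);
        pci_read_config_word(pdev, pos + PCI_EXP_LNKSTA, &lnksta);
        return (lnksta >> 4) & 0x3f;    /* same extraction as the drivers */
}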
*/ + if (sess) + kref_get(&sess->se_sess->sess_kref); spin_unlock_irqrestore(&ha->hardware_lock, flags); if (unlikely(!sess)) { @@ -3960,7 +3950,7 @@ void qlt_async_event(uint16_t code, struct scsi_qla_host *vha, { struct qla_hw_data *ha = vha->hw; struct qla_tgt *tgt = ha->tgt.qla_tgt; - int reason_code; + int login_code; ql_dbg(ql_dbg_tgt, vha, 0xe039, "scsi(%ld): ha state %d init_done %d oper_mode %d topo %d\n", @@ -4003,9 +3993,9 @@ void qlt_async_event(uint16_t code, struct scsi_qla_host *vha, { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03b, "qla_target(%d): Async LOOP_UP occured " - "(m[1]=%x, m[2]=%x, m[3]=%x, m[4]=%x)", vha->vp_idx, - le16_to_cpu(mailbox[1]), le16_to_cpu(mailbox[2]), - le16_to_cpu(mailbox[3]), le16_to_cpu(mailbox[4])); + "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx, + le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]), + le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3])); if (tgt->link_reinit_iocb_pending) { qlt_send_notify_ack(vha, (void *)&tgt->link_reinit_iocb, 0, 0, 0, 0, 0, 0); @@ -4020,23 +4010,24 @@ void qlt_async_event(uint16_t code, struct scsi_qla_host *vha, case MBA_RSCN_UPDATE: ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03c, "qla_target(%d): Async event %#x occured " - "(m[1]=%x, m[2]=%x, m[3]=%x, m[4]=%x)", vha->vp_idx, code, - le16_to_cpu(mailbox[1]), le16_to_cpu(mailbox[2]), - le16_to_cpu(mailbox[3]), le16_to_cpu(mailbox[4])); + "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx, code, + le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]), + le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3])); break; case MBA_PORT_UPDATE: ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03d, "qla_target(%d): Port update async event %#x " - "occured: updating the ports database (m[1]=%x, m[2]=%x, " - "m[3]=%x, m[4]=%x)", vha->vp_idx, code, - le16_to_cpu(mailbox[1]), le16_to_cpu(mailbox[2]), - le16_to_cpu(mailbox[3]), le16_to_cpu(mailbox[4])); - reason_code = le16_to_cpu(mailbox[2]); - if (reason_code == 0x4) + "occured: updating the ports database (m[0]=%x, m[1]=%x, " + "m[2]=%x, m[3]=%x)", vha->vp_idx, code, + le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]), + le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3])); + + login_code = le16_to_cpu(mailbox[2]); + if (login_code == 0x4) ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03e, "Async MB 2: Got PLOGI Complete\n"); - else if (reason_code == 0x7) + else if (login_code == 0x7) ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03f, "Async MB 2: Port Logged Out\n"); break; @@ -4044,9 +4035,9 @@ void qlt_async_event(uint16_t code, struct scsi_qla_host *vha, default: ql_dbg(ql_dbg_tgt_mgt, vha, 0xf040, "qla_target(%d): Async event %#x occured: " - "ignore (m[1]=%x, m[2]=%x, m[3]=%x, m[4]=%x)", vha->vp_idx, - code, le16_to_cpu(mailbox[1]), le16_to_cpu(mailbox[2]), - le16_to_cpu(mailbox[3]), le16_to_cpu(mailbox[4])); + "ignore (m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx, + code, le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]), + le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3])); break; } diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h index 9f9ef1644fd..170af157121 100644 --- a/drivers/scsi/qla2xxx/qla_target.h +++ b/drivers/scsi/qla2xxx/qla_target.h @@ -639,7 +639,7 @@ struct qla_tgt_func_tmpl { int (*handle_cmd)(struct scsi_qla_host *, struct qla_tgt_cmd *, unsigned char *, uint32_t, int, int, int); - int (*handle_data)(struct qla_tgt_cmd *); + void (*handle_data)(struct qla_tgt_cmd *); int (*handle_tmr)(struct qla_tgt_mgmt_cmd *, uint32_t, uint8_t, uint32_t); void (*free_cmd)(struct qla_tgt_cmd *); @@ -813,7 +813,6 @@ struct qla_tgt_sess { unsigned int 
conf_compl_supported:1; unsigned int deleted:1; unsigned int local:1; - unsigned int tearing_down:1; struct se_session *se_sess; struct scsi_qla_host *vha; diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c index 6e64314dbbb..4752f65a927 100644 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c @@ -38,8 +38,6 @@ #include <linux/string.h> #include <linux/configfs.h> #include <linux/ctype.h> -#include <linux/string.h> -#include <linux/ctype.h> #include <asm/unaligned.h> #include <scsi/scsi.h> #include <scsi/scsi_host.h> @@ -466,8 +464,7 @@ static int tcm_qla2xxx_shutdown_session(struct se_session *se_sess) vha = sess->vha; spin_lock_irqsave(&vha->hw->hardware_lock, flags); - sess->tearing_down = 1; - target_splice_sess_cmd_list(se_sess); + target_sess_cmd_list_set_waiting(se_sess); spin_unlock_irqrestore(&vha->hw->hardware_lock, flags); return 1; @@ -600,28 +597,15 @@ static int tcm_qla2xxx_handle_cmd(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd, return -EINVAL; } - target_submit_cmd(se_cmd, se_sess, cdb, &cmd->sense_buffer[0], + return target_submit_cmd(se_cmd, se_sess, cdb, &cmd->sense_buffer[0], cmd->unpacked_lun, data_length, fcp_task_attr, data_dir, flags); - return 0; } -static void tcm_qla2xxx_do_rsp(struct work_struct *work) +static void tcm_qla2xxx_handle_data_work(struct work_struct *work) { struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work); - /* - * Dispatch ->queue_status from workqueue process context - */ - transport_generic_request_failure(&cmd->se_cmd); -} -/* - * Called from qla_target.c:qlt_do_ctio_completion() - */ -static int tcm_qla2xxx_handle_data(struct qla_tgt_cmd *cmd) -{ - struct se_cmd *se_cmd = &cmd->se_cmd; - unsigned long flags; /* * Ensure that the complete FCP WRITE payload has been received. * Otherwise return an exception via CHECK_CONDITION status. @@ -631,24 +615,26 @@ static int tcm_qla2xxx_handle_data(struct qla_tgt_cmd *cmd) * Check if se_cmd has already been aborted via LUN_RESET, and * waiting upon completion in tcm_qla2xxx_write_pending_status() */ - spin_lock_irqsave(&se_cmd->t_state_lock, flags); - if (se_cmd->transport_state & CMD_T_ABORTED) { - spin_unlock_irqrestore(&se_cmd->t_state_lock, flags); - complete(&se_cmd->t_transport_stop_comp); - return 0; + if (cmd->se_cmd.transport_state & CMD_T_ABORTED) { + complete(&cmd->se_cmd.t_transport_stop_comp); + return; } - spin_unlock_irqrestore(&se_cmd->t_state_lock, flags); - se_cmd->scsi_sense_reason = TCM_CHECK_CONDITION_ABORT_CMD; - INIT_WORK(&cmd->work, tcm_qla2xxx_do_rsp); - queue_work(tcm_qla2xxx_free_wq, &cmd->work); - return 0; + cmd->se_cmd.scsi_sense_reason = TCM_CHECK_CONDITION_ABORT_CMD; + transport_generic_request_failure(&cmd->se_cmd); + return; } - /* - * We now tell TCM to queue this WRITE CDB with TRANSPORT_PROCESS_WRITE - * status to the backstore processing thread. 
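The tcm_qla2xxx rework above pushes CTIO data-in completion out of the interrupt path: tcm_qla2xxx_handle_data() now only queues work, and the abort check plus target_execute_cmd() run from process context. The general defer-to-workqueue shape looks like this (names hypothetical):

#include <linux/workqueue.h>

struct my_cmd {
        struct work_struct work;
        /* ... per-command state ... */
};

static void my_cmd_work(struct work_struct *work)
{
        struct my_cmd *cmd = container_of(work, struct my_cmd, work);

        /* Process context: safe to sleep, take mutexes, or fail the
         * command with sense data. */
        (void)cmd;
}

/* Called from the hardware completion path; must not block. */
static void my_handle_data(struct my_cmd *cmd)
{
        INIT_WORK(&cmd->work, my_cmd_work);
        schedule_work(&cmd->work);
}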
- */ - return transport_generic_handle_data(&cmd->se_cmd); + + return target_execute_cmd(&cmd->se_cmd); +} + +/* + * Called from qla_target.c:qlt_do_ctio_completion() + */ +static void tcm_qla2xxx_handle_data(struct qla_tgt_cmd *cmd) +{ + INIT_WORK(&cmd->work, tcm_qla2xxx_handle_data_work); + queue_work(tcm_qla2xxx_free_wq, &cmd->work); } /* @@ -1690,7 +1676,6 @@ static struct target_core_fabric_ops tcm_qla2xxx_ops = { .tpg_alloc_fabric_acl = tcm_qla2xxx_alloc_fabric_acl, .tpg_release_fabric_acl = tcm_qla2xxx_release_fabric_acl, .tpg_get_inst_index = tcm_qla2xxx_tpg_get_inst_index, - .new_cmd_map = NULL, .check_stop_free = tcm_qla2xxx_check_stop_free, .release_cmd = tcm_qla2xxx_release_cmd, .put_session = tcm_qla2xxx_put_session, diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h index 96a5616a8fd..7fdba7f1ffb 100644 --- a/drivers/scsi/qla4xxx/ql4_def.h +++ b/drivers/scsi/qla4xxx/ql4_def.h @@ -279,6 +279,7 @@ struct qla_ddb_index { struct list_head list; uint16_t fw_ddb_idx; struct dev_db_entry fw_ddb; + uint8_t flash_isid[6]; }; #define DDB_IPADDR_LEN 64 diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h index 20b49d01904..5b2525c4139 100644 --- a/drivers/scsi/qla4xxx/ql4_glbl.h +++ b/drivers/scsi/qla4xxx/ql4_glbl.h @@ -183,7 +183,8 @@ int qla4xxx_flash_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index, int qla4xxx_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index, struct ddb_entry *ddb_entry, uint32_t state); void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset); -int qla4xxx_post_aen_work(struct scsi_qla_host *ha, uint32_t aen_code, +int qla4xxx_post_aen_work(struct scsi_qla_host *ha, + enum iscsi_host_event_code aen_code, uint32_t data_size, uint8_t *data); int qla4xxx_ping_iocb(struct scsi_qla_host *ha, uint32_t options, uint32_t payload_size, uint32_t pid, uint8_t *ipaddr); diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c index bf36723b84e..ddd9472066c 100644 --- a/drivers/scsi/qla4xxx/ql4_init.c +++ b/drivers/scsi/qla4xxx/ql4_init.c @@ -126,7 +126,7 @@ int qla4xxx_init_rings(struct scsi_qla_host *ha) qla4xxx_init_response_q_entries(ha); - /* Initialize mabilbox active array */ + /* Initialize mailbox active array */ for (i = 0; i < MAX_MRB; i++) ha->active_mrb_array[i] = NULL; diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c index 228b67020d2..939d7261c37 100644 --- a/drivers/scsi/qla4xxx/ql4_nx.c +++ b/drivers/scsi/qla4xxx/ql4_nx.c @@ -1590,7 +1590,7 @@ qla4_8xxx_start_firmware(struct scsi_qla_host *ha, uint32_t image_start) } /* Negotiated Link width */ - pcie_cap = pci_find_capability(ha->pdev, PCI_CAP_ID_EXP); + pcie_cap = pci_pcie_cap(ha->pdev); pci_read_config_word(ha->pdev, pcie_cap + PCI_EXP_LNKSTA, &lnk); ha->link_width = (lnk >> 4) & 0x3f; diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c index cd15678f9ad..9da426628b9 100644 --- a/drivers/scsi/qla4xxx/ql4_os.c +++ b/drivers/scsi/qla4xxx/ql4_os.c @@ -4299,7 +4299,8 @@ static void qla4xxx_get_param_ddb(struct ddb_entry *ddb_entry, } static void qla4xxx_convert_param_ddb(struct dev_db_entry *fw_ddb_entry, - struct ql4_tuple_ddb *tddb) + struct ql4_tuple_ddb *tddb, + uint8_t *flash_isid) { uint16_t options = 0; @@ -4314,7 +4315,12 @@ static void qla4xxx_convert_param_ddb(struct dev_db_entry *fw_ddb_entry, sprintf(tddb->ip_addr, "%pI4", fw_ddb_entry->ip_addr); tddb->port = le16_to_cpu(fw_ddb_entry->port); - memcpy(&tddb->isid[0], &fw_ddb_entry->isid[0], 
sizeof(tddb->isid)); + + if (flash_isid == NULL) + memcpy(&tddb->isid[0], &fw_ddb_entry->isid[0], + sizeof(tddb->isid)); + else + memcpy(&tddb->isid[0], &flash_isid[0], sizeof(tddb->isid)); } static int qla4xxx_compare_tuple_ddb(struct scsi_qla_host *ha, @@ -4385,7 +4391,7 @@ static int qla4xxx_is_session_exists(struct scsi_qla_host *ha, goto exit_check; } - qla4xxx_convert_param_ddb(fw_ddb_entry, fw_tddb); + qla4xxx_convert_param_ddb(fw_ddb_entry, fw_tddb, NULL); for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) { ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx); @@ -4407,6 +4413,102 @@ exit_check: return ret; } +/** + * qla4xxx_check_existing_isid - check if a target with the same isid exists + * in the target list + * @list_nt: list of targets + * @isid: isid to check + * + * This routine returns QLA_SUCCESS if a target with the same isid exists + **/ +static int qla4xxx_check_existing_isid(struct list_head *list_nt, uint8_t *isid) +{ + struct qla_ddb_index *nt_ddb_idx, *nt_ddb_idx_tmp; + struct dev_db_entry *fw_ddb_entry; + + list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) { + fw_ddb_entry = &nt_ddb_idx->fw_ddb; + + if (memcmp(&fw_ddb_entry->isid[0], &isid[0], + sizeof(nt_ddb_idx->fw_ddb.isid)) == 0) { + return QLA_SUCCESS; + } + } + return QLA_ERROR; +} + +/** + * qla4xxx_update_isid - compare ddbs and update isid + * @ha: Pointer to host adapter structure. + * @list_nt: list of nt targets + * @fw_ddb_entry: firmware ddb entry + * + * This routine updates the isid if the ddbs have the same iqn, same isid and + * different IP addr. + * Return QLA_SUCCESS if the isid is updated. + **/ +static int qla4xxx_update_isid(struct scsi_qla_host *ha, + struct list_head *list_nt, + struct dev_db_entry *fw_ddb_entry) +{ + uint8_t base_value, i; + + base_value = fw_ddb_entry->isid[1] & 0x1f; + for (i = 0; i < 8; i++) { + fw_ddb_entry->isid[1] = (base_value | (i << 5)); + if (qla4xxx_check_existing_isid(list_nt, fw_ddb_entry->isid)) + break; + } + + if (!qla4xxx_check_existing_isid(list_nt, fw_ddb_entry->isid)) + return QLA_ERROR; + + return QLA_SUCCESS; +} + +/** + * qla4xxx_should_update_isid - check if the isid needs to be updated + * @ha: Pointer to host adapter structure. + * @old_tddb: ddb tuple + * @new_tddb: ddb tuple + * + * Return QLA_SUCCESS if different IP, different PORT, same iqn, + * same isid + **/ +static int qla4xxx_should_update_isid(struct scsi_qla_host *ha, + struct ql4_tuple_ddb *old_tddb, + struct ql4_tuple_ddb *new_tddb) +{ + if (strcmp(old_tddb->ip_addr, new_tddb->ip_addr) == 0) { + /* Same ip */ + if (old_tddb->port == new_tddb->port) + return QLA_ERROR; + } + + if (strcmp(old_tddb->iscsi_name, new_tddb->iscsi_name)) + /* different iqn */ + return QLA_ERROR; + + if (memcmp(&old_tddb->isid[0], &new_tddb->isid[0], + sizeof(old_tddb->isid))) + /* different isid */ + return QLA_ERROR; + + return QLA_SUCCESS; +} + +/** + * qla4xxx_is_flash_ddb_exists - check if fw_ddb_entry already exists in list_nt + * @ha: Pointer to host adapter structure. + * @list_nt: list of nt targets. + * @fw_ddb_entry: firmware ddb entry. + * + * This routine checks if fw_ddb_entry already exists in list_nt to avoid + * a duplicate ddb in list_nt. + * Return QLA_SUCCESS if a duplicate ddb exists in list_nt. + * Note: This function also updates the isid of the DDB if required.
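qla4xxx_update_isid() keeps the low five bits of isid[1] (the "base value") fixed and steps a three-bit counter through bits 7:5, so at most eight candidate ISIDs are tried against the existing target list. A standalone illustration of the candidates it enumerates, with a made-up starting byte:

#include <stdio.h>
#include <stdint.h>

/* Illustration only: 0x2a is a hypothetical flash isid[1] value. */
int main(void)
{
        uint8_t isid1 = 0x2a;
        uint8_t base = isid1 & 0x1f;
        int i;

        for (i = 0; i < 8; i++)
                printf("candidate %d: isid[1] = 0x%02x\n", i,
                       (unsigned int)(base | (i << 5)));
        return 0;
}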
+ **/ + static int qla4xxx_is_flash_ddb_exists(struct scsi_qla_host *ha, struct list_head *list_nt, struct dev_db_entry *fw_ddb_entry) @@ -4414,7 +4516,7 @@ static int qla4xxx_is_flash_ddb_exists(struct scsi_qla_host *ha, struct qla_ddb_index *nt_ddb_idx, *nt_ddb_idx_tmp; struct ql4_tuple_ddb *fw_tddb = NULL; struct ql4_tuple_ddb *tmp_tddb = NULL; - int ret = QLA_ERROR; + int rval, ret = QLA_ERROR; fw_tddb = vzalloc(sizeof(*fw_tddb)); if (!fw_tddb) { @@ -4432,12 +4534,28 @@ static int qla4xxx_is_flash_ddb_exists(struct scsi_qla_host *ha, goto exit_check; } - qla4xxx_convert_param_ddb(fw_ddb_entry, fw_tddb); + qla4xxx_convert_param_ddb(fw_ddb_entry, fw_tddb, NULL); list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) { - qla4xxx_convert_param_ddb(&nt_ddb_idx->fw_ddb, tmp_tddb); - if (!qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb, true)) { - ret = QLA_SUCCESS; /* found */ + qla4xxx_convert_param_ddb(&nt_ddb_idx->fw_ddb, tmp_tddb, + nt_ddb_idx->flash_isid); + ret = qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb, true); + /* found duplicate ddb */ + if (ret == QLA_SUCCESS) + goto exit_check; + } + + list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) { + qla4xxx_convert_param_ddb(&nt_ddb_idx->fw_ddb, tmp_tddb, NULL); + + ret = qla4xxx_should_update_isid(ha, tmp_tddb, fw_tddb); + if (ret == QLA_SUCCESS) { + rval = qla4xxx_update_isid(ha, list_nt, fw_ddb_entry); + if (rval == QLA_SUCCESS) + ret = QLA_ERROR; + else + ret = QLA_SUCCESS; + goto exit_check; } } @@ -4788,14 +4906,26 @@ static void qla4xxx_build_nt_list(struct scsi_qla_host *ha, nt_ddb_idx->fw_ddb_idx = idx; - memcpy(&nt_ddb_idx->fw_ddb, fw_ddb_entry, - sizeof(struct dev_db_entry)); - - if (qla4xxx_is_flash_ddb_exists(ha, list_nt, - fw_ddb_entry) == QLA_SUCCESS) { + /* Copy original isid as it may get updated in function + * qla4xxx_update_isid(). We need original isid in + * function qla4xxx_compare_tuple_ddb to find duplicate + * target */ + memcpy(&nt_ddb_idx->flash_isid[0], + &fw_ddb_entry->isid[0], + sizeof(nt_ddb_idx->flash_isid)); + + ret = qla4xxx_is_flash_ddb_exists(ha, list_nt, + fw_ddb_entry); + if (ret == QLA_SUCCESS) { + /* free nt_ddb_idx and do not add to list_nt */ vfree(nt_ddb_idx); goto continue_next_nt; } + + /* Copy updated isid */ + memcpy(&nt_ddb_idx->fw_ddb, fw_ddb_entry, + sizeof(struct dev_db_entry)); + list_add_tail(&nt_ddb_idx->list, list_nt); } else if (is_reset == RESET_ADAPTER) { if (qla4xxx_is_session_exists(ha, fw_ddb_entry) == diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h index cc1cc3518b8..725034f4252 100644 --- a/drivers/scsi/qla4xxx/ql4_version.h +++ b/drivers/scsi/qla4xxx/ql4_version.h @@ -5,4 +5,4 @@ * See LICENSE.qla4xxx for copyright and licensing details. */ -#define QLA4XXX_DRIVER_VERSION "5.02.00-k17" +#define QLA4XXX_DRIVER_VERSION "5.02.00-k18" diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c index bbbc9c918d4..2936b447cae 100644 --- a/drivers/scsi/scsi.c +++ b/drivers/scsi/scsi.c @@ -54,6 +54,7 @@ #include <linux/notifier.h> #include <linux/cpu.h> #include <linux/mutex.h> +#include <linux/async.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> @@ -91,7 +92,7 @@ EXPORT_SYMBOL(scsi_logging_level); #endif /* sd, scsi core and power management need to coordinate flushing async actions */ -LIST_HEAD(scsi_sd_probe_domain); +ASYNC_DOMAIN(scsi_sd_probe_domain); EXPORT_SYMBOL(scsi_sd_probe_domain); /* NB: These are exposed through /proc/scsi/scsi and form part of the ABI. 
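In scsi.c, scsi_sd_probe_domain changes from a bare LIST_HEAD into a real async domain, which is also why exit_scsi() gains the async_unregister_domain() call. A sketch of the declare/schedule/synchronize/unregister lifecycle, assuming a hypothetical probe routine:

#include <linux/async.h>

static ASYNC_DOMAIN(my_probe_domain);

static void my_probe(void *data, async_cookie_t cookie)
{
        /* ... slow probe work runs here, off the caller's context ... */
}

static void my_start_probe(void *dev)
{
        async_schedule_domain(my_probe, dev, &my_probe_domain);
}

static void my_flush_probes(void)
{
        /* Waits only for work in this domain, not all async work. */
        async_synchronize_full_domain(&my_probe_domain);
}

static void my_exit(void)
{
        async_unregister_domain(&my_probe_domain);
}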
@@ -1354,6 +1355,7 @@ static void __exit exit_scsi(void) scsi_exit_devinfo(); scsi_exit_procfs(); scsi_exit_queue(); + async_unregister_domain(&scsi_sd_probe_domain); } subsys_initcall(init_scsi); diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index d0f71e5d065..4a6381c8725 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c @@ -1687,6 +1687,20 @@ static void scsi_restart_operations(struct Scsi_Host *shost) * requests are started. */ scsi_run_host_queues(shost); + + /* + * if eh is active and host_eh_scheduled is pending we need to re-run + * recovery. we do this check after scsi_run_host_queues() to allow + * everything pent up since the last eh run a chance to make forward + * progress before we sync again. Either we'll immediately re-run + * recovery or scsi_device_unbusy() will wake us again when these + * pending commands complete. + */ + spin_lock_irqsave(shost->host_lock, flags); + if (shost->host_eh_scheduled) + if (scsi_host_set_state(shost, SHOST_RECOVERY)) + WARN_ON(scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY)); + spin_unlock_irqrestore(shost->host_lock, flags); } /** @@ -1804,15 +1818,14 @@ int scsi_error_handler(void *data) * We never actually get interrupted because kthread_run * disables signal delivery for the created thread. */ - set_current_state(TASK_INTERRUPTIBLE); while (!kthread_should_stop()) { + set_current_state(TASK_INTERRUPTIBLE); if ((shost->host_failed == 0 && shost->host_eh_scheduled == 0) || shost->host_failed != shost->host_busy) { SCSI_LOG_ERROR_RECOVERY(1, printk("Error handler scsi_eh_%d sleeping\n", shost->host_no)); schedule(); - set_current_state(TASK_INTERRUPTIBLE); continue; } @@ -1849,7 +1862,6 @@ int scsi_error_handler(void *data) scsi_restart_operations(shost); if (!shost->eh_noresume) scsi_autopm_put_host(shost); - set_current_state(TASK_INTERRUPTIBLE); } __set_current_state(TASK_RUNNING); diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 6dfb9785d34..ffd77739ae3 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -68,6 +68,23 @@ static struct scsi_host_sg_pool scsi_sg_pools[] = { struct kmem_cache *scsi_sdb_cache; +#ifdef CONFIG_ACPI +#include <acpi/acpi_bus.h> + +int scsi_register_acpi_bus_type(struct acpi_bus_type *bus) +{ + bus->bus = &scsi_bus_type; + return register_acpi_bus_type(bus); +} +EXPORT_SYMBOL_GPL(scsi_register_acpi_bus_type); + +void scsi_unregister_acpi_bus_type(struct acpi_bus_type *bus) +{ + unregister_acpi_bus_type(bus); +} +EXPORT_SYMBOL_GPL(scsi_unregister_acpi_bus_type); +#endif + /* * When to reinvoke queueing after a resource shortage. It's 3 msecs to * not change behaviour from the previous unplug mechanism, experimentation @@ -109,7 +126,7 @@ static void scsi_unprep_request(struct request *req) * for a requeue after completion, which should only occur in this * file. */ -static int __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy) +static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy) { struct Scsi_Host *host = cmd->device->host; struct scsi_device *device = cmd->device; @@ -155,15 +172,14 @@ static int __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy) /* * Requeue this command. It will go before all other commands - * that are already in the queue. + * that are already in the queue. Schedule requeue work under + * lock such that the kblockd_schedule_work() call happens + * before blk_cleanup_queue() finishes. 
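The scsi_error_handler() hunk moves set_current_state(TASK_INTERRUPTIBLE) to the top of the loop so the task state is always set before the wake-up condition is tested, closing the classic lost-wakeup window. The canonical loop shape, with my_work_pending() standing in for the real condition:

#include <linux/kthread.h>
#include <linux/sched.h>

static bool my_work_pending(void)
{
        return false;           /* stand-in for the real wake condition */
}

static int my_thread_fn(void *data)
{
        while (!kthread_should_stop()) {
                set_current_state(TASK_INTERRUPTIBLE);
                if (!my_work_pending()) {
                        /* State was set before the check, so a wake-up
                         * racing with the test is not lost. */
                        schedule();
                        continue;
                }
                __set_current_state(TASK_RUNNING);

                /* ... handle the pending work ... */
        }
        __set_current_state(TASK_RUNNING);
        return 0;
}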
*/ spin_lock_irqsave(q->queue_lock, flags); blk_requeue_request(q, cmd->request); - spin_unlock_irqrestore(q->queue_lock, flags); - kblockd_schedule_work(q, &device->requeue_work); - - return 0; + spin_unlock_irqrestore(q->queue_lock, flags); } /* @@ -185,9 +201,9 @@ static int __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy) * Notes: This could be called either from an interrupt context or a * normal process context. */ -int scsi_queue_insert(struct scsi_cmnd *cmd, int reason) +void scsi_queue_insert(struct scsi_cmnd *cmd, int reason) { - return __scsi_queue_insert(cmd, reason, 1); + __scsi_queue_insert(cmd, reason, 1); } /** * scsi_execute - insert request and wait for the result @@ -406,10 +422,6 @@ static void scsi_run_queue(struct request_queue *q) LIST_HEAD(starved_list); unsigned long flags; - /* if the device is dead, sdev will be NULL, so no queue to run */ - if (!sdev) - return; - shost = sdev->host; if (scsi_target(sdev)->single_lun) scsi_single_lun_run(sdev); @@ -483,15 +495,26 @@ void scsi_requeue_run_queue(struct work_struct *work) */ static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd) { + struct scsi_device *sdev = cmd->device; struct request *req = cmd->request; unsigned long flags; + /* + * We need to hold a reference on the device to avoid the queue being + * killed after the unlock and before scsi_run_queue is invoked which + * may happen because scsi_unprep_request() puts the command which + * releases its reference on the device. + */ + get_device(&sdev->sdev_gendev); + spin_lock_irqsave(q->queue_lock, flags); scsi_unprep_request(req); blk_requeue_request(q, req); spin_unlock_irqrestore(q->queue_lock, flags); scsi_run_queue(q); + + put_device(&sdev->sdev_gendev); } void scsi_next_command(struct scsi_cmnd *cmd) @@ -1173,6 +1196,7 @@ int scsi_prep_state_check(struct scsi_device *sdev, struct request *req) if (unlikely(sdev->sdev_state != SDEV_RUNNING)) { switch (sdev->sdev_state) { case SDEV_OFFLINE: + case SDEV_TRANSPORT_OFFLINE: /* * If the device is offline we refuse to process any * commands. The device must be brought online @@ -1370,16 +1394,16 @@ static inline int scsi_host_queue_ready(struct request_queue *q, * may be changed after request stacking drivers call the function, * regardless of taking lock or not. * - * When scsi can't dispatch I/Os anymore and needs to kill I/Os - * (e.g. !sdev), scsi needs to return 'not busy'. - * Otherwise, request stacking drivers may hold requests forever. + * When scsi can't dispatch I/Os anymore and needs to kill I/Os scsi + * needs to return 'not busy'. Otherwise, request stacking drivers + * may hold requests forever. 
*/ static int scsi_lld_busy(struct request_queue *q) { struct scsi_device *sdev = q->queuedata; struct Scsi_Host *shost; - if (!sdev) + if (blk_queue_dead(q)) return 0; shost = sdev->host; @@ -1490,12 +1514,6 @@ static void scsi_request_fn(struct request_queue *q) struct scsi_cmnd *cmd; struct request *req; - if (!sdev) { - while ((req = blk_peek_request(q)) != NULL) - scsi_kill_request(req, q); - return; - } - if(!get_device(&sdev->sdev_gendev)) /* We must be tearing the block queue down already */ return; @@ -1697,20 +1715,6 @@ struct request_queue *scsi_alloc_queue(struct scsi_device *sdev) return q; } -void scsi_free_queue(struct request_queue *q) -{ - unsigned long flags; - - WARN_ON(q->queuedata); - - /* cause scsi_request_fn() to kill all non-finished requests */ - spin_lock_irqsave(q->queue_lock, flags); - q->request_fn(q); - spin_unlock_irqrestore(q->queue_lock, flags); - - blk_cleanup_queue(q); -} - /* * Function: scsi_block_requests() * @@ -2081,6 +2085,7 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state) switch (oldstate) { case SDEV_CREATED: case SDEV_OFFLINE: + case SDEV_TRANSPORT_OFFLINE: case SDEV_QUIESCE: case SDEV_BLOCK: break; @@ -2093,6 +2098,7 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state) switch (oldstate) { case SDEV_RUNNING: case SDEV_OFFLINE: + case SDEV_TRANSPORT_OFFLINE: break; default: goto illegal; @@ -2100,6 +2106,7 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state) break; case SDEV_OFFLINE: + case SDEV_TRANSPORT_OFFLINE: switch (oldstate) { case SDEV_CREATED: case SDEV_RUNNING: @@ -2136,6 +2143,7 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state) case SDEV_RUNNING: case SDEV_QUIESCE: case SDEV_OFFLINE: + case SDEV_TRANSPORT_OFFLINE: case SDEV_BLOCK: break; default: @@ -2148,6 +2156,7 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state) case SDEV_CREATED: case SDEV_RUNNING: case SDEV_OFFLINE: + case SDEV_TRANSPORT_OFFLINE: case SDEV_CANCEL: break; default: @@ -2405,7 +2414,6 @@ EXPORT_SYMBOL(scsi_target_resume); * (which must be a legal transition). When the device is in this * state, all commands are deferred until the scsi lld reenables * the device with scsi_device_unblock or device_block_tmo fires. - * This routine assumes the host_lock is held on entry. */ int scsi_internal_device_block(struct scsi_device *sdev) @@ -2438,6 +2446,7 @@ EXPORT_SYMBOL_GPL(scsi_internal_device_block); /** * scsi_internal_device_unblock - resume a device after a block request * @sdev: device to resume + * @new_state: state to set devices to after unblocking * * Called by scsi lld's or the midlayer to restart the device queue * for the previously suspended scsi device. Called from interrupt or @@ -2447,25 +2456,29 @@ EXPORT_SYMBOL_GPL(scsi_internal_device_block); * * Notes: * This routine transitions the device to the SDEV_RUNNING state - * (which must be a legal transition) allowing the midlayer to - * goose the queue for this device. This routine assumes the - * host_lock is held upon entry. + * or to one of the offline states (which must be a legal transition) + * allowing the midlayer to goose the queue for this device. 
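The unblock path below gains a @new_state argument, so a blocked device can be released either back to SDEV_RUNNING or straight into an offline state. A hypothetical transport callback using the new scsi_target_unblock() signature (the transport classes later in this diff do exactly this):

#include <scsi/scsi_device.h>

static void my_rport_state_change(struct device *tgt_dev, bool recovered)
{
        if (recovered)
                scsi_target_unblock(tgt_dev, SDEV_RUNNING);
        else
                /* Fail I/O fast instead of requeueing it forever. */
                scsi_target_unblock(tgt_dev, SDEV_TRANSPORT_OFFLINE);
}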
*/ int -scsi_internal_device_unblock(struct scsi_device *sdev) +scsi_internal_device_unblock(struct scsi_device *sdev, + enum scsi_device_state new_state) { struct request_queue *q = sdev->request_queue; unsigned long flags; - - /* - * Try to transition the scsi device to SDEV_RUNNING - * and goose the device queue if successful. + + /* + * Try to transition the scsi device to SDEV_RUNNING or one of the + * offlined states and goose the device queue if successful. */ if (sdev->sdev_state == SDEV_BLOCK) - sdev->sdev_state = SDEV_RUNNING; - else if (sdev->sdev_state == SDEV_CREATED_BLOCK) - sdev->sdev_state = SDEV_CREATED; - else if (sdev->sdev_state != SDEV_CANCEL && + sdev->sdev_state = new_state; + else if (sdev->sdev_state == SDEV_CREATED_BLOCK) { + if (new_state == SDEV_TRANSPORT_OFFLINE || + new_state == SDEV_OFFLINE) + sdev->sdev_state = new_state; + else + sdev->sdev_state = SDEV_CREATED; + } else if (sdev->sdev_state != SDEV_CANCEL && sdev->sdev_state != SDEV_OFFLINE) return -EINVAL; @@ -2506,26 +2519,26 @@ EXPORT_SYMBOL_GPL(scsi_target_block); static void device_unblock(struct scsi_device *sdev, void *data) { - scsi_internal_device_unblock(sdev); + scsi_internal_device_unblock(sdev, *(enum scsi_device_state *)data); } static int target_unblock(struct device *dev, void *data) { if (scsi_is_target_device(dev)) - starget_for_each_device(to_scsi_target(dev), NULL, + starget_for_each_device(to_scsi_target(dev), data, device_unblock); return 0; } void -scsi_target_unblock(struct device *dev) +scsi_target_unblock(struct device *dev, enum scsi_device_state new_state) { if (scsi_is_target_device(dev)) - starget_for_each_device(to_scsi_target(dev), NULL, + starget_for_each_device(to_scsi_target(dev), &new_state, device_unblock); else - device_for_each_child(dev, NULL, target_unblock); + device_for_each_child(dev, &new_state, target_unblock); } EXPORT_SYMBOL_GPL(scsi_target_unblock); diff --git a/drivers/scsi/scsi_netlink.c b/drivers/scsi/scsi_netlink.c index c77628afbf9..8818dd681c1 100644 --- a/drivers/scsi/scsi_netlink.c +++ b/drivers/scsi/scsi_netlink.c @@ -486,6 +486,10 @@ void scsi_netlink_init(void) { int error; + struct netlink_kernel_cfg cfg = { + .input = scsi_nl_rcv_msg, + .groups = SCSI_NL_GRP_CNT, + }; INIT_LIST_HEAD(&scsi_nl_drivers); @@ -497,8 +501,7 @@ scsi_netlink_init(void) } scsi_nl_sock = netlink_kernel_create(&init_net, NETLINK_SCSITRANSPORT, - SCSI_NL_GRP_CNT, scsi_nl_rcv_msg, NULL, - THIS_MODULE); + THIS_MODULE, &cfg); if (!scsi_nl_sock) { printk(KERN_ERR "%s: register of receive handler failed\n", __func__); diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c index d4201ded3b2..dc0ad85853e 100644 --- a/drivers/scsi/scsi_pm.c +++ b/drivers/scsi/scsi_pm.c @@ -76,23 +76,24 @@ static int scsi_bus_resume_common(struct device *dev) { int err = 0; - if (scsi_is_sdev_device(dev)) { - /* - * Parent device may have runtime suspended as soon as - * it is woken up during the system resume. - * - * Resume it on behalf of child. - */ - pm_runtime_get_sync(dev->parent); - err = scsi_dev_type_resume(dev); - pm_runtime_put_sync(dev->parent); - } + /* + * Parent device may have runtime suspended as soon as + * it is woken up during the system resume. + * + * Resume it on behalf of child. 
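The scsi_netlink change is part of a tree-wide netlink API update: netlink_kernel_create() now takes a struct netlink_kernel_cfg instead of a growing positional argument list (scsi_transport_iscsi below gets the same treatment). A sketch against a generic protocol, with the receive handler hypothetical:

#include <linux/module.h>
#include <linux/netlink.h>
#include <net/net_namespace.h>

static void my_nl_rcv(struct sk_buff *skb)
{
        /* ... validate and dispatch the message ... */
}

static struct sock *my_nl_create(void)
{
        /* The multicast group count and input callback now travel in
         * a cfg struct rather than as extra positional arguments. */
        struct netlink_kernel_cfg cfg = {
                .groups = 1,
                .input  = my_nl_rcv,
        };

        return netlink_kernel_create(&init_net, NETLINK_USERSOCK,
                                     THIS_MODULE, &cfg);
}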
+ */ + pm_runtime_get_sync(dev->parent); + if (scsi_is_sdev_device(dev)) + err = scsi_dev_type_resume(dev); if (err == 0) { pm_runtime_disable(dev); pm_runtime_set_active(dev); pm_runtime_enable(dev); } + + pm_runtime_put_sync(dev->parent); + return err; } diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h index 07ce3f51701..8f9a0cadc29 100644 --- a/drivers/scsi/scsi_priv.h +++ b/drivers/scsi/scsi_priv.h @@ -2,6 +2,8 @@ #define _SCSI_PRIV_H #include <linux/device.h> +#include <linux/async.h> +#include <scsi/scsi_device.h> struct request_queue; struct request; @@ -79,12 +81,11 @@ int scsi_noretry_cmd(struct scsi_cmnd *scmd); /* scsi_lib.c */ extern int scsi_maybe_unblock_host(struct scsi_device *sdev); extern void scsi_device_unbusy(struct scsi_device *sdev); -extern int scsi_queue_insert(struct scsi_cmnd *cmd, int reason); +extern void scsi_queue_insert(struct scsi_cmnd *cmd, int reason); extern void scsi_next_command(struct scsi_cmnd *cmd); extern void scsi_io_completion(struct scsi_cmnd *, unsigned int); extern void scsi_run_host_queues(struct Scsi_Host *shost); extern struct request_queue *scsi_alloc_queue(struct scsi_device *sdev); -extern void scsi_free_queue(struct request_queue *q); extern int scsi_init_queue(void); extern void scsi_exit_queue(void); struct request_queue; @@ -163,7 +164,7 @@ static inline int scsi_autopm_get_host(struct Scsi_Host *h) { return 0; } static inline void scsi_autopm_put_host(struct Scsi_Host *h) {} #endif /* CONFIG_PM_RUNTIME */ -extern struct list_head scsi_sd_probe_domain; +extern struct async_domain scsi_sd_probe_domain; /* * internal scsi timeout functions: for use by mid-layer and transport @@ -172,6 +173,7 @@ extern struct list_head scsi_sd_probe_domain; #define SCSI_DEVICE_BLOCK_MAX_TIMEOUT 600 /* units in seconds */ extern int scsi_internal_device_block(struct scsi_device *sdev); -extern int scsi_internal_device_unblock(struct scsi_device *sdev); +extern int scsi_internal_device_unblock(struct scsi_device *sdev, + enum scsi_device_state new_state); #endif /* _SCSI_PRIV_H */ diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index 2e5fe584aad..56a93794c47 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c @@ -147,7 +147,7 @@ int scsi_complete_async_scans(void) do { if (list_empty(&scanning_hosts)) - goto out; + return 0; /* If we can't get memory immediately, that's OK. Just * sleep a little. Even if we never get memory, the async * scans will finish eventually. @@ -179,26 +179,11 @@ int scsi_complete_async_scans(void) } done: spin_unlock(&async_scan_lock); - kfree(data); - - out: - async_synchronize_full_domain(&scsi_sd_probe_domain); + kfree(data); return 0; } -/* Only exported for the benefit of scsi_wait_scan */ -EXPORT_SYMBOL_GPL(scsi_complete_async_scans); - -#ifndef MODULE -/* - * For async scanning we need to wait for all the scans to complete before - * trying to mount the root fs. Otherwise non-modular drivers may not be ready - * yet. 
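In scsi_bus_resume_common() the pm_runtime_get_sync(dev->parent) bracket now covers every device type, not just sdevs, and the parent reference is dropped only after runtime PM has been re-enabled on the child. The overall shape, as a sketch:

#include <linux/pm_runtime.h>

static int my_bus_resume(struct device *dev)
{
        int err = 0;

        pm_runtime_get_sync(dev->parent);       /* keep the parent awake */

        /* ... resume the device itself, setting err on failure ... */

        if (err == 0) {
                pm_runtime_disable(dev);
                pm_runtime_set_active(dev);
                pm_runtime_enable(dev);
        }

        pm_runtime_put_sync(dev->parent);
        return err;
}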
- */ -late_initcall(scsi_complete_async_scans); -#endif - /** * scsi_unlock_floptical - unlock device via a special MODE SENSE command * @sdev: scsi device to send command to @@ -1717,6 +1702,9 @@ static void scsi_sysfs_add_devices(struct Scsi_Host *shost) { struct scsi_device *sdev; shost_for_each_device(sdev, shost) { + /* target removed before the device could be added */ + if (sdev->sdev_state == SDEV_DEL) + continue; if (!scsi_host_scan_allowed(shost) || scsi_sysfs_add_sdev(sdev) != 0) __scsi_remove_device(sdev); @@ -1842,14 +1830,13 @@ static void do_scsi_scan_host(struct Scsi_Host *shost) } } -static int do_scan_async(void *_data) +static void do_scan_async(void *_data, async_cookie_t c) { struct async_scan_data *data = _data; struct Scsi_Host *shost = data->shost; do_scsi_scan_host(shost); scsi_finish_async_scan(data); - return 0; } /** @@ -1858,7 +1845,6 @@ static int do_scan_async(void *_data) **/ void scsi_scan_host(struct Scsi_Host *shost) { - struct task_struct *p; struct async_scan_data *data; if (strncmp(scsi_scan_type, "none", 4) == 0) @@ -1873,9 +1859,11 @@ void scsi_scan_host(struct Scsi_Host *shost) return; } - p = kthread_run(do_scan_async, data, "scsi_scan_%d", shost->host_no); - if (IS_ERR(p)) - do_scan_async(data); + /* register with the async subsystem so wait_for_device_probe() + * will flush this work + */ + async_schedule(do_scan_async, data); + /* scsi_autopm_put_host(shost) is called in scsi_finish_async_scan() */ } EXPORT_SYMBOL(scsi_scan_host); diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c index 04c2a278076..093d4f6a54d 100644 --- a/drivers/scsi/scsi_sysfs.c +++ b/drivers/scsi/scsi_sysfs.c @@ -35,6 +35,7 @@ static const struct { { SDEV_DEL, "deleted" }, { SDEV_QUIESCE, "quiesce" }, { SDEV_OFFLINE, "offline" }, + { SDEV_TRANSPORT_OFFLINE, "transport-offline" }, { SDEV_BLOCK, "blocked" }, { SDEV_CREATED_BLOCK, "created-blocked" }, }; @@ -966,16 +967,20 @@ void __scsi_remove_device(struct scsi_device *sdev) device_del(dev); } else put_device(&sdev->sdev_dev); + + /* + * Stop accepting new requests and wait until all queuecommand() and + * scsi_run_queue() invocations have finished before tearing down the + * device. 
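scsi_scan_host() stops spawning a kthread and instead schedules do_scan_async() through the async subsystem, so wait_for_device_probe() can flush pending bus scans; that is what makes the scsi_wait_scan module (deleted below) unnecessary. A sketch of the pattern:

#include <linux/async.h>
#include <linux/device.h>

static void my_scan(void *data, async_cookie_t cookie)
{
        /* ... walk the bus and add devices ... */
}

static void my_scan_host(void *host)
{
        /* Registered with the async core, so a later
         * wait_for_device_probe() waits for this scan too. */
        async_schedule(my_scan, host);
}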
+ */ scsi_device_set_state(sdev, SDEV_DEL); + blk_cleanup_queue(sdev->request_queue); + cancel_work_sync(&sdev->requeue_work); + if (sdev->host->hostt->slave_destroy) sdev->host->hostt->slave_destroy(sdev); transport_destroy_device(dev); - /* cause the request function to reject all I/O requests */ - sdev->request_queue->queuedata = NULL; - - /* Freeing the queue signals to block that we're done */ - scsi_free_queue(sdev->request_queue); put_device(dev); } @@ -1000,7 +1005,6 @@ static void __scsi_remove_target(struct scsi_target *starget) struct scsi_device *sdev; spin_lock_irqsave(shost->host_lock, flags); - starget->reap_ref++; restart: list_for_each_entry(sdev, &shost->__devices, siblings) { if (sdev->channel != starget->channel || @@ -1014,14 +1018,6 @@ static void __scsi_remove_target(struct scsi_target *starget) goto restart; } spin_unlock_irqrestore(shost->host_lock, flags); - scsi_target_reap(starget); -} - -static int __remove_child (struct device * dev, void * data) -{ - if (scsi_is_target_device(dev)) - __scsi_remove_target(to_scsi_target(dev)); - return 0; } /** @@ -1034,14 +1030,34 @@ static int __remove_child (struct device * dev, void * data) */ void scsi_remove_target(struct device *dev) { - if (scsi_is_target_device(dev)) { - __scsi_remove_target(to_scsi_target(dev)); - return; + struct Scsi_Host *shost = dev_to_shost(dev->parent); + struct scsi_target *starget, *found; + unsigned long flags; + + restart: + found = NULL; + spin_lock_irqsave(shost->host_lock, flags); + list_for_each_entry(starget, &shost->__targets, siblings) { + if (starget->state == STARGET_DEL) + continue; + if (starget->dev.parent == dev || &starget->dev == dev) { + found = starget; + found->reap_ref++; + break; + } } + spin_unlock_irqrestore(shost->host_lock, flags); - get_device(dev); - device_for_each_child(dev, NULL, __remove_child); - put_device(dev); + if (found) { + __scsi_remove_target(found); + scsi_target_reap(found); + /* in the case where @dev has multiple starget children, + * continue removing. + * + * FIXME: does such a case exist? 
+ */ + goto restart; + } } EXPORT_SYMBOL(scsi_remove_target); diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c index 579760420d5..e894ca7b54c 100644 --- a/drivers/scsi/scsi_transport_fc.c +++ b/drivers/scsi/scsi_transport_fc.c @@ -1744,6 +1744,15 @@ fc_host_statistic(fcp_output_requests); fc_host_statistic(fcp_control_requests); fc_host_statistic(fcp_input_megabytes); fc_host_statistic(fcp_output_megabytes); +fc_host_statistic(fcp_packet_alloc_failures); +fc_host_statistic(fcp_packet_aborts); +fc_host_statistic(fcp_frame_alloc_failures); +fc_host_statistic(fc_no_free_exch); +fc_host_statistic(fc_no_free_exch_xid); +fc_host_statistic(fc_xid_not_found); +fc_host_statistic(fc_xid_busy); +fc_host_statistic(fc_seq_not_found); +fc_host_statistic(fc_non_bls_resp); static ssize_t fc_reset_statistics(struct device *dev, struct device_attribute *attr, @@ -1784,6 +1793,15 @@ static struct attribute *fc_statistics_attrs[] = { &device_attr_host_fcp_control_requests.attr, &device_attr_host_fcp_input_megabytes.attr, &device_attr_host_fcp_output_megabytes.attr, + &device_attr_host_fcp_packet_alloc_failures.attr, + &device_attr_host_fcp_packet_aborts.attr, + &device_attr_host_fcp_frame_alloc_failures.attr, + &device_attr_host_fc_no_free_exch.attr, + &device_attr_host_fc_no_free_exch_xid.attr, + &device_attr_host_fc_xid_not_found.attr, + &device_attr_host_fc_xid_busy.attr, + &device_attr_host_fc_seq_not_found.attr, + &device_attr_host_fc_non_bls_resp.attr, &device_attr_host_reset_statistics.attr, NULL }; @@ -2477,11 +2495,9 @@ static void fc_terminate_rport_io(struct fc_rport *rport) i->f->terminate_rport_io(rport); /* - * must unblock to flush queued IO. The caller will have set - * the port_state or flags, so that fc_remote_port_chkready will - * fail IO. + * Must unblock to flush queued IO. scsi-ml will fail incoming reqs. 
*/ - scsi_target_unblock(&rport->dev); + scsi_target_unblock(&rport->dev, SDEV_TRANSPORT_OFFLINE); } /** @@ -2812,8 +2828,8 @@ fc_remote_port_add(struct Scsi_Host *shost, int channel, /* if target, initiate a scan */ if (rport->scsi_target_id != -1) { - scsi_target_unblock(&rport->dev); - + scsi_target_unblock(&rport->dev, + SDEV_RUNNING); spin_lock_irqsave(shost->host_lock, flags); rport->flags |= FC_RPORT_SCAN_PENDING; @@ -2882,7 +2898,7 @@ fc_remote_port_add(struct Scsi_Host *shost, int channel, spin_unlock_irqrestore(shost->host_lock, flags); if (ids->roles & FC_PORT_ROLE_FCP_TARGET) { - scsi_target_unblock(&rport->dev); + scsi_target_unblock(&rport->dev, SDEV_RUNNING); /* initiate a scan of the target */ spin_lock_irqsave(shost->host_lock, flags); @@ -3087,7 +3103,7 @@ fc_remote_port_rolechg(struct fc_rport *rport, u32 roles) /* ensure any stgt delete functions are done */ fc_flush_work(shost); - scsi_target_unblock(&rport->dev); + scsi_target_unblock(&rport->dev, SDEV_RUNNING); /* initiate a scan of the target */ spin_lock_irqsave(shost->host_lock, flags); rport->flags |= FC_RPORT_SCAN_PENDING; @@ -3131,7 +3147,7 @@ fc_timeout_deleted_rport(struct work_struct *work) "blocked FC remote port time out: no longer" " a FCP target, removing starget\n"); spin_unlock_irqrestore(shost->host_lock, flags); - scsi_target_unblock(&rport->dev); + scsi_target_unblock(&rport->dev, SDEV_TRANSPORT_OFFLINE); fc_queue_work(shost, &rport->stgt_delete_work); return; } @@ -4130,45 +4146,7 @@ fc_bsg_rportadd(struct Scsi_Host *shost, struct fc_rport *rport) static void fc_bsg_remove(struct request_queue *q) { - struct request *req; /* block request */ - int counts; /* totals for request_list count and starved */ - if (q) { - /* Stop taking in new requests */ - spin_lock_irq(q->queue_lock); - blk_stop_queue(q); - - /* drain all requests in the queue */ - while (1) { - /* need the lock to fetch a request - * this may fetch the same reqeust as the previous pass - */ - req = blk_fetch_request(q); - /* save requests in use and starved */ - counts = q->rq.count[0] + q->rq.count[1] + - q->rq.starved[0] + q->rq.starved[1]; - spin_unlock_irq(q->queue_lock); - /* any requests still outstanding? */ - if (counts == 0) - break; - - /* This may be the same req as the previous iteration, - * always send the blk_end_request_all after a prefetch. - * It is not okay to not end the request because the - * prefetch started the request. 
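fc_bsg_remove() drops its hand-rolled drain loop: bsg_unregister_queue() plus blk_cleanup_queue() is now enough, since blk_cleanup_queue() marks the queue dead and fails outstanding requests itself (scsi_transport_iscsi below makes the same simplification). A sketch:

#include <linux/bsg.h>
#include <linux/blkdev.h>

static void my_bsg_remove(struct request_queue *q)
{
        if (!q)
                return;
        bsg_unregister_queue(q);
        /* Drains and fails pending requests, replacing the deleted
         * fetch-and-fail loop. */
        blk_cleanup_queue(q);
}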
- */ - if (req) { - /* return -ENXIO to indicate that this queue is - * going away - */ - req->errors = -ENXIO; - blk_end_request_all(req, -ENXIO); - } - - msleep(200); /* allow bsg to possibly finish */ - spin_lock_irq(q->queue_lock); - } - bsg_unregister_queue(q); blk_cleanup_queue(q); } diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index 1cf640e575d..fa1dfaa83e3 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c @@ -575,7 +575,7 @@ static int iscsi_remove_host(struct transport_container *tc, struct iscsi_cls_host *ihost = shost->shost_data; if (ihost->bsg_q) { - bsg_remove_queue(ihost->bsg_q); + bsg_unregister_queue(ihost->bsg_q); blk_cleanup_queue(ihost->bsg_q); } return 0; @@ -907,7 +907,7 @@ static void session_recovery_timedout(struct work_struct *work) session->transport->session_recovery_timedout(session); ISCSI_DBG_TRANS_SESSION(session, "Unblocking SCSI target\n"); - scsi_target_unblock(&session->dev); + scsi_target_unblock(&session->dev, SDEV_TRANSPORT_OFFLINE); ISCSI_DBG_TRANS_SESSION(session, "Completed unblocking SCSI target\n"); } @@ -930,7 +930,7 @@ static void __iscsi_unblock_session(struct work_struct *work) session->state = ISCSI_SESSION_LOGGED_IN; spin_unlock_irqrestore(&session->lock, flags); /* start IO */ - scsi_target_unblock(&session->dev); + scsi_target_unblock(&session->dev, SDEV_RUNNING); /* * Only do kernel scanning if the driver is properly hooked into * the async scanning code (drivers like iscsi_tcp do login and @@ -1180,7 +1180,7 @@ void iscsi_remove_session(struct iscsi_cls_session *session) session->state = ISCSI_SESSION_FREE; spin_unlock_irqrestore(&session->lock, flags); - scsi_target_unblock(&session->dev); + scsi_target_unblock(&session->dev, SDEV_TRANSPORT_OFFLINE); /* flush running scans then delete devices */ scsi_flush_work(shost); __iscsi_unbind_session(&session->unbind_work); @@ -2936,7 +2936,10 @@ EXPORT_SYMBOL_GPL(iscsi_unregister_transport); static __init int iscsi_transport_init(void) { int err; - + struct netlink_kernel_cfg cfg = { + .groups = 1, + .input = iscsi_if_rx, + }; printk(KERN_INFO "Loading iSCSI transport class v%s.\n", ISCSI_TRANSPORT_VERSION); @@ -2966,8 +2969,8 @@ static __init int iscsi_transport_init(void) if (err) goto unregister_conn_class; - nls = netlink_kernel_create(&init_net, NETLINK_ISCSI, 1, iscsi_if_rx, - NULL, THIS_MODULE); + nls = netlink_kernel_create(&init_net, NETLINK_ISCSI, + THIS_MODULE, &cfg); if (!nls) { err = -ENOBUFS; goto unregister_session_class; diff --git a/drivers/scsi/scsi_wait_scan.c b/drivers/scsi/scsi_wait_scan.c deleted file mode 100644 index ae781487461..00000000000 --- a/drivers/scsi/scsi_wait_scan.c +++ /dev/null @@ -1,42 +0,0 @@ -/* - * scsi_wait_scan.c - * - * Copyright (C) 2006 James Bottomley <James.Bottomley@SteelEye.com> - * - * This is a simple module to wait until all the async scans are - * complete. The idea is to use it in initrd/initramfs scripts. 
You - * modprobe it after all the modprobes of the root SCSI drivers and it - * will wait until they have all finished scanning their busses before - * allowing the boot to proceed - */ - -#include <linux/module.h> -#include <linux/device.h> -#include "scsi_priv.h" - -static int __init wait_scan_init(void) -{ - /* - * First we need to wait for device probing to finish; - * the drivers we just loaded might just still be probing - * and might not yet have reached the scsi async scanning - */ - wait_for_device_probe(); - /* - * and then we wait for the actual asynchronous scsi scan - * to finish. - */ - scsi_complete_async_scans(); - return 0; -} - -static void __exit wait_scan_exit(void) -{ -} - -MODULE_DESCRIPTION("SCSI wait for scans"); -MODULE_AUTHOR("James Bottomley"); -MODULE_LICENSE("GPL"); - -late_initcall(wait_scan_init); -module_exit(wait_scan_exit); diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 6f72b80121a..4df73e52a4f 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -2261,8 +2261,13 @@ bad_sense: sd_printk(KERN_ERR, sdkp, "Asking for cache data failed\n"); defaults: - sd_printk(KERN_ERR, sdkp, "Assuming drive cache: write through\n"); - sdkp->WCE = 0; + if (sdp->wce_default_on) { + sd_printk(KERN_NOTICE, sdkp, "Assuming drive cache: write back\n"); + sdkp->WCE = 1; + } else { + sd_printk(KERN_ERR, sdkp, "Assuming drive cache: write through\n"); + sdkp->WCE = 0; + } sdkp->RCD = 0; sdkp->DPOFUA = 0; } @@ -2704,6 +2709,7 @@ static int sd_probe(struct device *dev) sdkp->disk = gd; sdkp->index = index; atomic_set(&sdkp->openers, 0); + atomic_set(&sdkp->device->ioerr_cnt, 0); if (!sdp->request_queue->rq_timeout) { if (sdp->type != TYPE_MOD) diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 6a4fd00117c..58f4ba6fe41 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -232,11 +232,11 @@ static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba) * the host controller * @reg_hcs - host controller status register value * - * Returns 0 if device present, non-zero if no device detected + * Returns 1 if device present, 0 if no device detected */ static inline int ufshcd_is_device_present(u32 reg_hcs) { - return (DEVICE_PRESENT & reg_hcs) ? 0 : -1; + return (DEVICE_PRESENT & reg_hcs) ? 
1 : 0; } /** @@ -911,7 +911,7 @@ static int ufshcd_make_hba_operational(struct ufs_hba *hba) /* check if device present */ reg = readl((hba->mmio_base + REG_CONTROLLER_STATUS)); - if (ufshcd_is_device_present(reg)) { + if (!ufshcd_is_device_present(reg)) { dev_err(&hba->pdev->dev, "cc: Device not present\n"); err = -ENXIO; goto out; @@ -1163,6 +1163,8 @@ static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index) if (task_result != UPIU_TASK_MANAGEMENT_FUNC_COMPL && task_result != UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) task_result = FAILED; + else + task_result = SUCCESS; } else { task_result = FAILED; dev_err(&hba->pdev->dev, @@ -1556,7 +1558,7 @@ ufshcd_issue_tm_cmd(struct ufs_hba *hba, goto out; } clear_bit(free_slot, &hba->tm_condition); - return ufshcd_task_req_compl(hba, free_slot); + err = ufshcd_task_req_compl(hba, free_slot); out: return err; } @@ -1580,7 +1582,7 @@ static int ufshcd_device_reset(struct scsi_cmnd *cmd) tag = cmd->request->tag; err = ufshcd_issue_tm_cmd(hba, &hba->lrb[tag], UFS_LOGICAL_RESET); - if (err) + if (err == FAILED) goto out; for (pos = 0; pos < hba->nutrs; pos++) { @@ -1620,7 +1622,7 @@ static int ufshcd_host_reset(struct scsi_cmnd *cmd) if (hba->ufshcd_state == UFSHCD_STATE_RESET) return SUCCESS; - return (ufshcd_do_reset(hba) == SUCCESS) ? SUCCESS : FAILED; + return ufshcd_do_reset(hba); } /** @@ -1652,7 +1654,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd) spin_unlock_irqrestore(host->host_lock, flags); err = ufshcd_issue_tm_cmd(hba, &hba->lrb[tag], UFS_ABORT_TASK); - if (err) + if (err == FAILED) goto out; scsi_dma_unmap(cmd); @@ -1953,24 +1955,7 @@ static struct pci_driver ufshcd_pci_driver = { #endif }; -/** - * ufshcd_init - Driver registration routine - */ -static int __init ufshcd_init(void) -{ - return pci_register_driver(&ufshcd_pci_driver); -} -module_init(ufshcd_init); - -/** - * ufshcd_exit - Driver exit clean-up routine - */ -static void __exit ufshcd_exit(void) -{ - pci_unregister_driver(&ufshcd_pci_driver); -} -module_exit(ufshcd_exit); - +module_pci_driver(ufshcd_pci_driver); MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>, " "Vinayak Holikatti <h.vinayak@samsung.com>"); diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c index 1b384311726..c7030fbee79 100644 --- a/drivers/scsi/virtio_scsi.c +++ b/drivers/scsi/virtio_scsi.c @@ -25,6 +25,7 @@ #include <scsi/scsi_cmnd.h> #define VIRTIO_SCSI_MEMPOOL_SZ 64 +#define VIRTIO_SCSI_EVENT_LEN 8 /* Command queue element */ struct virtio_scsi_cmd { @@ -43,20 +44,42 @@ struct virtio_scsi_cmd { } resp; } ____cacheline_aligned_in_smp; -/* Driver instance state */ -struct virtio_scsi { - /* Protects ctrl_vq, req_vq and sg[] */ +struct virtio_scsi_event_node { + struct virtio_scsi *vscsi; + struct virtio_scsi_event event; + struct work_struct work; +}; + +struct virtio_scsi_vq { + /* Protects vq */ spinlock_t vq_lock; - struct virtio_device *vdev; - struct virtqueue *ctrl_vq; - struct virtqueue *event_vq; - struct virtqueue *req_vq; + struct virtqueue *vq; +}; + +/* Per-target queue state */ +struct virtio_scsi_target_state { + /* Protects sg. Lock hierarchy is tgt_lock -> vq_lock. */ + spinlock_t tgt_lock; /* For sglist construction when adding commands to the virtqueue. 
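ufshcd's hand-written init/exit pair collapses into module_pci_driver(), which expands to exactly that register/unregister boilerplate. A self-contained sketch with hypothetical driver names and IDs:

#include <linux/module.h>
#include <linux/pci.h>

static int my_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        return pci_enable_device(pdev);
}

static void my_remove(struct pci_dev *pdev)
{
        pci_disable_device(pdev);
}

static const struct pci_device_id my_ids[] = {
        { PCI_DEVICE(0x1234, 0x5678) },         /* made-up IDs */
        { }
};
MODULE_DEVICE_TABLE(pci, my_ids);

static struct pci_driver my_pci_driver = {
        .name     = "my_pci_driver",
        .id_table = my_ids,
        .probe    = my_probe,
        .remove   = my_remove,
};

/* Generates the module_init()/module_exit() pair that only calls
 * pci_register_driver()/pci_unregister_driver(), i.e. the boilerplate
 * ufshcd just deleted. */
module_pci_driver(my_pci_driver);
MODULE_LICENSE("GPL");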
*/ struct scatterlist sg[]; }; +/* Driver instance state */ +struct virtio_scsi { + struct virtio_device *vdev; + + struct virtio_scsi_vq ctrl_vq; + struct virtio_scsi_vq event_vq; + struct virtio_scsi_vq req_vq; + + /* Get some buffers ready for event vq */ + struct virtio_scsi_event_node event_list[VIRTIO_SCSI_EVENT_LEN]; + + struct virtio_scsi_target_state *tgt[]; +}; + static struct kmem_cache *virtscsi_cmd_cache; static mempool_t *virtscsi_cmd_pool; @@ -147,26 +170,25 @@ static void virtscsi_complete_cmd(void *buf) static void virtscsi_vq_done(struct virtqueue *vq, void (*fn)(void *buf)) { - struct Scsi_Host *sh = virtio_scsi_host(vq->vdev); - struct virtio_scsi *vscsi = shost_priv(sh); void *buf; - unsigned long flags; unsigned int len; - spin_lock_irqsave(&vscsi->vq_lock, flags); - do { virtqueue_disable_cb(vq); while ((buf = virtqueue_get_buf(vq, &len)) != NULL) fn(buf); } while (!virtqueue_enable_cb(vq)); - - spin_unlock_irqrestore(&vscsi->vq_lock, flags); } static void virtscsi_req_done(struct virtqueue *vq) { + struct Scsi_Host *sh = virtio_scsi_host(vq->vdev); + struct virtio_scsi *vscsi = shost_priv(sh); + unsigned long flags; + + spin_lock_irqsave(&vscsi->req_vq.vq_lock, flags); virtscsi_vq_done(vq, virtscsi_complete_cmd); + spin_unlock_irqrestore(&vscsi->req_vq.vq_lock, flags); }; static void virtscsi_complete_free(void *buf) @@ -181,12 +203,123 @@ static void virtscsi_complete_free(void *buf) static void virtscsi_ctrl_done(struct virtqueue *vq) { + struct Scsi_Host *sh = virtio_scsi_host(vq->vdev); + struct virtio_scsi *vscsi = shost_priv(sh); + unsigned long flags; + + spin_lock_irqsave(&vscsi->ctrl_vq.vq_lock, flags); virtscsi_vq_done(vq, virtscsi_complete_free); + spin_unlock_irqrestore(&vscsi->ctrl_vq.vq_lock, flags); }; +static int virtscsi_kick_event(struct virtio_scsi *vscsi, + struct virtio_scsi_event_node *event_node) +{ + int ret; + struct scatterlist sg; + unsigned long flags; + + sg_set_buf(&sg, &event_node->event, sizeof(struct virtio_scsi_event)); + + spin_lock_irqsave(&vscsi->event_vq.vq_lock, flags); + + ret = virtqueue_add_buf(vscsi->event_vq.vq, &sg, 0, 1, event_node, GFP_ATOMIC); + if (ret >= 0) + virtqueue_kick(vscsi->event_vq.vq); + + spin_unlock_irqrestore(&vscsi->event_vq.vq_lock, flags); + + return ret; +} + +static int virtscsi_kick_event_all(struct virtio_scsi *vscsi) +{ + int i; + + for (i = 0; i < VIRTIO_SCSI_EVENT_LEN; i++) { + vscsi->event_list[i].vscsi = vscsi; + virtscsi_kick_event(vscsi, &vscsi->event_list[i]); + } + + return 0; +} + +static void virtscsi_cancel_event_work(struct virtio_scsi *vscsi) +{ + int i; + + for (i = 0; i < VIRTIO_SCSI_EVENT_LEN; i++) + cancel_work_sync(&vscsi->event_list[i].work); +} + +static void virtscsi_handle_transport_reset(struct virtio_scsi *vscsi, + struct virtio_scsi_event *event) +{ + struct scsi_device *sdev; + struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev); + unsigned int target = event->lun[1]; + unsigned int lun = (event->lun[2] << 8) | event->lun[3]; + + switch (event->reason) { + case VIRTIO_SCSI_EVT_RESET_RESCAN: + scsi_add_device(shost, 0, target, lun); + break; + case VIRTIO_SCSI_EVT_RESET_REMOVED: + sdev = scsi_device_lookup(shost, 0, target, lun); + if (sdev) { + scsi_remove_device(sdev); + scsi_device_put(sdev); + } else { + pr_err("SCSI device %d 0 %d %d not found\n", + shost->host_no, target, lun); + } + break; + default: + pr_info("Unsupport virtio scsi event reason %x\n", event->reason); + } +} + +static void virtscsi_handle_event(struct work_struct *work) +{ + struct 
virtio_scsi_event_node *event_node = + container_of(work, struct virtio_scsi_event_node, work); + struct virtio_scsi *vscsi = event_node->vscsi; + struct virtio_scsi_event *event = &event_node->event; + + if (event->event & VIRTIO_SCSI_T_EVENTS_MISSED) { + event->event &= ~VIRTIO_SCSI_T_EVENTS_MISSED; + scsi_scan_host(virtio_scsi_host(vscsi->vdev)); + } + + switch (event->event) { + case VIRTIO_SCSI_T_NO_EVENT: + break; + case VIRTIO_SCSI_T_TRANSPORT_RESET: + virtscsi_handle_transport_reset(vscsi, event); + break; + default: + pr_err("Unsupport virtio scsi event %x\n", event->event); + } + virtscsi_kick_event(vscsi, event_node); +} + +static void virtscsi_complete_event(void *buf) +{ + struct virtio_scsi_event_node *event_node = buf; + + INIT_WORK(&event_node->work, virtscsi_handle_event); + schedule_work(&event_node->work); +} + static void virtscsi_event_done(struct virtqueue *vq) { - virtscsi_vq_done(vq, virtscsi_complete_free); + struct Scsi_Host *sh = virtio_scsi_host(vq->vdev); + struct virtio_scsi *vscsi = shost_priv(sh); + unsigned long flags; + + spin_lock_irqsave(&vscsi->event_vq.vq_lock, flags); + virtscsi_vq_done(vq, virtscsi_complete_event); + spin_unlock_irqrestore(&vscsi->event_vq.vq_lock, flags); }; static void virtscsi_map_sgl(struct scatterlist *sg, unsigned int *p_idx, @@ -212,25 +345,17 @@ static void virtscsi_map_sgl(struct scatterlist *sg, unsigned int *p_idx, * @req_size : size of the request buffer * @resp_size : size of the response buffer * - * Called with vq_lock held. + * Called with tgt_lock held. */ -static void virtscsi_map_cmd(struct virtio_scsi *vscsi, +static void virtscsi_map_cmd(struct virtio_scsi_target_state *tgt, struct virtio_scsi_cmd *cmd, unsigned *out_num, unsigned *in_num, size_t req_size, size_t resp_size) { struct scsi_cmnd *sc = cmd->sc; - struct scatterlist *sg = vscsi->sg; + struct scatterlist *sg = tgt->sg; unsigned int idx = 0; - if (sc) { - struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev); - BUG_ON(scsi_sg_count(sc) > shost->sg_tablesize); - - /* TODO: check feature bit and fail if unsupported? */ - BUG_ON(sc->sc_data_direction == DMA_BIDIRECTIONAL); - } - /* Request header. 
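virtscsi_handle_transport_reset() turns hotplug events into scsi_add_device()/scsi_remove_device() calls, decoding the target from byte 1 and the LUN from bytes 2-3 of the event's LUN field. A sketch of the action taken once an event is decoded (names hypothetical):

#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>

static void my_handle_hotplug(struct Scsi_Host *shost, unsigned int target,
                              unsigned int lun, bool added)
{
        struct scsi_device *sdev;

        if (added) {
                scsi_add_device(shost, 0, target, lun);
                return;
        }

        sdev = scsi_device_lookup(shost, 0, target, lun);
        if (sdev) {
                scsi_remove_device(sdev);
                scsi_device_put(sdev);  /* drop the lookup reference */
        }
}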
@@ -212,25 +345,17 @@ static void virtscsi_map_sgl(struct scatterlist *sg, unsigned int *p_idx,
  * @req_size    : size of the request buffer
  * @resp_size   : size of the response buffer
  *
- * Called with vq_lock held.
+ * Called with tgt_lock held.
  */
-static void virtscsi_map_cmd(struct virtio_scsi *vscsi,
+static void virtscsi_map_cmd(struct virtio_scsi_target_state *tgt,
                              struct virtio_scsi_cmd *cmd,
                              unsigned *out_num, unsigned *in_num,
                              size_t req_size, size_t resp_size)
 {
         struct scsi_cmnd *sc = cmd->sc;
-        struct scatterlist *sg = vscsi->sg;
+        struct scatterlist *sg = tgt->sg;
         unsigned int idx = 0;
 
-        if (sc) {
-                struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
-                BUG_ON(scsi_sg_count(sc) > shost->sg_tablesize);
-
-                /* TODO: check feature bit and fail if unsupported?  */
-                BUG_ON(sc->sc_data_direction == DMA_BIDIRECTIONAL);
-        }
-
         /* Request header.  */
         sg_set_buf(&sg[idx++], &cmd->req, req_size);
@@ -250,7 +375,8 @@ static void virtscsi_map_cmd(struct virtio_scsi *vscsi,
         *in_num = idx - *out_num;
 }
 
-static int virtscsi_kick_cmd(struct virtio_scsi *vscsi, struct virtqueue *vq,
+static int virtscsi_kick_cmd(struct virtio_scsi_target_state *tgt,
+                             struct virtio_scsi_vq *vq,
                              struct virtio_scsi_cmd *cmd,
                              size_t req_size, size_t resp_size, gfp_t gfp)
 {
@@ -258,24 +384,35 @@ static int virtscsi_kick_cmd(struct virtio_scsi *vscsi, struct virtqueue *vq,
         unsigned long flags;
         int ret;
 
-        spin_lock_irqsave(&vscsi->vq_lock, flags);
-
-        virtscsi_map_cmd(vscsi, cmd, &out_num, &in_num, req_size, resp_size);
+        spin_lock_irqsave(&tgt->tgt_lock, flags);
+        virtscsi_map_cmd(tgt, cmd, &out_num, &in_num, req_size, resp_size);
 
-        ret = virtqueue_add_buf(vq, vscsi->sg, out_num, in_num, cmd, gfp);
+        spin_lock(&vq->vq_lock);
+        ret = virtqueue_add_buf(vq->vq, tgt->sg, out_num, in_num, cmd, gfp);
+        spin_unlock(&tgt->tgt_lock);
         if (ret >= 0)
-                virtqueue_kick(vq);
+                ret = virtqueue_kick_prepare(vq->vq);
+
+        spin_unlock_irqrestore(&vq->vq_lock, flags);
 
-        spin_unlock_irqrestore(&vscsi->vq_lock, flags);
+        if (ret > 0)
+                virtqueue_notify(vq->vq);
 
         return ret;
 }
 
 static int virtscsi_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
 {
         struct virtio_scsi *vscsi = shost_priv(sh);
+        struct virtio_scsi_target_state *tgt = vscsi->tgt[sc->device->id];
         struct virtio_scsi_cmd *cmd;
         int ret;
 
+        struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
+        BUG_ON(scsi_sg_count(sc) > shost->sg_tablesize);
+
+        /* TODO: check feature bit and fail if unsupported?  */
+        BUG_ON(sc->sc_data_direction == DMA_BIDIRECTIONAL);
+
         dev_dbg(&sc->device->sdev_gendev,
                 "cmd %p CDB: %#02x\n", sc, sc->cmnd[0]);
 
@@ -300,7 +437,7 @@ static int virtscsi_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
         BUG_ON(sc->cmd_len > VIRTIO_SCSI_CDB_SIZE);
         memcpy(cmd->req.cmd.cdb, sc->cmnd, sc->cmd_len);
 
-        if (virtscsi_kick_cmd(vscsi, vscsi->req_vq, cmd,
+        if (virtscsi_kick_cmd(tgt, &vscsi->req_vq, cmd,
                               sizeof cmd->req.cmd, sizeof cmd->resp.cmd,
                               GFP_ATOMIC) >= 0)
                 ret = 0;
@@ -312,10 +449,11 @@ out:
 static int virtscsi_tmf(struct virtio_scsi *vscsi, struct virtio_scsi_cmd *cmd)
 {
         DECLARE_COMPLETION_ONSTACK(comp);
+        struct virtio_scsi_target_state *tgt = vscsi->tgt[cmd->sc->device->id];
         int ret = FAILED;
 
         cmd->comp = &comp;
-        if (virtscsi_kick_cmd(vscsi, vscsi->ctrl_vq, cmd,
+        if (virtscsi_kick_cmd(tgt, &vscsi->ctrl_vq, cmd,
                               sizeof cmd->req.tmf, sizeof cmd->resp.tmf,
                               GFP_NOIO) < 0)
                 goto out;
@@ -408,11 +546,63 @@ static struct scsi_host_template virtscsi_host_template = {
                                   &__val, sizeof(__val)); \
         })
 
+static void virtscsi_init_vq(struct virtio_scsi_vq *virtscsi_vq,
+                             struct virtqueue *vq)
+{
+        spin_lock_init(&virtscsi_vq->vq_lock);
+        virtscsi_vq->vq = vq;
+}
+
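virtscsi_kick_cmd() above now takes two locks hand over hand: tgt_lock is held only while the per-target scatterlist is built and copied into the ring, vq_lock covers add_buf and kick_prepare, and the expensive virtqueue_notify() exit to the host runs with no lock held at all. A runnable userspace analogue of that pattern, with all names illustrative (stage_lock stands in for tgt_lock, queue_lock for vq_lock):

        #include <pthread.h>
        #include <stdio.h>

        static pthread_mutex_t stage_lock = PTHREAD_MUTEX_INITIALIZER;
        static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
        static int scratch;          /* cf. tgt->sg, reused by every command */
        static int queue[16], tail;  /* cf. the virtqueue ring */

        static void submit(int value)
        {
                int need_notify, snapshot;

                pthread_mutex_lock(&stage_lock);   /* build in the scratch area */
                scratch = value;
                pthread_mutex_lock(&queue_lock);   /* copy scratch into the queue */
                queue[tail % 16] = scratch;
                tail++;
                pthread_mutex_unlock(&stage_lock); /* scratch reusable already */
                need_notify = (tail % 4 == 0);     /* cf. virtqueue_kick_prepare() */
                snapshot = tail;
                pthread_mutex_unlock(&queue_lock);

                if (need_notify)                   /* cf. virtqueue_notify(): slow, lock-free */
                        printf("notify after %d submissions\n", snapshot);
        }

        int main(void)
        {
                int i;

                for (i = 0; i < 8; i++)
                        submit(i);
                return 0;
        }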
+static struct virtio_scsi_target_state *virtscsi_alloc_tgt(
+        struct virtio_device *vdev, int sg_elems)
+{
+        struct virtio_scsi_target_state *tgt;
+        gfp_t gfp_mask = GFP_KERNEL;
+
+        /* We need extra sg elements at head and tail.  */
+        tgt = kmalloc(sizeof(*tgt) + sizeof(tgt->sg[0]) * (sg_elems + 2),
+                      gfp_mask);
+
+        if (!tgt)
+                return NULL;
+
+        spin_lock_init(&tgt->tgt_lock);
+        sg_init_table(tgt->sg, sg_elems + 2);
+        return tgt;
+}
+
+static void virtscsi_scan(struct virtio_device *vdev)
+{
+        struct Scsi_Host *shost = (struct Scsi_Host *)vdev->priv;
+
+        scsi_scan_host(shost);
+}
+
+static void virtscsi_remove_vqs(struct virtio_device *vdev)
+{
+        struct Scsi_Host *sh = virtio_scsi_host(vdev);
+        struct virtio_scsi *vscsi = shost_priv(sh);
+        u32 i, num_targets;
+
+        /* Stop all the virtqueues. */
+        vdev->config->reset(vdev);
+
+        num_targets = sh->max_id;
+        for (i = 0; i < num_targets; i++) {
+                kfree(vscsi->tgt[i]);
+                vscsi->tgt[i] = NULL;
+        }
+
+        vdev->config->del_vqs(vdev);
+}
+
 static int virtscsi_init(struct virtio_device *vdev,
-                         struct virtio_scsi *vscsi)
+                         struct virtio_scsi *vscsi, int num_targets)
 {
         int err;
         struct virtqueue *vqs[3];
+        u32 i, sg_elems;
+
         vq_callback_t *callbacks[] = {
                 virtscsi_ctrl_done,
                 virtscsi_event_done,
@@ -429,13 +619,32 @@ static int virtscsi_init(struct virtio_device *vdev,
         if (err)
                 return err;
 
-        vscsi->ctrl_vq = vqs[0];
-        vscsi->event_vq = vqs[1];
-        vscsi->req_vq = vqs[2];
+        virtscsi_init_vq(&vscsi->ctrl_vq, vqs[0]);
+        virtscsi_init_vq(&vscsi->event_vq, vqs[1]);
+        virtscsi_init_vq(&vscsi->req_vq, vqs[2]);
 
         virtscsi_config_set(vdev, cdb_size, VIRTIO_SCSI_CDB_SIZE);
         virtscsi_config_set(vdev, sense_size, VIRTIO_SCSI_SENSE_SIZE);
-        return 0;
+
+        if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG))
+                virtscsi_kick_event_all(vscsi);
+
+        /* We need to know how many segments before we allocate. */
+        sg_elems = virtscsi_config_get(vdev, seg_max) ?: 1;
+
+        for (i = 0; i < num_targets; i++) {
+                vscsi->tgt[i] = virtscsi_alloc_tgt(vdev, sg_elems);
+                if (!vscsi->tgt[i]) {
+                        err = -ENOMEM;
+                        goto out;
+                }
+        }
+        err = 0;
+
+out:
+        if (err)
+                virtscsi_remove_vqs(vdev);
+        return err;
 }
 
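Both allocations in this series size a C flexible array member in a single allocation: virtscsi_alloc_tgt() above reserves sg_elems scatterlist entries plus one head slot (request header) and one tail slot (response header), and the probe hunk below reserves num_targets per-target slots behind the host-private area. A small userspace sketch of the sizing arithmetic, with stand-in types that are not the driver's:

        #include <stdlib.h>

        struct sg_entry { void *addr; unsigned int len; };

        struct target_state {
                int tgt_id;             /* stand-in for the lock etc. */
                struct sg_entry sg[];   /* flexible array member */
        };

        /* +2: one slot for the request header, one for the response header. */
        static struct target_state *alloc_target(int sg_elems)
        {
                return malloc(sizeof(struct target_state) +
                              sizeof(struct sg_entry) * (sg_elems + 2));
        }

        int main(void)
        {
                struct target_state *tgt = alloc_target(128);

                free(tgt);
                return 0;
        }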
 static int __devinit virtscsi_probe(struct virtio_device *vdev)
@@ -443,31 +652,25 @@ static int __devinit virtscsi_probe(struct virtio_device *vdev)
         struct Scsi_Host *shost;
         struct virtio_scsi *vscsi;
         int err;
-        u32 sg_elems;
+        u32 sg_elems, num_targets;
         u32 cmd_per_lun;
 
-        /* We need to know how many segments before we allocate.
-         * We need an extra sg elements at head and tail.
-         */
-        sg_elems = virtscsi_config_get(vdev, seg_max) ?: 1;
-
         /* Allocate memory and link the structs together.  */
+        num_targets = virtscsi_config_get(vdev, max_target) + 1;
         shost = scsi_host_alloc(&virtscsi_host_template,
-                sizeof(*vscsi) + sizeof(vscsi->sg[0]) * (sg_elems + 2));
+                sizeof(*vscsi) +
+                num_targets * sizeof(struct virtio_scsi_target_state));
 
         if (!shost)
                 return -ENOMEM;
 
+        sg_elems = virtscsi_config_get(vdev, seg_max) ?: 1;
         shost->sg_tablesize = sg_elems;
         vscsi = shost_priv(shost);
         vscsi->vdev = vdev;
         vdev->priv = shost;
 
-        /* Random initializations.  */
-        spin_lock_init(&vscsi->vq_lock);
-        sg_init_table(vscsi->sg, sg_elems + 2);
-
-        err = virtscsi_init(vdev, vscsi);
+        err = virtscsi_init(vdev, vscsi, num_targets);
         if (err)
                 goto virtscsi_init_failed;
 
@@ -475,15 +678,16 @@ static int __devinit virtscsi_probe(struct virtio_device *vdev)
         shost->cmd_per_lun = min_t(u32, cmd_per_lun, shost->can_queue);
         shost->max_sectors = virtscsi_config_get(vdev, max_sectors) ?: 0xFFFF;
         shost->max_lun = virtscsi_config_get(vdev, max_lun) + 1;
-        shost->max_id = virtscsi_config_get(vdev, max_target) + 1;
+        shost->max_id = num_targets;
         shost->max_channel = 0;
         shost->max_cmd_len = VIRTIO_SCSI_CDB_SIZE;
         err = scsi_add_host(shost, &vdev->dev);
         if (err)
                 goto scsi_add_host_failed;
-
-        scsi_scan_host(shost);
-
+        /*
+         * scsi_scan_host() happens in virtscsi_scan() via virtio_driver->scan()
+         * after VIRTIO_CONFIG_S_DRIVER_OK has been set.
+         */
         return 0;
 
 scsi_add_host_failed:
@@ -493,17 +697,13 @@ virtscsi_init_failed:
         return err;
 }
 
-static void virtscsi_remove_vqs(struct virtio_device *vdev)
-{
-        /* Stop all the virtqueues. */
-        vdev->config->reset(vdev);
-
-        vdev->config->del_vqs(vdev);
-}
-
 static void __devexit virtscsi_remove(struct virtio_device *vdev)
 {
         struct Scsi_Host *shost = virtio_scsi_host(vdev);
+        struct virtio_scsi *vscsi = shost_priv(shost);
+
+        if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG))
+                virtscsi_cancel_event_work(vscsi);
 
         scsi_remove_host(shost);
 
@@ -523,7 +723,7 @@ static int virtscsi_restore(struct virtio_device *vdev)
         struct Scsi_Host *sh = virtio_scsi_host(vdev);
         struct virtio_scsi *vscsi = shost_priv(sh);
 
-        return virtscsi_init(vdev, vscsi);
+        return virtscsi_init(vdev, vscsi, sh->max_id);
 }
 #endif
 
@@ -532,11 +732,18 @@ static struct virtio_device_id id_table[] = {
         { 0 },
 };
 
+static unsigned int features[] = {
+        VIRTIO_SCSI_F_HOTPLUG
+};
+
 static struct virtio_driver virtio_scsi_driver = {
+        .feature_table = features,
+        .feature_table_size = ARRAY_SIZE(features),
         .driver.name = KBUILD_MODNAME,
         .driver.owner = THIS_MODULE,
         .id_table = id_table,
         .probe = virtscsi_probe,
+        .scan = virtscsi_scan,
 #ifdef CONFIG_PM
         .freeze = virtscsi_freeze,
         .restore = virtscsi_restore,
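The probe path above no longer scans: scanning issues SCSI commands, and the virtio core only sets VIRTIO_CONFIG_S_DRIVER_OK after probe returns, so the scan is deferred to the new virtio_driver->scan callback. A hedged skeleton of that split for a generic virtio driver; the callback names and driver name below are placeholders, not from this patch:

        #include <linux/virtio.h>

        static int my_probe(struct virtio_device *vdev)
        {
                /* Allocate state, find the virtqueues, register with the
                 * upper layer. Do not start traffic: DRIVER_OK is not set. */
                return 0;
        }

        static void my_scan(struct virtio_device *vdev)
        {
                /* Called by the virtio core after VIRTIO_CONFIG_S_DRIVER_OK;
                 * it is now safe to issue requests, e.g. scsi_scan_host(). */
        }

        static void my_remove(struct virtio_device *vdev)
        {
                /* Cancel deferred work (cf. virtscsi_cancel_event_work)
                 * before teardown, so no handler runs on freed state. */
        }

        static struct virtio_driver my_driver = {
                .driver.name = "my-virtio-driver",
                .probe       = my_probe,
                .scan        = my_scan,
                .remove      = my_remove,
        };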