Diffstat (limited to 'drivers/scsi')
40 files changed, 583 insertions, 197 deletions
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c index 2a889853a10..7e26ebc2666 100644 --- a/drivers/scsi/aacraid/aachba.c +++ b/drivers/scsi/aacraid/aachba.c @@ -293,7 +293,10 @@ int aac_get_config_status(struct aac_dev *dev, int commit_flag) status = -EINVAL; } } - aac_fib_complete(fibptr); + /* Do not set XferState to zero unless receives a response from F/W */ + if (status >= 0) + aac_fib_complete(fibptr); + /* Send a CT_COMMIT_CONFIG to enable discovery of devices */ if (status >= 0) { if ((aac_commit == 1) || commit_flag) { @@ -310,13 +313,18 @@ int aac_get_config_status(struct aac_dev *dev, int commit_flag) FsaNormal, 1, 1, NULL, NULL); - aac_fib_complete(fibptr); + /* Do not set XferState to zero unless + * receives a response from F/W */ + if (status >= 0) + aac_fib_complete(fibptr); } else if (aac_commit == 0) { printk(KERN_WARNING "aac_get_config_status: Foreign device configurations are being ignored\n"); } } - aac_fib_free(fibptr); + /* FIB should be freed only after getting the response from the F/W */ + if (status != -ERESTARTSYS) + aac_fib_free(fibptr); return status; } @@ -355,7 +363,9 @@ int aac_get_containers(struct aac_dev *dev) maximum_num_containers = le32_to_cpu(dresp->ContainerSwitchEntries); aac_fib_complete(fibptr); } - aac_fib_free(fibptr); + /* FIB should be freed only after getting the response from the F/W */ + if (status != -ERESTARTSYS) + aac_fib_free(fibptr); if (maximum_num_containers < MAXIMUM_NUM_CONTAINERS) maximum_num_containers = MAXIMUM_NUM_CONTAINERS; @@ -1245,8 +1255,12 @@ int aac_get_adapter_info(struct aac_dev* dev) NULL); if (rcode < 0) { - aac_fib_complete(fibptr); - aac_fib_free(fibptr); + /* FIB should be freed only after + * getting the response from the F/W */ + if (rcode != -ERESTARTSYS) { + aac_fib_complete(fibptr); + aac_fib_free(fibptr); + } return rcode; } memcpy(&dev->adapter_info, info, sizeof(*info)); @@ -1270,6 +1284,12 @@ int aac_get_adapter_info(struct aac_dev* dev) if (rcode >= 0) memcpy(&dev->supplement_adapter_info, sinfo, sizeof(*sinfo)); + if (rcode == -ERESTARTSYS) { + fibptr = aac_fib_alloc(dev); + if (!fibptr) + return -ENOMEM; + } + } @@ -1470,9 +1490,11 @@ int aac_get_adapter_info(struct aac_dev* dev) (dev->scsi_host_ptr->sg_tablesize * 8) + 112; } } - - aac_fib_complete(fibptr); - aac_fib_free(fibptr); + /* FIB should be freed only after getting the response from the F/W */ + if (rcode != -ERESTARTSYS) { + aac_fib_complete(fibptr); + aac_fib_free(fibptr); + } return rcode; } @@ -1633,6 +1655,7 @@ static int aac_read(struct scsi_cmnd * scsicmd) * Alocate and initialize a Fib */ if (!(cmd_fibcontext = aac_fib_alloc(dev))) { + printk(KERN_WARNING "aac_read: fib allocation failed\n"); return -1; } @@ -1712,9 +1735,14 @@ static int aac_write(struct scsi_cmnd * scsicmd) * Allocate and initialize a Fib then setup a BlockWrite command */ if (!(cmd_fibcontext = aac_fib_alloc(dev))) { - scsicmd->result = DID_ERROR << 16; - scsicmd->scsi_done(scsicmd); - return 0; + /* FIB temporarily unavailable,not catastrophic failure */ + + /* scsicmd->result = DID_ERROR << 16; + * scsicmd->scsi_done(scsicmd); + * return 0; + */ + printk(KERN_WARNING "aac_write: fib allocation failed\n"); + return -1; } status = aac_adapter_write(cmd_fibcontext, scsicmd, lba, count, fua); diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h index 83986ed8655..619c02d9c86 100644 --- a/drivers/scsi/aacraid/aacraid.h +++ b/drivers/scsi/aacraid/aacraid.h @@ -12,7 +12,7 @@ 
*----------------------------------------------------------------------------*/ #ifndef AAC_DRIVER_BUILD -# define AAC_DRIVER_BUILD 2461 +# define AAC_DRIVER_BUILD 24702 # define AAC_DRIVER_BRANCH "-ms" #endif #define MAXIMUM_NUM_CONTAINERS 32 @@ -1036,6 +1036,9 @@ struct aac_dev u8 printf_enabled; u8 in_reset; u8 msi; + int management_fib_count; + spinlock_t manage_lock; + }; #define aac_adapter_interrupt(dev) \ diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c index 0391d759dfd..9c0c9117853 100644 --- a/drivers/scsi/aacraid/commctrl.c +++ b/drivers/scsi/aacraid/commctrl.c @@ -153,7 +153,7 @@ cleanup: fibptr->hw_fib_pa = hw_fib_pa; fibptr->hw_fib_va = hw_fib; } - if (retval != -EINTR) + if (retval != -ERESTARTSYS) aac_fib_free(fibptr); return retval; } @@ -322,7 +322,7 @@ return_fib: } if (f.wait) { if(down_interruptible(&fibctx->wait_sem) < 0) { - status = -EINTR; + status = -ERESTARTSYS; } else { /* Lock again and retry */ spin_lock_irqsave(&dev->fib_lock, flags); @@ -593,10 +593,10 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg) u64 addr; void* p; if (upsg->sg[i].count > - (dev->adapter_info.options & + ((dev->adapter_info.options & AAC_OPT_NEW_COMM) ? (dev->scsi_host_ptr->max_sectors << 9) : - 65536) { + 65536)) { rcode = -EINVAL; goto cleanup; } @@ -645,10 +645,10 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg) u64 addr; void* p; if (usg->sg[i].count > - (dev->adapter_info.options & + ((dev->adapter_info.options & AAC_OPT_NEW_COMM) ? (dev->scsi_host_ptr->max_sectors << 9) : - 65536) { + 65536)) { rcode = -EINVAL; goto cleanup; } @@ -695,10 +695,10 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg) uintptr_t addr; void* p; if (usg->sg[i].count > - (dev->adapter_info.options & + ((dev->adapter_info.options & AAC_OPT_NEW_COMM) ? (dev->scsi_host_ptr->max_sectors << 9) : - 65536) { + 65536)) { rcode = -EINVAL; goto cleanup; } @@ -734,10 +734,10 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg) dma_addr_t addr; void* p; if (upsg->sg[i].count > - (dev->adapter_info.options & + ((dev->adapter_info.options & AAC_OPT_NEW_COMM) ? 
(dev->scsi_host_ptr->max_sectors << 9) : - 65536) { + 65536)) { rcode = -EINVAL; goto cleanup; } @@ -772,8 +772,8 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg) psg->count = cpu_to_le32(sg_indx+1); status = aac_fib_send(ScsiPortCommand, srbfib, actual_fibsize, FsaNormal, 1, 1, NULL, NULL); } - if (status == -EINTR) { - rcode = -EINTR; + if (status == -ERESTARTSYS) { + rcode = -ERESTARTSYS; goto cleanup; } @@ -810,7 +810,7 @@ cleanup: for(i=0; i <= sg_indx; i++){ kfree(sg_list[i]); } - if (rcode != -EINTR) { + if (rcode != -ERESTARTSYS) { aac_fib_complete(srbfib); aac_fib_free(srbfib); } @@ -848,7 +848,7 @@ int aac_do_ioctl(struct aac_dev * dev, int cmd, void __user *arg) */ status = aac_dev_ioctl(dev, cmd, arg); - if(status != -ENOTTY) + if (status != -ENOTTY) return status; switch (cmd) { diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c index 666d5151d62..a7261486ccd 100644 --- a/drivers/scsi/aacraid/comminit.c +++ b/drivers/scsi/aacraid/comminit.c @@ -194,7 +194,9 @@ int aac_send_shutdown(struct aac_dev * dev) if (status >= 0) aac_fib_complete(fibctx); - aac_fib_free(fibctx); + /* FIB should be freed only after getting the response from the F/W */ + if (status != -ERESTARTSYS) + aac_fib_free(fibctx); return status; } @@ -304,6 +306,8 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev) /* * Check the preferred comm settings, defaults from template. */ + dev->management_fib_count = 0; + spin_lock_init(&dev->manage_lock); dev->max_fib_size = sizeof(struct hw_fib); dev->sg_tablesize = host->sg_tablesize = (dev->max_fib_size - sizeof(struct aac_fibhdr) diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c index 956261f2518..94d2954d79a 100644 --- a/drivers/scsi/aacraid/commsup.c +++ b/drivers/scsi/aacraid/commsup.c @@ -189,7 +189,14 @@ struct fib *aac_fib_alloc(struct aac_dev *dev) void aac_fib_free(struct fib *fibptr) { - unsigned long flags; + unsigned long flags, flagsv; + + spin_lock_irqsave(&fibptr->event_lock, flagsv); + if (fibptr->done == 2) { + spin_unlock_irqrestore(&fibptr->event_lock, flagsv); + return; + } + spin_unlock_irqrestore(&fibptr->event_lock, flagsv); spin_lock_irqsave(&fibptr->dev->fib_lock, flags); if (unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) @@ -390,6 +397,8 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size, struct hw_fib * hw_fib = fibptr->hw_fib_va; unsigned long flags = 0; unsigned long qflags; + unsigned long mflags = 0; + if (!(hw_fib->header.XferState & cpu_to_le32(HostOwned))) return -EBUSY; @@ -471,9 +480,31 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size, if (!dev->queues) return -EBUSY; - if(wait) + if (wait) { + + spin_lock_irqsave(&dev->manage_lock, mflags); + if (dev->management_fib_count >= AAC_NUM_MGT_FIB) { + printk(KERN_INFO "No management Fibs Available:%d\n", + dev->management_fib_count); + spin_unlock_irqrestore(&dev->manage_lock, mflags); + return -EBUSY; + } + dev->management_fib_count++; + spin_unlock_irqrestore(&dev->manage_lock, mflags); spin_lock_irqsave(&fibptr->event_lock, flags); - aac_adapter_deliver(fibptr); + } + + if (aac_adapter_deliver(fibptr) != 0) { + printk(KERN_ERR "aac_fib_send: returned -EBUSY\n"); + if (wait) { + spin_unlock_irqrestore(&fibptr->event_lock, flags); + spin_lock_irqsave(&dev->manage_lock, mflags); + dev->management_fib_count--; + spin_unlock_irqrestore(&dev->manage_lock, mflags); + } + return -EBUSY; + } + /* * If the caller wanted us to wait for response wait now. 
@@ -516,14 +547,15 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size, udelay(5); } } else if (down_interruptible(&fibptr->event_wait)) { - fibptr->done = 2; - up(&fibptr->event_wait); + /* Do nothing ... satisfy + * down_interruptible must_check */ } + spin_lock_irqsave(&fibptr->event_lock, flags); - if ((fibptr->done == 0) || (fibptr->done == 2)) { + if (fibptr->done == 0) { fibptr->done = 2; /* Tell interrupt we aborted */ spin_unlock_irqrestore(&fibptr->event_lock, flags); - return -EINTR; + return -ERESTARTSYS; } spin_unlock_irqrestore(&fibptr->event_lock, flags); BUG_ON(fibptr->done == 0); @@ -689,6 +721,7 @@ int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size) int aac_fib_complete(struct fib *fibptr) { + unsigned long flags; struct hw_fib * hw_fib = fibptr->hw_fib_va; /* @@ -709,6 +742,13 @@ int aac_fib_complete(struct fib *fibptr) * command is complete that we had sent to the adapter and this * cdb could be reused. */ + spin_lock_irqsave(&fibptr->event_lock, flags); + if (fibptr->done == 2) { + spin_unlock_irqrestore(&fibptr->event_lock, flags); + return 0; + } + spin_unlock_irqrestore(&fibptr->event_lock, flags); + if((hw_fib->header.XferState & cpu_to_le32(SentFromHost)) && (hw_fib->header.XferState & cpu_to_le32(AdapterProcessed))) { @@ -1355,7 +1395,10 @@ int aac_reset_adapter(struct aac_dev * aac, int forced) if (status >= 0) aac_fib_complete(fibctx); - aac_fib_free(fibctx); + /* FIB should be freed only after getting + * the response from the F/W */ + if (status != -ERESTARTSYS) + aac_fib_free(fibctx); } } @@ -1759,6 +1802,7 @@ int aac_command_thread(void *data) struct fib *fibptr; if ((fibptr = aac_fib_alloc(dev))) { + int status; __le32 *info; aac_fib_init(fibptr); @@ -1769,15 +1813,21 @@ int aac_command_thread(void *data) *info = cpu_to_le32(now.tv_sec); - (void)aac_fib_send(SendHostTime, + status = aac_fib_send(SendHostTime, fibptr, sizeof(*info), FsaNormal, 1, 1, NULL, NULL); - aac_fib_complete(fibptr); - aac_fib_free(fibptr); + /* Do not set XferState to zero unless + * receives a response from F/W */ + if (status >= 0) + aac_fib_complete(fibptr); + /* FIB should be freed only after + * getting the response from the F/W */ + if (status != -ERESTARTSYS) + aac_fib_free(fibptr); } difference = (long)(unsigned)update_interval*HZ; } else { diff --git a/drivers/scsi/aacraid/dpcsup.c b/drivers/scsi/aacraid/dpcsup.c index abc9ef5d1b1..9c7408fe8c7 100644 --- a/drivers/scsi/aacraid/dpcsup.c +++ b/drivers/scsi/aacraid/dpcsup.c @@ -57,9 +57,9 @@ unsigned int aac_response_normal(struct aac_queue * q) struct hw_fib * hwfib; struct fib * fib; int consumed = 0; - unsigned long flags; + unsigned long flags, mflags; - spin_lock_irqsave(q->lock, flags); + spin_lock_irqsave(q->lock, flags); /* * Keep pulling response QEs off the response queue and waking * up the waiters until there are no more QEs. 
We then return @@ -125,12 +125,21 @@ unsigned int aac_response_normal(struct aac_queue * q) } else { unsigned long flagv; spin_lock_irqsave(&fib->event_lock, flagv); - if (!fib->done) + if (!fib->done) { fib->done = 1; - up(&fib->event_wait); + up(&fib->event_wait); + } spin_unlock_irqrestore(&fib->event_lock, flagv); + + spin_lock_irqsave(&dev->manage_lock, mflags); + dev->management_fib_count--; + spin_unlock_irqrestore(&dev->manage_lock, mflags); + FIB_COUNTER_INCREMENT(aac_config.NormalRecved); if (fib->done == 2) { + spin_lock_irqsave(&fib->event_lock, flagv); + fib->done = 0; + spin_unlock_irqrestore(&fib->event_lock, flagv); aac_fib_complete(fib); aac_fib_free(fib); } @@ -232,6 +241,7 @@ unsigned int aac_command_normal(struct aac_queue *q) unsigned int aac_intr_normal(struct aac_dev * dev, u32 index) { + unsigned long mflags; dprintk((KERN_INFO "aac_intr_normal(%p,%x)\n", dev, index)); if ((index & 0x00000002L)) { struct hw_fib * hw_fib; @@ -320,11 +330,25 @@ unsigned int aac_intr_normal(struct aac_dev * dev, u32 index) unsigned long flagv; dprintk((KERN_INFO "event_wait up\n")); spin_lock_irqsave(&fib->event_lock, flagv); - if (!fib->done) + if (!fib->done) { fib->done = 1; - up(&fib->event_wait); + up(&fib->event_wait); + } spin_unlock_irqrestore(&fib->event_lock, flagv); + + spin_lock_irqsave(&dev->manage_lock, mflags); + dev->management_fib_count--; + spin_unlock_irqrestore(&dev->manage_lock, mflags); + FIB_COUNTER_INCREMENT(aac_config.NormalRecved); + if (fib->done == 2) { + spin_lock_irqsave(&fib->event_lock, flagv); + fib->done = 0; + spin_unlock_irqrestore(&fib->event_lock, flagv); + aac_fib_complete(fib); + aac_fib_free(fib); + } + } return 0; } diff --git a/drivers/scsi/aic7xxx/aic79xx_core.c b/drivers/scsi/aic7xxx/aic79xx_core.c index 4d419c155ce..78971db5b60 100644 --- a/drivers/scsi/aic7xxx/aic79xx_core.c +++ b/drivers/scsi/aic7xxx/aic79xx_core.c @@ -3171,13 +3171,16 @@ ahd_handle_nonpkt_busfree(struct ahd_softc *ahd) tinfo->curr.transport_version = 2; tinfo->goal.transport_version = 2; tinfo->goal.ppr_options = 0; - /* - * Remove any SCBs in the waiting for selection - * queue that may also be for this target so - * that command ordering is preserved. - */ - ahd_freeze_devq(ahd, scb); - ahd_qinfifo_requeue_tail(ahd, scb); + if (scb != NULL) { + /* + * Remove any SCBs in the waiting + * for selection queue that may + * also be for this target so that + * command ordering is preserved. + */ + ahd_freeze_devq(ahd, scb); + ahd_qinfifo_requeue_tail(ahd, scb); + } printerror = 0; } } else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_WDTR, FALSE) @@ -3194,13 +3197,16 @@ ahd_handle_nonpkt_busfree(struct ahd_softc *ahd) MSG_EXT_WDTR_BUS_8_BIT, AHD_TRANS_CUR|AHD_TRANS_GOAL, /*paused*/TRUE); - /* - * Remove any SCBs in the waiting for selection - * queue that may also be for this target so that - * command ordering is preserved. - */ - ahd_freeze_devq(ahd, scb); - ahd_qinfifo_requeue_tail(ahd, scb); + if (scb != NULL) { + /* + * Remove any SCBs in the waiting for + * selection queue that may also be for + * this target so that command ordering + * is preserved. 
+ */ + ahd_freeze_devq(ahd, scb); + ahd_qinfifo_requeue_tail(ahd, scb); + } printerror = 0; } else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_SDTR, FALSE) && ppr_busfree == 0) { @@ -3217,13 +3223,16 @@ ahd_handle_nonpkt_busfree(struct ahd_softc *ahd) /*ppr_options*/0, AHD_TRANS_CUR|AHD_TRANS_GOAL, /*paused*/TRUE); - /* - * Remove any SCBs in the waiting for selection - * queue that may also be for this target so that - * command ordering is preserved. - */ - ahd_freeze_devq(ahd, scb); - ahd_qinfifo_requeue_tail(ahd, scb); + if (scb != NULL) { + /* + * Remove any SCBs in the waiting for + * selection queue that may also be for + * this target so that command ordering + * is preserved. + */ + ahd_freeze_devq(ahd, scb); + ahd_qinfifo_requeue_tail(ahd, scb); + } printerror = 0; } else if ((ahd->msg_flags & MSG_FLAG_EXPECT_IDE_BUSFREE) != 0 && ahd_sent_msg(ahd, AHDMSG_1B, @@ -3251,7 +3260,7 @@ ahd_handle_nonpkt_busfree(struct ahd_softc *ahd) * the message phases. We check it last in case we * had to send some other message that caused a busfree. */ - if (printerror != 0 + if (scb != NULL && printerror != 0 && (lastphase == P_MESGIN || lastphase == P_MESGOUT) && ((ahd->msg_flags & MSG_FLAG_EXPECT_PPR_BUSFREE) != 0)) { diff --git a/drivers/scsi/arm/fas216.c b/drivers/scsi/arm/fas216.c index 47754260228..9e71ac61114 100644 --- a/drivers/scsi/arm/fas216.c +++ b/drivers/scsi/arm/fas216.c @@ -2516,7 +2516,7 @@ int fas216_eh_device_reset(struct scsi_cmnd *SCpnt) if (info->scsi.phase == PHASE_IDLE) fas216_kick(info); - mod_timer(&info->eh_timer, 30 * HZ); + mod_timer(&info->eh_timer, jiffies + 30 * HZ); spin_unlock_irqrestore(&info->host_lock, flags); /* diff --git a/drivers/scsi/cxgb3i/cxgb3i_offload.c b/drivers/scsi/cxgb3i/cxgb3i_offload.c index 26ffdcd5a43..15a00e8b712 100644 --- a/drivers/scsi/cxgb3i/cxgb3i_offload.c +++ b/drivers/scsi/cxgb3i/cxgb3i_offload.c @@ -1440,6 +1440,10 @@ void cxgb3i_c3cn_release(struct s3_conn *c3cn) static int is_cxgb3_dev(struct net_device *dev) { struct cxgb3i_sdev_data *cdata; + struct net_device *ndev = dev; + + if (dev->priv_flags & IFF_802_1Q_VLAN) + ndev = vlan_dev_real_dev(dev); write_lock(&cdata_rwlock); list_for_each_entry(cdata, &cdata_list, list) { @@ -1447,7 +1451,7 @@ static int is_cxgb3_dev(struct net_device *dev) int i; for (i = 0; i < ports->nports; i++) - if (dev == ports->lldevs[i]) { + if (ndev == ports->lldevs[i]) { write_unlock(&cdata_rwlock); return 1; } @@ -1566,6 +1570,26 @@ out_err: return -EINVAL; } +/** + * cxgb3i_find_dev - find the interface associated with the given address + * @ipaddr: ip address + */ +static struct net_device * +cxgb3i_find_dev(struct net_device *dev, __be32 ipaddr) +{ + struct flowi fl; + int err; + struct rtable *rt; + + memset(&fl, 0, sizeof(fl)); + fl.nl_u.ip4_u.daddr = ipaddr; + + err = ip_route_output_key(dev ? 
dev_net(dev) : &init_net, &rt, &fl); + if (!err) + return (&rt->u.dst)->dev; + + return NULL; +} /** * cxgb3i_c3cn_connect - initiates an iscsi tcp connection to a given address @@ -1581,6 +1605,7 @@ int cxgb3i_c3cn_connect(struct net_device *dev, struct s3_conn *c3cn, struct cxgb3i_sdev_data *cdata; struct t3cdev *cdev; __be32 sipv4; + struct net_device *dstdev; int err; c3cn_conn_debug("c3cn 0x%p, dev 0x%p.\n", c3cn, dev); @@ -1591,6 +1616,13 @@ int cxgb3i_c3cn_connect(struct net_device *dev, struct s3_conn *c3cn, c3cn->daddr.sin_port = usin->sin_port; c3cn->daddr.sin_addr.s_addr = usin->sin_addr.s_addr; + dstdev = cxgb3i_find_dev(dev, usin->sin_addr.s_addr); + if (!dstdev || !is_cxgb3_dev(dstdev)) + return -ENETUNREACH; + + if (dstdev->priv_flags & IFF_802_1Q_VLAN) + dev = dstdev; + rt = find_route(dev, c3cn->saddr.sin_addr.s_addr, c3cn->daddr.sin_addr.s_addr, c3cn->saddr.sin_port, diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index 10be9f36a4c..2f47ae7cce9 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -2009,6 +2009,8 @@ static int fcoe_destroy(const char *buffer, struct kernel_param *kp) fcoe_interface_cleanup(fcoe); rtnl_unlock(); fcoe_if_destroy(fcoe->ctlr.lp); + module_put(THIS_MODULE); + out_putdev: dev_put(netdev); out_nodev: @@ -2059,6 +2061,11 @@ static int fcoe_create(const char *buffer, struct kernel_param *kp) } #endif + if (!try_module_get(THIS_MODULE)) { + rc = -EINVAL; + goto out_nomod; + } + rtnl_lock(); netdev = fcoe_if_to_netdev(buffer); if (!netdev) { @@ -2099,17 +2106,24 @@ static int fcoe_create(const char *buffer, struct kernel_param *kp) if (!fcoe_link_ok(lport)) fcoe_ctlr_link_up(&fcoe->ctlr); - rc = 0; -out_free: /* * Release from init in fcoe_interface_create(), on success lport * should be holding a reference taken in fcoe_if_create(). 
*/ fcoe_interface_put(fcoe); + dev_put(netdev); + rtnl_unlock(); + mutex_unlock(&fcoe_config_mutex); + + return 0; +out_free: + fcoe_interface_put(fcoe); out_putdev: dev_put(netdev); out_nodev: rtnl_unlock(); + module_put(THIS_MODULE); +out_nomod: mutex_unlock(&fcoe_config_mutex); return rc; } diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/libfcoe.c index 9823291395a..511cb6b371e 100644 --- a/drivers/scsi/fcoe/libfcoe.c +++ b/drivers/scsi/fcoe/libfcoe.c @@ -1187,7 +1187,7 @@ static void fcoe_ctlr_timeout(unsigned long arg) next_timer = fip->ctlr_ka_time; if (time_after_eq(jiffies, fip->port_ka_time)) { - fip->port_ka_time += jiffies + + fip->port_ka_time = jiffies + msecs_to_jiffies(FIP_VN_KA_PERIOD); fip->send_port_ka = 1; } diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c index 19d711cb938..7f4364770e4 100644 --- a/drivers/scsi/libfc/fc_exch.c +++ b/drivers/scsi/libfc/fc_exch.c @@ -1890,7 +1890,7 @@ static struct fc_seq *fc_exch_seq_send(struct fc_lport *lport, fc_exch_setup_hdr(ep, fp, ep->f_ctl); sp->cnt++; - if (ep->xid <= lport->lro_xid) + if (ep->xid <= lport->lro_xid && fh->fh_r_ctl == FC_RCTL_DD_UNSOL_CMD) fc_fcp_ddp_setup(fr_fsp(fp), ep->xid); if (unlikely(lport->tt.frame_send(lport, fp))) diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c index 881d5dfe8c7..6fde2fabfd9 100644 --- a/drivers/scsi/libfc/fc_fcp.c +++ b/drivers/scsi/libfc/fc_fcp.c @@ -298,9 +298,6 @@ void fc_fcp_ddp_setup(struct fc_fcp_pkt *fsp, u16 xid) { struct fc_lport *lport; - if (!fsp) - return; - lport = fsp->lp; if ((fsp->req_flags & FC_SRB_READ) && (lport->lro_enabled) && (lport->tt.ddp_setup)) { diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c index 0b165024a21..7ec8ce75007 100644 --- a/drivers/scsi/libfc/fc_lport.c +++ b/drivers/scsi/libfc/fc_lport.c @@ -1800,7 +1800,8 @@ int fc_lport_bsg_request(struct fc_bsg_job *job) u32 did; job->reply->reply_payload_rcv_len = 0; - rsp->resid_len = job->reply_payload.payload_len; + if (rsp) + rsp->resid_len = job->reply_payload.payload_len; mutex_lock(&lport->lp_mutex); diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index 02300523b23..97923bb0776 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c @@ -623,7 +623,7 @@ static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp, tov = ntohl(plp->fl_csp.sp_e_d_tov); if (ntohs(plp->fl_csp.sp_features) & FC_SP_FT_EDTR) - tov /= 1000; + tov /= 1000000; if (tov > rdata->e_d_tov) rdata->e_d_tov = tov; csp_seq = ntohs(plp->fl_csp.sp_tot_seq); diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c index db6856c138f..4ad87fd74dd 100644 --- a/drivers/scsi/libiscsi_tcp.c +++ b/drivers/scsi/libiscsi_tcp.c @@ -992,12 +992,10 @@ static struct iscsi_r2t_info *iscsi_tcp_get_curr_r2t(struct iscsi_task *task) if (r2t == NULL) { if (kfifo_out(&tcp_task->r2tqueue, (void *)&tcp_task->r2t, sizeof(void *)) != - sizeof(void *)) { - WARN_ONCE(1, "unexpected fifo state"); + sizeof(void *)) r2t = NULL; - } - - r2t = tcp_task->r2t; + else + r2t = tcp_task->r2t; } spin_unlock_bh(&session->lock); } diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index ce522702a6c..2cc39684ce9 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -4142,8 +4142,8 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, spin_lock_irq(shost->host_lock); if (vport->fc_rscn_flush) { /* Another thread is walking fc_rscn_id_list on this vport 
*/ - spin_unlock_irq(shost->host_lock); vport->fc_flag |= FC_RSCN_DISCOVERY; + spin_unlock_irq(shost->host_lock); /* Send back ACC */ lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); return 0; @@ -5948,8 +5948,8 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) lpfc_initial_fdisc(vport); break; } - } else { + vport->vpi_state |= LPFC_VPI_REGISTERED; if (vport == phba->pport) if (phba->sli_rev < LPFC_SLI_REV4) lpfc_issue_fabric_reglogin(vport); diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index 3b942442765..2445e399fd6 100755..100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -747,6 +747,10 @@ lpfc_linkdown(struct lpfc_hba *phba) if (phba->link_state == LPFC_LINK_DOWN) return 0; + + /* Block all SCSI stack I/Os */ + lpfc_scsi_dev_block(phba); + spin_lock_irq(&phba->hbalock); phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_DISCOVERED); if (phba->link_state > LPFC_LINK_DOWN) { @@ -1555,10 +1559,16 @@ lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) * to book keeping the FCFIs can be used. */ if (shdr_status || shdr_add_status) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "2521 READ_FCF_RECORD mailbox failed " - "with status x%x add_status x%x, mbx\n", - shdr_status, shdr_add_status); + if (shdr_status == STATUS_FCF_TABLE_EMPTY) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2726 READ_FCF_RECORD Indicates empty " + "FCF table.\n"); + } else { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2521 READ_FCF_RECORD mailbox failed " + "with status x%x add_status x%x, mbx\n", + shdr_status, shdr_add_status); + } goto out; } /* Interpreting the returned information of FCF records */ @@ -1698,7 +1708,9 @@ lpfc_init_vpi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) lpfc_vport_set_state(vport, FC_VPORT_FAILED); return; } + spin_lock_irq(&phba->hbalock); vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI; + spin_unlock_irq(&phba->hbalock); if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) lpfc_initial_fdisc(vport); @@ -2259,7 +2271,10 @@ lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) mb->mbxStatus); break; } + spin_lock_irq(&phba->hbalock); vport->vpi_state &= ~LPFC_VPI_REGISTERED; + vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; + spin_unlock_irq(&phba->hbalock); vport->unreg_vpi_cmpl = VPORT_OK; mempool_free(pmb, phba->mbox_mem_pool); /* @@ -4475,8 +4490,10 @@ lpfc_unregister_unused_fcf(struct lpfc_hba *phba) (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { lpfc_mbx_unreg_vpi(vports[i]); + spin_lock_irq(&phba->hbalock); vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED; + spin_unlock_irq(&phba->hbalock); } lpfc_destroy_vport_work_array(phba, vports); diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h index 1585148a17e..8a2a1c5935c 100644 --- a/drivers/scsi/lpfc/lpfc_hw4.h +++ b/drivers/scsi/lpfc/lpfc_hw4.h @@ -1013,7 +1013,7 @@ struct lpfc_mbx_wq_destroy { }; #define LPFC_HDR_BUF_SIZE 128 -#define LPFC_DATA_BUF_SIZE 4096 +#define LPFC_DATA_BUF_SIZE 2048 struct rq_context { uint32_t word0; #define lpfc_rq_context_rq_size_SHIFT 16 @@ -1371,6 +1371,7 @@ struct lpfc_mbx_query_fw_cfg { #define STATUS_ERROR_ACITMAIN 0x2a #define STATUS_REBOOT_REQUIRED 0x2c #define STATUS_FCF_IN_USE 0x3a +#define STATUS_FCF_TABLE_EMPTY 0x43 struct lpfc_mbx_sli4_config { struct mbox_header header; diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c 
index d4da6bdd0e7..b8eb1b6e5e7 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -3006,6 +3006,7 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba, struct lpfc_vport *vport; struct lpfc_nodelist *ndlp; struct Scsi_Host *shost; + uint32_t link_state; phba->fc_eventTag = acqe_fcoe->event_tag; phba->fcoe_eventtag = acqe_fcoe->event_tag; @@ -3052,9 +3053,12 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba, break; /* * Currently, driver support only one FCF - so treat this as - * a link down. + * a link down, but save the link state because we don't want + * it to be changed to Link Down unless it is already down. */ + link_state = phba->link_state; lpfc_linkdown(phba); + phba->link_state = link_state; /* Unregister FCF if no devices connected to it */ lpfc_unregister_unused_fcf(phba); break; @@ -7226,8 +7230,6 @@ lpfc_prep_dev_for_perm_failure(struct lpfc_hba *phba) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "2711 PCI channel permanent disable for failure\n"); - /* Block all SCSI devices' I/Os on the host */ - lpfc_scsi_dev_block(phba); /* Clean up all driver's outstanding SCSI I/Os */ lpfc_sli_flush_fcp_rings(phba); } @@ -7256,6 +7258,9 @@ lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state) struct Scsi_Host *shost = pci_get_drvdata(pdev); struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; + /* Block all SCSI devices' I/Os on the host */ + lpfc_scsi_dev_block(phba); + switch (state) { case pci_channel_io_normal: /* Non-fatal error, prepare for recovery */ @@ -7507,6 +7512,9 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid) error = -ENODEV; goto out_free_sysfs_attr; } + /* Default to single FCP EQ for non-MSI-X */ + if (phba->intr_type != MSIX) + phba->cfg_fcp_eq_count = 1; /* Set up SLI-4 HBA */ if (lpfc_sli4_hba_setup(phba)) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 7935667b81a..589549b2bf0 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -1383,7 +1383,7 @@ lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno, /* HBQ for ELS and CT traffic. 
*/ static struct lpfc_hbq_init lpfc_els_hbq = { .rn = 1, - .entry_count = 200, + .entry_count = 256, .mask_count = 0, .profile = 0, .ring_mask = (1 << LPFC_ELS_RING), @@ -1482,8 +1482,11 @@ err: int lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno) { - return(lpfc_sli_hbqbuf_fill_hbqs(phba, qno, - lpfc_hbq_defs[qno]->add_count)); + if (phba->sli_rev == LPFC_SLI_REV4) + return 0; + else + return lpfc_sli_hbqbuf_fill_hbqs(phba, qno, + lpfc_hbq_defs[qno]->add_count); } /** @@ -1498,8 +1501,12 @@ lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno) static int lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno) { - return(lpfc_sli_hbqbuf_fill_hbqs(phba, qno, - lpfc_hbq_defs[qno]->init_count)); + if (phba->sli_rev == LPFC_SLI_REV4) + return lpfc_sli_hbqbuf_fill_hbqs(phba, qno, + lpfc_hbq_defs[qno]->entry_count); + else + return lpfc_sli_hbqbuf_fill_hbqs(phba, qno, + lpfc_hbq_defs[qno]->init_count); } /** @@ -4110,6 +4117,7 @@ lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, if (rc) { dma_free_coherent(&phba->pcidev->dev, dma_size, dmabuf->virt, dmabuf->phys); + kfree(dmabuf); return -EIO; } @@ -5848,7 +5856,6 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, iocbq->iocb.un.ulpWord[3]); wqe->generic.word3 = 0; bf_set(wqe_rcvoxid, &wqe->generic, iocbq->iocb.ulpContext); - bf_set(wqe_xc, &wqe->generic, 1); /* The entire sequence is transmitted for this IOCB */ xmit_len = total_len; cmnd = CMD_XMIT_SEQUENCE64_CR; @@ -10944,7 +10951,8 @@ lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf) return dmabuf; } temp_hdr = seq_dmabuf->hbuf.virt; - if (new_hdr->fh_seq_cnt < temp_hdr->fh_seq_cnt) { + if (be16_to_cpu(new_hdr->fh_seq_cnt) < + be16_to_cpu(temp_hdr->fh_seq_cnt)) { list_del_init(&seq_dmabuf->hbuf.list); list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list); list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list); @@ -10955,6 +10963,11 @@ lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf) list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list); seq_dmabuf->time_stamp = jiffies; lpfc_update_rcv_time_stamp(vport); + if (list_empty(&seq_dmabuf->dbuf.list)) { + temp_hdr = dmabuf->hbuf.virt; + list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list); + return seq_dmabuf; + } /* find the correct place in the sequence to insert this frame */ list_for_each_entry_reverse(d_buf, &seq_dmabuf->dbuf.list, list) { temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf); @@ -10963,7 +10976,8 @@ lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf) * If the frame's sequence count is greater than the frame on * the list then insert the frame right after this frame */ - if (new_hdr->fh_seq_cnt > temp_hdr->fh_seq_cnt) { + if (be16_to_cpu(new_hdr->fh_seq_cnt) > + be16_to_cpu(temp_hdr->fh_seq_cnt)) { list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list); return seq_dmabuf; } @@ -11210,7 +11224,7 @@ lpfc_seq_complete(struct hbq_dmabuf *dmabuf) seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf); hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; /* If there is a hole in the sequence count then fail. 
*/ - if (++seq_count != hdr->fh_seq_cnt) + if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt)) return 0; fctl = (hdr->fh_f_ctl[0] << 16 | hdr->fh_f_ctl[1] << 8 | @@ -11242,6 +11256,7 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf) struct lpfc_iocbq *first_iocbq, *iocbq; struct fc_frame_header *fc_hdr; uint32_t sid; + struct ulp_bde64 *pbde; fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; /* remove from receive buffer list */ @@ -11283,8 +11298,9 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf) if (!iocbq->context3) { iocbq->context3 = d_buf; iocbq->iocb.ulpBdeCount++; - iocbq->iocb.unsli3.rcvsli3.bde2.tus.f.bdeSize = - LPFC_DATA_BUF_SIZE; + pbde = (struct ulp_bde64 *) + &iocbq->iocb.unsli3.sli3Words[4]; + pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE; first_iocbq->iocb.unsli3.rcvsli3.acc_len += bf_get(lpfc_rcqe_length, &seq_dmabuf->cq_event.cqe.rcqe_cmpl); @@ -11401,15 +11417,9 @@ lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba, return; } /* If not last frame in sequence continue processing frames. */ - if (!lpfc_seq_complete(seq_dmabuf)) { - /* - * When saving off frames post a new one and mark this - * frame to be freed when it is finished. - **/ - lpfc_sli_hbqbuf_fill_hbqs(phba, LPFC_ELS_HBQ, 1); - dmabuf->tag = -1; + if (!lpfc_seq_complete(seq_dmabuf)) return; - } + /* Send the complete sequence to the upper layer protocol */ lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf); } diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h index 25d66d070cf..44e5f574236 100644 --- a/drivers/scsi/lpfc/lpfc_sli4.h +++ b/drivers/scsi/lpfc/lpfc_sli4.h @@ -28,7 +28,7 @@ /* Multi-queue arrangement for fast-path FCP work queues */ #define LPFC_FN_EQN_MAX 8 #define LPFC_SP_EQN_DEF 1 -#define LPFC_FP_EQN_DEF 1 +#define LPFC_FP_EQN_DEF 4 #define LPFC_FP_EQN_MIN 1 #define LPFC_FP_EQN_MAX (LPFC_FN_EQN_MAX - LPFC_SP_EQN_DEF) diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h index c7f3aed2aab..792f72263f1 100644 --- a/drivers/scsi/lpfc/lpfc_version.h +++ b/drivers/scsi/lpfc/lpfc_version.h @@ -18,7 +18,7 @@ * included with this package. * *******************************************************************/ -#define LPFC_DRIVER_VERSION "8.3.6" +#define LPFC_DRIVER_VERSION "8.3.7" #define LPFC_DRIVER_NAME "lpfc" #define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp" #define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp" diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c index 7d6dd83d359..e3c7fa64230 100644 --- a/drivers/scsi/lpfc/lpfc_vport.c +++ b/drivers/scsi/lpfc/lpfc_vport.c @@ -512,8 +512,10 @@ enable_vport(struct fc_vport *fc_vport) return VPORT_OK; } + spin_lock_irq(&phba->hbalock); vport->load_flag |= FC_LOADING; vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; + spin_unlock_irq(&phba->hbalock); /* Use the Physical nodes Fabric NDLP to determine if the link is * up and ready to FDISC. 
@@ -700,7 +702,7 @@ lpfc_vport_delete(struct fc_vport *fc_vport) } spin_unlock_irq(&phba->ndlp_lock); } - if (vport->vpi_state != LPFC_VPI_REGISTERED) + if (!(vport->vpi_state & LPFC_VPI_REGISTERED)) goto skip_logo; vport->unreg_vpi_cmpl = VPORT_INVAL; timeout = msecs_to_jiffies(phba->fc_ratov * 2000); diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c index 99ff99e45be..d9b8ca5116b 100644 --- a/drivers/scsi/megaraid/megaraid_sas.c +++ b/drivers/scsi/megaraid/megaraid_sas.c @@ -3781,6 +3781,7 @@ static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg) compat_alloc_user_space(sizeof(struct megasas_iocpacket)); int i; int error = 0; + compat_uptr_t ptr; if (clear_user(ioc, sizeof(*ioc))) return -EFAULT; @@ -3793,9 +3794,22 @@ static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg) copy_in_user(&ioc->sge_count, &cioc->sge_count, sizeof(u32))) return -EFAULT; - for (i = 0; i < MAX_IOCTL_SGE; i++) { - compat_uptr_t ptr; + /* + * The sense_ptr is used in megasas_mgmt_fw_ioctl only when + * sense_len is not null, so prepare the 64bit value under + * the same condition. + */ + if (ioc->sense_len) { + void __user **sense_ioc_ptr = + (void __user **)(ioc->frame.raw + ioc->sense_off); + compat_uptr_t *sense_cioc_ptr = + (compat_uptr_t *)(cioc->frame.raw + cioc->sense_off); + if (get_user(ptr, sense_cioc_ptr) || + put_user(compat_ptr(ptr), sense_ioc_ptr)) + return -EFAULT; + } + for (i = 0; i < MAX_IOCTL_SGE; i++) { if (get_user(ptr, &cioc->sgl[i].iov_base) || put_user(compat_ptr(ptr), &ioc->sgl[i].iov_base) || copy_in_user(&ioc->sgl[i].iov_len, @@ -4046,7 +4060,7 @@ megasas_aen_polling(struct work_struct *work) } -static DRIVER_ATTR(poll_mode_io, S_IRUGO|S_IWUGO, +static DRIVER_ATTR(poll_mode_io, S_IRUGO|S_IWUSR, megasas_sysfs_show_poll_mode_io, megasas_sysfs_set_poll_mode_io); diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c index e7d2688fbeb..b6f1ef954af 100644 --- a/drivers/scsi/pmcraid.c +++ b/drivers/scsi/pmcraid.c @@ -2483,14 +2483,12 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd) sense_copied = 1; } - if (RES_IS_GSCSI(res->cfg_entry)) { + if (RES_IS_GSCSI(res->cfg_entry)) pmcraid_cancel_all(cmd, sense_copied); - } else if (sense_copied) { + else if (sense_copied) pmcraid_erp_done(cmd); - return 0; - } else { + else pmcraid_request_sense(cmd); - } return 1; diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index 21e2bc4d740..3a9f5b288ae 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -232,6 +232,9 @@ qla2x00_sysfs_write_optrom_ctl(struct kobject *kobj, if (off) return 0; + if (unlikely(pci_channel_offline(ha->pdev))) + return 0; + if (sscanf(buf, "%d:%x:%x", &val, &start, &size) < 1) return -EINVAL; if (start > ha->optrom_size) @@ -379,6 +382,9 @@ qla2x00_sysfs_read_vpd(struct kobject *kobj, struct device, kobj))); struct qla_hw_data *ha = vha->hw; + if (unlikely(pci_channel_offline(ha->pdev))) + return 0; + if (!capable(CAP_SYS_ADMIN)) return 0; @@ -398,6 +404,9 @@ qla2x00_sysfs_write_vpd(struct kobject *kobj, struct qla_hw_data *ha = vha->hw; uint8_t *tmp_data; + if (unlikely(pci_channel_offline(ha->pdev))) + return 0; + if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->vpd_size || !ha->isp_ops->write_nvram) return 0; @@ -1238,10 +1247,11 @@ qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr, char *buf) { scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); - int rval; + int rval = 
QLA_FUNCTION_FAILED; uint16_t state[5]; - rval = qla2x00_get_firmware_state(vha, state); + if (!vha->hw->flags.eeh_busy) + rval = qla2x00_get_firmware_state(vha, state); if (rval != QLA_SUCCESS) memset(state, -1, sizeof(state)); @@ -1452,10 +1462,13 @@ qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport) if (!fcport) return; - if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) + if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags)) + return; + + if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) { qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16); - else - qla2x00_abort_fcport_cmds(fcport); + return; + } /* * Transport has effectively 'deleted' the rport, clear @@ -1475,6 +1488,9 @@ qla2x00_terminate_rport_io(struct fc_rport *rport) if (!fcport) return; + if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags)) + return; + if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) { qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16); return; @@ -1515,6 +1531,12 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost) pfc_host_stat = &ha->fc_host_stat; memset(pfc_host_stat, -1, sizeof(struct fc_host_statistics)); + if (test_bit(UNLOADING, &vha->dpc_flags)) + goto done; + + if (unlikely(pci_channel_offline(ha->pdev))) + goto done; + stats = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &stats_dma); if (stats == NULL) { DEBUG2_3_11(printk("%s(%ld): Failed to allocate memory.\n", diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h index f660dd70b72..d6d9c86cb05 100644 --- a/drivers/scsi/qla2xxx/qla_dbg.h +++ b/drivers/scsi/qla2xxx/qla_dbg.h @@ -26,7 +26,7 @@ /* #define QL_DEBUG_LEVEL_14 */ /* Output RSCN trace msgs */ /* #define QL_DEBUG_LEVEL_15 */ /* Output NPIV trace msgs */ /* #define QL_DEBUG_LEVEL_16 */ /* Output ISP84XX trace msgs */ -/* #define QL_DEBUG_LEVEL_17 */ /* Output MULTI-Q trace messages */ +/* #define QL_DEBUG_LEVEL_17 */ /* Output EEH trace messages */ /* * Macros use for debugging the driver. @@ -132,6 +132,13 @@ #else #define DEBUG16(x) do {} while (0) #endif + +#if defined(QL_DEBUG_LEVEL_17) +#define DEBUG17(x) do {x;} while (0) +#else +#define DEBUG17(x) do {} while (0) +#endif + /* * Firmware Dump structure definition */ diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 384afda7dbe..1263d9796e8 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -1586,8 +1586,7 @@ typedef struct fc_port { */ #define FCF_FABRIC_DEVICE BIT_0 #define FCF_LOGIN_NEEDED BIT_1 -#define FCF_TAPE_PRESENT BIT_2 -#define FCF_FCP2_DEVICE BIT_3 +#define FCF_FCP2_DEVICE BIT_2 /* No loop ID flag. */ #define FC_NO_LOOP_ID 0x1000 @@ -2256,11 +2255,13 @@ struct qla_hw_data { uint32_t disable_serdes :1; uint32_t gpsc_supported :1; uint32_t npiv_supported :1; + uint32_t pci_channel_io_perm_failure :1; uint32_t fce_enabled :1; uint32_t fac_supported :1; uint32_t chip_reset_done :1; uint32_t port0 :1; uint32_t running_gold_fw :1; + uint32_t eeh_busy :1; uint32_t cpu_affinity_enabled :1; uint32_t disable_msix_handshake :1; } flags; diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index 0b6801fc638..8bc6f53691e 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h @@ -324,6 +324,7 @@ qla2x00_read_ram_word(scsi_qla_host_t *, uint32_t, uint32_t *); extern int qla2x00_write_ram_word(scsi_qla_host_t *, uint32_t, uint32_t); +extern int qla2x00_get_data_rate(scsi_qla_host_t *); /* * Global Function Prototypes in qla_isr.c source file. 
*/ @@ -452,6 +453,5 @@ extern void qla24xx_wrt_req_reg(struct qla_hw_data *, uint16_t, uint16_t); extern void qla25xx_wrt_req_reg(struct qla_hw_data *, uint16_t, uint16_t); extern void qla25xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t); extern void qla24xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t); -extern struct scsi_qla_host * qla25xx_get_host(struct rsp_que *); #endif /* _QLA_GBL_H */ diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 73a793539d4..3f8e8495b74 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -205,7 +205,7 @@ qla2x00_async_login_done(struct scsi_qla_host *vha, fc_port_t *fcport, switch (data[0]) { case MBS_COMMAND_COMPLETE: - if (fcport->flags & FCF_TAPE_PRESENT) + if (fcport->flags & FCF_FCP2_DEVICE) opts |= BIT_1; rval = qla2x00_get_port_database(vha, fcport, opts); if (rval != QLA_SUCCESS) @@ -269,6 +269,8 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha) vha->flags.online = 0; ha->flags.chip_reset_done = 0; vha->flags.reset_active = 0; + ha->flags.pci_channel_io_perm_failure = 0; + ha->flags.eeh_busy = 0; atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); atomic_set(&vha->loop_state, LOOP_DOWN); vha->device_flags = DFLG_NO_CABLE; @@ -581,6 +583,9 @@ qla2x00_reset_chip(scsi_qla_host_t *vha) uint32_t cnt; uint16_t cmd; + if (unlikely(pci_channel_offline(ha->pdev))) + return; + ha->isp_ops->disable_intrs(ha); spin_lock_irqsave(&ha->hardware_lock, flags); @@ -786,6 +791,12 @@ void qla24xx_reset_chip(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; + + if (pci_channel_offline(ha->pdev) && + ha->flags.pci_channel_io_perm_failure) { + return; + } + ha->isp_ops->disable_intrs(ha); /* Perform RISC reset. */ @@ -2266,6 +2277,8 @@ qla2x00_configure_loop(scsi_qla_host_t *vha) clear_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); clear_bit(RSCN_UPDATE, &vha->dpc_flags); + qla2x00_get_data_rate(vha); + /* Determine what we need to do */ if (ha->current_topology == ISP_CFG_FL && (test_bit(LOCAL_LOOP_UPDATE, &flags))) { @@ -2713,7 +2726,7 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha) /* * Logout all previous fabric devices marked lost, except - * tape devices. + * FCP2 devices. 
*/ list_for_each_entry(fcport, &vha->vp_fcports, list) { if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) @@ -2726,7 +2739,7 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha) qla2x00_mark_device_lost(vha, fcport, ql2xplogiabsentdevice, 0); if (fcport->loop_id != FC_NO_LOOP_ID && - (fcport->flags & FCF_TAPE_PRESENT) == 0 && + (fcport->flags & FCF_FCP2_DEVICE) == 0 && fcport->port_type != FCT_INITIATOR && fcport->port_type != FCT_BROADCAST) { ha->isp_ops->fabric_logout(vha, @@ -3005,7 +3018,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha, fcport->d_id.b24 = new_fcport->d_id.b24; fcport->flags |= FCF_LOGIN_NEEDED; if (fcport->loop_id != FC_NO_LOOP_ID && - (fcport->flags & FCF_TAPE_PRESENT) == 0 && + (fcport->flags & FCF_FCP2_DEVICE) == 0 && fcport->port_type != FCT_INITIATOR && fcport->port_type != FCT_BROADCAST) { ha->isp_ops->fabric_logout(vha, fcport->loop_id, @@ -3259,9 +3272,9 @@ qla2x00_fabric_dev_login(scsi_qla_host_t *vha, fc_port_t *fcport, rval = qla2x00_fabric_login(vha, fcport, next_loopid); if (rval == QLA_SUCCESS) { - /* Send an ADISC to tape devices.*/ + /* Send an ADISC to FCP2 devices.*/ opts = 0; - if (fcport->flags & FCF_TAPE_PRESENT) + if (fcport->flags & FCF_FCP2_DEVICE) opts |= BIT_1; rval = qla2x00_get_port_database(vha, fcport, opts); if (rval != QLA_SUCCESS) { @@ -3560,6 +3573,13 @@ qla2x00_abort_isp(scsi_qla_host_t *vha) /* Requeue all commands in outstanding command list. */ qla2x00_abort_all_cmds(vha, DID_RESET << 16); + if (unlikely(pci_channel_offline(ha->pdev) && + ha->flags.pci_channel_io_perm_failure)) { + clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags); + status = 0; + return status; + } + ha->isp_ops->get_flash_version(vha, req->ring); ha->isp_ops->nvram_config(vha); @@ -4458,6 +4478,8 @@ qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha) int ret, retries; struct qla_hw_data *ha = vha->hw; + if (ha->flags.pci_channel_io_perm_failure) + return; if (!IS_FWI2_CAPABLE(ha)) return; if (!ha->fw_major_version) diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 1692a883f4d..6fc63b98818 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -152,7 +152,7 @@ qla2300_intr_handler(int irq, void *dev_id) for (iter = 50; iter--; ) { stat = RD_REG_DWORD(®->u.isp2300.host_status); if (stat & HSR_RISC_PAUSED) { - if (pci_channel_offline(ha->pdev)) + if (unlikely(pci_channel_offline(ha->pdev))) break; hccr = RD_REG_WORD(®->hccr); @@ -1846,12 +1846,15 @@ qla24xx_intr_handler(int irq, void *dev_id) reg = &ha->iobase->isp24; status = 0; + if (unlikely(pci_channel_offline(ha->pdev))) + return IRQ_HANDLED; + spin_lock_irqsave(&ha->hardware_lock, flags); vha = pci_get_drvdata(ha->pdev); for (iter = 50; iter--; ) { stat = RD_REG_DWORD(®->host_status); if (stat & HSRX_RISC_PAUSED) { - if (pci_channel_offline(ha->pdev)) + if (unlikely(pci_channel_offline(ha->pdev))) break; hccr = RD_REG_DWORD(®->hccr); @@ -1914,6 +1917,7 @@ qla24xx_msix_rsp_q(int irq, void *dev_id) struct rsp_que *rsp; struct device_reg_24xx __iomem *reg; struct scsi_qla_host *vha; + unsigned long flags; rsp = (struct rsp_que *) dev_id; if (!rsp) { @@ -1924,15 +1928,15 @@ qla24xx_msix_rsp_q(int irq, void *dev_id) ha = rsp->hw; reg = &ha->iobase->isp24; - spin_lock_irq(&ha->hardware_lock); + spin_lock_irqsave(&ha->hardware_lock, flags); - vha = qla25xx_get_host(rsp); + vha = pci_get_drvdata(ha->pdev); qla24xx_process_response_queue(vha, rsp); if (!ha->flags.disable_msix_handshake) { WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); 
RD_REG_DWORD_RELAXED(®->hccr); } - spin_unlock_irq(&ha->hardware_lock); + spin_unlock_irqrestore(&ha->hardware_lock, flags); return IRQ_HANDLED; } @@ -1943,6 +1947,7 @@ qla25xx_msix_rsp_q(int irq, void *dev_id) struct qla_hw_data *ha; struct rsp_que *rsp; struct device_reg_24xx __iomem *reg; + unsigned long flags; rsp = (struct rsp_que *) dev_id; if (!rsp) { @@ -1955,10 +1960,10 @@ qla25xx_msix_rsp_q(int irq, void *dev_id) /* Clear the interrupt, if enabled, for this response queue */ if (rsp->options & ~BIT_6) { reg = &ha->iobase->isp24; - spin_lock_irq(&ha->hardware_lock); + spin_lock_irqsave(&ha->hardware_lock, flags); WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); RD_REG_DWORD_RELAXED(®->hccr); - spin_unlock_irq(&ha->hardware_lock); + spin_unlock_irqrestore(&ha->hardware_lock, flags); } queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work); @@ -1976,6 +1981,7 @@ qla24xx_msix_default(int irq, void *dev_id) uint32_t stat; uint32_t hccr; uint16_t mb[4]; + unsigned long flags; rsp = (struct rsp_que *) dev_id; if (!rsp) { @@ -1987,12 +1993,12 @@ qla24xx_msix_default(int irq, void *dev_id) reg = &ha->iobase->isp24; status = 0; - spin_lock_irq(&ha->hardware_lock); + spin_lock_irqsave(&ha->hardware_lock, flags); vha = pci_get_drvdata(ha->pdev); do { stat = RD_REG_DWORD(®->host_status); if (stat & HSRX_RISC_PAUSED) { - if (pci_channel_offline(ha->pdev)) + if (unlikely(pci_channel_offline(ha->pdev))) break; hccr = RD_REG_DWORD(®->hccr); @@ -2036,7 +2042,7 @@ qla24xx_msix_default(int irq, void *dev_id) } WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); } while (0); - spin_unlock_irq(&ha->hardware_lock); + spin_unlock_irqrestore(&ha->hardware_lock, flags); if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) && (status & MBX_INTERRUPT) && ha->flags.mbox_int) { @@ -2274,30 +2280,3 @@ int qla25xx_request_irq(struct rsp_que *rsp) msix->rsp = rsp; return ret; } - -struct scsi_qla_host * -qla25xx_get_host(struct rsp_que *rsp) -{ - srb_t *sp; - struct qla_hw_data *ha = rsp->hw; - struct scsi_qla_host *vha = NULL; - struct sts_entry_24xx *pkt; - struct req_que *req; - uint16_t que; - uint32_t handle; - - pkt = (struct sts_entry_24xx *) rsp->ring_ptr; - que = MSW(pkt->handle); - handle = (uint32_t) LSW(pkt->handle); - req = ha->req_q_map[que]; - if (handle < MAX_OUTSTANDING_COMMANDS) { - sp = req->outstanding_cmds[handle]; - if (sp) - return sp->fcport->vha; - else - goto base_que; - } -base_que: - vha = pci_get_drvdata(ha->pdev); - return vha; -} diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index 05d595d9a7e..056e4d4505f 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -56,6 +56,12 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) DEBUG11(printk("%s(%ld): entered.\n", __func__, base_vha->host_no)); + if (ha->flags.pci_channel_io_perm_failure) { + DEBUG(printk("%s(%ld): Perm failure on EEH, timeout MBX " + "Exiting.\n", __func__, vha->host_no)); + return QLA_FUNCTION_TIMEOUT; + } + /* * Wait for active mailbox commands to finish by waiting at most tov * seconds. This is to serialize actual issuing of mailbox cmds during @@ -154,10 +160,14 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) /* Check for pending interrupts. 
*/ qla2x00_poll(ha->rsp_q_map[0]); - if (command != MBC_LOAD_RISC_RAM_EXTENDED && - !ha->flags.mbox_int) + if (!ha->flags.mbox_int && + !(IS_QLA2200(ha) && + command == MBC_LOAD_RISC_RAM_EXTENDED)) msleep(10); } /* while */ + DEBUG17(qla_printk(KERN_WARNING, ha, + "Waited %d sec\n", + (uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ))); } /* Check whether we timed out */ @@ -227,7 +237,8 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) if (rval == QLA_FUNCTION_TIMEOUT && mcp->mb[0] != MBC_GEN_SYSTEM_ERROR) { - if (!io_lock_on || (mcp->flags & IOCTL_CMD)) { + if (!io_lock_on || (mcp->flags & IOCTL_CMD) || + ha->flags.eeh_busy) { /* not in dpc. schedule it for dpc to take over. */ DEBUG(printk("%s(%ld): timeout schedule " "isp_abort_needed.\n", __func__, @@ -237,7 +248,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) base_vha->host_no)); qla_printk(KERN_WARNING, ha, "Mailbox command timeout occurred. Scheduling ISP " - "abort.\n"); + "abort. eeh_busy: 0x%x\n", ha->flags.eeh_busy); set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags); qla2xxx_wake_dpc(vha); } else if (!abort_active) { @@ -2530,6 +2541,9 @@ qla2x00_enable_eft_trace(scsi_qla_host_t *vha, dma_addr_t eft_dma, if (!IS_FWI2_CAPABLE(vha->hw)) return QLA_FUNCTION_FAILED; + if (unlikely(pci_channel_offline(vha->hw->pdev))) + return QLA_FUNCTION_FAILED; + DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); mcp->mb[0] = MBC_TRACE_CONTROL; @@ -2565,6 +2579,9 @@ qla2x00_disable_eft_trace(scsi_qla_host_t *vha) if (!IS_FWI2_CAPABLE(vha->hw)) return QLA_FUNCTION_FAILED; + if (unlikely(pci_channel_offline(vha->hw->pdev))) + return QLA_FUNCTION_FAILED; + DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); mcp->mb[0] = MBC_TRACE_CONTROL; @@ -2595,6 +2612,9 @@ qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma, if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw)) return QLA_FUNCTION_FAILED; + if (unlikely(pci_channel_offline(vha->hw->pdev))) + return QLA_FUNCTION_FAILED; + DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); mcp->mb[0] = MBC_TRACE_CONTROL; @@ -2639,6 +2659,9 @@ qla2x00_disable_fce_trace(scsi_qla_host_t *vha, uint64_t *wr, uint64_t *rd) if (!IS_FWI2_CAPABLE(vha->hw)) return QLA_FUNCTION_FAILED; + if (unlikely(pci_channel_offline(vha->hw->pdev))) + return QLA_FUNCTION_FAILED; + DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); mcp->mb[0] = MBC_TRACE_CONTROL; @@ -3643,3 +3666,36 @@ qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data) return rval; } + +int +qla2x00_get_data_rate(scsi_qla_host_t *vha) +{ + int rval; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + struct qla_hw_data *ha = vha->hw; + + if (!IS_FWI2_CAPABLE(ha)) + return QLA_FUNCTION_FAILED; + + DEBUG11(printk(KERN_INFO "%s(%ld): entered.\n", __func__, vha->host_no)); + + mcp->mb[0] = MBC_DATA_RATE; + mcp->mb[1] = 0; + mcp->out_mb = MBX_1|MBX_0; + mcp->in_mb = MBX_2|MBX_1|MBX_0; + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + rval = qla2x00_mailbox_command(vha, mcp); + if (rval != QLA_SUCCESS) { + DEBUG2_3_11(printk(KERN_INFO "%s(%ld): failed=%x mb[0]=%x.\n", + __func__, vha->host_no, rval, mcp->mb[0])); + } else { + DEBUG11(printk(KERN_INFO + "%s(%ld): done.\n", __func__, vha->host_no)); + if (mcp->mb[1] != 0x7) + ha->link_data_rate = mcp->mb[1]; + } + + return rval; +} diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c index 2a4c7f4e7b6..ff17dee2861 100644 --- a/drivers/scsi/qla2xxx/qla_mid.c +++ b/drivers/scsi/qla2xxx/qla_mid.c 
@@ -636,11 +636,15 @@ failed: static void qla_do_work(struct work_struct *work) { + unsigned long flags; struct rsp_que *rsp = container_of(work, struct rsp_que, q_work); struct scsi_qla_host *vha; + struct qla_hw_data *ha = rsp->hw; - vha = qla25xx_get_host(rsp); + spin_lock_irqsave(&rsp->hw->hardware_lock, flags); + vha = pci_get_drvdata(ha->pdev); qla24xx_process_response_queue(vha, rsp); + spin_unlock_irqrestore(&rsp->hw->hardware_lock, flags); } /* create response queue */ diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 2f873d23732..8529eb1f3cd 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -475,11 +475,11 @@ qla2xxx_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) srb_t *sp; int rval; - if (unlikely(pci_channel_offline(ha->pdev))) { - if (ha->pdev->error_state == pci_channel_io_frozen) - cmd->result = DID_REQUEUE << 16; - else + if (ha->flags.eeh_busy) { + if (ha->flags.pci_channel_io_perm_failure) cmd->result = DID_NO_CONNECT << 16; + else + cmd->result = DID_REQUEUE << 16; goto qc24_fail_command; } @@ -552,8 +552,15 @@ qla2x00_eh_wait_on_command(struct scsi_cmnd *cmd) #define ABORT_POLLING_PERIOD 1000 #define ABORT_WAIT_ITER ((10 * 1000) / (ABORT_POLLING_PERIOD)) unsigned long wait_iter = ABORT_WAIT_ITER; + scsi_qla_host_t *vha = shost_priv(cmd->device->host); + struct qla_hw_data *ha = vha->hw; int ret = QLA_SUCCESS; + if (unlikely(pci_channel_offline(ha->pdev)) || ha->flags.eeh_busy) { + DEBUG17(qla_printk(KERN_WARNING, ha, "return:eh_wait\n")); + return ret; + } + while (CMD_SP(cmd) && wait_iter--) { msleep(ABORT_POLLING_PERIOD); } @@ -1181,7 +1188,6 @@ qla2xxx_slave_configure(struct scsi_device *sdev) scsi_qla_host_t *vha = shost_priv(sdev->host); struct qla_hw_data *ha = vha->hw; struct fc_rport *rport = starget_to_rport(sdev->sdev_target); - fc_port_t *fcport = *(fc_port_t **)rport->dd_data; struct req_que *req = vha->req; if (sdev->tagged_supported) @@ -1190,8 +1196,6 @@ qla2xxx_slave_configure(struct scsi_device *sdev) scsi_deactivate_tcq(sdev, req->max_q_depth); rport->dev_loss_tmo = ha->port_down_retry_count; - if (sdev->type == TYPE_TAPE) - fcport->flags |= FCF_TAPE_PRESENT; return 0; } @@ -1810,6 +1814,13 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) /* Set ISP-type information. */ qla2x00_set_isp_flags(ha); + + /* Set EEH reset type to fundamental if required by hba */ + if ( IS_QLA24XX(ha) || IS_QLA25XX(ha) || IS_QLA81XX(ha)) { + pdev->needs_freset = 1; + pci_save_state(pdev); + } + /* Configure PCI I/O space */ ret = qla2x00_iospace_config(ha); if (ret) @@ -2174,6 +2185,24 @@ qla2x00_free_device(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; + qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16); + + /* Disable timer */ + if (vha->timer_active) + qla2x00_stop_timer(vha); + + /* Kill the kernel thread for this host */ + if (ha->dpc_thread) { + struct task_struct *t = ha->dpc_thread; + + /* + * qla2xxx_wake_dpc checks for ->dpc_thread + * so we need to zero it out. + */ + ha->dpc_thread = NULL; + kthread_stop(t); + } + qla25xx_delete_queues(vha); if (ha->flags.fce_enabled) @@ -2185,6 +2214,8 @@ qla2x00_free_device(scsi_qla_host_t *vha) /* Stop currently executing firmware. 
    qla2x00_try_to_stop_firmware(vha);
 
+   vha->flags.online = 0;
+
    /* turn-off interrupts on the card */
    if (ha->interrupts_on)
        ha->isp_ops->disable_intrs(ha);
@@ -2771,7 +2802,7 @@ void qla2x00_relogin(struct scsi_qla_host *vha)
 
            fcport->login_retry--;
            if (fcport->flags & FCF_FABRIC_DEVICE) {
-               if (fcport->flags & FCF_TAPE_PRESENT)
+               if (fcport->flags & FCF_FCP2_DEVICE)
                    ha->isp_ops->fabric_logout(vha,
                            fcport->loop_id,
                            fcport->d_id.b.domain,
@@ -2859,6 +2890,13 @@ qla2x00_do_dpc(void *data)
        if (!base_vha->flags.init_done)
            continue;
 
+       if (ha->flags.eeh_busy) {
+           DEBUG17(qla_printk(KERN_WARNING, ha,
+               "qla2x00_do_dpc: dpc_flags: %lx\n",
+               base_vha->dpc_flags));
+           continue;
+       }
+
        DEBUG3(printk("scsi(%ld): DPC handler\n", base_vha->host_no));
 
        ha->dpc_active = 1;
@@ -3049,8 +3087,13 @@ qla2x00_timer(scsi_qla_host_t *vha)
    int     index;
    srb_t       *sp;
    int     t;
+   uint16_t    w;
    struct qla_hw_data *ha = vha->hw;
    struct req_que *req;
+
+   /* Hardware read to raise pending EEH errors during mailbox waits. */
+   if (!pci_channel_offline(ha->pdev))
+       pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
    /*
     * Ports - Port down timer.
     *
@@ -3095,7 +3138,10 @@ qla2x00_timer(scsi_qla_host_t *vha)
            if (!IS_QLA2100(ha) && vha->link_down_timeout)
                atomic_set(&vha->loop_state, LOOP_DEAD);
 
-           /* Schedule an ISP abort to return any tape commands. */
+           /*
+            * Schedule an ISP abort to return any FCP2-device
+            * commands.
+            */
            /* NPIV - scan physical port only */
            if (!vha->vp_idx) {
                spin_lock_irqsave(&ha->hardware_lock,
@@ -3112,7 +3158,7 @@ qla2x00_timer(scsi_qla_host_t *vha)
                    if (sp->ctx)
                        continue;
                    sfcp = sp->fcport;
-                   if (!(sfcp->flags & FCF_TAPE_PRESENT))
+                   if (!(sfcp->flags & FCF_FCP2_DEVICE))
                        continue;
 
                    set_bit(ISP_ABORT_NEEDED,
@@ -3252,16 +3298,23 @@ qla2x00_release_firmware(void)
 static pci_ers_result_t
 qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
 {
-   scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
+   scsi_qla_host_t *vha = pci_get_drvdata(pdev);
+   struct qla_hw_data *ha = vha->hw;
+
+   DEBUG2(qla_printk(KERN_WARNING, ha, "error_detected:state %x\n",
+       state));
 
    switch (state) {
    case pci_channel_io_normal:
+       ha->flags.eeh_busy = 0;
        return PCI_ERS_RESULT_CAN_RECOVER;
    case pci_channel_io_frozen:
+       ha->flags.eeh_busy = 1;
        pci_disable_device(pdev);
        return PCI_ERS_RESULT_NEED_RESET;
    case pci_channel_io_perm_failure:
-       qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16);
+       ha->flags.pci_channel_io_perm_failure = 1;
+       qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
        return PCI_ERS_RESULT_DISCONNECT;
    }
    return PCI_ERS_RESULT_NEED_RESET;
@@ -3312,6 +3365,8 @@ qla2xxx_pci_slot_reset(struct pci_dev *pdev)
    struct qla_hw_data *ha = base_vha->hw;
    int rc;
 
+   DEBUG17(qla_printk(KERN_WARNING, ha, "slot_reset\n"));
+
    if (ha->mem_only)
        rc = pci_enable_device_mem(pdev);
    else
@@ -3320,19 +3375,33 @@ qla2xxx_pci_slot_reset(struct pci_dev *pdev)
    if (rc) {
        qla_printk(KERN_WARNING, ha,
            "Can't re-enable PCI device after reset.\n");
-
        return ret;
    }
-   pci_set_master(pdev);
 
    if (ha->isp_ops->pci_config(base_vha))
        return ret;
 
+#ifdef QL_DEBUG_LEVEL_17
+   {
+       uint8_t b;
+       uint32_t i;
+
+       printk("slot_reset_1: ");
+       for (i = 0; i < 256; i++) {
+           pci_read_config_byte(ha->pdev, i, &b);
+           printk("%s%02x", (i%16) ? " " : "\n", b);
" " : "\n", b); + } + printk("\n"); + } +#endif set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); if (qla2x00_abort_isp(base_vha) == QLA_SUCCESS) ret = PCI_ERS_RESULT_RECOVERED; clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); + DEBUG17(qla_printk(KERN_WARNING, ha, + "slot_reset-return:ret=%x\n", ret)); + return ret; } @@ -3343,12 +3412,17 @@ qla2xxx_pci_resume(struct pci_dev *pdev) struct qla_hw_data *ha = base_vha->hw; int ret; + DEBUG17(qla_printk(KERN_WARNING, ha, "pci_resume\n")); + ret = qla2x00_wait_for_hba_online(base_vha); if (ret != QLA_SUCCESS) { qla_printk(KERN_ERR, ha, "the device failed to resume I/O " "from slot/link_reset"); } + + ha->flags.eeh_busy = 0; + pci_cleanup_aer_uncorrect_error_status(pdev); } diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c index 010e69b29af..371dc895972 100644 --- a/drivers/scsi/qla2xxx/qla_sup.c +++ b/drivers/scsi/qla2xxx/qla_sup.c @@ -2292,11 +2292,14 @@ qla25xx_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf, uint32_t faddr, left, burst; struct qla_hw_data *ha = vha->hw; + if (IS_QLA25XX(ha) || IS_QLA81XX(ha)) + goto try_fast; if (offset & 0xfff) goto slow_read; if (length < OPTROM_BURST_SIZE) goto slow_read; +try_fast: optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE, &optrom_dma, GFP_KERNEL); if (!optrom) { diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h index c482220f7ee..ed36279a33c 100644 --- a/drivers/scsi/qla2xxx/qla_version.h +++ b/drivers/scsi/qla2xxx/qla_version.h @@ -7,7 +7,7 @@ /* * Driver version */ -#define QLA2XXX_VERSION "8.03.01-k8" +#define QLA2XXX_VERSION "8.03.01-k10" #define QLA_DRIVER_MAJOR_VER 8 #define QLA_DRIVER_MINOR_VER 3 diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index d8927681ec8..c6642423cc6 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -749,9 +749,9 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) */ req->next_rq->resid_len = scsi_in(cmd)->resid; + scsi_release_buffers(cmd); blk_end_request_all(req, 0); - scsi_release_buffers(cmd); scsi_next_command(cmd); return; } diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c index ddfcecd5099..653f22a8deb 100644 --- a/drivers/scsi/scsi_transport_fc.c +++ b/drivers/scsi/scsi_transport_fc.c @@ -3527,7 +3527,10 @@ fc_bsg_job_timeout(struct request *req) if (!done && i->f->bsg_timeout) { /* call LLDD to abort the i/o as it has timed out */ err = i->f->bsg_timeout(job); - if (err) + if (err == -EAGAIN) { + job->ref_cnt--; + return BLK_EH_RESET_TIMER; + } else if (err) printk(KERN_ERR "ERROR: FC BSG request timeout - LLD " "abort failed with status %d\n", err); } diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c index 3058bb1aff9..fd7b15be764 100644 --- a/drivers/scsi/stex.c +++ b/drivers/scsi/stex.c @@ -623,6 +623,11 @@ stex_queuecommand(struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd *)) } break; case INQUIRY: + if (lun >= host->max_lun) { + cmd->result = DID_NO_CONNECT << 16; + done(cmd); + return 0; + } if (id != host->max_id - 1) break; if (!lun && !cmd->device->channel && |