Diffstat (limited to 'drivers/scsi')
-rw-r--r--  drivers/scsi/3w-9xxx.c | 40
-rw-r--r--  drivers/scsi/3w-9xxx.h | 9
-rw-r--r--  drivers/scsi/Kconfig | 1
-rw-r--r--  drivers/scsi/Makefile | 2
-rw-r--r--  drivers/scsi/advansys.c | 2
-rw-r--r--  drivers/scsi/aha152x.c | 12
-rw-r--r--  drivers/scsi/aic94xx/aic94xx.h | 4
-rw-r--r--  drivers/scsi/aic94xx/aic94xx_hwi.c | 2
-rw-r--r--  drivers/scsi/aic94xx/aic94xx_scb.c | 46
-rw-r--r--  drivers/scsi/aic94xx/aic94xx_task.c | 2
-rw-r--r--  drivers/scsi/aic94xx/aic94xx_tmf.c | 18
-rw-r--r--  drivers/scsi/arm/acornscsi-io.S | 2
-rw-r--r--  drivers/scsi/arm/fas216.c | 4
-rw-r--r--  drivers/scsi/ch.c | 1
-rw-r--r--  drivers/scsi/device_handler/Kconfig | 8
-rw-r--r--  drivers/scsi/device_handler/Makefile | 1
-rw-r--r--  drivers/scsi/device_handler/scsi_dh.c | 446
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_alua.c | 802
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_emc.c | 644
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_hp_sw.c | 348
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_rdac.c | 262
-rw-r--r--  drivers/scsi/dpt_i2o.c | 4
-rw-r--r--  drivers/scsi/hosts.c | 7
-rw-r--r--  drivers/scsi/hptiop.c | 8
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvfc.c | 223
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvfc.h | 44
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvscsi.c | 49
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvscsi.h | 2
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvstgt.c | 4
-rw-r--r--  drivers/scsi/ibmvscsi/rpa_vscsi.c | 2
-rw-r--r--  drivers/scsi/ide-scsi.c | 68
-rw-r--r--  drivers/scsi/imm.c | 2
-rw-r--r--  drivers/scsi/ipr.h | 6
-rw-r--r--  drivers/scsi/libsas/sas_ata.c | 16
-rw-r--r--  drivers/scsi/libsas/sas_expander.c | 12
-rw-r--r--  drivers/scsi/libsas/sas_port.c | 4
-rw-r--r--  drivers/scsi/libsas/sas_scsi_host.c | 30
-rw-r--r--  drivers/scsi/libsrp.c | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c | 4
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c | 6
-rw-r--r--  drivers/scsi/megaraid/mega_common.h | 2
-rw-r--r--  drivers/scsi/megaraid/megaraid_mbox.c | 16
-rw-r--r--  drivers/scsi/megaraid/megaraid_mm.c | 4
-rw-r--r--  drivers/scsi/nsp32.c | 4
-rw-r--r--  drivers/scsi/nsp32_debug.c | 2
-rw-r--r--  drivers/scsi/pcmcia/nsp_cs.c | 4
-rw-r--r--  drivers/scsi/pcmcia/nsp_debug.c | 2
-rw-r--r--  drivers/scsi/ppa.c | 2
-rw-r--r--  drivers/scsi/qla1280.c | 12
-rw-r--r--  drivers/scsi/qla2xxx/qla_attr.c | 119
-rw-r--r--  drivers/scsi/qla2xxx/qla_dbg.c | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_def.h | 12
-rw-r--r--  drivers/scsi/qla2xxx/qla_gbl.h | 5
-rw-r--r--  drivers/scsi/qla2xxx/qla_gs.c | 6
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c | 133
-rw-r--r--  drivers/scsi/qla2xxx/qla_iocb.c | 14
-rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c | 4
-rw-r--r--  drivers/scsi/qla2xxx/qla_mbx.c | 9
-rw-r--r--  drivers/scsi/qla2xxx/qla_mid.c | 16
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 94
-rw-r--r--  drivers/scsi/qla2xxx/qla_sup.c | 48
-rw-r--r--  drivers/scsi/qla2xxx/qla_version.h | 2
-rw-r--r--  drivers/scsi/qla4xxx/ql4_os.c | 4
-rw-r--r--  drivers/scsi/scsi.c | 55
-rw-r--r--  drivers/scsi/scsi_debug.c | 12
-rw-r--r--  drivers/scsi/scsi_devinfo.c | 6
-rw-r--r--  drivers/scsi/scsi_error.c | 34
-rw-r--r--  drivers/scsi/scsi_lib.c | 55
-rw-r--r--  drivers/scsi/scsi_netlink.c | 8
-rw-r--r--  drivers/scsi/scsi_priv.h | 1
-rw-r--r--  drivers/scsi/scsi_proc.c | 4
-rw-r--r--  drivers/scsi/scsi_scan.c | 13
-rw-r--r--  drivers/scsi/scsi_sysfs.c | 4
-rw-r--r--  drivers/scsi/scsi_tgt_priv.h | 2
-rw-r--r--  drivers/scsi/scsi_transport_fc.c | 21
-rw-r--r--  drivers/scsi/scsi_transport_iscsi.c | 8
-rw-r--r--  drivers/scsi/scsi_transport_sas.c | 4
-rw-r--r--  drivers/scsi/scsi_transport_spi.c | 8
-rw-r--r--  drivers/scsi/sd.c | 291
-rw-r--r--  drivers/scsi/sd.h | 54
-rw-r--r--  drivers/scsi/sd_dif.c | 538
-rw-r--r--  drivers/scsi/ses.c | 18
-rw-r--r--  drivers/scsi/sg.c | 2
-rw-r--r--  drivers/scsi/st.c | 11
-rw-r--r--  drivers/scsi/stex.c | 2
-rw-r--r--  drivers/scsi/sun_esp.c | 1
-rw-r--r--  drivers/scsi/sym53c8xx_2/sym_hipd.c | 2
-rw-r--r--  drivers/scsi/tmscsim.c | 8
-rw-r--r--  drivers/scsi/wd7000.c | 8
-rw-r--r--  drivers/scsi/zalon.c | 8
91 files changed, 3810 insertions(+), 1035 deletions(-)
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index 7045511f9ad..b92c19bb687 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -4,7 +4,7 @@
Written By: Adam Radford <linuxraid@amcc.com>
Modifications By: Tom Couch <linuxraid@amcc.com>
- Copyright (C) 2004-2007 Applied Micro Circuits Corporation.
+ Copyright (C) 2004-2008 Applied Micro Circuits Corporation.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -71,6 +71,10 @@
Add support for 9650SE controllers.
2.26.02.009 - Fix dma mask setting to fallback to 32-bit if 64-bit fails.
2.26.02.010 - Add support for 9690SA controllers.
+ 2.26.02.011 - Increase max AENs drained to 256.
+ Add MSI support and "use_msi" module parameter.
+ Fix bug in twa_get_param() on 4GB+.
+ Use pci_resource_len() for ioremap().
*/
#include <linux/module.h>
@@ -95,7 +99,7 @@
#include "3w-9xxx.h"
/* Globals */
-#define TW_DRIVER_VERSION "2.26.02.010"
+#define TW_DRIVER_VERSION "2.26.02.011"
static TW_Device_Extension *twa_device_extension_list[TW_MAX_SLOT];
static unsigned int twa_device_extension_count;
static int twa_major = -1;
@@ -107,6 +111,10 @@ MODULE_DESCRIPTION ("3ware 9000 Storage Controller Linux Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(TW_DRIVER_VERSION);
+static int use_msi = 0;
+module_param(use_msi, int, S_IRUGO);
+MODULE_PARM_DESC(use_msi, "Use Message Signaled Interrupts. Default: 0");
+
/* Function prototypes */
static void twa_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header);
static int twa_aen_read_queue(TW_Device_Extension *tw_dev, int request_id);
@@ -1038,7 +1046,6 @@ static void *twa_get_param(TW_Device_Extension *tw_dev, int request_id, int tabl
TW_Command_Full *full_command_packet;
TW_Command *command_packet;
TW_Param_Apache *param;
- unsigned long param_value;
void *retval = NULL;
/* Setup the command packet */
@@ -1057,9 +1064,8 @@ static void *twa_get_param(TW_Device_Extension *tw_dev, int request_id, int tabl
param->table_id = cpu_to_le16(table_id | 0x8000);
param->parameter_id = cpu_to_le16(parameter_id);
param->parameter_size_bytes = cpu_to_le16(parameter_size_bytes);
- param_value = tw_dev->generic_buffer_phys[request_id];
- command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(param_value);
+ command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
command_packet->byte8_offset.param.sgl[0].length = cpu_to_le32(TW_SECTOR_SIZE);
/* Post the command packet to the board */
@@ -2000,7 +2006,7 @@ static int __devinit twa_probe(struct pci_dev *pdev, const struct pci_device_id
{
struct Scsi_Host *host = NULL;
TW_Device_Extension *tw_dev;
- u32 mem_addr;
+ unsigned long mem_addr, mem_len;
int retval = -ENODEV;
retval = pci_enable_device(pdev);
@@ -2045,13 +2051,16 @@ static int __devinit twa_probe(struct pci_dev *pdev, const struct pci_device_id
goto out_free_device_extension;
}
- if (pdev->device == PCI_DEVICE_ID_3WARE_9000)
+ if (pdev->device == PCI_DEVICE_ID_3WARE_9000) {
mem_addr = pci_resource_start(pdev, 1);
- else
+ mem_len = pci_resource_len(pdev, 1);
+ } else {
mem_addr = pci_resource_start(pdev, 2);
+ mem_len = pci_resource_len(pdev, 2);
+ }
/* Save base address */
- tw_dev->base_addr = ioremap(mem_addr, PAGE_SIZE);
+ tw_dev->base_addr = ioremap(mem_addr, mem_len);
if (!tw_dev->base_addr) {
TW_PRINTK(tw_dev->host, TW_DRIVER, 0x35, "Failed to ioremap");
goto out_release_mem_region;
@@ -2086,7 +2095,7 @@ static int __devinit twa_probe(struct pci_dev *pdev, const struct pci_device_id
pci_set_drvdata(pdev, host);
- printk(KERN_WARNING "3w-9xxx: scsi%d: Found a 3ware 9000 Storage Controller at 0x%x, IRQ: %d.\n",
+ printk(KERN_WARNING "3w-9xxx: scsi%d: Found a 3ware 9000 Storage Controller at 0x%lx, IRQ: %d.\n",
host->host_no, mem_addr, pdev->irq);
printk(KERN_WARNING "3w-9xxx: scsi%d: Firmware %s, BIOS %s, Ports: %d.\n",
host->host_no,
@@ -2097,6 +2106,11 @@ static int __devinit twa_probe(struct pci_dev *pdev, const struct pci_device_id
le32_to_cpu(*(int *)twa_get_param(tw_dev, 2, TW_INFORMATION_TABLE,
TW_PARAM_PORTCOUNT, TW_PARAM_PORTCOUNT_LENGTH)));
+ /* Try to enable MSI */
+ if (use_msi && (pdev->device != PCI_DEVICE_ID_3WARE_9000) &&
+ !pci_enable_msi(pdev))
+ set_bit(TW_USING_MSI, &tw_dev->flags);
+
/* Now setup the interrupt handler */
retval = request_irq(pdev->irq, twa_interrupt, IRQF_SHARED, "3w-9xxx", tw_dev);
if (retval) {
@@ -2120,6 +2134,8 @@ static int __devinit twa_probe(struct pci_dev *pdev, const struct pci_device_id
return 0;
out_remove_host:
+ if (test_bit(TW_USING_MSI, &tw_dev->flags))
+ pci_disable_msi(pdev);
scsi_remove_host(host);
out_iounmap:
iounmap(tw_dev->base_addr);
@@ -2151,6 +2167,10 @@ static void twa_remove(struct pci_dev *pdev)
/* Shutdown the card */
__twa_shutdown(tw_dev);
+ /* Disable MSI if enabled */
+ if (test_bit(TW_USING_MSI, &tw_dev->flags))
+ pci_disable_msi(pdev);
+
/* Free IO remapping */
iounmap(tw_dev->base_addr);
diff --git a/drivers/scsi/3w-9xxx.h b/drivers/scsi/3w-9xxx.h
index d14a9479e38..1729a8785fe 100644
--- a/drivers/scsi/3w-9xxx.h
+++ b/drivers/scsi/3w-9xxx.h
@@ -4,7 +4,7 @@
Written By: Adam Radford <linuxraid@amcc.com>
Modifications By: Tom Couch <linuxraid@amcc.com>
- Copyright (C) 2004-2007 Applied Micro Circuits Corporation.
+ Copyright (C) 2004-2008 Applied Micro Circuits Corporation.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -319,8 +319,8 @@ static twa_message_type twa_error_table[] = {
/* Compatibility defines */
#define TW_9000_ARCH_ID 0x5
-#define TW_CURRENT_DRIVER_SRL 30
-#define TW_CURRENT_DRIVER_BUILD 80
+#define TW_CURRENT_DRIVER_SRL 35
+#define TW_CURRENT_DRIVER_BUILD 0
#define TW_CURRENT_DRIVER_BRANCH 0
/* Phase defines */
@@ -352,8 +352,9 @@ static twa_message_type twa_error_table[] = {
#define TW_MAX_RESET_TRIES 2
#define TW_MAX_CMDS_PER_LUN 254
#define TW_MAX_RESPONSE_DRAIN 256
-#define TW_MAX_AEN_DRAIN 40
+#define TW_MAX_AEN_DRAIN 255
#define TW_IN_RESET 2
+#define TW_USING_MSI 3
#define TW_IN_ATTENTION_LOOP 4
#define TW_MAX_SECTORS 256
#define TW_AEN_WAIT_TIME 1000
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 26be540d1dd..c7f06298bd3 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -63,6 +63,7 @@ comment "SCSI support type (disk, tape, CD-ROM)"
config BLK_DEV_SD
tristate "SCSI disk support"
depends on SCSI
+ select CRC_T10DIF
---help---
If you want to use SCSI hard disks, Fibre Channel disks,
Serial ATA (SATA) or Parallel ATA (PATA) hard disks,
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index a8149677de2..72fd5043cfa 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -151,6 +151,8 @@ scsi_mod-$(CONFIG_SCSI_PROC_FS) += scsi_proc.o
scsi_tgt-y += scsi_tgt_lib.o scsi_tgt_if.o
sd_mod-objs := sd.o
+sd_mod-$(CONFIG_BLK_DEV_INTEGRITY) += sd_dif.o
+
sr_mod-objs := sr.o sr_ioctl.o sr_vendor.o
ncr53c8xx-flags-$(CONFIG_SCSI_ZALON) \
:= -DCONFIG_NCR53C8XX_PREFETCH -DSCSI_NCR_BIG_ENDIAN \
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index 8591585e5cc..218777bfc14 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -2278,7 +2278,7 @@ do { \
#define ASC_DBG(lvl, format, arg...) { \
if (asc_dbglvl >= (lvl)) \
printk(KERN_DEBUG "%s: %s: " format, DRV_NAME, \
- __FUNCTION__ , ## arg); \
+ __func__ , ## arg); \
}
#define ASC_DBG_PRT_SCSI_HOST(lvl, s) \
diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c
index 0899cb61e3d..b5a868d85eb 100644
--- a/drivers/scsi/aha152x.c
+++ b/drivers/scsi/aha152x.c
@@ -288,20 +288,20 @@ static LIST_HEAD(aha152x_host_list);
#define DO_LOCK(flags) \
do { \
if(spin_is_locked(&QLOCK)) { \
- DPRINTK(debug_intr, DEBUG_LEAD "(%s:%d) already locked at %s:%d\n", CMDINFO(CURRENT_SC), __FUNCTION__, __LINE__, QLOCKER, QLOCKERL); \
+ DPRINTK(debug_intr, DEBUG_LEAD "(%s:%d) already locked at %s:%d\n", CMDINFO(CURRENT_SC), __func__, __LINE__, QLOCKER, QLOCKERL); \
} \
- DPRINTK(debug_locking, DEBUG_LEAD "(%s:%d) locking\n", CMDINFO(CURRENT_SC), __FUNCTION__, __LINE__); \
+ DPRINTK(debug_locking, DEBUG_LEAD "(%s:%d) locking\n", CMDINFO(CURRENT_SC), __func__, __LINE__); \
spin_lock_irqsave(&QLOCK,flags); \
- DPRINTK(debug_locking, DEBUG_LEAD "(%s:%d) locked\n", CMDINFO(CURRENT_SC), __FUNCTION__, __LINE__); \
- QLOCKER=__FUNCTION__; \
+ DPRINTK(debug_locking, DEBUG_LEAD "(%s:%d) locked\n", CMDINFO(CURRENT_SC), __func__, __LINE__); \
+ QLOCKER=__func__; \
QLOCKERL=__LINE__; \
} while(0)
#define DO_UNLOCK(flags) \
do { \
- DPRINTK(debug_locking, DEBUG_LEAD "(%s:%d) unlocking (locked at %s:%d)\n", CMDINFO(CURRENT_SC), __FUNCTION__, __LINE__, QLOCKER, QLOCKERL); \
+ DPRINTK(debug_locking, DEBUG_LEAD "(%s:%d) unlocking (locked at %s:%d)\n", CMDINFO(CURRENT_SC), __func__, __LINE__, QLOCKER, QLOCKERL); \
spin_unlock_irqrestore(&QLOCK,flags); \
- DPRINTK(debug_locking, DEBUG_LEAD "(%s:%d) unlocked\n", CMDINFO(CURRENT_SC), __FUNCTION__, __LINE__); \
+ DPRINTK(debug_locking, DEBUG_LEAD "(%s:%d) unlocked\n", CMDINFO(CURRENT_SC), __func__, __LINE__); \
QLOCKER="(not locked)"; \
QLOCKERL=0; \
} while(0)
diff --git a/drivers/scsi/aic94xx/aic94xx.h b/drivers/scsi/aic94xx/aic94xx.h
index 2ef459e9cda..2863a9d2285 100644
--- a/drivers/scsi/aic94xx/aic94xx.h
+++ b/drivers/scsi/aic94xx/aic94xx.h
@@ -39,9 +39,9 @@
#ifdef ASD_ENTER_EXIT
#define ENTER printk(KERN_NOTICE "%s: ENTER %s\n", ASD_DRIVER_NAME, \
- __FUNCTION__)
+ __func__)
#define EXIT printk(KERN_NOTICE "%s: --EXIT %s\n", ASD_DRIVER_NAME, \
- __FUNCTION__)
+ __func__)
#else
#define ENTER
#define EXIT
diff --git a/drivers/scsi/aic94xx/aic94xx_hwi.c b/drivers/scsi/aic94xx/aic94xx_hwi.c
index 83a78222896..eb9dc3195fd 100644
--- a/drivers/scsi/aic94xx/aic94xx_hwi.c
+++ b/drivers/scsi/aic94xx/aic94xx_hwi.c
@@ -1359,7 +1359,7 @@ int asd_enable_phys(struct asd_ha_struct *asd_ha, const u8 phy_mask)
struct asd_ascb *ascb_list;
if (!phy_mask) {
- asd_printk("%s called with phy_mask of 0!?\n", __FUNCTION__);
+ asd_printk("%s called with phy_mask of 0!?\n", __func__);
return 0;
}
diff --git a/drivers/scsi/aic94xx/aic94xx_scb.c b/drivers/scsi/aic94xx/aic94xx_scb.c
index 46643319c52..ca55013b6ae 100644
--- a/drivers/scsi/aic94xx/aic94xx_scb.c
+++ b/drivers/scsi/aic94xx/aic94xx_scb.c
@@ -211,7 +211,7 @@ static void asd_form_port(struct asd_ha_struct *asd_ha, struct asd_phy *phy)
phy->asd_port = port;
}
ASD_DPRINTK("%s: updating phy_mask 0x%x for phy%d\n",
- __FUNCTION__, phy->asd_port->phy_mask, sas_phy->id);
+ __func__, phy->asd_port->phy_mask, sas_phy->id);
asd_update_port_links(asd_ha, phy);
spin_unlock_irqrestore(&asd_ha->asd_ports_lock, flags);
}
@@ -294,7 +294,7 @@ static void asd_link_reset_err_tasklet(struct asd_ascb *ascb,
struct asd_ascb *cp = asd_ascb_alloc_list(ascb->ha, &num,
GFP_ATOMIC);
if (!cp) {
- asd_printk("%s: out of memory\n", __FUNCTION__);
+ asd_printk("%s: out of memory\n", __func__);
goto out;
}
ASD_DPRINTK("phy%d: retries:0 performing link reset seq\n",
@@ -446,7 +446,7 @@ static void escb_tasklet_complete(struct asd_ascb *ascb,
struct domain_device *failed_dev = NULL;
ASD_DPRINTK("%s: REQ_TASK_ABORT, reason=0x%X\n",
- __FUNCTION__, dl->status_block[3]);
+ __func__, dl->status_block[3]);
/*
* Find the task that caused the abort and abort it first.
@@ -474,7 +474,7 @@ static void escb_tasklet_complete(struct asd_ascb *ascb,
if (!failed_dev) {
ASD_DPRINTK("%s: Can't find task (tc=%d) to abort!\n",
- __FUNCTION__, tc_abort);
+ __func__, tc_abort);
goto out;
}
@@ -502,7 +502,7 @@ static void escb_tasklet_complete(struct asd_ascb *ascb,
conn_handle = *((u16*)(&dl->status_block[1]));
conn_handle = le16_to_cpu(conn_handle);
- ASD_DPRINTK("%s: REQ_DEVICE_RESET, reason=0x%X\n", __FUNCTION__,
+ ASD_DPRINTK("%s: REQ_DEVICE_RESET, reason=0x%X\n", __func__,
dl->status_block[3]);
/* Find the last pending task for the device... */
@@ -522,7 +522,7 @@ static void escb_tasklet_complete(struct asd_ascb *ascb,
if (!last_dev_task) {
ASD_DPRINTK("%s: Device reset for idle device %d?\n",
- __FUNCTION__, conn_handle);
+ __func__, conn_handle);
goto out;
}
@@ -549,10 +549,10 @@ static void escb_tasklet_complete(struct asd_ascb *ascb,
goto out;
}
case SIGNAL_NCQ_ERROR:
- ASD_DPRINTK("%s: SIGNAL_NCQ_ERROR\n", __FUNCTION__);
+ ASD_DPRINTK("%s: SIGNAL_NCQ_ERROR\n", __func__);
goto out;
case CLEAR_NCQ_ERROR:
- ASD_DPRINTK("%s: CLEAR_NCQ_ERROR\n", __FUNCTION__);
+ ASD_DPRINTK("%s: CLEAR_NCQ_ERROR\n", __func__);
goto out;
}
@@ -560,26 +560,26 @@ static void escb_tasklet_complete(struct asd_ascb *ascb,
switch (sb_opcode) {
case BYTES_DMAED:
- ASD_DPRINTK("%s: phy%d: BYTES_DMAED\n", __FUNCTION__, phy_id);
+ ASD_DPRINTK("%s: phy%d: BYTES_DMAED\n", __func__, phy_id);
asd_bytes_dmaed_tasklet(ascb, dl, edb, phy_id);
break;
case PRIMITIVE_RECVD:
- ASD_DPRINTK("%s: phy%d: PRIMITIVE_RECVD\n", __FUNCTION__,
+ ASD_DPRINTK("%s: phy%d: PRIMITIVE_RECVD\n", __func__,
phy_id);
asd_primitive_rcvd_tasklet(ascb, dl, phy_id);
break;
case PHY_EVENT:
- ASD_DPRINTK("%s: phy%d: PHY_EVENT\n", __FUNCTION__, phy_id);
+ ASD_DPRINTK("%s: phy%d: PHY_EVENT\n", __func__, phy_id);
asd_phy_event_tasklet(ascb, dl);
break;
case LINK_RESET_ERROR:
- ASD_DPRINTK("%s: phy%d: LINK_RESET_ERROR\n", __FUNCTION__,
+ ASD_DPRINTK("%s: phy%d: LINK_RESET_ERROR\n", __func__,
phy_id);
asd_link_reset_err_tasklet(ascb, dl, phy_id);
break;
case TIMER_EVENT:
ASD_DPRINTK("%s: phy%d: TIMER_EVENT, lost dw sync\n",
- __FUNCTION__, phy_id);
+ __func__, phy_id);
asd_turn_led(asd_ha, phy_id, 0);
/* the device is gone */
sas_phy_disconnected(sas_phy);
@@ -587,7 +587,7 @@ static void escb_tasklet_complete(struct asd_ascb *ascb,
sas_ha->notify_port_event(sas_phy, PORTE_TIMER_EVENT);
break;
default:
- ASD_DPRINTK("%s: phy%d: unknown event:0x%x\n", __FUNCTION__,
+ ASD_DPRINTK("%s: phy%d: unknown event:0x%x\n", __func__,
phy_id, sb_opcode);
ASD_DPRINTK("edb is 0x%x! dl->opcode is 0x%x\n",
edb, dl->opcode);
@@ -654,7 +654,7 @@ static void control_phy_tasklet_complete(struct asd_ascb *ascb,
if (status != 0) {
ASD_DPRINTK("%s: phy%d status block opcode:0x%x\n",
- __FUNCTION__, phy_id, status);
+ __func__, phy_id, status);
goto out;
}
@@ -663,7 +663,7 @@ static void control_phy_tasklet_complete(struct asd_ascb *ascb,
asd_ha->hw_prof.enabled_phys &= ~(1 << phy_id);
asd_turn_led(asd_ha, phy_id, 0);
asd_control_led(asd_ha, phy_id, 0);
- ASD_DPRINTK("%s: disable phy%d\n", __FUNCTION__, phy_id);
+ ASD_DPRINTK("%s: disable phy%d\n", __func__, phy_id);
break;
case ENABLE_PHY:
@@ -673,40 +673,40 @@ static void control_phy_tasklet_complete(struct asd_ascb *ascb,
get_lrate_mode(phy, oob_mode);
asd_turn_led(asd_ha, phy_id, 1);
ASD_DPRINTK("%s: phy%d, lrate:0x%x, proto:0x%x\n",
- __FUNCTION__, phy_id,phy->sas_phy.linkrate,
+ __func__, phy_id,phy->sas_phy.linkrate,
phy->sas_phy.iproto);
} else if (oob_status & CURRENT_SPINUP_HOLD) {
asd_ha->hw_prof.enabled_phys |= (1 << phy_id);
asd_turn_led(asd_ha, phy_id, 1);
- ASD_DPRINTK("%s: phy%d, spinup hold\n", __FUNCTION__,
+ ASD_DPRINTK("%s: phy%d, spinup hold\n", __func__,
phy_id);
} else if (oob_status & CURRENT_ERR_MASK) {
asd_turn_led(asd_ha, phy_id, 0);
ASD_DPRINTK("%s: phy%d: error: oob status:0x%02x\n",
- __FUNCTION__, phy_id, oob_status);
+ __func__, phy_id, oob_status);
} else if (oob_status & (CURRENT_HOT_PLUG_CNCT
| CURRENT_DEVICE_PRESENT)) {
asd_ha->hw_prof.enabled_phys |= (1 << phy_id);
asd_turn_led(asd_ha, phy_id, 1);
ASD_DPRINTK("%s: phy%d: hot plug or device present\n",
- __FUNCTION__, phy_id);
+ __func__, phy_id);
} else {
asd_ha->hw_prof.enabled_phys |= (1 << phy_id);
asd_turn_led(asd_ha, phy_id, 0);
ASD_DPRINTK("%s: phy%d: no device present: "
"oob_status:0x%x\n",
- __FUNCTION__, phy_id, oob_status);
+ __func__, phy_id, oob_status);
}
break;
case RELEASE_SPINUP_HOLD:
case PHY_NO_OP:
case EXECUTE_HARD_RESET:
- ASD_DPRINTK("%s: phy%d: sub_func:0x%x\n", __FUNCTION__,
+ ASD_DPRINTK("%s: phy%d: sub_func:0x%x\n", __func__,
phy_id, control_phy->sub_func);
/* XXX finish */
break;
default:
- ASD_DPRINTK("%s: phy%d: sub_func:0x%x?\n", __FUNCTION__,
+ ASD_DPRINTK("%s: phy%d: sub_func:0x%x?\n", __func__,
phy_id, control_phy->sub_func);
break;
}
diff --git a/drivers/scsi/aic94xx/aic94xx_task.c b/drivers/scsi/aic94xx/aic94xx_task.c
index 326765c9caf..75d20f72501 100644
--- a/drivers/scsi/aic94xx/aic94xx_task.c
+++ b/drivers/scsi/aic94xx/aic94xx_task.c
@@ -320,7 +320,7 @@ Again:
case TC_RESUME:
case TC_PARTIAL_SG_LIST:
default:
- ASD_DPRINTK("%s: dl opcode: 0x%x?\n", __FUNCTION__, opcode);
+ ASD_DPRINTK("%s: dl opcode: 0x%x?\n", __func__, opcode);
break;
}
diff --git a/drivers/scsi/aic94xx/aic94xx_tmf.c b/drivers/scsi/aic94xx/aic94xx_tmf.c
index 633ff40c736..d4640ef6d44 100644
--- a/drivers/scsi/aic94xx/aic94xx_tmf.c
+++ b/drivers/scsi/aic94xx/aic94xx_tmf.c
@@ -75,12 +75,12 @@ static void asd_clear_nexus_tasklet_complete(struct asd_ascb *ascb,
struct done_list_struct *dl)
{
struct tasklet_completion_status *tcs = ascb->uldd_task;
- ASD_DPRINTK("%s: here\n", __FUNCTION__);
+ ASD_DPRINTK("%s: here\n", __func__);
if (!del_timer(&ascb->timer)) {
- ASD_DPRINTK("%s: couldn't delete timer\n", __FUNCTION__);
+ ASD_DPRINTK("%s: couldn't delete timer\n", __func__);
return;
}
- ASD_DPRINTK("%s: opcode: 0x%x\n", __FUNCTION__, dl->opcode);
+ ASD_DPRINTK("%s: opcode: 0x%x\n", __func__, dl->opcode);
tcs->dl_opcode = dl->opcode;
complete(ascb->completion);
asd_ascb_free(ascb);
@@ -91,7 +91,7 @@ static void asd_clear_nexus_timedout(unsigned long data)
struct asd_ascb *ascb = (void *)data;
struct tasklet_completion_status *tcs = ascb->uldd_task;
- ASD_DPRINTK("%s: here\n", __FUNCTION__);
+ ASD_DPRINTK("%s: here\n", __func__);
tcs->dl_opcode = TMF_RESP_FUNC_FAILED;
complete(ascb->completion);
}
@@ -103,7 +103,7 @@ static void asd_clear_nexus_timedout(unsigned long data)
DECLARE_COMPLETION_ONSTACK(completion); \
DECLARE_TCS(tcs); \
\
- ASD_DPRINTK("%s: PRE\n", __FUNCTION__); \
+ ASD_DPRINTK("%s: PRE\n", __func__); \
res = 1; \
ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL); \
if (!ascb) \
@@ -115,12 +115,12 @@ static void asd_clear_nexus_timedout(unsigned long data)
scb->header.opcode = CLEAR_NEXUS
#define CLEAR_NEXUS_POST \
- ASD_DPRINTK("%s: POST\n", __FUNCTION__); \
+ ASD_DPRINTK("%s: POST\n", __func__); \
res = asd_enqueue_internal(ascb, asd_clear_nexus_tasklet_complete, \
asd_clear_nexus_timedout); \
if (res) \
goto out_err; \
- ASD_DPRINTK("%s: clear nexus posted, waiting...\n", __FUNCTION__); \
+ ASD_DPRINTK("%s: clear nexus posted, waiting...\n", __func__); \
wait_for_completion(&completion); \
res = tcs.dl_opcode; \
if (res == TC_NO_ERROR) \
@@ -417,7 +417,7 @@ int asd_abort_task(struct sas_task *task)
if (task->task_state_flags & SAS_TASK_STATE_DONE) {
spin_unlock_irqrestore(&task->task_state_lock, flags);
res = TMF_RESP_FUNC_COMPLETE;
- ASD_DPRINTK("%s: task 0x%p done\n", __FUNCTION__, task);
+ ASD_DPRINTK("%s: task 0x%p done\n", __func__, task);
goto out_done;
}
spin_unlock_irqrestore(&task->task_state_lock, flags);
@@ -481,7 +481,7 @@ int asd_abort_task(struct sas_task *task)
if (task->task_state_flags & SAS_TASK_STATE_DONE) {
spin_unlock_irqrestore(&task->task_state_lock, flags);
res = TMF_RESP_FUNC_COMPLETE;
- ASD_DPRINTK("%s: task 0x%p done\n", __FUNCTION__, task);
+ ASD_DPRINTK("%s: task 0x%p done\n", __func__, task);
goto out_done;
}
spin_unlock_irqrestore(&task->task_state_lock, flags);
diff --git a/drivers/scsi/arm/acornscsi-io.S b/drivers/scsi/arm/acornscsi-io.S
index 5cebe310526..22171b2110a 100644
--- a/drivers/scsi/arm/acornscsi-io.S
+++ b/drivers/scsi/arm/acornscsi-io.S
@@ -8,7 +8,7 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
-#include <asm/hardware.h>
+#include <mach/hardware.h>
#if defined(__APCS_32__)
#define LOADREGS(t,r,l...) ldm##t r, l
diff --git a/drivers/scsi/arm/fas216.c b/drivers/scsi/arm/fas216.c
index a715632e19d..47754260228 100644
--- a/drivers/scsi/arm/fas216.c
+++ b/drivers/scsi/arm/fas216.c
@@ -240,7 +240,7 @@ static void __fas216_checkmagic(FAS216_Info *info, const char *func)
panic("scsi memory space corrupted in %s", func);
}
}
-#define fas216_checkmagic(info) __fas216_checkmagic((info), __FUNCTION__)
+#define fas216_checkmagic(info) __fas216_checkmagic((info), __func__)
#else
#define fas216_checkmagic(info)
#endif
@@ -2658,7 +2658,7 @@ int fas216_eh_host_reset(struct scsi_cmnd *SCpnt)
fas216_checkmagic(info);
printk("scsi%d.%c: %s: resetting host\n",
- info->host->host_no, '0' + SCpnt->device->id, __FUNCTION__);
+ info->host->host_no, '0' + SCpnt->device->id, __func__);
/*
* Reset the SCSI chip.
diff --git a/drivers/scsi/ch.c b/drivers/scsi/ch.c
index aa2011b6468..3c257fe0893 100644
--- a/drivers/scsi/ch.c
+++ b/drivers/scsi/ch.c
@@ -930,6 +930,7 @@ static int ch_probe(struct device *dev)
if (init)
ch_init_elem(ch);
+ dev_set_drvdata(dev, ch);
sdev_printk(KERN_INFO, sd, "Attached scsi changer %s\n", ch->name);
return 0;
diff --git a/drivers/scsi/device_handler/Kconfig b/drivers/scsi/device_handler/Kconfig
index 2adc0f666b6..67070257919 100644
--- a/drivers/scsi/device_handler/Kconfig
+++ b/drivers/scsi/device_handler/Kconfig
@@ -30,3 +30,11 @@ config SCSI_DH_EMC
depends on SCSI_DH
help
If you have a EMC CLARiiON select y. Otherwise, say N.
+
+config SCSI_DH_ALUA
+ tristate "SPC-3 ALUA Device Handler (EXPERIMENTAL)"
+ depends on SCSI_DH && EXPERIMENTAL
+ help
+ SCSI Device handler for generic SPC-3 Asymmetric Logical Unit
+ Access (ALUA).
+
diff --git a/drivers/scsi/device_handler/Makefile b/drivers/scsi/device_handler/Makefile
index 35272e93b1c..e1d2ea083e1 100644
--- a/drivers/scsi/device_handler/Makefile
+++ b/drivers/scsi/device_handler/Makefile
@@ -5,3 +5,4 @@ obj-$(CONFIG_SCSI_DH) += scsi_dh.o
obj-$(CONFIG_SCSI_DH_RDAC) += scsi_dh_rdac.o
obj-$(CONFIG_SCSI_DH_HP_SW) += scsi_dh_hp_sw.o
obj-$(CONFIG_SCSI_DH_EMC) += scsi_dh_emc.o
+obj-$(CONFIG_SCSI_DH_ALUA) += scsi_dh_alua.o
diff --git a/drivers/scsi/device_handler/scsi_dh.c b/drivers/scsi/device_handler/scsi_dh.c
index ab6c21cd968..a518f2eff19 100644
--- a/drivers/scsi/device_handler/scsi_dh.c
+++ b/drivers/scsi/device_handler/scsi_dh.c
@@ -24,8 +24,16 @@
#include <scsi/scsi_dh.h>
#include "../scsi_priv.h"
+struct scsi_dh_devinfo_list {
+ struct list_head node;
+ char vendor[9];
+ char model[17];
+ struct scsi_device_handler *handler;
+};
+
static DEFINE_SPINLOCK(list_lock);
static LIST_HEAD(scsi_dh_list);
+static LIST_HEAD(scsi_dh_dev_list);
static struct scsi_device_handler *get_device_handler(const char *name)
{
@@ -33,7 +41,7 @@ static struct scsi_device_handler *get_device_handler(const char *name)
spin_lock(&list_lock);
list_for_each_entry(tmp, &scsi_dh_list, list) {
- if (!strcmp(tmp->name, name)) {
+ if (!strncmp(tmp->name, name, strlen(tmp->name))) {
found = tmp;
break;
}
@@ -42,11 +50,307 @@ static struct scsi_device_handler *get_device_handler(const char *name)
return found;
}
+
+static struct scsi_device_handler *
+scsi_dh_cache_lookup(struct scsi_device *sdev)
+{
+ struct scsi_dh_devinfo_list *tmp;
+ struct scsi_device_handler *found_dh = NULL;
+
+ spin_lock(&list_lock);
+ list_for_each_entry(tmp, &scsi_dh_dev_list, node) {
+ if (!strncmp(sdev->vendor, tmp->vendor, strlen(tmp->vendor)) &&
+ !strncmp(sdev->model, tmp->model, strlen(tmp->model))) {
+ found_dh = tmp->handler;
+ break;
+ }
+ }
+ spin_unlock(&list_lock);
+
+ return found_dh;
+}
+
+static int scsi_dh_handler_lookup(struct scsi_device_handler *scsi_dh,
+ struct scsi_device *sdev)
+{
+ int i, found = 0;
+
+ for(i = 0; scsi_dh->devlist[i].vendor; i++) {
+ if (!strncmp(sdev->vendor, scsi_dh->devlist[i].vendor,
+ strlen(scsi_dh->devlist[i].vendor)) &&
+ !strncmp(sdev->model, scsi_dh->devlist[i].model,
+ strlen(scsi_dh->devlist[i].model))) {
+ found = 1;
+ break;
+ }
+ }
+ return found;
+}
+
+/*
+ * device_handler_match - Attach a device handler to a device
+ * @scsi_dh - The device handler to match against or NULL
+ * @sdev - SCSI device to be tested against @scsi_dh
+ *
+ * Tests @sdev against the device handler @scsi_dh or against
+ * all registered device_handler if @scsi_dh == NULL.
+ * Returns the found device handler or NULL if not found.
+ */
+static struct scsi_device_handler *
+device_handler_match(struct scsi_device_handler *scsi_dh,
+ struct scsi_device *sdev)
+{
+ struct scsi_device_handler *found_dh = NULL;
+ struct scsi_dh_devinfo_list *tmp;
+
+ found_dh = scsi_dh_cache_lookup(sdev);
+ if (found_dh)
+ return found_dh;
+
+ if (scsi_dh) {
+ if (scsi_dh_handler_lookup(scsi_dh, sdev))
+ found_dh = scsi_dh;
+ } else {
+ struct scsi_device_handler *tmp_dh;
+
+ spin_lock(&list_lock);
+ list_for_each_entry(tmp_dh, &scsi_dh_list, list) {
+ if (scsi_dh_handler_lookup(tmp_dh, sdev))
+ found_dh = tmp_dh;
+ }
+ spin_unlock(&list_lock);
+ }
+
+ if (found_dh) { /* If device is found, add it to the cache */
+ tmp = kmalloc(sizeof(*tmp), GFP_KERNEL);
+ if (tmp) {
+ strncpy(tmp->vendor, sdev->vendor, 8);
+ strncpy(tmp->model, sdev->model, 16);
+ tmp->vendor[8] = '\0';
+ tmp->model[16] = '\0';
+ tmp->handler = found_dh;
+ spin_lock(&list_lock);
+ list_add(&tmp->node, &scsi_dh_dev_list);
+ spin_unlock(&list_lock);
+ } else {
+ found_dh = NULL;
+ }
+ }
+
+ return found_dh;
+}
+
+/*
+ * scsi_dh_handler_attach - Attach a device handler to a device
+ * @sdev - SCSI device the device handler should attach to
+ * @scsi_dh - The device handler to attach
+ */
+static int scsi_dh_handler_attach(struct scsi_device *sdev,
+ struct scsi_device_handler *scsi_dh)
+{
+ int err = 0;
+
+ if (sdev->scsi_dh_data) {
+ if (sdev->scsi_dh_data->scsi_dh != scsi_dh)
+ err = -EBUSY;
+ } else if (scsi_dh->attach)
+ err = scsi_dh->attach(sdev);
+
+ return err;
+}
+
+/*
+ * scsi_dh_handler_detach - Detach a device handler from a device
+ * @sdev - SCSI device the device handler should be detached from
+ * @scsi_dh - Device handler to be detached
+ *
+ * Detach from a device handler. If a device handler is specified,
+ * only detach if the currently attached handler matches @scsi_dh.
+ */
+static void scsi_dh_handler_detach(struct scsi_device *sdev,
+ struct scsi_device_handler *scsi_dh)
+{
+ if (!sdev->scsi_dh_data)
+ return;
+
+ if (scsi_dh && scsi_dh != sdev->scsi_dh_data->scsi_dh)
+ return;
+
+ if (!scsi_dh)
+ scsi_dh = sdev->scsi_dh_data->scsi_dh;
+
+ if (scsi_dh && scsi_dh->detach)
+ scsi_dh->detach(sdev);
+}
+
+/*
+ * Functions for sysfs attribute 'dh_state'
+ */
+static ssize_t
+store_dh_state(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct scsi_device_handler *scsi_dh;
+ int err = -EINVAL;
+
+ if (!sdev->scsi_dh_data) {
+ /*
+ * Attach to a device handler
+ */
+ if (!(scsi_dh = get_device_handler(buf)))
+ return err;
+ err = scsi_dh_handler_attach(sdev, scsi_dh);
+ } else {
+ scsi_dh = sdev->scsi_dh_data->scsi_dh;
+ if (!strncmp(buf, "detach", 6)) {
+ /*
+ * Detach from a device handler
+ */
+ scsi_dh_handler_detach(sdev, scsi_dh);
+ err = 0;
+ } else if (!strncmp(buf, "activate", 8)) {
+ /*
+ * Activate a device handler
+ */
+ if (scsi_dh->activate)
+ err = scsi_dh->activate(sdev);
+ else
+ err = 0;
+ }
+ }
+
+ return err<0?err:count;
+}
+
+static ssize_t
+show_dh_state(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+
+ if (!sdev->scsi_dh_data)
+ return snprintf(buf, 20, "detached\n");
+
+ return snprintf(buf, 20, "%s\n", sdev->scsi_dh_data->scsi_dh->name);
+}
+
+static struct device_attribute scsi_dh_state_attr =
+ __ATTR(dh_state, S_IRUGO | S_IWUSR, show_dh_state,
+ store_dh_state);
+
+/*
+ * scsi_dh_sysfs_attr_add - Callback for scsi_init_dh
+ */
+static int scsi_dh_sysfs_attr_add(struct device *dev, void *data)
+{
+ struct scsi_device *sdev;
+ int err;
+
+ if (!scsi_is_sdev_device(dev))
+ return 0;
+
+ sdev = to_scsi_device(dev);
+
+ err = device_create_file(&sdev->sdev_gendev,
+ &scsi_dh_state_attr);
+
+ return 0;
+}
+
+/*
+ * scsi_dh_sysfs_attr_remove - Callback for scsi_exit_dh
+ */
+static int scsi_dh_sysfs_attr_remove(struct device *dev, void *data)
+{
+ struct scsi_device *sdev;
+
+ if (!scsi_is_sdev_device(dev))
+ return 0;
+
+ sdev = to_scsi_device(dev);
+
+ device_remove_file(&sdev->sdev_gendev,
+ &scsi_dh_state_attr);
+
+ return 0;
+}
+
+/*
+ * scsi_dh_notifier - notifier chain callback
+ */
+static int scsi_dh_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct device *dev = data;
+ struct scsi_device *sdev;
+ int err = 0;
+ struct scsi_device_handler *devinfo = NULL;
+
+ if (!scsi_is_sdev_device(dev))
+ return 0;
+
+ sdev = to_scsi_device(dev);
+
+ if (action == BUS_NOTIFY_ADD_DEVICE) {
+ devinfo = device_handler_match(NULL, sdev);
+ if (!devinfo)
+ goto out;
+
+ err = scsi_dh_handler_attach(sdev, devinfo);
+ if (!err)
+ err = device_create_file(dev, &scsi_dh_state_attr);
+ } else if (action == BUS_NOTIFY_DEL_DEVICE) {
+ device_remove_file(dev, &scsi_dh_state_attr);
+ scsi_dh_handler_detach(sdev, NULL);
+ }
+out:
+ return err;
+}
+
+/*
+ * scsi_dh_notifier_add - Callback for scsi_register_device_handler
+ */
static int scsi_dh_notifier_add(struct device *dev, void *data)
{
struct scsi_device_handler *scsi_dh = data;
+ struct scsi_device *sdev;
+
+ if (!scsi_is_sdev_device(dev))
+ return 0;
+
+ if (!get_device(dev))
+ return 0;
+
+ sdev = to_scsi_device(dev);
+
+ if (device_handler_match(scsi_dh, sdev))
+ scsi_dh_handler_attach(sdev, scsi_dh);
+
+ put_device(dev);
+
+ return 0;
+}
+
+/*
+ * scsi_dh_notifier_remove - Callback for scsi_unregister_device_handler
+ */
+static int scsi_dh_notifier_remove(struct device *dev, void *data)
+{
+ struct scsi_device_handler *scsi_dh = data;
+ struct scsi_device *sdev;
+
+ if (!scsi_is_sdev_device(dev))
+ return 0;
+
+ if (!get_device(dev))
+ return 0;
+
+ sdev = to_scsi_device(dev);
+
+ scsi_dh_handler_detach(sdev, scsi_dh);
+
+ put_device(dev);
- scsi_dh->nb.notifier_call(&scsi_dh->nb, BUS_NOTIFY_ADD_DEVICE, dev);
return 0;
}
@@ -59,33 +363,19 @@ static int scsi_dh_notifier_add(struct device *dev, void *data)
*/
int scsi_register_device_handler(struct scsi_device_handler *scsi_dh)
{
- int ret = -EBUSY;
- struct scsi_device_handler *tmp;
+ if (get_device_handler(scsi_dh->name))
+ return -EBUSY;
- tmp = get_device_handler(scsi_dh->name);
- if (tmp)
- goto done;
-
- ret = bus_register_notifier(&scsi_bus_type, &scsi_dh->nb);
-
- bus_for_each_dev(&scsi_bus_type, NULL, scsi_dh, scsi_dh_notifier_add);
spin_lock(&list_lock);
list_add(&scsi_dh->list, &scsi_dh_list);
spin_unlock(&list_lock);
+ bus_for_each_dev(&scsi_bus_type, NULL, scsi_dh, scsi_dh_notifier_add);
+ printk(KERN_INFO "%s: device handler registered\n", scsi_dh->name);
-done:
- return ret;
+ return SCSI_DH_OK;
}
EXPORT_SYMBOL_GPL(scsi_register_device_handler);
-static int scsi_dh_notifier_remove(struct device *dev, void *data)
-{
- struct scsi_device_handler *scsi_dh = data;
-
- scsi_dh->nb.notifier_call(&scsi_dh->nb, BUS_NOTIFY_DEL_DEVICE, dev);
- return 0;
-}
-
/*
* scsi_unregister_device_handler - register a device handler personality
* module.
@@ -95,23 +385,26 @@ static int scsi_dh_notifier_remove(struct device *dev, void *data)
*/
int scsi_unregister_device_handler(struct scsi_device_handler *scsi_dh)
{
- int ret = -ENODEV;
- struct scsi_device_handler *tmp;
-
- tmp = get_device_handler(scsi_dh->name);
- if (!tmp)
- goto done;
+ struct scsi_dh_devinfo_list *tmp, *pos;
- ret = bus_unregister_notifier(&scsi_bus_type, &scsi_dh->nb);
+ if (!get_device_handler(scsi_dh->name))
+ return -ENODEV;
bus_for_each_dev(&scsi_bus_type, NULL, scsi_dh,
- scsi_dh_notifier_remove);
+ scsi_dh_notifier_remove);
+
spin_lock(&list_lock);
list_del(&scsi_dh->list);
+ list_for_each_entry_safe(pos, tmp, &scsi_dh_dev_list, node) {
+ if (pos->handler == scsi_dh) {
+ list_del(&pos->node);
+ kfree(pos);
+ }
+ }
spin_unlock(&list_lock);
+ printk(KERN_INFO "%s: device handler unregistered\n", scsi_dh->name);
-done:
- return ret;
+ return SCSI_DH_OK;
}
EXPORT_SYMBOL_GPL(scsi_unregister_device_handler);
@@ -157,6 +450,97 @@ int scsi_dh_handler_exist(const char *name)
}
EXPORT_SYMBOL_GPL(scsi_dh_handler_exist);
+/*
+ * scsi_dh_handler_attach - Attach device handler
+ * @sdev - sdev the handler should be attached to
+ * @name - name of the handler to attach
+ */
+int scsi_dh_attach(struct request_queue *q, const char *name)
+{
+ unsigned long flags;
+ struct scsi_device *sdev;
+ struct scsi_device_handler *scsi_dh;
+ int err = 0;
+
+ scsi_dh = get_device_handler(name);
+ if (!scsi_dh)
+ return -EINVAL;
+
+ spin_lock_irqsave(q->queue_lock, flags);
+ sdev = q->queuedata;
+ if (!sdev || !get_device(&sdev->sdev_gendev))
+ err = -ENODEV;
+ spin_unlock_irqrestore(q->queue_lock, flags);
+
+ if (!err) {
+ err = scsi_dh_handler_attach(sdev, scsi_dh);
+
+ put_device(&sdev->sdev_gendev);
+ }
+ return err;
+}
+EXPORT_SYMBOL_GPL(scsi_dh_attach);
+
+/*
+ * scsi_dh_handler_detach - Detach device handler
+ * @sdev - sdev the handler should be detached from
+ *
+ * This function will detach the device handler only
+ * if the sdev is not part of the internal list, ie
+ * if it has been attached manually.
+ */
+void scsi_dh_detach(struct request_queue *q)
+{
+ unsigned long flags;
+ struct scsi_device *sdev;
+ struct scsi_device_handler *scsi_dh = NULL;
+
+ spin_lock_irqsave(q->queue_lock, flags);
+ sdev = q->queuedata;
+ if (!sdev || !get_device(&sdev->sdev_gendev))
+ sdev = NULL;
+ spin_unlock_irqrestore(q->queue_lock, flags);
+
+ if (!sdev)
+ return;
+
+ if (sdev->scsi_dh_data) {
+ /* if sdev is not on internal list, detach */
+ scsi_dh = sdev->scsi_dh_data->scsi_dh;
+ if (!device_handler_match(scsi_dh, sdev))
+ scsi_dh_handler_detach(sdev, scsi_dh);
+ }
+ put_device(&sdev->sdev_gendev);
+}
+EXPORT_SYMBOL_GPL(scsi_dh_detach);
+
+static struct notifier_block scsi_dh_nb = {
+ .notifier_call = scsi_dh_notifier
+};
+
+static int __init scsi_dh_init(void)
+{
+ int r;
+
+ r = bus_register_notifier(&scsi_bus_type, &scsi_dh_nb);
+
+ if (!r)
+ bus_for_each_dev(&scsi_bus_type, NULL, NULL,
+ scsi_dh_sysfs_attr_add);
+
+ return r;
+}
+
+static void __exit scsi_dh_exit(void)
+{
+ bus_for_each_dev(&scsi_bus_type, NULL, NULL,
+ scsi_dh_sysfs_attr_remove);
+ bus_unregister_notifier(&scsi_bus_type, &scsi_dh_nb);
+}
+
+module_init(scsi_dh_init);
+module_exit(scsi_dh_exit);
+
MODULE_DESCRIPTION("SCSI device handler");
MODULE_AUTHOR("Chandra Seetharaman <sekharan@us.ibm.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
new file mode 100644
index 00000000000..994da56fffe
--- /dev/null
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -0,0 +1,802 @@
+/*
+ * Generic SCSI-3 ALUA SCSI Device Handler
+ *
+ * Copyright (C) 2007, 2008 Hannes Reinecke, SUSE Linux Products GmbH.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ */
+#include <scsi/scsi.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_dh.h>
+
+#define ALUA_DH_NAME "alua"
+#define ALUA_DH_VER "1.2"
+
+#define TPGS_STATE_OPTIMIZED 0x0
+#define TPGS_STATE_NONOPTIMIZED 0x1
+#define TPGS_STATE_STANDBY 0x2
+#define TPGS_STATE_UNAVAILABLE 0x3
+#define TPGS_STATE_OFFLINE 0xe
+#define TPGS_STATE_TRANSITIONING 0xf
+
+#define TPGS_SUPPORT_NONE 0x00
+#define TPGS_SUPPORT_OPTIMIZED 0x01
+#define TPGS_SUPPORT_NONOPTIMIZED 0x02
+#define TPGS_SUPPORT_STANDBY 0x04
+#define TPGS_SUPPORT_UNAVAILABLE 0x08
+#define TPGS_SUPPORT_OFFLINE 0x40
+#define TPGS_SUPPORT_TRANSITION 0x80
+
+#define TPGS_MODE_UNINITIALIZED -1
+#define TPGS_MODE_NONE 0x0
+#define TPGS_MODE_IMPLICIT 0x1
+#define TPGS_MODE_EXPLICIT 0x2
+
+#define ALUA_INQUIRY_SIZE 36
+#define ALUA_FAILOVER_TIMEOUT (60 * HZ)
+#define ALUA_FAILOVER_RETRIES 5
+
+struct alua_dh_data {
+ int group_id;
+ int rel_port;
+ int tpgs;
+ int state;
+ unsigned char inq[ALUA_INQUIRY_SIZE];
+ unsigned char *buff;
+ int bufflen;
+ unsigned char sense[SCSI_SENSE_BUFFERSIZE];
+ int senselen;
+};
+
+#define ALUA_POLICY_SWITCH_CURRENT 0
+#define ALUA_POLICY_SWITCH_ALL 1
+
+static inline struct alua_dh_data *get_alua_data(struct scsi_device *sdev)
+{
+ struct scsi_dh_data *scsi_dh_data = sdev->scsi_dh_data;
+ BUG_ON(scsi_dh_data == NULL);
+ return ((struct alua_dh_data *) scsi_dh_data->buf);
+}
+
+static int realloc_buffer(struct alua_dh_data *h, unsigned len)
+{
+ if (h->buff && h->buff != h->inq)
+ kfree(h->buff);
+
+ h->buff = kmalloc(len, GFP_NOIO);
+ if (!h->buff) {
+ h->buff = h->inq;
+ h->bufflen = ALUA_INQUIRY_SIZE;
+ return 1;
+ }
+ h->bufflen = len;
+ return 0;
+}
+
+static struct request *get_alua_req(struct scsi_device *sdev,
+ void *buffer, unsigned buflen, int rw)
+{
+ struct request *rq;
+ struct request_queue *q = sdev->request_queue;
+
+ rq = blk_get_request(q, rw, GFP_NOIO);
+
+ if (!rq) {
+ sdev_printk(KERN_INFO, sdev,
+ "%s: blk_get_request failed\n", __func__);
+ return NULL;
+ }
+
+ if (buflen && blk_rq_map_kern(q, rq, buffer, buflen, GFP_NOIO)) {
+ blk_put_request(rq);
+ sdev_printk(KERN_INFO, sdev,
+ "%s: blk_rq_map_kern failed\n", __func__);
+ return NULL;
+ }
+
+ rq->cmd_type = REQ_TYPE_BLOCK_PC;
+ rq->cmd_flags |= REQ_FAILFAST | REQ_NOMERGE;
+ rq->retries = ALUA_FAILOVER_RETRIES;
+ rq->timeout = ALUA_FAILOVER_TIMEOUT;
+
+ return rq;
+}
+
+/*
+ * submit_std_inquiry - Issue a standard INQUIRY command
+ * @sdev: sdev the command should be send to
+ */
+static int submit_std_inquiry(struct scsi_device *sdev, struct alua_dh_data *h)
+{
+ struct request *rq;
+ int err = SCSI_DH_RES_TEMP_UNAVAIL;
+
+ rq = get_alua_req(sdev, h->inq, ALUA_INQUIRY_SIZE, READ);
+ if (!rq)
+ goto done;
+
+ /* Prepare the command. */
+ rq->cmd[0] = INQUIRY;
+ rq->cmd[1] = 0;
+ rq->cmd[2] = 0;
+ rq->cmd[4] = ALUA_INQUIRY_SIZE;
+ rq->cmd_len = COMMAND_SIZE(INQUIRY);
+
+ rq->sense = h->sense;
+ memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
+ rq->sense_len = h->senselen = 0;
+
+ err = blk_execute_rq(rq->q, NULL, rq, 1);
+ if (err == -EIO) {
+ sdev_printk(KERN_INFO, sdev,
+ "%s: std inquiry failed with %x\n",
+ ALUA_DH_NAME, rq->errors);
+ h->senselen = rq->sense_len;
+ err = SCSI_DH_IO;
+ }
+ blk_put_request(rq);
+done:
+ return err;
+}
+
+/*
+ * submit_vpd_inquiry - Issue an INQUIRY VPD page 0x83 command
+ * @sdev: sdev the command should be sent to
+ */
+static int submit_vpd_inquiry(struct scsi_device *sdev, struct alua_dh_data *h)
+{
+ struct request *rq;
+ int err = SCSI_DH_RES_TEMP_UNAVAIL;
+
+ rq = get_alua_req(sdev, h->buff, h->bufflen, READ);
+ if (!rq)
+ goto done;
+
+ /* Prepare the command. */
+ rq->cmd[0] = INQUIRY;
+ rq->cmd[1] = 1;
+ rq->cmd[2] = 0x83;
+ rq->cmd[4] = h->bufflen;
+ rq->cmd_len = COMMAND_SIZE(INQUIRY);
+
+ rq->sense = h->sense;
+ memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
+ rq->sense_len = h->senselen = 0;
+
+ err = blk_execute_rq(rq->q, NULL, rq, 1);
+ if (err == -EIO) {
+ sdev_printk(KERN_INFO, sdev,
+ "%s: evpd inquiry failed with %x\n",
+ ALUA_DH_NAME, rq->errors);
+ h->senselen = rq->sense_len;
+ err = SCSI_DH_IO;
+ }
+ blk_put_request(rq);
+done:
+ return err;
+}
+
+/*
+ * submit_rtpg - Issue a REPORT TARGET GROUP STATES command
+ * @sdev: sdev the command should be sent to
+ */
+static unsigned submit_rtpg(struct scsi_device *sdev, struct alua_dh_data *h)
+{
+ struct request *rq;
+ int err = SCSI_DH_RES_TEMP_UNAVAIL;
+
+ rq = get_alua_req(sdev, h->buff, h->bufflen, READ);
+ if (!rq)
+ goto done;
+
+ /* Prepare the command. */
+ rq->cmd[0] = MAINTENANCE_IN;
+ rq->cmd[1] = MI_REPORT_TARGET_PGS;
+ rq->cmd[6] = (h->bufflen >> 24) & 0xff;
+ rq->cmd[7] = (h->bufflen >> 16) & 0xff;
+ rq->cmd[8] = (h->bufflen >> 8) & 0xff;
+ rq->cmd[9] = h->bufflen & 0xff;
+ rq->cmd_len = COMMAND_SIZE(MAINTENANCE_IN);
+
+ rq->sense = h->sense;
+ memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
+ rq->sense_len = h->senselen = 0;
+
+ err = blk_execute_rq(rq->q, NULL, rq, 1);
+ if (err == -EIO) {
+ sdev_printk(KERN_INFO, sdev,
+ "%s: rtpg failed with %x\n",
+ ALUA_DH_NAME, rq->errors);
+ h->senselen = rq->sense_len;
+ err = SCSI_DH_IO;
+ }
+ blk_put_request(rq);
+done:
+ return err;
+}
+
+/*
+ * submit_stpg - Issue a SET TARGET GROUP STATES command
+ * @sdev: sdev the command should be sent to
+ *
+ * Currently we're only setting the current target port group state
+ * to 'active/optimized' and let the array firmware figure out
+ * the states of the remaining groups.
+ */
+static unsigned submit_stpg(struct scsi_device *sdev, struct alua_dh_data *h)
+{
+ struct request *rq;
+ int err = SCSI_DH_RES_TEMP_UNAVAIL;
+ int stpg_len = 8;
+
+ /* Prepare the data buffer */
+ memset(h->buff, 0, stpg_len);
+ h->buff[4] = TPGS_STATE_OPTIMIZED & 0x0f;
+ h->buff[6] = (h->group_id >> 8) & 0x0f;
+ h->buff[7] = h->group_id & 0x0f;
+
+ rq = get_alua_req(sdev, h->buff, stpg_len, WRITE);
+ if (!rq)
+ goto done;
+
+ /* Prepare the command. */
+ rq->cmd[0] = MAINTENANCE_OUT;
+ rq->cmd[1] = MO_SET_TARGET_PGS;
+ rq->cmd[6] = (stpg_len >> 24) & 0xff;
+ rq->cmd[7] = (stpg_len >> 16) & 0xff;
+ rq->cmd[8] = (stpg_len >> 8) & 0xff;
+ rq->cmd[9] = stpg_len & 0xff;
+ rq->cmd_len = COMMAND_SIZE(MAINTENANCE_OUT);
+
+ rq->sense = h->sense;
+ memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
+ rq->sense_len = h->senselen = 0;
+
+ err = blk_execute_rq(rq->q, NULL, rq, 1);
+ if (err == -EIO) {
+ sdev_printk(KERN_INFO, sdev,
+ "%s: stpg failed with %x\n",
+ ALUA_DH_NAME, rq->errors);
+ h->senselen = rq->sense_len;
+ err = SCSI_DH_IO;
+ }
+ blk_put_request(rq);
+done:
+ return err;
+}
+
+/*
+ * alua_std_inquiry - Evaluate standard INQUIRY command
+ * @sdev: device to be checked
+ *
+ * Just extract the TPGS setting to find out if ALUA
+ * is supported.
+ */
+static int alua_std_inquiry(struct scsi_device *sdev, struct alua_dh_data *h)
+{
+ int err;
+
+ err = submit_std_inquiry(sdev, h);
+
+ if (err != SCSI_DH_OK)
+ return err;
+
+ /* Check TPGS setting */
+ h->tpgs = (h->inq[5] >> 4) & 0x3;
+ switch (h->tpgs) {
+ case TPGS_MODE_EXPLICIT|TPGS_MODE_IMPLICIT:
+ sdev_printk(KERN_INFO, sdev,
+ "%s: supports implicit and explicit TPGS\n",
+ ALUA_DH_NAME);
+ break;
+ case TPGS_MODE_EXPLICIT:
+ sdev_printk(KERN_INFO, sdev, "%s: supports explicit TPGS\n",
+ ALUA_DH_NAME);
+ break;
+ case TPGS_MODE_IMPLICIT:
+ sdev_printk(KERN_INFO, sdev, "%s: supports implicit TPGS\n",
+ ALUA_DH_NAME);
+ break;
+ default:
+ h->tpgs = TPGS_MODE_NONE;
+ sdev_printk(KERN_INFO, sdev, "%s: not supported\n",
+ ALUA_DH_NAME);
+ err = SCSI_DH_DEV_UNSUPP;
+ break;
+ }
+
+ return err;
+}
+
+/*
+ * alua_vpd_inquiry - Evaluate INQUIRY vpd page 0x83
+ * @sdev: device to be checked
+ *
+ * Extract the relative target port and the target port group
+ * descriptor from the list of identificators.
+ */
+static int alua_vpd_inquiry(struct scsi_device *sdev, struct alua_dh_data *h)
+{
+ int len;
+ unsigned err;
+ unsigned char *d;
+
+ retry:
+ err = submit_vpd_inquiry(sdev, h);
+
+ if (err != SCSI_DH_OK)
+ return err;
+
+ /* Check if vpd page exceeds initial buffer */
+ len = (h->buff[2] << 8) + h->buff[3] + 4;
+ if (len > h->bufflen) {
+ /* Resubmit with the correct length */
+ if (realloc_buffer(h, len)) {
+ sdev_printk(KERN_WARNING, sdev,
+ "%s: kmalloc buffer failed\n",
+ ALUA_DH_NAME);
+ /* Temporary failure, bypass */
+ return SCSI_DH_DEV_TEMP_BUSY;
+ }
+ goto retry;
+ }
+
+ /*
+ * Now look for the correct descriptor.
+ */
+ d = h->buff + 4;
+ while (d < h->buff + len) {
+ switch (d[1] & 0xf) {
+ case 0x4:
+ /* Relative target port */
+ h->rel_port = (d[6] << 8) + d[7];
+ break;
+ case 0x5:
+ /* Target port group */
+ h->group_id = (d[6] << 8) + d[7];
+ break;
+ default:
+ break;
+ }
+ d += d[3] + 4;
+ }
+
+ if (h->group_id == -1) {
+ /*
+ * Internal error; TPGS supported but required
+ * VPD identification descriptors not present.
+ * Disable ALUA support
+ */
+ sdev_printk(KERN_INFO, sdev,
+ "%s: No target port descriptors found\n",
+ ALUA_DH_NAME);
+ h->state = TPGS_STATE_OPTIMIZED;
+ h->tpgs = TPGS_MODE_NONE;
+ err = SCSI_DH_DEV_UNSUPP;
+ } else {
+ sdev_printk(KERN_INFO, sdev,
+ "%s: port group %02x rel port %02x\n",
+ ALUA_DH_NAME, h->group_id, h->rel_port);
+ }
+
+ return err;
+}
+
+static char print_alua_state(int state)
+{
+ switch (state) {
+ case TPGS_STATE_OPTIMIZED:
+ return 'A';
+ case TPGS_STATE_NONOPTIMIZED:
+ return 'N';
+ case TPGS_STATE_STANDBY:
+ return 'S';
+ case TPGS_STATE_UNAVAILABLE:
+ return 'U';
+ case TPGS_STATE_OFFLINE:
+ return 'O';
+ case TPGS_STATE_TRANSITIONING:
+ return 'T';
+ default:
+ return 'X';
+ }
+}
+
+static int alua_check_sense(struct scsi_device *sdev,
+ struct scsi_sense_hdr *sense_hdr)
+{
+ switch (sense_hdr->sense_key) {
+ case NOT_READY:
+ if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x0a)
+ /*
+ * LUN Not Accessible - ALUA state transition
+ */
+ return NEEDS_RETRY;
+ if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x0b)
+ /*
+ * LUN Not Accessible -- Target port in standby state
+ */
+ return SUCCESS;
+ if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x0c)
+ /*
+ * LUN Not Accessible -- Target port in unavailable state
+ */
+ return SUCCESS;
+ if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x12)
+ /*
+ * LUN Not Ready -- Offline
+ */
+ return SUCCESS;
+ break;
+ case UNIT_ATTENTION:
+ if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x00)
+ /*
+ * Power On, Reset, or Bus Device Reset, just retry.
+ */
+ return NEEDS_RETRY;
+ if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x06) {
+ /*
+ * ALUA state changed
+ */
+ return NEEDS_RETRY;
+ }
+ if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x07) {
+ /*
+ * Implicit ALUA state transition failed
+ */
+ return NEEDS_RETRY;
+ }
+ break;
+ }
+
+ return SCSI_RETURN_NOT_HANDLED;
+}
+
+/*
+ * alua_stpg - Evaluate SET TARGET GROUP STATES
+ * @sdev: the device to be evaluated
+ * @state: the new target group state
+ *
+ * Send a SET TARGET GROUP STATES command to the device.
+ * We only have to test here if we should resubmit the command;
+ * any other error is assumed as a failure.
+ */
+static int alua_stpg(struct scsi_device *sdev, int state,
+ struct alua_dh_data *h)
+{
+ struct scsi_sense_hdr sense_hdr;
+ unsigned err;
+ int retry = ALUA_FAILOVER_RETRIES;
+
+ retry:
+ err = submit_stpg(sdev, h);
+ if (err == SCSI_DH_IO && h->senselen > 0) {
+ err = scsi_normalize_sense(h->sense, SCSI_SENSE_BUFFERSIZE,
+ &sense_hdr);
+ if (!err)
+ return SCSI_DH_IO;
+ err = alua_check_sense(sdev, &sense_hdr);
+ if (retry > 0 && err == NEEDS_RETRY) {
+ retry--;
+ goto retry;
+ }
+ sdev_printk(KERN_INFO, sdev,
+ "%s: stpg sense code: %02x/%02x/%02x\n",
+ ALUA_DH_NAME, sense_hdr.sense_key,
+ sense_hdr.asc, sense_hdr.ascq);
+ err = SCSI_DH_IO;
+ }
+ if (err == SCSI_DH_OK) {
+ h->state = state;
+ sdev_printk(KERN_INFO, sdev,
+ "%s: port group %02x switched to state %c\n",
+ ALUA_DH_NAME, h->group_id,
+ print_alua_state(h->state) );
+ }
+ return err;
+}
+
+/*
+ * alua_rtpg - Evaluate REPORT TARGET GROUP STATES
+ * @sdev: the device to be evaluated.
+ *
+ * Evaluate the Target Port Group State.
+ * Returns SCSI_DH_DEV_OFFLINED if the path is
+ * found to be unuseable.
+ */
+static int alua_rtpg(struct scsi_device *sdev, struct alua_dh_data *h)
+{
+ struct scsi_sense_hdr sense_hdr;
+ int len, k, off, valid_states = 0;
+ char *ucp;
+ unsigned err;
+
+ retry:
+ err = submit_rtpg(sdev, h);
+
+ if (err == SCSI_DH_IO && h->senselen > 0) {
+ err = scsi_normalize_sense(h->sense, SCSI_SENSE_BUFFERSIZE,
+ &sense_hdr);
+ if (!err)
+ return SCSI_DH_IO;
+
+ err = alua_check_sense(sdev, &sense_hdr);
+ if (err == NEEDS_RETRY)
+ goto retry;
+ sdev_printk(KERN_INFO, sdev,
+ "%s: rtpg sense code %02x/%02x/%02x\n",
+ ALUA_DH_NAME, sense_hdr.sense_key,
+ sense_hdr.asc, sense_hdr.ascq);
+ err = SCSI_DH_IO;
+ }
+ if (err != SCSI_DH_OK)
+ return err;
+
+ len = (h->buff[0] << 24) + (h->buff[1] << 16) +
+ (h->buff[2] << 8) + h->buff[3] + 4;
+
+ if (len > h->bufflen) {
+ /* Resubmit with the correct length */
+ if (realloc_buffer(h, len)) {
+ sdev_printk(KERN_WARNING, sdev,
+ "%s: kmalloc buffer failed\n",__func__);
+ /* Temporary failure, bypass */
+ return SCSI_DH_DEV_TEMP_BUSY;
+ }
+ goto retry;
+ }
+
+ for (k = 4, ucp = h->buff + 4; k < len; k += off, ucp += off) {
+ if (h->group_id == (ucp[2] << 8) + ucp[3]) {
+ h->state = ucp[0] & 0x0f;
+ valid_states = ucp[1];
+ }
+ off = 8 + (ucp[7] * 4);
+ }
+
+ sdev_printk(KERN_INFO, sdev,
+ "%s: port group %02x state %c supports %c%c%c%c%c%c\n",
+ ALUA_DH_NAME, h->group_id, print_alua_state(h->state),
+ valid_states&TPGS_SUPPORT_TRANSITION?'T':'t',
+ valid_states&TPGS_SUPPORT_OFFLINE?'O':'o',
+ valid_states&TPGS_SUPPORT_UNAVAILABLE?'U':'u',
+ valid_states&TPGS_SUPPORT_STANDBY?'S':'s',
+ valid_states&TPGS_SUPPORT_NONOPTIMIZED?'N':'n',
+ valid_states&TPGS_SUPPORT_OPTIMIZED?'A':'a');
+
+ if (h->tpgs & TPGS_MODE_EXPLICIT) {
+ switch (h->state) {
+ case TPGS_STATE_TRANSITIONING:
+ /* State transition, retry */
+ goto retry;
+ break;
+ case TPGS_STATE_OFFLINE:
+ /* Path is offline, fail */
+ err = SCSI_DH_DEV_OFFLINED;
+ break;
+ default:
+ break;
+ }
+ } else {
+ /* Only Implicit ALUA support */
+ if (h->state == TPGS_STATE_OPTIMIZED ||
+ h->state == TPGS_STATE_NONOPTIMIZED ||
+ h->state == TPGS_STATE_STANDBY)
+ /* Useable path if active */
+ err = SCSI_DH_OK;
+ else
+ /* Path unuseable for unavailable/offline */
+ err = SCSI_DH_DEV_OFFLINED;
+ }
+ return err;
+}
+
+/*
+ * alua_initialize - Initialize ALUA state
+ * @sdev: the device to be initialized
+ *
+ * For the prep_fn to work correctly we have
+ * to initialize the ALUA state for the device.
+ */
+static int alua_initialize(struct scsi_device *sdev, struct alua_dh_data *h)
+{
+ int err;
+
+ err = alua_std_inquiry(sdev, h);
+ if (err != SCSI_DH_OK)
+ goto out;
+
+ err = alua_vpd_inquiry(sdev, h);
+ if (err != SCSI_DH_OK)
+ goto out;
+
+ err = alua_rtpg(sdev, h);
+ if (err != SCSI_DH_OK)
+ goto out;
+
+out:
+ return err;
+}
+
+/*
+ * alua_activate - activate a path
+ * @sdev: device on the path to be activated
+ *
+ * We're currently switching the port group to be activated only and
+ * let the array figure out the rest.
+ * There may be other arrays which require us to switch all port groups
+ * based on a certain policy. But until we actually encounter them it
+ * should be okay.
+ */
+static int alua_activate(struct scsi_device *sdev)
+{
+ struct alua_dh_data *h = get_alua_data(sdev);
+ int err = SCSI_DH_OK;
+
+ if (h->group_id != -1) {
+ err = alua_rtpg(sdev, h);
+ if (err != SCSI_DH_OK)
+ goto out;
+ }
+
+ if (h->tpgs == TPGS_MODE_EXPLICIT && h->state != TPGS_STATE_OPTIMIZED)
+ err = alua_stpg(sdev, TPGS_STATE_OPTIMIZED, h);
+
+out:
+ return err;
+}
+
+/*
+ * alua_prep_fn - request callback
+ *
+ * Fail I/O to all paths not in state
+ * active/optimized or active/non-optimized.
+ */
+static int alua_prep_fn(struct scsi_device *sdev, struct request *req)
+{
+ struct alua_dh_data *h = get_alua_data(sdev);
+ int ret = BLKPREP_OK;
+
+ if (h->state != TPGS_STATE_OPTIMIZED &&
+ h->state != TPGS_STATE_NONOPTIMIZED) {
+ ret = BLKPREP_KILL;
+ req->cmd_flags |= REQ_QUIET;
+ }
+ return ret;
+
+}
+
+static const struct scsi_dh_devlist alua_dev_list[] = {
+ {"HP", "MSA VOLUME" },
+ {"HP", "HSV101" },
+ {"HP", "HSV111" },
+ {"HP", "HSV200" },
+ {"HP", "HSV210" },
+ {"HP", "HSV300" },
+ {"IBM", "2107900" },
+ {"IBM", "2145" },
+ {"Pillar", "Axiom" },
+ {NULL, NULL}
+};
+
+static int alua_bus_attach(struct scsi_device *sdev);
+static void alua_bus_detach(struct scsi_device *sdev);
+
+static struct scsi_device_handler alua_dh = {
+ .name = ALUA_DH_NAME,
+ .module = THIS_MODULE,
+ .devlist = alua_dev_list,
+ .attach = alua_bus_attach,
+ .detach = alua_bus_detach,
+ .prep_fn = alua_prep_fn,
+ .check_sense = alua_check_sense,
+ .activate = alua_activate,
+};
+
+/*
+ * alua_bus_attach - Attach device handler
+ * @sdev: device to be attached to
+ */
+static int alua_bus_attach(struct scsi_device *sdev)
+{
+ struct scsi_dh_data *scsi_dh_data;
+ struct alua_dh_data *h;
+ unsigned long flags;
+ int err = SCSI_DH_OK;
+
+ scsi_dh_data = kzalloc(sizeof(struct scsi_device_handler *)
+ + sizeof(*h) , GFP_KERNEL);
+ if (!scsi_dh_data) {
+ sdev_printk(KERN_ERR, sdev, "%s: Attach failed\n",
+ ALUA_DH_NAME);
+ return -ENOMEM;
+ }
+
+ scsi_dh_data->scsi_dh = &alua_dh;
+ h = (struct alua_dh_data *) scsi_dh_data->buf;
+ h->tpgs = TPGS_MODE_UNINITIALIZED;
+ h->state = TPGS_STATE_OPTIMIZED;
+ h->group_id = -1;
+ h->rel_port = -1;
+ h->buff = h->inq;
+ h->bufflen = ALUA_INQUIRY_SIZE;
+
+ err = alua_initialize(sdev, h);
+ if (err != SCSI_DH_OK)
+ goto failed;
+
+ if (!try_module_get(THIS_MODULE))
+ goto failed;
+
+ spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
+ sdev->scsi_dh_data = scsi_dh_data;
+ spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
+
+ return 0;
+
+failed:
+ kfree(scsi_dh_data);
+ sdev_printk(KERN_ERR, sdev, "%s: not attached\n", ALUA_DH_NAME);
+ return -EINVAL;
+}
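For illustration only: the attach routines in this patch all use the same allocation idiom, one kzalloc() covering a small generic header plus the handler-private state, which is then reached by casting the trailing buffer. A hedged standalone sketch of that idiom follows; the struct and function names are illustrative stand-ins, not the kernel's.

#include <stdlib.h>

struct dh_header {			/* stand-in for struct scsi_dh_data */
	void *handler;			/* stand-in for the scsi_dh pointer */
	char buf[];			/* handler-private data starts here */
};

struct example_private {		/* stand-in for struct alua_dh_data */
	int group_id;
	int state;
};

static struct dh_header *example_attach(void *handler)
{
	struct dh_header *hdr;
	struct example_private *priv;

	/* one allocation for header plus private data, zeroed like kzalloc() */
	hdr = calloc(1, sizeof(*hdr) + sizeof(*priv));
	if (!hdr)
		return NULL;

	hdr->handler = handler;
	priv = (struct example_private *)hdr->buf;	/* same cast as the handlers above */
	priv->group_id = -1;
	return hdr;
}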
+
+/*
+ * alua_bus_detach - Detach device handler
+ * @sdev: device to be detached from
+ */
+static void alua_bus_detach(struct scsi_device *sdev)
+{
+ struct scsi_dh_data *scsi_dh_data;
+ struct alua_dh_data *h;
+ unsigned long flags;
+
+ spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
+ scsi_dh_data = sdev->scsi_dh_data;
+ sdev->scsi_dh_data = NULL;
+ spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
+
+ h = (struct alua_dh_data *) scsi_dh_data->buf;
+ if (h->buff && h->inq != h->buff)
+ kfree(h->buff);
+ kfree(scsi_dh_data);
+ module_put(THIS_MODULE);
+ sdev_printk(KERN_NOTICE, sdev, "%s: Detached\n", ALUA_DH_NAME);
+}
+
+static int __init alua_init(void)
+{
+ int r;
+
+ r = scsi_register_device_handler(&alua_dh);
+ if (r != 0)
+ printk(KERN_ERR "%s: Failed to register scsi device handler",
+ ALUA_DH_NAME);
+ return r;
+}
+
+static void __exit alua_exit(void)
+{
+ scsi_unregister_device_handler(&alua_dh);
+}
+
+module_init(alua_init);
+module_exit(alua_exit);
+
+MODULE_DESCRIPTION("DM Multipath ALUA support");
+MODULE_AUTHOR("Hannes Reinecke <hare@suse.de>");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(ALUA_DH_VER);
diff --git a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c
index f2467e936e5..b9d23e9e9a4 100644
--- a/drivers/scsi/device_handler/scsi_dh_emc.c
+++ b/drivers/scsi/device_handler/scsi_dh_emc.c
@@ -25,28 +25,31 @@
#include <scsi/scsi_dh.h>
#include <scsi/scsi_device.h>
-#define CLARIION_NAME "emc_clariion"
+#define CLARIION_NAME "emc"
#define CLARIION_TRESPASS_PAGE 0x22
-#define CLARIION_BUFFER_SIZE 0x80
+#define CLARIION_BUFFER_SIZE 0xFC
#define CLARIION_TIMEOUT (60 * HZ)
#define CLARIION_RETRIES 3
#define CLARIION_UNBOUND_LU -1
+#define CLARIION_SP_A 0
+#define CLARIION_SP_B 1
-static unsigned char long_trespass[] = {
- 0, 0, 0, 0,
- CLARIION_TRESPASS_PAGE, /* Page code */
- 0x09, /* Page length - 2 */
- 0x81, /* Trespass code + Honor reservation bit */
- 0xff, 0xff, /* Trespass target */
- 0, 0, 0, 0, 0, 0 /* Reserved bytes / unknown */
-};
+/* Flags */
+#define CLARIION_SHORT_TRESPASS 1
+#define CLARIION_HONOR_RESERVATIONS 2
-static unsigned char long_trespass_hr[] = {
- 0, 0, 0, 0,
+/* LUN states */
+#define CLARIION_LUN_UNINITIALIZED -1
+#define CLARIION_LUN_UNBOUND 0
+#define CLARIION_LUN_BOUND 1
+#define CLARIION_LUN_OWNED 2
+
+static unsigned char long_trespass[] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
CLARIION_TRESPASS_PAGE, /* Page code */
0x09, /* Page length - 2 */
- 0x01, /* Trespass code + Honor reservation bit */
+ 0x01, /* Trespass code */
0xff, 0xff, /* Trespass target */
0, 0, 0, 0, 0, 0 /* Reserved bytes / unknown */
};
@@ -55,39 +58,56 @@ static unsigned char short_trespass[] = {
0, 0, 0, 0,
CLARIION_TRESPASS_PAGE, /* Page code */
0x02, /* Page length - 2 */
- 0x81, /* Trespass code + Honor reservation bit */
+ 0x01, /* Trespass code */
0xff, /* Trespass target */
};
-static unsigned char short_trespass_hr[] = {
- 0, 0, 0, 0,
- CLARIION_TRESPASS_PAGE, /* Page code */
- 0x02, /* Page length - 2 */
- 0x01, /* Trespass code + Honor reservation bit */
- 0xff, /* Trespass target */
+static const char * lun_state[] =
+{
+ "not bound",
+ "bound",
+ "owned",
};
struct clariion_dh_data {
/*
+ * Flags:
+ * CLARIION_SHORT_TRESPASS
* Use short trespass command (FC-series) or the long version
* (default for AX/CX CLARiiON arrays).
- */
- unsigned short_trespass;
- /*
+ *
+ * CLARIION_HONOR_RESERVATIONS
* Whether or not (default) to honor SCSI reservations when
* initiating a switch-over.
*/
- unsigned hr;
- /* I/O buffer for both MODE_SELECT and INQUIRY commands. */
+ unsigned flags;
+ /*
+ * I/O buffer for both MODE_SELECT and INQUIRY commands.
+ */
char buffer[CLARIION_BUFFER_SIZE];
/*
* SCSI sense buffer for commands -- assumes serial issuance
* and completion sequence of all commands for same multipath.
*/
unsigned char sense[SCSI_SENSE_BUFFERSIZE];
- /* which SP (A=0,B=1,UNBOUND=-1) is dflt SP for path's mapped dev */
+ unsigned int senselen;
+ /*
+ * LUN state
+ */
+ int lun_state;
+ /*
+ * SP Port number
+ */
+ int port;
+ /*
+ * which SP (A=0,B=1,UNBOUND=-1) is the default SP for this
+ * path's mapped LUN
+ */
int default_sp;
- /* which SP (A=0,B=1,UNBOUND=-1) is active for path's mapped dev */
+ /*
+ * which SP (A=0,B=1,UNBOUND=-1) is the active SP for this
+ * path's mapped LUN
+ */
int current_sp;
};
@@ -102,19 +122,16 @@ static inline struct clariion_dh_data
/*
* Parse MODE_SELECT cmd reply.
*/
-static int trespass_endio(struct scsi_device *sdev, int result)
+static int trespass_endio(struct scsi_device *sdev, char *sense)
{
- int err = SCSI_DH_OK;
+ int err = SCSI_DH_IO;
struct scsi_sense_hdr sshdr;
- struct clariion_dh_data *csdev = get_clariion_data(sdev);
- char *sense = csdev->sense;
- if (status_byte(result) == CHECK_CONDITION &&
- scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, &sshdr)) {
- sdev_printk(KERN_ERR, sdev, "Found valid sense data 0x%2x, "
+ if (!scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, &sshdr)) {
+ sdev_printk(KERN_ERR, sdev, "%s: Found valid sense data 0x%2x, "
"0x%2x, 0x%2x while sending CLARiiON trespass "
- "command.\n", sshdr.sense_key, sshdr.asc,
- sshdr.ascq);
+ "command.\n", CLARIION_NAME, sshdr.sense_key,
+ sshdr.asc, sshdr.ascq);
if ((sshdr.sense_key == 0x05) && (sshdr.asc == 0x04) &&
(sshdr.ascq == 0x00)) {
@@ -122,9 +139,9 @@ static int trespass_endio(struct scsi_device *sdev, int result)
* Array based copy in progress -- do not send
* mode_select or copy will be aborted mid-stream.
*/
- sdev_printk(KERN_INFO, sdev, "Array Based Copy in "
+ sdev_printk(KERN_INFO, sdev, "%s: Array Based Copy in "
"progress while sending CLARiiON trespass "
- "command.\n");
+ "command.\n", CLARIION_NAME);
err = SCSI_DH_DEV_TEMP_BUSY;
} else if ((sshdr.sense_key == 0x02) && (sshdr.asc == 0x04) &&
(sshdr.ascq == 0x03)) {
@@ -132,160 +149,153 @@ static int trespass_endio(struct scsi_device *sdev, int result)
* LUN Not Ready - Manual Intervention Required
* indicates in-progress ucode upgrade (NDU).
*/
- sdev_printk(KERN_INFO, sdev, "Detected in-progress "
+ sdev_printk(KERN_INFO, sdev, "%s: Detected in-progress "
"ucode upgrade NDU operation while sending "
- "CLARiiON trespass command.\n");
+ "CLARiiON trespass command.\n", CLARIION_NAME);
err = SCSI_DH_DEV_TEMP_BUSY;
} else
err = SCSI_DH_DEV_FAILED;
- } else if (result) {
- sdev_printk(KERN_ERR, sdev, "Error 0x%x while sending "
- "CLARiiON trespass command.\n", result);
- err = SCSI_DH_IO;
+ } else {
+ sdev_printk(KERN_INFO, sdev,
+ "%s: failed to send MODE SELECT, no sense available\n",
+ CLARIION_NAME);
}
-
return err;
}
-static int parse_sp_info_reply(struct scsi_device *sdev, int result,
- int *default_sp, int *current_sp, int *new_current_sp)
+static int parse_sp_info_reply(struct scsi_device *sdev,
+ struct clariion_dh_data *csdev)
{
int err = SCSI_DH_OK;
- struct clariion_dh_data *csdev = get_clariion_data(sdev);
- if (result == 0) {
- /* check for in-progress ucode upgrade (NDU) */
- if (csdev->buffer[48] != 0) {
- sdev_printk(KERN_NOTICE, sdev, "Detected in-progress "
- "ucode upgrade NDU operation while finding "
- "current active SP.");
- err = SCSI_DH_DEV_TEMP_BUSY;
- } else {
- *default_sp = csdev->buffer[5];
-
- if (csdev->buffer[4] == 2)
- /* SP for path is current */
- *current_sp = csdev->buffer[8];
- else {
- if (csdev->buffer[4] == 1)
- /* SP for this path is NOT current */
- if (csdev->buffer[8] == 0)
- *current_sp = 1;
- else
- *current_sp = 0;
- else
- /* unbound LU or LUNZ */
- *current_sp = CLARIION_UNBOUND_LU;
- }
- *new_current_sp = csdev->buffer[8];
- }
- } else {
- struct scsi_sense_hdr sshdr;
-
- err = SCSI_DH_IO;
-
- if (scsi_normalize_sense(csdev->sense, SCSI_SENSE_BUFFERSIZE,
- &sshdr))
- sdev_printk(KERN_ERR, sdev, "Found valid sense data "
- "0x%2x, 0x%2x, 0x%2x while finding current "
- "active SP.", sshdr.sense_key, sshdr.asc,
- sshdr.ascq);
- else
- sdev_printk(KERN_ERR, sdev, "Error 0x%x finding "
- "current active SP.", result);
+ /* check for in-progress ucode upgrade (NDU) */
+ if (csdev->buffer[48] != 0) {
+ sdev_printk(KERN_NOTICE, sdev, "%s: Detected in-progress "
+ "ucode upgrade NDU operation while finding "
+ "current active SP.", CLARIION_NAME);
+ err = SCSI_DH_DEV_TEMP_BUSY;
+ goto out;
+ }
+ if (csdev->buffer[4] < 0 || csdev->buffer[4] > 2) {
+ /* Invalid buffer format */
+ sdev_printk(KERN_NOTICE, sdev,
+ "%s: invalid VPD page 0xC0 format\n",
+ CLARIION_NAME);
+ err = SCSI_DH_NOSYS;
+ goto out;
+ }
+ switch (csdev->buffer[28] & 0x0f) {
+ case 6:
+ sdev_printk(KERN_NOTICE, sdev,
+ "%s: ALUA failover mode detected\n",
+ CLARIION_NAME);
+ break;
+ case 4:
+ /* Linux failover */
+ break;
+ default:
+ sdev_printk(KERN_WARNING, sdev,
+ "%s: Invalid failover mode %d\n",
+ CLARIION_NAME, csdev->buffer[28] & 0x0f);
+ err = SCSI_DH_NOSYS;
+ goto out;
}
+ csdev->default_sp = csdev->buffer[5];
+ csdev->lun_state = csdev->buffer[4];
+ csdev->current_sp = csdev->buffer[8];
+ csdev->port = csdev->buffer[7];
+
+out:
return err;
}
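For illustration only: parse_sp_info_reply() above pulls a handful of bytes out of the CLARiiON vendor INQUIRY reply. The byte offsets in this small decoder are taken straight from that function; any further structure of VPD page 0xC0 is an assumption of the example.

#include <stdio.h>

static void decode_sp_info(const unsigned char *buf)
{
	printf("LUN state      : %d (0=unbound, 1=bound, 2=owned)\n", buf[4]);
	printf("default SP     : %c\n", 'A' + buf[5]);
	printf("SP port        : %d\n", buf[7]);
	printf("current SP     : %c\n", 'A' + buf[8]);
	printf("failover mode  : %d (4=Linux/PNR, 6=ALUA)\n", buf[28] & 0x0f);
	printf("NDU in progress: %s\n", buf[48] ? "yes" : "no");
}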
-static int sp_info_endio(struct scsi_device *sdev, int result,
- int mode_select_sent, int *done)
+#define emc_default_str "FC (Legacy)"
+
+static char * parse_sp_model(struct scsi_device *sdev, unsigned char *buffer)
{
- struct clariion_dh_data *csdev = get_clariion_data(sdev);
- int err_flags, default_sp, current_sp, new_current_sp;
+ unsigned char len = buffer[4] + 5;
+ char *sp_model = NULL;
+ unsigned char sp_len, serial_len;
+
+ if (len < 160) {
+ sdev_printk(KERN_WARNING, sdev,
+ "%s: Invalid information section length %d\n",
+ CLARIION_NAME, len);
+ /* Check for old FC arrays */
+ if (!strncmp(buffer + 8, "DGC", 3)) {
+ /* Old FC array, not supporting extended information */
+ sp_model = emc_default_str;
+ }
+ goto out;
+ }
- err_flags = parse_sp_info_reply(sdev, result, &default_sp,
- &current_sp, &new_current_sp);
+ /*
+ * Parse extended information for SP model number
+ */
+ serial_len = buffer[160];
+ if (serial_len == 0 || serial_len + 161 > len) {
+ sdev_printk(KERN_WARNING, sdev,
+ "%s: Invalid array serial number length %d\n",
+ CLARIION_NAME, serial_len);
+ goto out;
+ }
+ sp_len = buffer[99];
+ if (sp_len == 0 || serial_len + sp_len + 161 > len) {
+ sdev_printk(KERN_WARNING, sdev,
+ "%s: Invalid model number length %d\n",
+ CLARIION_NAME, sp_len);
+ goto out;
+ }
+ sp_model = &buffer[serial_len + 161];
+ /* Strip whitespace at the end */
+ while (sp_len > 1 && sp_model[sp_len - 1] == ' ')
+ sp_len--;
- if (err_flags != SCSI_DH_OK)
- goto done;
+ sp_model[sp_len] = '\0';
- if (mode_select_sent) {
- csdev->default_sp = default_sp;
- csdev->current_sp = current_sp;
- } else {
- /*
- * Issue the actual module_selec request IFF either
- * (1) we do not know the identity of the current SP OR
- * (2) what we think we know is actually correct.
- */
- if ((current_sp != CLARIION_UNBOUND_LU) &&
- (new_current_sp != current_sp)) {
-
- csdev->default_sp = default_sp;
- csdev->current_sp = current_sp;
-
- sdev_printk(KERN_INFO, sdev, "Ignoring path group "
- "switch-over command for CLARiiON SP%s since "
- " mapped device is already initialized.",
- current_sp ? "B" : "A");
- if (done)
- *done = 1; /* as good as doing it */
- }
- }
-done:
- return err_flags;
+out:
+ return sp_model;
}
/*
-* Get block request for REQ_BLOCK_PC command issued to path. Currently
-* limited to MODE_SELECT (trespass) and INQUIRY (VPD page 0xC0) commands.
-*
-* Uses data and sense buffers in hardware handler context structure and
-* assumes serial servicing of commands, both issuance and completion.
-*/
-static struct request *get_req(struct scsi_device *sdev, int cmd)
+ * Get block request for REQ_BLOCK_PC command issued to path. Currently
+ * limited to MODE_SELECT (trespass) and INQUIRY (VPD page 0xC0) commands.
+ *
+ * Uses data and sense buffers in hardware handler context structure and
+ * assumes serial servicing of commands, both issuance and completion.
+ */
+static struct request *get_req(struct scsi_device *sdev, int cmd,
+ unsigned char *buffer)
{
- struct clariion_dh_data *csdev = get_clariion_data(sdev);
struct request *rq;
- unsigned char *page22;
int len = 0;
rq = blk_get_request(sdev->request_queue,
- (cmd == MODE_SELECT) ? WRITE : READ, GFP_ATOMIC);
+ (cmd == MODE_SELECT) ? WRITE : READ, GFP_NOIO);
if (!rq) {
sdev_printk(KERN_INFO, sdev, "get_req: blk_get_request failed");
return NULL;
}
- memset(&rq->cmd, 0, BLK_MAX_CDB);
+ memset(rq->cmd, 0, BLK_MAX_CDB);
+ rq->cmd_len = COMMAND_SIZE(cmd);
rq->cmd[0] = cmd;
- rq->cmd_len = COMMAND_SIZE(rq->cmd[0]);
switch (cmd) {
case MODE_SELECT:
- if (csdev->short_trespass) {
- page22 = csdev->hr ? short_trespass_hr : short_trespass;
- len = sizeof(short_trespass);
- } else {
- page22 = csdev->hr ? long_trespass_hr : long_trespass;
- len = sizeof(long_trespass);
- }
- /*
- * Can't DMA from kernel BSS -- must copy selected trespass
- * command mode page contents to context buffer which is
- * allocated by kmalloc.
- */
- BUG_ON((len > CLARIION_BUFFER_SIZE));
- memcpy(csdev->buffer, page22, len);
+ len = sizeof(short_trespass);
+ rq->cmd_flags |= REQ_RW;
+ rq->cmd[1] = 0x10;
+ break;
+ case MODE_SELECT_10:
+ len = sizeof(long_trespass);
rq->cmd_flags |= REQ_RW;
rq->cmd[1] = 0x10;
break;
case INQUIRY:
- rq->cmd[1] = 0x1;
- rq->cmd[2] = 0xC0;
len = CLARIION_BUFFER_SIZE;
- memset(csdev->buffer, 0, CLARIION_BUFFER_SIZE);
+ memset(buffer, 0, len);
break;
default:
BUG_ON(1);
@@ -298,47 +308,94 @@ static struct request *get_req(struct scsi_device *sdev, int cmd)
rq->timeout = CLARIION_TIMEOUT;
rq->retries = CLARIION_RETRIES;
- rq->sense = csdev->sense;
- memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
- rq->sense_len = 0;
-
- if (blk_rq_map_kern(sdev->request_queue, rq, csdev->buffer,
- len, GFP_ATOMIC)) {
- __blk_put_request(rq->q, rq);
+ if (blk_rq_map_kern(rq->q, rq, buffer, len, GFP_NOIO)) {
+ blk_put_request(rq);
return NULL;
}
return rq;
}
-static int send_cmd(struct scsi_device *sdev, int cmd)
+static int send_inquiry_cmd(struct scsi_device *sdev, int page,
+ struct clariion_dh_data *csdev)
{
- struct request *rq = get_req(sdev, cmd);
+ struct request *rq = get_req(sdev, INQUIRY, csdev->buffer);
+ int err;
if (!rq)
return SCSI_DH_RES_TEMP_UNAVAIL;
- return blk_execute_rq(sdev->request_queue, NULL, rq, 1);
+ rq->sense = csdev->sense;
+ memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
+ rq->sense_len = csdev->senselen = 0;
+
+ rq->cmd[0] = INQUIRY;
+ if (page != 0) {
+ rq->cmd[1] = 1;
+ rq->cmd[2] = page;
+ }
+ err = blk_execute_rq(sdev->request_queue, NULL, rq, 1);
+ if (err == -EIO) {
+ sdev_printk(KERN_INFO, sdev,
+ "%s: failed to send %s INQUIRY: %x\n",
+ CLARIION_NAME, page?"EVPD":"standard",
+ rq->errors);
+ csdev->senselen = rq->sense_len;
+ err = SCSI_DH_IO;
+ }
+
+ blk_put_request(rq);
+
+ return err;
}
-static int clariion_activate(struct scsi_device *sdev)
+static int send_trespass_cmd(struct scsi_device *sdev,
+ struct clariion_dh_data *csdev)
{
- int result, done = 0;
+ struct request *rq;
+ unsigned char *page22;
+ int err, len, cmd;
+
+ if (csdev->flags & CLARIION_SHORT_TRESPASS) {
+ page22 = short_trespass;
+ if (!(csdev->flags & CLARIION_HONOR_RESERVATIONS))
+ /* Set Honor Reservations bit */
+ page22[6] |= 0x80;
+ len = sizeof(short_trespass);
+ cmd = MODE_SELECT;
+ } else {
+ page22 = long_trespass;
+ if (!(csdev->flags & CLARIION_HONOR_RESERVATIONS))
+ /* Set Honor Reservations bit */
+ page22[10] |= 0x80;
+ len = sizeof(long_trespass);
+ cmd = MODE_SELECT_10;
+ }
+ BUG_ON((len > CLARIION_BUFFER_SIZE));
+ memcpy(csdev->buffer, page22, len);
- result = send_cmd(sdev, INQUIRY);
- result = sp_info_endio(sdev, result, 0, &done);
- if (result || done)
- goto done;
+ rq = get_req(sdev, cmd, csdev->buffer);
+ if (!rq)
+ return SCSI_DH_RES_TEMP_UNAVAIL;
- result = send_cmd(sdev, MODE_SELECT);
- result = trespass_endio(sdev, result);
- if (result)
- goto done;
+ rq->sense = csdev->sense;
+ memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
+ rq->sense_len = csdev->senselen = 0;
- result = send_cmd(sdev, INQUIRY);
- result = sp_info_endio(sdev, result, 1, NULL);
-done:
- return result;
+ err = blk_execute_rq(sdev->request_queue, NULL, rq, 1);
+ if (err == -EIO) {
+ if (rq->sense_len) {
+ err = trespass_endio(sdev, csdev->sense);
+ } else {
+ sdev_printk(KERN_INFO, sdev,
+ "%s: failed to send MODE SELECT: %x\n",
+ CLARIION_NAME, rq->errors);
+ }
+ }
+
+ blk_put_request(rq);
+
+ return err;
}
static int clariion_check_sense(struct scsi_device *sdev,
@@ -386,99 +443,215 @@ static int clariion_check_sense(struct scsi_device *sdev,
break;
}
- /* success just means we do not care what scsi-ml does */
- return SUCCESS;
+ return SCSI_RETURN_NOT_HANDLED;
+}
+
+static int clariion_prep_fn(struct scsi_device *sdev, struct request *req)
+{
+ struct clariion_dh_data *h = get_clariion_data(sdev);
+ int ret = BLKPREP_OK;
+
+ if (h->lun_state != CLARIION_LUN_OWNED) {
+ ret = BLKPREP_KILL;
+ req->cmd_flags |= REQ_QUIET;
+ }
+ return ret;
+
+}
+
+static int clariion_std_inquiry(struct scsi_device *sdev,
+ struct clariion_dh_data *csdev)
+{
+ int err;
+ char *sp_model;
+
+ err = send_inquiry_cmd(sdev, 0, csdev);
+ if (err != SCSI_DH_OK && csdev->senselen) {
+ struct scsi_sense_hdr sshdr;
+
+ if (scsi_normalize_sense(csdev->sense, SCSI_SENSE_BUFFERSIZE,
+ &sshdr)) {
+ sdev_printk(KERN_ERR, sdev, "%s: INQUIRY sense code "
+ "%02x/%02x/%02x\n", CLARIION_NAME,
+ sshdr.sense_key, sshdr.asc, sshdr.ascq);
+ }
+ err = SCSI_DH_IO;
+ goto out;
+ }
+
+ sp_model = parse_sp_model(sdev, csdev->buffer);
+ if (!sp_model) {
+ err = SCSI_DH_DEV_UNSUPP;
+ goto out;
+ }
+
+ /*
+ * FC Series arrays do not support long trespass
+ */
+ if (!strlen(sp_model) || !strncmp(sp_model, "FC",2))
+ csdev->flags |= CLARIION_SHORT_TRESPASS;
+
+ sdev_printk(KERN_INFO, sdev,
+ "%s: detected Clariion %s, flags %x\n",
+ CLARIION_NAME, sp_model, csdev->flags);
+out:
+ return err;
}
-static const struct {
- char *vendor;
- char *model;
-} clariion_dev_list[] = {
+static int clariion_send_inquiry(struct scsi_device *sdev,
+ struct clariion_dh_data *csdev)
+{
+ int err, retry = CLARIION_RETRIES;
+
+retry:
+ err = send_inquiry_cmd(sdev, 0xC0, csdev);
+ if (err != SCSI_DH_OK && csdev->senselen) {
+ struct scsi_sense_hdr sshdr;
+
+ err = scsi_normalize_sense(csdev->sense, SCSI_SENSE_BUFFERSIZE,
+ &sshdr);
+ if (!err)
+ return SCSI_DH_IO;
+
+ err = clariion_check_sense(sdev, &sshdr);
+ if (retry > 0 && err == NEEDS_RETRY) {
+ retry--;
+ goto retry;
+ }
+ sdev_printk(KERN_ERR, sdev, "%s: INQUIRY sense code "
+ "%02x/%02x/%02x\n", CLARIION_NAME,
+ sshdr.sense_key, sshdr.asc, sshdr.ascq);
+ err = SCSI_DH_IO;
+ } else {
+ err = parse_sp_info_reply(sdev, csdev);
+ }
+ return err;
+}
+
+static int clariion_activate(struct scsi_device *sdev)
+{
+ struct clariion_dh_data *csdev = get_clariion_data(sdev);
+ int result;
+
+ result = clariion_send_inquiry(sdev, csdev);
+ if (result != SCSI_DH_OK)
+ goto done;
+
+ if (csdev->lun_state == CLARIION_LUN_OWNED)
+ goto done;
+
+ result = send_trespass_cmd(sdev, csdev);
+ if (result != SCSI_DH_OK)
+ goto done;
+ sdev_printk(KERN_INFO, sdev,"%s: %s trespass command sent\n",
+ CLARIION_NAME,
+ csdev->flags&CLARIION_SHORT_TRESPASS?"short":"long" );
+
+ /* Update status */
+ result = clariion_send_inquiry(sdev, csdev);
+ if (result != SCSI_DH_OK)
+ goto done;
+
+done:
+ sdev_printk(KERN_INFO, sdev,
+ "%s: at SP %c Port %d (%s, default SP %c)\n",
+ CLARIION_NAME, csdev->current_sp + 'A',
+ csdev->port, lun_state[csdev->lun_state],
+ csdev->default_sp + 'A');
+
+ return result;
+}
+
+static const struct scsi_dh_devlist clariion_dev_list[] = {
{"DGC", "RAID"},
{"DGC", "DISK"},
+ {"DGC", "VRAID"},
{NULL, NULL},
};
-static int clariion_bus_notify(struct notifier_block *, unsigned long, void *);
+static int clariion_bus_attach(struct scsi_device *sdev);
+static void clariion_bus_detach(struct scsi_device *sdev);
static struct scsi_device_handler clariion_dh = {
.name = CLARIION_NAME,
.module = THIS_MODULE,
- .nb.notifier_call = clariion_bus_notify,
+ .devlist = clariion_dev_list,
+ .attach = clariion_bus_attach,
+ .detach = clariion_bus_detach,
.check_sense = clariion_check_sense,
.activate = clariion_activate,
+ .prep_fn = clariion_prep_fn,
};
/*
* TODO: need some interface so we can set trespass values
*/
-static int clariion_bus_notify(struct notifier_block *nb,
- unsigned long action, void *data)
+static int clariion_bus_attach(struct scsi_device *sdev)
{
- struct device *dev = data;
- struct scsi_device *sdev;
struct scsi_dh_data *scsi_dh_data;
struct clariion_dh_data *h;
- int i, found = 0;
unsigned long flags;
+ int err;
- if (!scsi_is_sdev_device(dev))
- return 0;
+ scsi_dh_data = kzalloc(sizeof(struct scsi_device_handler *)
+ + sizeof(*h) , GFP_KERNEL);
+ if (!scsi_dh_data) {
+ sdev_printk(KERN_ERR, sdev, "%s: Attach failed\n",
+ CLARIION_NAME);
+ return -ENOMEM;
+ }
- sdev = to_scsi_device(dev);
+ scsi_dh_data->scsi_dh = &clariion_dh;
+ h = (struct clariion_dh_data *) scsi_dh_data->buf;
+ h->lun_state = CLARIION_LUN_UNINITIALIZED;
+ h->default_sp = CLARIION_UNBOUND_LU;
+ h->current_sp = CLARIION_UNBOUND_LU;
- if (action == BUS_NOTIFY_ADD_DEVICE) {
- for (i = 0; clariion_dev_list[i].vendor; i++) {
- if (!strncmp(sdev->vendor, clariion_dev_list[i].vendor,
- strlen(clariion_dev_list[i].vendor)) &&
- !strncmp(sdev->model, clariion_dev_list[i].model,
- strlen(clariion_dev_list[i].model))) {
- found = 1;
- break;
- }
- }
- if (!found)
- goto out;
-
- scsi_dh_data = kzalloc(sizeof(struct scsi_device_handler *)
- + sizeof(*h) , GFP_KERNEL);
- if (!scsi_dh_data) {
- sdev_printk(KERN_ERR, sdev, "Attach failed %s.\n",
- CLARIION_NAME);
- goto out;
- }
+ err = clariion_std_inquiry(sdev, h);
+ if (err != SCSI_DH_OK)
+ goto failed;
- scsi_dh_data->scsi_dh = &clariion_dh;
- h = (struct clariion_dh_data *) scsi_dh_data->buf;
- h->default_sp = CLARIION_UNBOUND_LU;
- h->current_sp = CLARIION_UNBOUND_LU;
+ err = clariion_send_inquiry(sdev, h);
+ if (err != SCSI_DH_OK)
+ goto failed;
- spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
- sdev->scsi_dh_data = scsi_dh_data;
- spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
+ if (!try_module_get(THIS_MODULE))
+ goto failed;
- sdev_printk(KERN_NOTICE, sdev, "Attached %s.\n", CLARIION_NAME);
- try_module_get(THIS_MODULE);
+ spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
+ sdev->scsi_dh_data = scsi_dh_data;
+ spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
- } else if (action == BUS_NOTIFY_DEL_DEVICE) {
- if (sdev->scsi_dh_data == NULL ||
- sdev->scsi_dh_data->scsi_dh != &clariion_dh)
- goto out;
+ sdev_printk(KERN_INFO, sdev,
+ "%s: connected to SP %c Port %d (%s, default SP %c)\n",
+ CLARIION_NAME, h->current_sp + 'A',
+ h->port, lun_state[h->lun_state],
+ h->default_sp + 'A');
- spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
- scsi_dh_data = sdev->scsi_dh_data;
- sdev->scsi_dh_data = NULL;
- spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
+ return 0;
- sdev_printk(KERN_NOTICE, sdev, "Dettached %s.\n",
- CLARIION_NAME);
+failed:
+ kfree(scsi_dh_data);
+ sdev_printk(KERN_ERR, sdev, "%s: not attached\n",
+ CLARIION_NAME);
+ return -EINVAL;
+}
- kfree(scsi_dh_data);
- module_put(THIS_MODULE);
- }
+static void clariion_bus_detach(struct scsi_device *sdev)
+{
+ struct scsi_dh_data *scsi_dh_data;
+ unsigned long flags;
-out:
- return 0;
+ spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
+ scsi_dh_data = sdev->scsi_dh_data;
+ sdev->scsi_dh_data = NULL;
+ spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
+
+ sdev_printk(KERN_NOTICE, sdev, "%s: Detached\n",
+ CLARIION_NAME);
+
+ kfree(scsi_dh_data);
+ module_put(THIS_MODULE);
}
static int __init clariion_init(void)
@@ -487,7 +660,8 @@ static int __init clariion_init(void)
r = scsi_register_device_handler(&clariion_dh);
if (r != 0)
- printk(KERN_ERR "Failed to register scsi device handler.");
+ printk(KERN_ERR "%s: Failed to register scsi device handler.",
+ CLARIION_NAME);
return r;
}
diff --git a/drivers/scsi/device_handler/scsi_dh_hp_sw.c b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
index ae6be87d6a8..a6a4ef3ad51 100644
--- a/drivers/scsi/device_handler/scsi_dh_hp_sw.c
+++ b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
@@ -4,6 +4,7 @@
*
* Copyright (C) 2006 Red Hat, Inc. All rights reserved.
* Copyright (C) 2006 Mike Christie
+ * Copyright (C) 2008 Hannes Reinecke <hare@suse.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -25,13 +26,18 @@
#include <scsi/scsi_eh.h>
#include <scsi/scsi_dh.h>
-#define HP_SW_NAME "hp_sw"
+#define HP_SW_NAME "hp_sw"
-#define HP_SW_TIMEOUT (60 * HZ)
-#define HP_SW_RETRIES 3
+#define HP_SW_TIMEOUT (60 * HZ)
+#define HP_SW_RETRIES 3
+
+#define HP_SW_PATH_UNINITIALIZED -1
+#define HP_SW_PATH_ACTIVE 0
+#define HP_SW_PATH_PASSIVE 1
struct hp_sw_dh_data {
unsigned char sense[SCSI_SENSE_BUFFERSIZE];
+ int path_state;
int retries;
};
@@ -42,51 +48,161 @@ static inline struct hp_sw_dh_data *get_hp_sw_data(struct scsi_device *sdev)
return ((struct hp_sw_dh_data *) scsi_dh_data->buf);
}
-static int hp_sw_done(struct scsi_device *sdev)
+/*
+ * tur_done - Handle TEST UNIT READY return status
+ * @sdev: sdev the command has been sent to
+ * @sense: sense data returned by the TEST UNIT READY command
+ *
+ * Returns SCSI_DH_DEV_OFFLINED if the sdev is on the passive path
+ */
+static int tur_done(struct scsi_device *sdev, unsigned char *sense)
{
- struct hp_sw_dh_data *h = get_hp_sw_data(sdev);
struct scsi_sense_hdr sshdr;
- int rc;
-
- sdev_printk(KERN_INFO, sdev, "hp_sw_done\n");
+ int ret;
- rc = scsi_normalize_sense(h->sense, SCSI_SENSE_BUFFERSIZE, &sshdr);
- if (!rc)
+ ret = scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, &sshdr);
+ if (!ret) {
+ sdev_printk(KERN_WARNING, sdev,
+ "%s: sending tur failed, no sense available\n",
+ HP_SW_NAME);
+ ret = SCSI_DH_IO;
goto done;
+ }
switch (sshdr.sense_key) {
+ case UNIT_ATTENTION:
+ ret = SCSI_DH_IMM_RETRY;
+ break;
case NOT_READY:
- if ((sshdr.asc == 0x04) && (sshdr.ascq == 3)) {
- rc = SCSI_DH_RETRY;
- h->retries++;
+ if ((sshdr.asc == 0x04) && (sshdr.ascq == 2)) {
+ /*
+ * LUN not ready - Initialization command required
+ *
+ * This is the passive path
+ */
+ ret = SCSI_DH_DEV_OFFLINED;
break;
}
- /* fall through */
+ /* Fallthrough */
default:
- h->retries++;
- rc = SCSI_DH_IMM_RETRY;
+ sdev_printk(KERN_WARNING, sdev,
+ "%s: sending tur failed, sense %x/%x/%x\n",
+ HP_SW_NAME, sshdr.sense_key, sshdr.asc,
+ sshdr.ascq);
+ break;
}
done:
- if (rc == SCSI_DH_OK || rc == SCSI_DH_IO)
- h->retries = 0;
- else if (h->retries > HP_SW_RETRIES) {
- h->retries = 0;
+ return ret;
+}
+
+/*
+ * hp_sw_tur - Send TEST UNIT READY
+ * @sdev: sdev command should be sent to
+ *
+ * Use the TEST UNIT READY command to determine
+ * the path state.
+ */
+static int hp_sw_tur(struct scsi_device *sdev, struct hp_sw_dh_data *h)
+{
+ struct request *req;
+ int ret;
+
+ req = blk_get_request(sdev->request_queue, WRITE, GFP_NOIO);
+ if (!req)
+ return SCSI_DH_RES_TEMP_UNAVAIL;
+
+ req->cmd_type = REQ_TYPE_BLOCK_PC;
+ req->cmd_flags |= REQ_FAILFAST;
+ req->cmd_len = COMMAND_SIZE(TEST_UNIT_READY);
+ memset(req->cmd, 0, MAX_COMMAND_SIZE);
+ req->cmd[0] = TEST_UNIT_READY;
+ req->timeout = HP_SW_TIMEOUT;
+ req->sense = h->sense;
+ memset(req->sense, 0, SCSI_SENSE_BUFFERSIZE);
+ req->sense_len = 0;
+
+retry:
+ ret = blk_execute_rq(req->q, NULL, req, 1);
+ if (ret == -EIO) {
+ if (req->sense_len > 0) {
+ ret = tur_done(sdev, h->sense);
+ } else {
+ sdev_printk(KERN_WARNING, sdev,
+ "%s: sending tur failed with %x\n",
+ HP_SW_NAME, req->errors);
+ ret = SCSI_DH_IO;
+ }
+ } else {
+ h->path_state = HP_SW_PATH_ACTIVE;
+ ret = SCSI_DH_OK;
+ }
+ if (ret == SCSI_DH_IMM_RETRY)
+ goto retry;
+ if (ret == SCSI_DH_DEV_OFFLINED) {
+ h->path_state = HP_SW_PATH_PASSIVE;
+ ret = SCSI_DH_OK;
+ }
+
+ blk_put_request(req);
+
+ return ret;
+}
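For illustration only: the active/passive probe hp_sw_tur() performs can be reproduced from userspace with a plain TEST UNIT READY. The sense interpretation (NOT READY 02h/04h/02h, "initializing command required", meaning the passive path) mirrors tur_done() above; the device node is illustrative and fixed-format sense data is assumed.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <scsi/sg.h>
#include <unistd.h>

int main(void)
{
	unsigned char cdb[6] = { 0x00 };	/* TEST UNIT READY */
	unsigned char sense[32] = { 0 };
	struct sg_io_hdr io;
	int fd = open("/dev/sg0", O_RDWR);

	if (fd < 0)
		return 1;

	memset(&io, 0, sizeof(io));
	io.interface_id = 'S';
	io.dxfer_direction = SG_DXFER_NONE;	/* TUR carries no data */
	io.cmd_len = sizeof(cdb);
	io.cmdp = cdb;
	io.sbp = sense;
	io.mx_sb_len = sizeof(sense);
	io.timeout = 60000;

	if (ioctl(fd, SG_IO, &io) < 0) {
		close(fd);
		return 1;
	}
	if (io.status == 0)
		printf("active path\n");
	else if ((sense[2] & 0x0f) == 0x02 && sense[12] == 0x04 && sense[13] == 0x02)
		printf("passive path (LUN not ready, initializing command required)\n");
	else
		printf("path state unknown, sense %02x/%02x/%02x\n",
		       sense[2] & 0x0f, sense[12], sense[13]);
	close(fd);
	return 0;
}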
+
+/*
+ * start_done - Handle START STOP UNIT return status
+ * @sdev: sdev the command has been sent to
+ * @errors: blk error code
+ */
+static int start_done(struct scsi_device *sdev, unsigned char *sense)
+{
+ struct scsi_sense_hdr sshdr;
+ int rc;
+
+ rc = scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, &sshdr);
+ if (!rc) {
+ sdev_printk(KERN_WARNING, sdev,
+ "%s: sending start_stop_unit failed, "
+ "no sense available\n",
+ HP_SW_NAME);
+ return SCSI_DH_IO;
+ }
+ switch (sshdr.sense_key) {
+ case NOT_READY:
+ if ((sshdr.asc == 0x04) && (sshdr.ascq == 3)) {
+ /*
+ * LUN not ready - manual intervention required
+ *
+ * Switch-over in progress, retry.
+ */
+ rc = SCSI_DH_RETRY;
+ break;
+ }
+ /* fall through */
+ default:
+ sdev_printk(KERN_WARNING, sdev,
+ "%s: sending start_stop_unit failed, sense %x/%x/%x\n",
+ HP_SW_NAME, sshdr.sense_key, sshdr.asc,
+ sshdr.ascq);
rc = SCSI_DH_IO;
}
+
return rc;
}
-static int hp_sw_activate(struct scsi_device *sdev)
+/*
+ * hp_sw_start_stop - Send START STOP UNIT command
+ * @sdev: sdev command should be sent to
+ *
+ * Sending START STOP UNIT activates the SP.
+ */
+static int hp_sw_start_stop(struct scsi_device *sdev, struct hp_sw_dh_data *h)
{
- struct hp_sw_dh_data *h = get_hp_sw_data(sdev);
struct request *req;
- int ret = SCSI_DH_RES_TEMP_UNAVAIL;
+ int ret, retry;
- req = blk_get_request(sdev->request_queue, WRITE, GFP_ATOMIC);
+ req = blk_get_request(sdev->request_queue, WRITE, GFP_NOIO);
if (!req)
- goto done;
-
- sdev_printk(KERN_INFO, sdev, "sending START_STOP.");
+ return SCSI_DH_RES_TEMP_UNAVAIL;
req->cmd_type = REQ_TYPE_BLOCK_PC;
req->cmd_flags |= REQ_FAILFAST;
@@ -98,95 +214,153 @@ static int hp_sw_activate(struct scsi_device *sdev)
req->sense = h->sense;
memset(req->sense, 0, SCSI_SENSE_BUFFERSIZE);
req->sense_len = 0;
+ retry = h->retries;
+retry:
ret = blk_execute_rq(req->q, NULL, req, 1);
- if (!ret) /* SUCCESS */
- ret = hp_sw_done(sdev);
- else
+ if (ret == -EIO) {
+ if (req->sense_len > 0) {
+ ret = start_done(sdev, h->sense);
+ } else {
+ sdev_printk(KERN_WARNING, sdev,
+ "%s: sending start_stop_unit failed with %x\n",
+ HP_SW_NAME, req->errors);
+ ret = SCSI_DH_IO;
+ }
+ } else
+ ret = SCSI_DH_OK;
+
+ if (ret == SCSI_DH_RETRY) {
+ if (--retry)
+ goto retry;
ret = SCSI_DH_IO;
-done:
+ }
+
+ blk_put_request(req);
+
+ return ret;
+}
+
+static int hp_sw_prep_fn(struct scsi_device *sdev, struct request *req)
+{
+ struct hp_sw_dh_data *h = get_hp_sw_data(sdev);
+ int ret = BLKPREP_OK;
+
+ if (h->path_state != HP_SW_PATH_ACTIVE) {
+ ret = BLKPREP_KILL;
+ req->cmd_flags |= REQ_QUIET;
+ }
+ return ret;
+
+}
+
+/*
+ * hp_sw_activate - Activate a path
+ * @sdev: sdev on the path to be activated
+ *
+ * The HP Active/Passive firmware is pretty simple;
+ * the passive path reports NOT READY with sense codes
+ * 0x04/0x02; a START STOP UNIT command will then
+ * activate the passive path (and deactivate the
+ * previously active one).
+ */
+static int hp_sw_activate(struct scsi_device *sdev)
+{
+ int ret = SCSI_DH_OK;
+ struct hp_sw_dh_data *h = get_hp_sw_data(sdev);
+
+ ret = hp_sw_tur(sdev, h);
+
+ if (ret == SCSI_DH_OK && h->path_state == HP_SW_PATH_PASSIVE) {
+ ret = hp_sw_start_stop(sdev, h);
+ if (ret == SCSI_DH_OK)
+ sdev_printk(KERN_INFO, sdev,
+ "%s: activated path\n",
+ HP_SW_NAME);
+ }
+
return ret;
}
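For illustration only: activation here is a START STOP UNIT with the START bit set. The CDB setup itself is not visible in these hunks, so the exact byte values below are an assumption of this sketch; the START bit semantics follow the SCSI spec.

unsigned char start_stop_cdb[6] = {
	0x1b,	/* START STOP UNIT */
	0x00,	/* IMMED clear: wait for the switch-over to complete */
	0x00, 0x00,
	0x01,	/* POWER CONDITION 0, LOEJ 0, START 1: bring the passive SP online */
	0x00	/* CONTROL */
};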
-static const struct {
- char *vendor;
- char *model;
-} hp_sw_dh_data_list[] = {
- {"COMPAQ", "MSA"},
- {"HP", "HSV"},
+static const struct scsi_dh_devlist hp_sw_dh_data_list[] = {
+ {"COMPAQ", "MSA1000 VOLUME"},
+ {"COMPAQ", "HSV110"},
+ {"HP", "HSV100"},
{"DEC", "HSG80"},
{NULL, NULL},
};
-static int hp_sw_bus_notify(struct notifier_block *, unsigned long, void *);
+static int hp_sw_bus_attach(struct scsi_device *sdev);
+static void hp_sw_bus_detach(struct scsi_device *sdev);
static struct scsi_device_handler hp_sw_dh = {
.name = HP_SW_NAME,
.module = THIS_MODULE,
- .nb.notifier_call = hp_sw_bus_notify,
+ .devlist = hp_sw_dh_data_list,
+ .attach = hp_sw_bus_attach,
+ .detach = hp_sw_bus_detach,
.activate = hp_sw_activate,
+ .prep_fn = hp_sw_prep_fn,
};
-static int hp_sw_bus_notify(struct notifier_block *nb,
- unsigned long action, void *data)
+static int hp_sw_bus_attach(struct scsi_device *sdev)
{
- struct device *dev = data;
- struct scsi_device *sdev;
struct scsi_dh_data *scsi_dh_data;
- int i, found = 0;
+ struct hp_sw_dh_data *h;
unsigned long flags;
+ int ret;
- if (!scsi_is_sdev_device(dev))
+ scsi_dh_data = kzalloc(sizeof(struct scsi_device_handler *)
+ + sizeof(struct hp_sw_dh_data) , GFP_KERNEL);
+ if (!scsi_dh_data) {
+ sdev_printk(KERN_ERR, sdev, "%s: Attach Failed\n",
+ HP_SW_NAME);
return 0;
+ }
- sdev = to_scsi_device(dev);
-
- if (action == BUS_NOTIFY_ADD_DEVICE) {
- for (i = 0; hp_sw_dh_data_list[i].vendor; i++) {
- if (!strncmp(sdev->vendor, hp_sw_dh_data_list[i].vendor,
- strlen(hp_sw_dh_data_list[i].vendor)) &&
- !strncmp(sdev->model, hp_sw_dh_data_list[i].model,
- strlen(hp_sw_dh_data_list[i].model))) {
- found = 1;
- break;
- }
- }
- if (!found)
- goto out;
+ scsi_dh_data->scsi_dh = &hp_sw_dh;
+ h = (struct hp_sw_dh_data *) scsi_dh_data->buf;
+ h->path_state = HP_SW_PATH_UNINITIALIZED;
+ h->retries = HP_SW_RETRIES;
- scsi_dh_data = kzalloc(sizeof(struct scsi_device_handler *)
- + sizeof(struct hp_sw_dh_data) , GFP_KERNEL);
- if (!scsi_dh_data) {
- sdev_printk(KERN_ERR, sdev, "Attach Failed %s.\n",
- HP_SW_NAME);
- goto out;
- }
+ ret = hp_sw_tur(sdev, h);
+ if (ret != SCSI_DH_OK || h->path_state == HP_SW_PATH_UNINITIALIZED)
+ goto failed;
- scsi_dh_data->scsi_dh = &hp_sw_dh;
- spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
- sdev->scsi_dh_data = scsi_dh_data;
- spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
- try_module_get(THIS_MODULE);
+ if (!try_module_get(THIS_MODULE))
+ goto failed;
- sdev_printk(KERN_NOTICE, sdev, "Attached %s.\n", HP_SW_NAME);
- } else if (action == BUS_NOTIFY_DEL_DEVICE) {
- if (sdev->scsi_dh_data == NULL ||
- sdev->scsi_dh_data->scsi_dh != &hp_sw_dh)
- goto out;
+ spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
+ sdev->scsi_dh_data = scsi_dh_data;
+ spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
- spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
- scsi_dh_data = sdev->scsi_dh_data;
- sdev->scsi_dh_data = NULL;
- spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
- module_put(THIS_MODULE);
+ sdev_printk(KERN_INFO, sdev, "%s: attached to %s path\n",
+ HP_SW_NAME, h->path_state == HP_SW_PATH_ACTIVE?
+ "active":"passive");
- sdev_printk(KERN_NOTICE, sdev, "Dettached %s.\n", HP_SW_NAME);
+ return 0;
- kfree(scsi_dh_data);
- }
+failed:
+ kfree(scsi_dh_data);
+ sdev_printk(KERN_ERR, sdev, "%s: not attached\n",
+ HP_SW_NAME);
+ return -EINVAL;
+}
-out:
- return 0;
+static void hp_sw_bus_detach( struct scsi_device *sdev )
+{
+ struct scsi_dh_data *scsi_dh_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
+ scsi_dh_data = sdev->scsi_dh_data;
+ sdev->scsi_dh_data = NULL;
+ spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
+ module_put(THIS_MODULE);
+
+ sdev_printk(KERN_NOTICE, sdev, "%s: Detached\n", HP_SW_NAME);
+
+ kfree(scsi_dh_data);
}
static int __init hp_sw_init(void)
@@ -202,6 +376,6 @@ static void __exit hp_sw_exit(void)
module_init(hp_sw_init);
module_exit(hp_sw_exit);
-MODULE_DESCRIPTION("HP MSA 1000");
+MODULE_DESCRIPTION("HP Active/Passive driver");
MODULE_AUTHOR("Mike Christie <michaelc@cs.wisc.edu");
MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index fdf34b0ec6e..e7c7b4ebc1f 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -173,6 +173,11 @@ struct rdac_dh_data {
#define RDAC_STATE_ACTIVE 0
#define RDAC_STATE_PASSIVE 1
unsigned char state;
+
+#define RDAC_LUN_UNOWNED 0
+#define RDAC_LUN_OWNED 1
+#define RDAC_LUN_AVT 2
+ char lun_state;
unsigned char sense[SCSI_SENSE_BUFFERSIZE];
union {
struct c2_inquiry c2;
@@ -182,6 +187,13 @@ struct rdac_dh_data {
} inq;
};
+static const char *lun_state[] =
+{
+ "unowned",
+ "owned",
+ "owned (AVT mode)",
+};
+
static LIST_HEAD(ctlr_list);
static DEFINE_SPINLOCK(list_lock);
@@ -197,9 +209,8 @@ static struct request *get_rdac_req(struct scsi_device *sdev,
{
struct request *rq;
struct request_queue *q = sdev->request_queue;
- struct rdac_dh_data *h = get_rdac_data(sdev);
- rq = blk_get_request(q, rw, GFP_KERNEL);
+ rq = blk_get_request(q, rw, GFP_NOIO);
if (!rq) {
sdev_printk(KERN_INFO, sdev,
@@ -207,17 +218,14 @@ static struct request *get_rdac_req(struct scsi_device *sdev,
return NULL;
}
- if (buflen && blk_rq_map_kern(q, rq, buffer, buflen, GFP_KERNEL)) {
+ if (buflen && blk_rq_map_kern(q, rq, buffer, buflen, GFP_NOIO)) {
blk_put_request(rq);
sdev_printk(KERN_INFO, sdev,
"get_rdac_req: blk_rq_map_kern failed.\n");
return NULL;
}
- memset(&rq->cmd, 0, BLK_MAX_CDB);
- rq->sense = h->sense;
- memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
- rq->sense_len = 0;
+ memset(rq->cmd, 0, BLK_MAX_CDB);
rq->cmd_type = REQ_TYPE_BLOCK_PC;
rq->cmd_flags |= REQ_FAILFAST | REQ_NOMERGE;
@@ -227,12 +235,12 @@ static struct request *get_rdac_req(struct scsi_device *sdev,
return rq;
}
-static struct request *rdac_failover_get(struct scsi_device *sdev)
+static struct request *rdac_failover_get(struct scsi_device *sdev,
+ struct rdac_dh_data *h)
{
struct request *rq;
struct rdac_mode_common *common;
unsigned data_size;
- struct rdac_dh_data *h = get_rdac_data(sdev);
if (h->ctlr->use_ms10) {
struct rdac_pg_expanded *rdac_pg;
@@ -277,6 +285,10 @@ static struct request *rdac_failover_get(struct scsi_device *sdev)
}
rq->cmd_len = COMMAND_SIZE(rq->cmd[0]);
+ rq->sense = h->sense;
+ memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
+ rq->sense_len = 0;
+
return rq;
}
@@ -321,11 +333,10 @@ done:
}
static int submit_inquiry(struct scsi_device *sdev, int page_code,
- unsigned int len)
+ unsigned int len, struct rdac_dh_data *h)
{
struct request *rq;
struct request_queue *q = sdev->request_queue;
- struct rdac_dh_data *h = get_rdac_data(sdev);
int err = SCSI_DH_RES_TEMP_UNAVAIL;
rq = get_rdac_req(sdev, &h->inq, len, READ);
@@ -338,59 +349,68 @@ static int submit_inquiry(struct scsi_device *sdev, int page_code,
rq->cmd[2] = page_code;
rq->cmd[4] = len;
rq->cmd_len = COMMAND_SIZE(INQUIRY);
+
+ rq->sense = h->sense;
+ memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
+ rq->sense_len = 0;
+
err = blk_execute_rq(q, NULL, rq, 1);
if (err == -EIO)
err = SCSI_DH_IO;
+
+ blk_put_request(rq);
done:
return err;
}
-static int get_lun(struct scsi_device *sdev)
+static int get_lun(struct scsi_device *sdev, struct rdac_dh_data *h)
{
int err;
struct c8_inquiry *inqp;
- struct rdac_dh_data *h = get_rdac_data(sdev);
- err = submit_inquiry(sdev, 0xC8, sizeof(struct c8_inquiry));
+ err = submit_inquiry(sdev, 0xC8, sizeof(struct c8_inquiry), h);
if (err == SCSI_DH_OK) {
inqp = &h->inq.c8;
- h->lun = inqp->lun[7]; /* currently it uses only one byte */
+ if (inqp->page_code != 0xc8)
+ return SCSI_DH_NOSYS;
+ if (inqp->page_id[0] != 'e' || inqp->page_id[1] != 'd' ||
+ inqp->page_id[2] != 'i' || inqp->page_id[3] != 'd')
+ return SCSI_DH_NOSYS;
+ h->lun = scsilun_to_int((struct scsi_lun *)inqp->lun);
}
return err;
}
-#define RDAC_OWNED 0
-#define RDAC_UNOWNED 1
-#define RDAC_FAILED 2
-static int check_ownership(struct scsi_device *sdev)
+static int check_ownership(struct scsi_device *sdev, struct rdac_dh_data *h)
{
int err;
struct c9_inquiry *inqp;
- struct rdac_dh_data *h = get_rdac_data(sdev);
- err = submit_inquiry(sdev, 0xC9, sizeof(struct c9_inquiry));
+ err = submit_inquiry(sdev, 0xC9, sizeof(struct c9_inquiry), h);
if (err == SCSI_DH_OK) {
- err = RDAC_UNOWNED;
inqp = &h->inq.c9;
- /*
- * If in AVT mode or if the path already owns the LUN,
- * return RDAC_OWNED;
- */
- if (((inqp->avte_cvp >> 7) == 0x1) ||
- ((inqp->avte_cvp & 0x1) != 0))
- err = RDAC_OWNED;
- } else
- err = RDAC_FAILED;
+ if ((inqp->avte_cvp >> 7) == 0x1) {
+ /* LUN in AVT mode */
+ sdev_printk(KERN_NOTICE, sdev,
+ "%s: AVT mode detected\n",
+ RDAC_NAME);
+ h->lun_state = RDAC_LUN_AVT;
+ } else if ((inqp->avte_cvp & 0x1) != 0) {
+ /* LUN was owned by the controller */
+ h->lun_state = RDAC_LUN_OWNED;
+ }
+ }
+
return err;
}
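For illustration only: check_ownership() above looks at exactly two bits of the avte_cvp byte from the vendor-specific INQUIRY page 0xC9. A standalone classifier of that byte is sketched below; everything else about the page layout is assumed.

static const char *rdac_lun_state_name(unsigned char avte_cvp)
{
	if (avte_cvp & 0x80)		/* bit 7: AVT mode enabled for this LUN */
		return "owned (AVT mode)";
	if (avte_cvp & 0x01)		/* bit 0: this controller currently owns the LUN */
		return "owned";
	return "unowned";
}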
-static int initialize_controller(struct scsi_device *sdev)
+static int initialize_controller(struct scsi_device *sdev,
+ struct rdac_dh_data *h)
{
int err;
struct c4_inquiry *inqp;
- struct rdac_dh_data *h = get_rdac_data(sdev);
- err = submit_inquiry(sdev, 0xC4, sizeof(struct c4_inquiry));
+ err = submit_inquiry(sdev, 0xC4, sizeof(struct c4_inquiry), h);
if (err == SCSI_DH_OK) {
inqp = &h->inq.c4;
h->ctlr = get_controller(inqp->subsys_id, inqp->slot_id);
@@ -400,13 +420,12 @@ static int initialize_controller(struct scsi_device *sdev)
return err;
}
-static int set_mode_select(struct scsi_device *sdev)
+static int set_mode_select(struct scsi_device *sdev, struct rdac_dh_data *h)
{
int err;
struct c2_inquiry *inqp;
- struct rdac_dh_data *h = get_rdac_data(sdev);
- err = submit_inquiry(sdev, 0xC2, sizeof(struct c2_inquiry));
+ err = submit_inquiry(sdev, 0xC2, sizeof(struct c2_inquiry), h);
if (err == SCSI_DH_OK) {
inqp = &h->inq.c2;
/*
@@ -421,13 +440,13 @@ static int set_mode_select(struct scsi_device *sdev)
return err;
}
-static int mode_select_handle_sense(struct scsi_device *sdev)
+static int mode_select_handle_sense(struct scsi_device *sdev,
+ unsigned char *sensebuf)
{
struct scsi_sense_hdr sense_hdr;
- struct rdac_dh_data *h = get_rdac_data(sdev);
int sense, err = SCSI_DH_IO, ret;
- ret = scsi_normalize_sense(h->sense, SCSI_SENSE_BUFFERSIZE, &sense_hdr);
+ ret = scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE, &sense_hdr);
if (!ret)
goto done;
@@ -451,14 +470,13 @@ done:
return err;
}
-static int send_mode_select(struct scsi_device *sdev)
+static int send_mode_select(struct scsi_device *sdev, struct rdac_dh_data *h)
{
struct request *rq;
struct request_queue *q = sdev->request_queue;
- struct rdac_dh_data *h = get_rdac_data(sdev);
int err = SCSI_DH_RES_TEMP_UNAVAIL;
- rq = rdac_failover_get(sdev);
+ rq = rdac_failover_get(sdev, h);
if (!rq)
goto done;
@@ -466,9 +484,11 @@ static int send_mode_select(struct scsi_device *sdev)
err = blk_execute_rq(q, NULL, rq, 1);
if (err != SCSI_DH_OK)
- err = mode_select_handle_sense(sdev);
+ err = mode_select_handle_sense(sdev, h->sense);
if (err == SCSI_DH_OK)
h->state = RDAC_STATE_ACTIVE;
+
+ blk_put_request(rq);
done:
return err;
}
@@ -478,38 +498,23 @@ static int rdac_activate(struct scsi_device *sdev)
struct rdac_dh_data *h = get_rdac_data(sdev);
int err = SCSI_DH_OK;
- if (h->lun == UNINITIALIZED_LUN) {
- err = get_lun(sdev);
- if (err != SCSI_DH_OK)
- goto done;
- }
-
- err = check_ownership(sdev);
- switch (err) {
- case RDAC_UNOWNED:
- break;
- case RDAC_OWNED:
- err = SCSI_DH_OK;
- goto done;
- case RDAC_FAILED:
- default:
- err = SCSI_DH_IO;
+ err = check_ownership(sdev, h);
+ if (err != SCSI_DH_OK)
goto done;
- }
if (!h->ctlr) {
- err = initialize_controller(sdev);
+ err = initialize_controller(sdev, h);
if (err != SCSI_DH_OK)
goto done;
}
if (h->ctlr->use_ms10 == -1) {
- err = set_mode_select(sdev);
+ err = set_mode_select(sdev, h);
if (err != SCSI_DH_OK)
goto done;
}
-
- err = send_mode_select(sdev);
+ if (h->lun_state == RDAC_LUN_UNOWNED)
+ err = send_mode_select(sdev, h);
done:
return err;
}
@@ -569,10 +574,7 @@ static int rdac_check_sense(struct scsi_device *sdev,
return SCSI_RETURN_NOT_HANDLED;
}
-static const struct {
- char *vendor;
- char *model;
-} rdac_dev_list[] = {
+static const struct scsi_dh_devlist rdac_dev_list[] = {
{"IBM", "1722"},
{"IBM", "1724"},
{"IBM", "1726"},
@@ -590,89 +592,89 @@ static const struct {
{NULL, NULL},
};
-static int rdac_bus_notify(struct notifier_block *, unsigned long, void *);
+static int rdac_bus_attach(struct scsi_device *sdev);
+static void rdac_bus_detach(struct scsi_device *sdev);
static struct scsi_device_handler rdac_dh = {
.name = RDAC_NAME,
.module = THIS_MODULE,
- .nb.notifier_call = rdac_bus_notify,
+ .devlist = rdac_dev_list,
.prep_fn = rdac_prep_fn,
.check_sense = rdac_check_sense,
+ .attach = rdac_bus_attach,
+ .detach = rdac_bus_detach,
.activate = rdac_activate,
};
-/*
- * TODO: need some interface so we can set trespass values
- */
-static int rdac_bus_notify(struct notifier_block *nb,
- unsigned long action, void *data)
+static int rdac_bus_attach(struct scsi_device *sdev)
{
- struct device *dev = data;
- struct scsi_device *sdev;
struct scsi_dh_data *scsi_dh_data;
struct rdac_dh_data *h;
- int i, found = 0;
unsigned long flags;
+ int err;
- if (!scsi_is_sdev_device(dev))
+ scsi_dh_data = kzalloc(sizeof(struct scsi_device_handler *)
+ + sizeof(*h) , GFP_KERNEL);
+ if (!scsi_dh_data) {
+ sdev_printk(KERN_ERR, sdev, "%s: Attach failed\n",
+ RDAC_NAME);
return 0;
+ }
- sdev = to_scsi_device(dev);
-
- if (action == BUS_NOTIFY_ADD_DEVICE) {
- for (i = 0; rdac_dev_list[i].vendor; i++) {
- if (!strncmp(sdev->vendor, rdac_dev_list[i].vendor,
- strlen(rdac_dev_list[i].vendor)) &&
- !strncmp(sdev->model, rdac_dev_list[i].model,
- strlen(rdac_dev_list[i].model))) {
- found = 1;
- break;
- }
- }
- if (!found)
- goto out;
+ scsi_dh_data->scsi_dh = &rdac_dh;
+ h = (struct rdac_dh_data *) scsi_dh_data->buf;
+ h->lun = UNINITIALIZED_LUN;
+ h->state = RDAC_STATE_ACTIVE;
- scsi_dh_data = kzalloc(sizeof(struct scsi_device_handler *)
- + sizeof(*h) , GFP_KERNEL);
- if (!scsi_dh_data) {
- sdev_printk(KERN_ERR, sdev, "Attach failed %s.\n",
- RDAC_NAME);
- goto out;
- }
+ err = get_lun(sdev, h);
+ if (err != SCSI_DH_OK)
+ goto failed;
- scsi_dh_data->scsi_dh = &rdac_dh;
- h = (struct rdac_dh_data *) scsi_dh_data->buf;
- h->lun = UNINITIALIZED_LUN;
- h->state = RDAC_STATE_ACTIVE;
- spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
- sdev->scsi_dh_data = scsi_dh_data;
- spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
- try_module_get(THIS_MODULE);
-
- sdev_printk(KERN_NOTICE, sdev, "Attached %s.\n", RDAC_NAME);
-
- } else if (action == BUS_NOTIFY_DEL_DEVICE) {
- if (sdev->scsi_dh_data == NULL ||
- sdev->scsi_dh_data->scsi_dh != &rdac_dh)
- goto out;
-
- spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
- scsi_dh_data = sdev->scsi_dh_data;
- sdev->scsi_dh_data = NULL;
- spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
-
- h = (struct rdac_dh_data *) scsi_dh_data->buf;
- if (h->ctlr)
- kref_put(&h->ctlr->kref, release_controller);
- kfree(scsi_dh_data);
- module_put(THIS_MODULE);
- sdev_printk(KERN_NOTICE, sdev, "Dettached %s.\n", RDAC_NAME);
- }
+ err = check_ownership(sdev, h);
+ if (err != SCSI_DH_OK)
+ goto failed;
+
+ if (!try_module_get(THIS_MODULE))
+ goto failed;
+
+ spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
+ sdev->scsi_dh_data = scsi_dh_data;
+ spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
+
+ sdev_printk(KERN_NOTICE, sdev,
+ "%s: LUN %d (%s)\n",
+ RDAC_NAME, h->lun, lun_state[(int)h->lun_state]);
-out:
return 0;
+
+failed:
+ kfree(scsi_dh_data);
+ sdev_printk(KERN_ERR, sdev, "%s: not attached\n",
+ RDAC_NAME);
+ return -EINVAL;
+}
+
+static void rdac_bus_detach( struct scsi_device *sdev )
+{
+ struct scsi_dh_data *scsi_dh_data;
+ struct rdac_dh_data *h;
+ unsigned long flags;
+
+ spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
+ scsi_dh_data = sdev->scsi_dh_data;
+ sdev->scsi_dh_data = NULL;
+ spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
+
+ h = (struct rdac_dh_data *) scsi_dh_data->buf;
+ if (h->ctlr)
+ kref_put(&h->ctlr->kref, release_controller);
+ kfree(scsi_dh_data);
+ module_put(THIS_MODULE);
+ sdev_printk(KERN_NOTICE, sdev, "%s: Detached\n", RDAC_NAME);
}
+
+
static int __init rdac_init(void)
{
int r;
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index 2bc30e32b67..1fe0901e811 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -271,8 +271,8 @@ rebuild_sys_tab:
pHba->initialized = TRUE;
pHba->state &= ~DPTI_STATE_RESET;
if (adpt_sysfs_class) {
- struct device *dev = device_create(adpt_sysfs_class,
- NULL, MKDEV(DPTI_I2O_MAJOR, pHba->unit),
+ struct device *dev = device_create_drvdata(adpt_sysfs_class,
+ NULL, MKDEV(DPTI_I2O_MAJOR, pHba->unit), NULL,
"dpti%d", pHba->unit);
if (IS_ERR(dev)) {
printk(KERN_WARNING"dpti%d: unable to "
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 35cd892dce0..fed0b02ebc1 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -232,8 +232,8 @@ int scsi_add_host(struct Scsi_Host *shost, struct device *dev)
}
if (shost->transportt->create_work_queue) {
- snprintf(shost->work_q_name, KOBJ_NAME_LEN, "scsi_wq_%d",
- shost->host_no);
+ snprintf(shost->work_q_name, sizeof(shost->work_q_name),
+ "scsi_wq_%d", shost->host_no);
shost->work_q = create_singlethread_workqueue(
shost->work_q_name);
if (!shost->work_q) {
@@ -466,7 +466,8 @@ struct Scsi_Host *scsi_host_lookup(unsigned short hostnum)
struct device *cdev;
struct Scsi_Host *shost = ERR_PTR(-ENXIO);
- cdev = class_find_device(&shost_class, &hostnum, __scsi_host_match);
+ cdev = class_find_device(&shost_class, NULL, &hostnum,
+ __scsi_host_match);
if (cdev) {
shost = scsi_host_get(class_to_shost(cdev));
put_device(cdev);
diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c
index da876d3924b..a48e4990fe1 100644
--- a/drivers/scsi/hptiop.c
+++ b/drivers/scsi/hptiop.c
@@ -25,7 +25,6 @@
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/spinlock.h>
-#include <linux/hdreg.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/div64.h>
@@ -1249,6 +1248,13 @@ static struct pci_device_id hptiop_id_table[] = {
{ PCI_VDEVICE(TTI, 0x3522), (kernel_ulong_t)&hptiop_itl_ops },
{ PCI_VDEVICE(TTI, 0x3410), (kernel_ulong_t)&hptiop_itl_ops },
{ PCI_VDEVICE(TTI, 0x3540), (kernel_ulong_t)&hptiop_itl_ops },
+ { PCI_VDEVICE(TTI, 0x3530), (kernel_ulong_t)&hptiop_itl_ops },
+ { PCI_VDEVICE(TTI, 0x3560), (kernel_ulong_t)&hptiop_itl_ops },
+ { PCI_VDEVICE(TTI, 0x4322), (kernel_ulong_t)&hptiop_itl_ops },
+ { PCI_VDEVICE(TTI, 0x4210), (kernel_ulong_t)&hptiop_itl_ops },
+ { PCI_VDEVICE(TTI, 0x4211), (kernel_ulong_t)&hptiop_itl_ops },
+ { PCI_VDEVICE(TTI, 0x4310), (kernel_ulong_t)&hptiop_itl_ops },
+ { PCI_VDEVICE(TTI, 0x4311), (kernel_ulong_t)&hptiop_itl_ops },
{ PCI_VDEVICE(TTI, 0x3120), (kernel_ulong_t)&hptiop_mv_ops },
{ PCI_VDEVICE(TTI, 0x3122), (kernel_ulong_t)&hptiop_mv_ops },
{ PCI_VDEVICE(TTI, 0x3020), (kernel_ulong_t)&hptiop_mv_ops },
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index eb702b96d57..ae560bc04f9 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -521,9 +521,10 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
static void ibmvfc_reinit_host(struct ibmvfc_host *vhost)
{
if (vhost->action == IBMVFC_HOST_ACTION_NONE) {
- scsi_block_requests(vhost->host);
- ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING);
- ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
+ if (!ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
+ scsi_block_requests(vhost->host);
+ ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
+ }
} else
vhost->reinit = 1;
@@ -854,39 +855,41 @@ static void ibmvfc_retry_host_init(struct ibmvfc_host *vhost)
}
/**
- * __ibmvfc_find_target - Find the specified scsi_target (no locking)
+ * __ibmvfc_get_target - Find the specified scsi_target (no locking)
* @starget: scsi target struct
*
* Return value:
* ibmvfc_target struct / NULL if not found
**/
-static struct ibmvfc_target *__ibmvfc_find_target(struct scsi_target *starget)
+static struct ibmvfc_target *__ibmvfc_get_target(struct scsi_target *starget)
{
struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
struct ibmvfc_host *vhost = shost_priv(shost);
struct ibmvfc_target *tgt;
list_for_each_entry(tgt, &vhost->targets, queue)
- if (tgt->target_id == starget->id)
+ if (tgt->target_id == starget->id) {
+ kref_get(&tgt->kref);
return tgt;
+ }
return NULL;
}
/**
- * ibmvfc_find_target - Find the specified scsi_target
+ * ibmvfc_get_target - Find the specified scsi_target
* @starget: scsi target struct
*
* Return value:
* ibmvfc_target struct / NULL if not found
**/
-static struct ibmvfc_target *ibmvfc_find_target(struct scsi_target *starget)
+static struct ibmvfc_target *ibmvfc_get_target(struct scsi_target *starget)
{
struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
struct ibmvfc_target *tgt;
unsigned long flags;
spin_lock_irqsave(shost->host_lock, flags);
- tgt = __ibmvfc_find_target(starget);
+ tgt = __ibmvfc_get_target(starget);
spin_unlock_irqrestore(shost->host_lock, flags);
return tgt;
}
@@ -963,6 +966,9 @@ static void ibmvfc_get_host_port_state(struct Scsi_Host *shost)
case IBMVFC_HALTED:
fc_host_port_state(shost) = FC_PORTSTATE_BLOCKED;
break;
+ case IBMVFC_NO_CRQ:
+ fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
+ break;
default:
ibmvfc_log(vhost, 3, "Unknown port state: %d\n", vhost->state);
fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
@@ -988,6 +994,17 @@ static void ibmvfc_set_rport_dev_loss_tmo(struct fc_rport *rport, u32 timeout)
}
/**
+ * ibmvfc_release_tgt - Free memory allocated for a target
+ * @kref: kref struct
+ *
+ **/
+static void ibmvfc_release_tgt(struct kref *kref)
+{
+ struct ibmvfc_target *tgt = container_of(kref, struct ibmvfc_target, kref);
+ kfree(tgt);
+}
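For illustration only: with this change the target lookup takes a reference under the host lock and every caller drops it via ibmvfc_release_tgt(). The snippet below is a hedged, generic sketch of that kref pattern; apart from the kref API itself the names are illustrative.

#include <linux/kref.h>
#include <linux/slab.h>

struct example_tgt {
	struct kref kref;
	/* ... payload ... */
};

static void example_release(struct kref *kref)
{
	struct example_tgt *tgt = container_of(kref, struct example_tgt, kref);
	kfree(tgt);
}

static void example_use(struct example_tgt *tgt)
{
	kref_get(&tgt->kref);			/* pin the target while it is in use */
	/* ... use tgt ... */
	kref_put(&tgt->kref, example_release);	/* frees it on the last reference */
}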
+
+/**
* ibmvfc_get_starget_node_name - Get SCSI target's node name
* @starget: scsi target struct
*
@@ -996,8 +1013,10 @@ static void ibmvfc_set_rport_dev_loss_tmo(struct fc_rport *rport, u32 timeout)
**/
static void ibmvfc_get_starget_node_name(struct scsi_target *starget)
{
- struct ibmvfc_target *tgt = ibmvfc_find_target(starget);
+ struct ibmvfc_target *tgt = ibmvfc_get_target(starget);
fc_starget_port_name(starget) = tgt ? tgt->ids.node_name : 0;
+ if (tgt)
+ kref_put(&tgt->kref, ibmvfc_release_tgt);
}
/**
@@ -1009,8 +1028,10 @@ static void ibmvfc_get_starget_node_name(struct scsi_target *starget)
**/
static void ibmvfc_get_starget_port_name(struct scsi_target *starget)
{
- struct ibmvfc_target *tgt = ibmvfc_find_target(starget);
+ struct ibmvfc_target *tgt = ibmvfc_get_target(starget);
fc_starget_port_name(starget) = tgt ? tgt->ids.port_name : 0;
+ if (tgt)
+ kref_put(&tgt->kref, ibmvfc_release_tgt);
}
/**
@@ -1022,8 +1043,10 @@ static void ibmvfc_get_starget_port_name(struct scsi_target *starget)
**/
static void ibmvfc_get_starget_port_id(struct scsi_target *starget)
{
- struct ibmvfc_target *tgt = ibmvfc_find_target(starget);
+ struct ibmvfc_target *tgt = ibmvfc_get_target(starget);
fc_starget_port_id(starget) = tgt ? tgt->scsi_id : -1;
+ if (tgt)
+ kref_put(&tgt->kref, ibmvfc_release_tgt);
}
/**
@@ -1113,7 +1136,7 @@ static void ibmvfc_set_login_info(struct ibmvfc_host *vhost)
login_info->max_cmds = max_requests + IBMVFC_NUM_INTERNAL_REQ;
login_info->capabilities = IBMVFC_CAN_MIGRATE;
login_info->async.va = vhost->async_crq.msg_token;
- login_info->async.len = vhost->async_crq.size;
+ login_info->async.len = vhost->async_crq.size * sizeof(*vhost->async_crq.msgs);
strncpy(login_info->partition_name, vhost->partition_name, IBMVFC_MAX_NAME);
strncpy(login_info->device_name,
vhost->host->shost_gendev.bus_id, IBMVFC_MAX_NAME);
@@ -1404,7 +1427,7 @@ static void ibmvfc_log_error(struct ibmvfc_event *evt)
err = cmd_status[index].name;
}
- if (!logerr && (vhost->log_level <= IBMVFC_DEFAULT_LOG_LEVEL))
+ if (!logerr && (vhost->log_level <= (IBMVFC_DEFAULT_LOG_LEVEL + 1)))
return;
if (rsp->flags & FCP_RSP_LEN_VALID)
@@ -2054,7 +2077,7 @@ static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
{
const char *desc = ibmvfc_get_ae_desc(crq->event);
- ibmvfc_log(vhost, 2, "%s event received\n", desc);
+ ibmvfc_log(vhost, 3, "%s event received\n", desc);
switch (crq->event) {
case IBMVFC_AE_LINK_UP:
@@ -2648,17 +2671,6 @@ static void ibmvfc_retry_tgt_init(struct ibmvfc_target *tgt,
}
/**
- * ibmvfc_release_tgt - Free memory allocated for a target
- * @kref: kref struct
- *
- **/
-static void ibmvfc_release_tgt(struct kref *kref)
-{
- struct ibmvfc_target *tgt = container_of(kref, struct ibmvfc_target, kref);
- kfree(tgt);
-}
-
-/**
* ibmvfc_tgt_prli_done - Completion handler for Process Login
* @evt: ibmvfc event struct
*
@@ -2902,6 +2914,139 @@ static void ibmvfc_tgt_implicit_logout(struct ibmvfc_target *tgt)
}
/**
+ * ibmvfc_adisc_needs_plogi - Does device need PLOGI?
+ * @mad: ibmvfc passthru mad struct
+ * @tgt: ibmvfc target struct
+ *
+ * Returns:
+ * 1 if PLOGI needed / 0 if PLOGI not needed
+ **/
+static int ibmvfc_adisc_needs_plogi(struct ibmvfc_passthru_mad *mad,
+ struct ibmvfc_target *tgt)
+{
+ if (memcmp(&mad->fc_iu.response[2], &tgt->ids.port_name,
+ sizeof(tgt->ids.port_name)))
+ return 1;
+ if (memcmp(&mad->fc_iu.response[4], &tgt->ids.node_name,
+ sizeof(tgt->ids.node_name)))
+ return 1;
+ if (mad->fc_iu.response[6] != tgt->scsi_id)
+ return 1;
+ return 0;
+}
+
+/**
+ * ibmvfc_tgt_adisc_done - Completion handler for ADISC
+ * @evt: ibmvfc event struct
+ *
+ **/
+static void ibmvfc_tgt_adisc_done(struct ibmvfc_event *evt)
+{
+ struct ibmvfc_target *tgt = evt->tgt;
+ struct ibmvfc_host *vhost = evt->vhost;
+ struct ibmvfc_passthru_mad *mad = &evt->xfer_iu->passthru;
+ u32 status = mad->common.status;
+ u8 fc_reason, fc_explain;
+
+ vhost->discovery_threads--;
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
+
+ switch (status) {
+ case IBMVFC_MAD_SUCCESS:
+ tgt_dbg(tgt, "ADISC succeeded\n");
+ if (ibmvfc_adisc_needs_plogi(mad, tgt))
+ tgt->need_login = 1;
+ break;
+ case IBMVFC_MAD_DRIVER_FAILED:
+ break;
+ case IBMVFC_MAD_FAILED:
+ default:
+ tgt->need_login = 1;
+ fc_reason = (mad->fc_iu.response[1] & 0x00ff0000) >> 16;
+ fc_explain = (mad->fc_iu.response[1] & 0x0000ff00) >> 8;
+ tgt_info(tgt, "ADISC failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
+ ibmvfc_get_cmd_error(mad->iu.status, mad->iu.error),
+ mad->iu.status, mad->iu.error,
+ ibmvfc_get_fc_type(fc_reason), fc_reason,
+ ibmvfc_get_ls_explain(fc_explain), fc_explain, status);
+ break;
+ };
+
+ kref_put(&tgt->kref, ibmvfc_release_tgt);
+ ibmvfc_free_event(evt);
+ wake_up(&vhost->work_wait_q);
+}
+
+/**
+ * ibmvfc_init_passthru - Initialize an event struct for FC passthru
+ * @evt: ibmvfc event struct
+ *
+ **/
+static void ibmvfc_init_passthru(struct ibmvfc_event *evt)
+{
+ struct ibmvfc_passthru_mad *mad = &evt->iu.passthru;
+
+ memset(mad, 0, sizeof(*mad));
+ mad->common.version = 1;
+ mad->common.opcode = IBMVFC_PASSTHRU;
+ mad->common.length = sizeof(*mad) - sizeof(mad->fc_iu) - sizeof(mad->iu);
+ mad->cmd_ioba.va = (u64)evt->crq.ioba +
+ offsetof(struct ibmvfc_passthru_mad, iu);
+ mad->cmd_ioba.len = sizeof(mad->iu);
+ mad->iu.cmd_len = sizeof(mad->fc_iu.payload);
+ mad->iu.rsp_len = sizeof(mad->fc_iu.response);
+ mad->iu.cmd.va = (u64)evt->crq.ioba +
+ offsetof(struct ibmvfc_passthru_mad, fc_iu) +
+ offsetof(struct ibmvfc_passthru_fc_iu, payload);
+ mad->iu.cmd.len = sizeof(mad->fc_iu.payload);
+ mad->iu.rsp.va = (u64)evt->crq.ioba +
+ offsetof(struct ibmvfc_passthru_mad, fc_iu) +
+ offsetof(struct ibmvfc_passthru_fc_iu, response);
+ mad->iu.rsp.len = sizeof(mad->fc_iu.response);
+}
+
+/**
+ * ibmvfc_tgt_adisc - Initiate an ADISC for specified target
+ * @tgt: ibmvfc target struct
+ *
+ **/
+static void ibmvfc_tgt_adisc(struct ibmvfc_target *tgt)
+{
+ struct ibmvfc_passthru_mad *mad;
+ struct ibmvfc_host *vhost = tgt->vhost;
+ struct ibmvfc_event *evt;
+
+ if (vhost->discovery_threads >= disc_threads)
+ return;
+
+ kref_get(&tgt->kref);
+ evt = ibmvfc_get_event(vhost);
+ vhost->discovery_threads++;
+ ibmvfc_init_event(evt, ibmvfc_tgt_adisc_done, IBMVFC_MAD_FORMAT);
+ evt->tgt = tgt;
+
+ ibmvfc_init_passthru(evt);
+ mad = &evt->iu.passthru;
+ mad->iu.flags = IBMVFC_FC_ELS;
+ mad->iu.scsi_id = tgt->scsi_id;
+
+ mad->fc_iu.payload[0] = IBMVFC_ADISC;
+ memcpy(&mad->fc_iu.payload[2], &vhost->login_buf->resp.port_name,
+ sizeof(vhost->login_buf->resp.port_name));
+ memcpy(&mad->fc_iu.payload[4], &vhost->login_buf->resp.node_name,
+ sizeof(vhost->login_buf->resp.node_name));
+ mad->fc_iu.payload[6] = vhost->login_buf->resp.scsi_id & 0x00ffffff;
+
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
+ if (ibmvfc_send_event(evt, vhost, default_timeout)) {
+ vhost->discovery_threads--;
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
+ kref_put(&tgt->kref, ibmvfc_release_tgt);
+ } else
+ tgt_dbg(tgt, "Sent ADISC\n");
+}
+
+/**
* ibmvfc_tgt_query_target_done - Completion handler for Query Target MAD
* @evt: ibmvfc event struct
*
@@ -2921,6 +3066,8 @@ static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt)
tgt->new_scsi_id = rsp->scsi_id;
if (rsp->scsi_id != tgt->scsi_id)
ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout);
+ else
+ ibmvfc_init_tgt(tgt, ibmvfc_tgt_adisc);
break;
case IBMVFC_MAD_DRIVER_FAILED:
break;
@@ -3336,6 +3483,7 @@ static void ibmvfc_tgt_add_rport(struct ibmvfc_target *tgt)
tgt_dbg(tgt, "rport add succeeded\n");
rport->maxframe_size = tgt->service_parms.common.bb_rcv_sz & 0x0fff;
rport->supported_classes = 0;
+ tgt->target_id = rport->scsi_target_id;
if (tgt->service_parms.class1_parms[0] & 0x80000000)
rport->supported_classes |= FC_COS_CLASS1;
if (tgt->service_parms.class2_parms[0] & 0x80000000)
@@ -3525,7 +3673,7 @@ static int ibmvfc_init_crq(struct ibmvfc_host *vhost)
crq->msg_token = dma_map_single(dev, crq->msgs,
PAGE_SIZE, DMA_BIDIRECTIONAL);
- if (dma_mapping_error(crq->msg_token))
+ if (dma_mapping_error(dev, crq->msg_token))
goto map_failed;
retrc = rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
@@ -3618,7 +3766,7 @@ static int ibmvfc_alloc_mem(struct ibmvfc_host *vhost)
async_q->size * sizeof(*async_q->msgs),
DMA_BIDIRECTIONAL);
- if (dma_mapping_error(async_q->msg_token)) {
+ if (dma_mapping_error(dev, async_q->msg_token)) {
dev_err(dev, "Failed to map async queue\n");
goto free_async_crq;
}
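This hunk, and the matching ones in ibmvscsi.c, ibmvstgt.c and rpa_vscsi.c further down, adapt callers to the dma_mapping_error() interface that takes the struct device as its first argument. A minimal illustration of the map-and-check pattern with the two-argument form, not tied to any of these drivers (dev, buf and len are caller-supplied placeholders):

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Illustrative only: map a buffer and verify the handle with the
 * two-argument dma_mapping_error() before using it. */
static int example_map_buffer(struct device *dev, void *buf, size_t len,
			      dma_addr_t *handle)
{
	*handle = dma_map_single(dev, buf, len, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, *handle)) {
		dev_err(dev, "failed to map buffer\n");
		return -ENOMEM;
	}
	return 0;
}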
@@ -3800,10 +3948,12 @@ static int ibmvfc_remove(struct vio_dev *vdev)
ENTER;
ibmvfc_remove_trace_file(&vhost->host->shost_dev.kobj, &ibmvfc_trace_attr);
+ ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
+ ibmvfc_wait_while_resetting(vhost);
+ ibmvfc_release_crq_queue(vhost);
kthread_stop(vhost->work_thread);
fc_remove_host(vhost->host);
scsi_remove_host(vhost->host);
- ibmvfc_release_crq_queue(vhost);
spin_lock_irqsave(vhost->host->host_lock, flags);
ibmvfc_purge_requests(vhost, DID_ERROR);
@@ -3819,6 +3969,20 @@ static int ibmvfc_remove(struct vio_dev *vdev)
return 0;
}
+/**
+ * ibmvfc_get_desired_dma - Calculate DMA resources needed by the driver
+ * @vdev: vio device struct
+ *
+ * Return value:
+ * Number of bytes the driver will need to DMA map at the same time in
+ * order to perform well.
+ */
+static unsigned long ibmvfc_get_desired_dma(struct vio_dev *vdev)
+{
+ unsigned long pool_dma = max_requests * sizeof(union ibmvfc_iu);
+ return pool_dma + ((512 * 1024) * driver_template.cmd_per_lun);
+}
+
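Both virtual SCSI drivers in this patch gain a get_desired_dma hook so the VIO bus layer can budget IO memory for the device: the callback returns, in bytes, how much memory the driver expects to need DMA-mapped at the same time in order to perform well. The sketch below only shows the shape of such a callback for a hypothetical driver; the constants are made up and are not taken from ibmvfc or ibmvscsi.

#include <asm/vio.h>

/* Hypothetical sizing: 64 in-flight requests of up to 64KB each. */
#define EXAMPLE_NUM_REQUESTS	64
#define EXAMPLE_BYTES_PER_REQ	(64 * 1024)

/* Returns the number of bytes the driver wants mapped concurrently;
 * wired up through the .get_desired_dma member of struct vio_driver. */
static unsigned long example_get_desired_dma(struct vio_dev *vdev)
{
	return EXAMPLE_NUM_REQUESTS * EXAMPLE_BYTES_PER_REQ;
}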
static struct vio_device_id ibmvfc_device_table[] __devinitdata = {
{"fcp", "IBM,vfc-client"},
{ "", "" }
@@ -3829,6 +3993,7 @@ static struct vio_driver ibmvfc_driver = {
.id_table = ibmvfc_device_table,
.probe = ibmvfc_probe,
.remove = ibmvfc_remove,
+ .get_desired_dma = ibmvfc_get_desired_dma,
.driver = {
.name = IBMVFC_NAME,
.owner = THIS_MODULE,
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h
index 057f3c01ed6..4bf6e374f07 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.h
+++ b/drivers/scsi/ibmvscsi/ibmvfc.h
@@ -29,8 +29,8 @@
#include "viosrp.h"
#define IBMVFC_NAME "ibmvfc"
-#define IBMVFC_DRIVER_VERSION "1.0.0"
-#define IBMVFC_DRIVER_DATE "(July 1, 2008)"
+#define IBMVFC_DRIVER_VERSION "1.0.1"
+#define IBMVFC_DRIVER_DATE "(July 11, 2008)"
#define IBMVFC_DEFAULT_TIMEOUT 15
#define IBMVFC_INIT_TIMEOUT 30
@@ -119,6 +119,7 @@ enum ibmvfc_mad_types {
IBMVFC_PROCESS_LOGIN = 0x0008,
IBMVFC_QUERY_TARGET = 0x0010,
IBMVFC_IMPLICIT_LOGOUT = 0x0040,
+ IBMVFC_PASSTHRU = 0x0200,
IBMVFC_TMF_MAD = 0x0100,
};
@@ -439,6 +440,37 @@ struct ibmvfc_cmd {
struct ibmvfc_fcp_rsp rsp;
}__attribute__((packed, aligned (8)));
+struct ibmvfc_passthru_fc_iu {
+ u32 payload[7];
+#define IBMVFC_ADISC 0x52000000
+ u32 response[7];
+};
+
+struct ibmvfc_passthru_iu {
+ u64 task_tag;
+ u32 cmd_len;
+ u32 rsp_len;
+ u16 status;
+ u16 error;
+ u32 flags;
+#define IBMVFC_FC_ELS 0x01
+ u32 cancel_key;
+ u32 reserved;
+ struct srp_direct_buf cmd;
+ struct srp_direct_buf rsp;
+ u64 correlation;
+ u64 scsi_id;
+ u64 tag;
+ u64 reserved2[2];
+}__attribute__((packed, aligned (8)));
+
+struct ibmvfc_passthru_mad {
+ struct ibmvfc_mad_common common;
+ struct srp_direct_buf cmd_ioba;
+ struct ibmvfc_passthru_iu iu;
+ struct ibmvfc_passthru_fc_iu fc_iu;
+}__attribute__((packed, aligned (8)));
+
struct ibmvfc_trace_start_entry {
u32 xfer_len;
}__attribute__((packed));
@@ -531,6 +563,7 @@ union ibmvfc_iu {
struct ibmvfc_implicit_logout implicit_logout;
struct ibmvfc_tmf tmf;
struct ibmvfc_cmd cmd;
+ struct ibmvfc_passthru_mad passthru;
}__attribute__((packed, aligned (8)));
enum ibmvfc_target_action {
@@ -656,6 +689,9 @@ struct ibmvfc_host {
#define tgt_dbg(t, fmt, ...) \
DBG_CMD(dev_info((t)->vhost->dev, "%lX: " fmt, (t)->scsi_id, ##__VA_ARGS__))
+#define tgt_info(t, fmt, ...) \
+ dev_info((t)->vhost->dev, "%lX: " fmt, (t)->scsi_id, ##__VA_ARGS__)
+
#define tgt_err(t, fmt, ...) \
dev_err((t)->vhost->dev, "%lX: " fmt, (t)->scsi_id, ##__VA_ARGS__)
@@ -668,8 +704,8 @@ struct ibmvfc_host {
dev_err((vhost)->dev, ##__VA_ARGS__); \
} while (0)
-#define ENTER DBG_CMD(printk(KERN_INFO IBMVFC_NAME": Entering %s\n", __FUNCTION__))
-#define LEAVE DBG_CMD(printk(KERN_INFO IBMVFC_NAME": Leaving %s\n", __FUNCTION__))
+#define ENTER DBG_CMD(printk(KERN_INFO IBMVFC_NAME": Entering %s\n", __func__))
+#define LEAVE DBG_CMD(printk(KERN_INFO IBMVFC_NAME": Leaving %s\n", __func__))
#ifdef CONFIG_SCSI_IBMVFC_TRACE
#define ibmvfc_create_trace_file(kobj, attr) sysfs_create_bin_file(kobj, attr)
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index 5d23368a1bc..6b24b9cdb04 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -72,6 +72,7 @@
#include <linux/delay.h>
#include <asm/firmware.h>
#include <asm/vio.h>
+#include <asm/firmware.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
@@ -426,8 +427,10 @@ static int map_sg_data(struct scsi_cmnd *cmd,
SG_ALL * sizeof(struct srp_direct_buf),
&evt_struct->ext_list_token, 0);
if (!evt_struct->ext_list) {
- sdev_printk(KERN_ERR, cmd->device,
- "Can't allocate memory for indirect table\n");
+ if (!firmware_has_feature(FW_FEATURE_CMO))
+ sdev_printk(KERN_ERR, cmd->device,
+ "Can't allocate memory "
+ "for indirect table\n");
return 0;
}
}
@@ -743,7 +746,9 @@ static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd,
srp_cmd->lun = ((u64) lun) << 48;
if (!map_data_for_srp_cmd(cmnd, evt_struct, srp_cmd, hostdata->dev)) {
- sdev_printk(KERN_ERR, cmnd->device, "couldn't convert cmd to srp_cmd\n");
+ if (!firmware_has_feature(FW_FEATURE_CMO))
+ sdev_printk(KERN_ERR, cmnd->device,
+ "couldn't convert cmd to srp_cmd\n");
free_event_struct(&hostdata->pool, evt_struct);
return SCSI_MLQUEUE_HOST_BUSY;
}
@@ -854,8 +859,11 @@ static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata)
sizeof(hostdata->madapter_info),
DMA_BIDIRECTIONAL);
- if (dma_mapping_error(req->buffer)) {
- dev_err(hostdata->dev, "Unable to map request_buffer for adapter_info!\n");
+ if (dma_mapping_error(hostdata->dev, req->buffer)) {
+ if (!firmware_has_feature(FW_FEATURE_CMO))
+ dev_err(hostdata->dev,
+ "Unable to map request_buffer for "
+ "adapter_info!\n");
free_event_struct(&hostdata->pool, evt_struct);
return;
}
@@ -1399,8 +1407,10 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata,
length,
DMA_BIDIRECTIONAL);
- if (dma_mapping_error(host_config->buffer)) {
- dev_err(hostdata->dev, "dma_mapping error getting host config\n");
+ if (dma_mapping_error(hostdata->dev, host_config->buffer)) {
+ if (!firmware_has_feature(FW_FEATURE_CMO))
+ dev_err(hostdata->dev,
+ "dma_mapping error getting host config\n");
free_event_struct(&hostdata->pool, evt_struct);
return -1;
}
@@ -1604,7 +1614,7 @@ static struct scsi_host_template driver_template = {
.eh_host_reset_handler = ibmvscsi_eh_host_reset_handler,
.slave_configure = ibmvscsi_slave_configure,
.change_queue_depth = ibmvscsi_change_queue_depth,
- .cmd_per_lun = 16,
+ .cmd_per_lun = IBMVSCSI_CMDS_PER_LUN_DEFAULT,
.can_queue = IBMVSCSI_MAX_REQUESTS_DEFAULT,
.this_id = -1,
.sg_tablesize = SG_ALL,
@@ -1613,6 +1623,26 @@ static struct scsi_host_template driver_template = {
};
/**
+ * ibmvscsi_get_desired_dma - Calculate IO memory desired by the driver
+ *
+ * @vdev: struct vio_dev for the device whose desired IO mem is to be returned
+ *
+ * Return value:
+ * Number of bytes of IO data the driver will need to perform well.
+ */
+static unsigned long ibmvscsi_get_desired_dma(struct vio_dev *vdev)
+{
+ /* iu_storage data allocated in initialize_event_pool */
+ unsigned long desired_io = max_requests * sizeof(union viosrp_iu);
+
+ /* add io space for sg data */
+ desired_io += (IBMVSCSI_MAX_SECTORS_DEFAULT *
+ IBMVSCSI_CMDS_PER_LUN_DEFAULT);
+
+ return desired_io;
+}
+
+/**
* Called by bus code for each adapter
*/
static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
@@ -1641,7 +1671,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
hostdata->host = host;
hostdata->dev = dev;
atomic_set(&hostdata->request_limit, -1);
- hostdata->host->max_sectors = 32 * 8; /* default max I/O 32 pages */
+ hostdata->host->max_sectors = IBMVSCSI_MAX_SECTORS_DEFAULT;
rc = ibmvscsi_ops->init_crq_queue(&hostdata->queue, hostdata, max_requests);
if (rc != 0 && rc != H_RESOURCE) {
@@ -1735,6 +1765,7 @@ static struct vio_driver ibmvscsi_driver = {
.id_table = ibmvscsi_device_table,
.probe = ibmvscsi_probe,
.remove = ibmvscsi_remove,
+ .get_desired_dma = ibmvscsi_get_desired_dma,
.driver = {
.name = "ibmvscsi",
.owner = THIS_MODULE,
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.h b/drivers/scsi/ibmvscsi/ibmvscsi.h
index 46e850e302c..2d4339d5e16 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.h
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.h
@@ -45,6 +45,8 @@ struct Scsi_Host;
#define MAX_INDIRECT_BUFS 10
#define IBMVSCSI_MAX_REQUESTS_DEFAULT 100
+#define IBMVSCSI_CMDS_PER_LUN_DEFAULT 16
+#define IBMVSCSI_MAX_SECTORS_DEFAULT 256 /* 32 * 8 = default max I/O 32 pages */
#define IBMVSCSI_MAX_CMDS_PER_LUN 64
/* ------------------------------------------------------------
diff --git a/drivers/scsi/ibmvscsi/ibmvstgt.c b/drivers/scsi/ibmvscsi/ibmvstgt.c
index 3b9514c8f1f..2a5b29d1217 100644
--- a/drivers/scsi/ibmvscsi/ibmvstgt.c
+++ b/drivers/scsi/ibmvscsi/ibmvstgt.c
@@ -55,7 +55,7 @@
/* tmp - will replace with SCSI logging stuff */
#define eprintk(fmt, args...) \
do { \
- printk("%s(%d) " fmt, __FUNCTION__, __LINE__, ##args); \
+ printk("%s(%d) " fmt, __func__, __LINE__, ##args); \
} while (0)
/* #define dprintk eprintk */
#define dprintk(fmt, args...)
@@ -564,7 +564,7 @@ static int crq_queue_create(struct crq_queue *queue, struct srp_target *target)
queue->size * sizeof(*queue->msgs),
DMA_BIDIRECTIONAL);
- if (dma_mapping_error(queue->msg_token))
+ if (dma_mapping_error(target->dev, queue->msg_token))
goto map_failed;
err = h_reg_crq(vport->dma_dev->unit_address, queue->msg_token,
diff --git a/drivers/scsi/ibmvscsi/rpa_vscsi.c b/drivers/scsi/ibmvscsi/rpa_vscsi.c
index 182146100dc..462a8574dad 100644
--- a/drivers/scsi/ibmvscsi/rpa_vscsi.c
+++ b/drivers/scsi/ibmvscsi/rpa_vscsi.c
@@ -253,7 +253,7 @@ static int rpavscsi_init_crq_queue(struct crq_queue *queue,
queue->size * sizeof(*queue->msgs),
DMA_BIDIRECTIONAL);
- if (dma_mapping_error(queue->msg_token))
+ if (dma_mapping_error(hostdata->dev, queue->msg_token))
goto map_failed;
gather_partition_info();
diff --git a/drivers/scsi/ide-scsi.c b/drivers/scsi/ide-scsi.c
index f843c1383a4..461331d3dc4 100644
--- a/drivers/scsi/ide-scsi.c
+++ b/drivers/scsi/ide-scsi.c
@@ -84,7 +84,6 @@ typedef struct ide_scsi_obj {
struct Scsi_Host *host;
struct ide_atapi_pc *pc; /* Current packet command */
- unsigned long flags; /* Status/Action flags */
unsigned long transform; /* SCSI cmd translation layer */
unsigned long log; /* log flags */
} idescsi_scsi_t;
@@ -102,16 +101,23 @@ static struct ide_scsi_obj *ide_scsi_get(struct gendisk *disk)
mutex_lock(&idescsi_ref_mutex);
scsi = ide_scsi_g(disk);
- if (scsi)
- scsi_host_get(scsi->host);
+ if (scsi) {
+ if (ide_device_get(scsi->drive))
+ scsi = NULL;
+ else
+ scsi_host_get(scsi->host);
+ }
mutex_unlock(&idescsi_ref_mutex);
return scsi;
}
static void ide_scsi_put(struct ide_scsi_obj *scsi)
{
+ ide_drive_t *drive = scsi->drive;
+
mutex_lock(&idescsi_ref_mutex);
scsi_host_put(scsi->host);
+ ide_device_put(drive);
mutex_unlock(&idescsi_ref_mutex);
}
@@ -126,23 +132,14 @@ static inline idescsi_scsi_t *drive_to_idescsi(ide_drive_t *ide_drive)
}
/*
- * Per ATAPI device status bits.
- */
-#define IDESCSI_DRQ_INTERRUPT 0 /* DRQ interrupt device */
-
-/*
- * ide-scsi requests.
- */
-#define IDESCSI_PC_RQ 90
-
-/*
* PIO data transfer routine using the scatter gather table.
*/
static void ide_scsi_io_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc,
unsigned int bcount, int write)
{
ide_hwif_t *hwif = drive->hwif;
- xfer_func_t *xf = write ? hwif->output_data : hwif->input_data;
+ const struct ide_tp_ops *tp_ops = hwif->tp_ops;
+ xfer_func_t *xf = write ? tp_ops->output_data : tp_ops->input_data;
char *buf;
int count;
@@ -211,15 +208,15 @@ static int idescsi_check_condition(ide_drive_t *drive,
/* stuff a sense request in front of our current request */
pc = kzalloc(sizeof(struct ide_atapi_pc), GFP_ATOMIC);
- rq = kmalloc(sizeof(struct request), GFP_ATOMIC);
+ rq = blk_get_request(drive->queue, READ, GFP_ATOMIC);
buf = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_ATOMIC);
if (!pc || !rq || !buf) {
kfree(buf);
- kfree(rq);
+ if (rq)
+ blk_put_request(rq);
kfree(pc);
return -ENOMEM;
}
- blk_rq_init(NULL, rq);
rq->special = (char *) pc;
pc->rq = rq;
pc->buf = buf;
@@ -228,7 +225,6 @@ static int idescsi_check_condition(ide_drive_t *drive,
rq->cmd_type = REQ_TYPE_SENSE;
rq->cmd_flags |= REQ_PREEMPT;
pc->timeout = jiffies + WAIT_READY;
- pc->callback = ide_scsi_callback;
/* NOTE! Save the failed packet command in "rq->buffer" */
rq->buffer = (void *) failed_cmd->special;
pc->scsi_cmd = ((struct ide_atapi_pc *) failed_cmd->special)->scsi_cmd;
@@ -237,6 +233,8 @@ static int idescsi_check_condition(ide_drive_t *drive,
ide_scsi_hex_dump(pc->c, 6);
}
rq->rq_disk = scsi->disk;
+ rq->ref_count++;
+ memcpy(rq->cmd, pc->c, 12);
ide_do_drive_cmd(drive, rq);
return 0;
}
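The ide-scsi conversion above (and in idescsi_queue further down) stops faking struct request with kmalloc()/blk_rq_init() and instead allocates requests from the drive's queue with blk_get_request(), releasing them with blk_put_request(). A minimal sketch of that allocation pattern, with 'q' as a placeholder queue and no ide-scsi specifics:

#include <linux/blkdev.h>

/* Illustrative only: obtain a request from the block layer instead of
 * kmalloc'ing one and mark it as a driver-special request.  The matching
 * teardown is blk_put_request(), not kfree(). */
static struct request *example_alloc_special_rq(struct request_queue *q)
{
	struct request *rq;

	rq = blk_get_request(q, READ, GFP_ATOMIC);
	if (!rq)
		return NULL;
	rq->cmd_type = REQ_TYPE_SPECIAL;
	return rq;
}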
@@ -246,10 +244,9 @@ idescsi_atapi_error(ide_drive_t *drive, struct request *rq, u8 stat, u8 err)
{
ide_hwif_t *hwif = drive->hwif;
- if (ide_read_status(drive) & (BUSY_STAT | DRQ_STAT))
+ if (hwif->tp_ops->read_status(hwif) & (BUSY_STAT | DRQ_STAT))
/* force an abort */
- hwif->OUTBSYNC(hwif, WIN_IDLEIMMEDIATE,
- hwif->io_ports.command_addr);
+ hwif->tp_ops->exec_command(hwif, WIN_IDLEIMMEDIATE);
rq->errors++;
@@ -283,7 +280,7 @@ static int idescsi_end_request (ide_drive_t *drive, int uptodate, int nrsecs)
SCSI_SENSE_BUFFERSIZE);
kfree(pc->buf);
kfree(pc);
- kfree(rq);
+ blk_put_request(rq);
pc = opc;
rq = pc->rq;
pc->scsi_cmd->result = (CHECK_CONDITION << 1) |
@@ -314,7 +311,7 @@ static int idescsi_end_request (ide_drive_t *drive, int uptodate, int nrsecs)
pc->done(pc->scsi_cmd);
spin_unlock_irqrestore(host->host_lock, flags);
kfree(pc);
- kfree(rq);
+ blk_put_request(rq);
scsi->pc = NULL;
return 0;
}
@@ -421,10 +418,6 @@ static ide_startstop_t idescsi_do_request (ide_drive_t *drive, struct request *r
if (blk_sense_request(rq) || blk_special_request(rq)) {
struct ide_atapi_pc *pc = (struct ide_atapi_pc *)rq->special;
- idescsi_scsi_t *scsi = drive_to_idescsi(drive);
-
- if (test_bit(IDESCSI_DRQ_INTERRUPT, &scsi->flags))
- pc->flags |= PC_FLAG_DRQ_INTERRUPT;
if (drive->using_dma && !idescsi_map_sg(drive, pc))
pc->flags |= PC_FLAG_DMA_OK;
@@ -460,11 +453,14 @@ static inline void idescsi_add_settings(ide_drive_t *drive) { ; }
static void idescsi_setup (ide_drive_t *drive, idescsi_scsi_t *scsi)
{
if (drive->id && (drive->id->config & 0x0060) == 0x20)
- set_bit (IDESCSI_DRQ_INTERRUPT, &scsi->flags);
+ set_bit(IDE_AFLAG_DRQ_INTERRUPT, &drive->atapi_flags);
clear_bit(IDESCSI_SG_TRANSFORM, &scsi->transform);
#if IDESCSI_DEBUG_LOG
set_bit(IDESCSI_LOG_CMD, &scsi->log);
#endif /* IDESCSI_DEBUG_LOG */
+
+ drive->pc_callback = ide_scsi_callback;
+
idescsi_add_settings(drive);
}
@@ -589,6 +585,7 @@ static int idescsi_queue (struct scsi_cmnd *cmd,
ide_drive_t *drive = scsi->drive;
struct request *rq = NULL;
struct ide_atapi_pc *pc = NULL;
+ int write = cmd->sc_data_direction == DMA_TO_DEVICE;
if (!drive) {
scmd_printk (KERN_ERR, cmd, "drive not present\n");
@@ -596,7 +593,7 @@ static int idescsi_queue (struct scsi_cmnd *cmd,
}
scsi = drive_to_idescsi(drive);
pc = kmalloc(sizeof(struct ide_atapi_pc), GFP_ATOMIC);
- rq = kmalloc(sizeof(struct request), GFP_ATOMIC);
+ rq = blk_get_request(drive->queue, write, GFP_ATOMIC);
if (rq == NULL || pc == NULL) {
printk (KERN_ERR "ide-scsi: %s: out of memory\n", drive->name);
goto abort;
@@ -616,7 +613,6 @@ static int idescsi_queue (struct scsi_cmnd *cmd,
pc->scsi_cmd = cmd;
pc->done = done;
pc->timeout = jiffies + cmd->timeout_per_command;
- pc->callback = ide_scsi_callback;
if (test_bit(IDESCSI_LOG_CMD, &scsi->log)) {
printk ("ide-scsi: %s: que %lu, cmd = ", drive->name, cmd->serial_number);
@@ -627,16 +623,18 @@ static int idescsi_queue (struct scsi_cmnd *cmd,
}
}
- blk_rq_init(NULL, rq);
rq->special = (char *) pc;
rq->cmd_type = REQ_TYPE_SPECIAL;
spin_unlock_irq(host->host_lock);
+ rq->ref_count++;
+ memcpy(rq->cmd, pc->c, 12);
blk_execute_rq_nowait(drive->queue, scsi->disk, rq, 0, NULL);
spin_lock_irq(host->host_lock);
return 0;
abort:
kfree (pc);
- kfree (rq);
+ if (rq)
+ blk_put_request(rq);
cmd->result = DID_ERROR << 16;
done(cmd);
return 0;
@@ -684,7 +682,9 @@ static int idescsi_eh_abort (struct scsi_cmnd *cmd)
if (blk_sense_request(scsi->pc->rq))
kfree(scsi->pc->buf);
- kfree(scsi->pc->rq);
+ /* we need to call blk_put_request twice. */
+ blk_put_request(scsi->pc->rq);
+ blk_put_request(scsi->pc->rq);
kfree(scsi->pc);
scsi->pc = NULL;
@@ -736,7 +736,7 @@ static int idescsi_eh_reset (struct scsi_cmnd *cmd)
kfree(scsi->pc->buf);
kfree(scsi->pc);
scsi->pc = NULL;
- kfree(req);
+ blk_put_request(req);
/* now nuke the drive queue */
while ((req = elv_next_request(drive->queue))) {
diff --git a/drivers/scsi/imm.c b/drivers/scsi/imm.c
index f97d172844b..c2a9a13d788 100644
--- a/drivers/scsi/imm.c
+++ b/drivers/scsi/imm.c
@@ -163,7 +163,7 @@ static int imm_proc_info(struct Scsi_Host *host, char *buffer, char **start,
#if IMM_DEBUG > 0
#define imm_fail(x,y) printk("imm: imm_fail(%i) from %s at line %d\n",\
- y, __FUNCTION__, __LINE__); imm_fail_func(x,y);
+ y, __func__, __LINE__); imm_fail_func(x,y);
static inline void
imm_fail_func(imm_struct *dev, int error_code)
#else
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index d93156671e9..4871dd1f258 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -1403,10 +1403,10 @@ struct ipr_ucode_image_header {
}
#define ipr_trace ipr_dbg("%s: %s: Line: %d\n",\
- __FILE__, __FUNCTION__, __LINE__)
+ __FILE__, __func__, __LINE__)
-#define ENTER IPR_DBG_CMD(printk(KERN_INFO IPR_NAME": Entering %s\n", __FUNCTION__))
-#define LEAVE IPR_DBG_CMD(printk(KERN_INFO IPR_NAME": Leaving %s\n", __FUNCTION__))
+#define ENTER IPR_DBG_CMD(printk(KERN_INFO IPR_NAME": Entering %s\n", __func__))
+#define LEAVE IPR_DBG_CMD(printk(KERN_INFO IPR_NAME": Leaving %s\n", __func__))
#define ipr_err_separator \
ipr_err("----------------------------------------------------------\n")
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 744f06d04a3..48ee8c7f5bd 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -74,7 +74,7 @@ static enum ata_completion_errors sas_to_ata_err(struct task_status_struct *ts)
case SAS_OPEN_TO:
case SAS_OPEN_REJECT:
SAS_DPRINTK("%s: Saw error %d. What to do?\n",
- __FUNCTION__, ts->stat);
+ __func__, ts->stat);
return AC_ERR_OTHER;
case SAS_ABORTED_TASK:
@@ -115,7 +115,7 @@ static void sas_ata_task_done(struct sas_task *task)
} else if (stat->stat != SAM_STAT_GOOD) {
ac = sas_to_ata_err(stat);
if (ac) {
- SAS_DPRINTK("%s: SAS error %x\n", __FUNCTION__,
+ SAS_DPRINTK("%s: SAS error %x\n", __func__,
stat->stat);
/* We saw a SAS error. Send a vague error. */
qc->err_mask = ac;
@@ -244,20 +244,20 @@ static void sas_ata_phy_reset(struct ata_port *ap)
res = i->dft->lldd_I_T_nexus_reset(dev);
if (res != TMF_RESP_FUNC_COMPLETE)
- SAS_DPRINTK("%s: Unable to reset I T nexus?\n", __FUNCTION__);
+ SAS_DPRINTK("%s: Unable to reset I T nexus?\n", __func__);
switch (dev->sata_dev.command_set) {
case ATA_COMMAND_SET:
- SAS_DPRINTK("%s: Found ATA device.\n", __FUNCTION__);
+ SAS_DPRINTK("%s: Found ATA device.\n", __func__);
ap->link.device[0].class = ATA_DEV_ATA;
break;
case ATAPI_COMMAND_SET:
- SAS_DPRINTK("%s: Found ATAPI device.\n", __FUNCTION__);
+ SAS_DPRINTK("%s: Found ATAPI device.\n", __func__);
ap->link.device[0].class = ATA_DEV_ATAPI;
break;
default:
SAS_DPRINTK("%s: Unknown SATA command set: %d.\n",
- __FUNCTION__,
+ __func__,
dev->sata_dev.command_set);
ap->link.device[0].class = ATA_DEV_UNKNOWN;
break;
@@ -299,7 +299,7 @@ static int sas_ata_scr_write(struct ata_port *ap, unsigned int sc_reg_in,
{
struct domain_device *dev = ap->private_data;
- SAS_DPRINTK("STUB %s\n", __FUNCTION__);
+ SAS_DPRINTK("STUB %s\n", __func__);
switch (sc_reg_in) {
case SCR_STATUS:
dev->sata_dev.sstatus = val;
@@ -324,7 +324,7 @@ static int sas_ata_scr_read(struct ata_port *ap, unsigned int sc_reg_in,
{
struct domain_device *dev = ap->private_data;
- SAS_DPRINTK("STUB %s\n", __FUNCTION__);
+ SAS_DPRINTK("STUB %s\n", __func__);
switch (sc_reg_in) {
case SCR_STATUS:
*val = dev->sata_dev.sstatus;
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index aefd865a578..3da02e43678 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -121,7 +121,7 @@ static int smp_execute_task(struct domain_device *dev, void *req, int req_size,
break;
} else {
SAS_DPRINTK("%s: task to dev %016llx response: 0x%x "
- "status 0x%x\n", __FUNCTION__,
+ "status 0x%x\n", __func__,
SAS_ADDR(dev->sas_addr),
task->task_status.resp,
task->task_status.stat);
@@ -1279,7 +1279,7 @@ static int sas_configure_present(struct domain_device *dev, int phy_id,
goto out;
} else if (res != SMP_RESP_FUNC_ACC) {
SAS_DPRINTK("%s: dev %016llx phy 0x%x index 0x%x "
- "result 0x%x\n", __FUNCTION__,
+ "result 0x%x\n", __func__,
SAS_ADDR(dev->sas_addr), phy_id, i, res);
goto out;
}
@@ -1901,7 +1901,7 @@ int sas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
if (!rsp) {
printk("%s: space for a smp response is missing\n",
- __FUNCTION__);
+ __func__);
return -EINVAL;
}
@@ -1914,20 +1914,20 @@ int sas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
if (type != SAS_EDGE_EXPANDER_DEVICE &&
type != SAS_FANOUT_EXPANDER_DEVICE) {
printk("%s: can we send a smp request to a device?\n",
- __FUNCTION__);
+ __func__);
return -EINVAL;
}
dev = sas_find_dev_by_rphy(rphy);
if (!dev) {
- printk("%s: fail to find a domain_device?\n", __FUNCTION__);
+ printk("%s: fail to find a domain_device?\n", __func__);
return -EINVAL;
}
/* do we need to support multiple segments? */
if (req->bio->bi_vcnt > 1 || rsp->bio->bi_vcnt > 1) {
printk("%s: multiple segments req %u %u, rsp %u %u\n",
- __FUNCTION__, req->bio->bi_vcnt, req->data_len,
+ __func__, req->bio->bi_vcnt, req->data_len,
rsp->bio->bi_vcnt, rsp->data_len);
return -EINVAL;
}
diff --git a/drivers/scsi/libsas/sas_port.c b/drivers/scsi/libsas/sas_port.c
index 39ae68a3b0e..139935a121b 100644
--- a/drivers/scsi/libsas/sas_port.c
+++ b/drivers/scsi/libsas/sas_port.c
@@ -50,7 +50,7 @@ static void sas_form_port(struct asd_sas_phy *phy)
sas_deform_port(phy);
else {
SAS_DPRINTK("%s: phy%d belongs to port%d already(%d)!\n",
- __FUNCTION__, phy->id, phy->port->id,
+ __func__, phy->id, phy->port->id,
phy->port->num_phys);
return;
}
@@ -78,7 +78,7 @@ static void sas_form_port(struct asd_sas_phy *phy)
if (i >= sas_ha->num_phys) {
printk(KERN_NOTICE "%s: couldn't find a free port, bug?\n",
- __FUNCTION__);
+ __func__);
spin_unlock_irqrestore(&sas_ha->phy_port_lock, flags);
return;
}
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index 601ec5b6a7f..a8e3ef30907 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -343,7 +343,7 @@ static enum task_disposition sas_scsi_find_task(struct sas_task *task)
flags);
SAS_DPRINTK("%s: task 0x%p aborted from "
"task_queue\n",
- __FUNCTION__, task);
+ __func__, task);
return TASK_IS_ABORTED;
}
}
@@ -351,13 +351,13 @@ static enum task_disposition sas_scsi_find_task(struct sas_task *task)
}
for (i = 0; i < 5; i++) {
- SAS_DPRINTK("%s: aborting task 0x%p\n", __FUNCTION__, task);
+ SAS_DPRINTK("%s: aborting task 0x%p\n", __func__, task);
res = si->dft->lldd_abort_task(task);
spin_lock_irqsave(&task->task_state_lock, flags);
if (task->task_state_flags & SAS_TASK_STATE_DONE) {
spin_unlock_irqrestore(&task->task_state_lock, flags);
- SAS_DPRINTK("%s: task 0x%p is done\n", __FUNCTION__,
+ SAS_DPRINTK("%s: task 0x%p is done\n", __func__,
task);
return TASK_IS_DONE;
}
@@ -365,24 +365,24 @@ static enum task_disposition sas_scsi_find_task(struct sas_task *task)
if (res == TMF_RESP_FUNC_COMPLETE) {
SAS_DPRINTK("%s: task 0x%p is aborted\n",
- __FUNCTION__, task);
+ __func__, task);
return TASK_IS_ABORTED;
} else if (si->dft->lldd_query_task) {
SAS_DPRINTK("%s: querying task 0x%p\n",
- __FUNCTION__, task);
+ __func__, task);
res = si->dft->lldd_query_task(task);
switch (res) {
case TMF_RESP_FUNC_SUCC:
SAS_DPRINTK("%s: task 0x%p at LU\n",
- __FUNCTION__, task);
+ __func__, task);
return TASK_IS_AT_LU;
case TMF_RESP_FUNC_COMPLETE:
SAS_DPRINTK("%s: task 0x%p not at LU\n",
- __FUNCTION__, task);
+ __func__, task);
return TASK_IS_NOT_AT_LU;
case TMF_RESP_FUNC_FAILED:
SAS_DPRINTK("%s: task 0x%p failed to abort\n",
- __FUNCTION__, task);
+ __func__, task);
return TASK_ABORT_FAILED;
}
@@ -545,7 +545,7 @@ Again:
if (need_reset) {
SAS_DPRINTK("%s: task 0x%p requests reset\n",
- __FUNCTION__, task);
+ __func__, task);
goto reset;
}
@@ -556,13 +556,13 @@ Again:
switch (res) {
case TASK_IS_DONE:
- SAS_DPRINTK("%s: task 0x%p is done\n", __FUNCTION__,
+ SAS_DPRINTK("%s: task 0x%p is done\n", __func__,
task);
sas_eh_finish_cmd(cmd);
continue;
case TASK_IS_ABORTED:
SAS_DPRINTK("%s: task 0x%p is aborted\n",
- __FUNCTION__, task);
+ __func__, task);
sas_eh_finish_cmd(cmd);
continue;
case TASK_IS_AT_LU:
@@ -633,7 +633,7 @@ Again:
}
return list_empty(work_q);
clear_q:
- SAS_DPRINTK("--- Exit %s -- clear_q\n", __FUNCTION__);
+ SAS_DPRINTK("--- Exit %s -- clear_q\n", __func__);
list_for_each_entry_safe(cmd, n, work_q, eh_entry)
sas_eh_finish_cmd(cmd);
@@ -650,7 +650,7 @@ void sas_scsi_recover_host(struct Scsi_Host *shost)
list_splice_init(&shost->eh_cmd_q, &eh_work_q);
spin_unlock_irqrestore(shost->host_lock, flags);
- SAS_DPRINTK("Enter %s\n", __FUNCTION__);
+ SAS_DPRINTK("Enter %s\n", __func__);
/*
* Deal with commands that still have SAS tasks (i.e. they didn't
* complete via the normal sas_task completion mechanism)
@@ -669,7 +669,7 @@ void sas_scsi_recover_host(struct Scsi_Host *shost)
out:
scsi_eh_flush_done_q(&ha->eh_done_q);
- SAS_DPRINTK("--- Exit %s\n", __FUNCTION__);
+ SAS_DPRINTK("--- Exit %s\n", __func__);
return;
}
@@ -990,7 +990,7 @@ int __sas_task_abort(struct sas_task *task)
if (task->task_state_flags & SAS_TASK_STATE_ABORTED ||
task->task_state_flags & SAS_TASK_STATE_DONE) {
spin_unlock_irqrestore(&task->task_state_lock, flags);
- SAS_DPRINTK("%s: Task %p already finished.\n", __FUNCTION__,
+ SAS_DPRINTK("%s: Task %p already finished.\n", __func__,
task);
return 0;
}
diff --git a/drivers/scsi/libsrp.c b/drivers/scsi/libsrp.c
index 6d6a76e65a6..15e2d132e8b 100644
--- a/drivers/scsi/libsrp.c
+++ b/drivers/scsi/libsrp.c
@@ -39,7 +39,7 @@ enum srp_task_attributes {
/* tmp - will replace with SCSI logging stuff */
#define eprintk(fmt, args...) \
do { \
- printk("%s(%d) " fmt, __FUNCTION__, __LINE__, ##args); \
+ printk("%s(%d) " fmt, __func__, __LINE__, ##args); \
} while (0)
/* #define dprintk eprintk */
#define dprintk(fmt, args...)
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 5b6e5395c8e..d51a2a4b43e 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -2083,7 +2083,7 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
if (iocbq_entry == NULL) {
printk(KERN_ERR "%s: only allocated %d iocbs of "
"expected %d count. Unloading driver.\n",
- __FUNCTION__, i, LPFC_IOCB_LIST_CNT);
+ __func__, i, LPFC_IOCB_LIST_CNT);
error = -ENOMEM;
goto out_free_iocbq;
}
@@ -2093,7 +2093,7 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
kfree (iocbq_entry);
printk(KERN_ERR "%s: failed to allocate IOTAG. "
"Unloading driver.\n",
- __FUNCTION__);
+ __func__);
error = -ENOMEM;
goto out_free_iocbq;
}
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index c94da4f2b8a..1bcebbd3dfa 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -341,7 +341,7 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
printk(KERN_ERR "%s: Too many sg segments from "
"dma_map_sg. Config %d, seg_cnt %d",
- __FUNCTION__, phba->cfg_sg_seg_cnt,
+ __func__, phba->cfg_sg_seg_cnt,
lpfc_cmd->seg_cnt);
scsi_dma_unmap(scsi_cmnd);
return 1;
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index f40aa7b905f..50fe0764673 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -219,7 +219,7 @@ lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
case CMD_IOCB_LOGENTRY_CN:
case CMD_IOCB_LOGENTRY_ASYNC_CN:
printk("%s - Unhandled SLI-3 Command x%x\n",
- __FUNCTION__, iocb_cmnd);
+ __func__, iocb_cmnd);
type = LPFC_UNKNOWN_IOCB;
break;
default:
@@ -1715,7 +1715,7 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
rspiocbp = __lpfc_sli_get_iocbq(phba);
if (rspiocbp == NULL) {
printk(KERN_ERR "%s: out of buffers! Failing "
- "completion.\n", __FUNCTION__);
+ "completion.\n", __func__);
break;
}
@@ -3793,7 +3793,7 @@ lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
break;
default:
printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
- __FUNCTION__, ctx_cmd);
+ __func__, ctx_cmd);
break;
}
diff --git a/drivers/scsi/megaraid/mega_common.h b/drivers/scsi/megaraid/mega_common.h
index f62ed468ada..5ead1283a84 100644
--- a/drivers/scsi/megaraid/mega_common.h
+++ b/drivers/scsi/megaraid/mega_common.h
@@ -265,7 +265,7 @@ typedef struct {
#define ASSERT(expression) \
if (!(expression)) { \
ASSERT_ACTION("assertion failed:(%s), file: %s, line: %d:%s\n", \
- #expression, __FILE__, __LINE__, __FUNCTION__); \
+ #expression, __FILE__, __LINE__, __func__); \
}
#else
#define ASSERT(expression)
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index 70a0f11f48b..805bb61dde1 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -458,7 +458,7 @@ megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
if (adapter == NULL) {
con_log(CL_ANN, (KERN_WARNING
- "megaraid: out of memory, %s %d.\n", __FUNCTION__, __LINE__));
+ "megaraid: out of memory, %s %d.\n", __func__, __LINE__));
goto out_probe_one;
}
@@ -1002,7 +1002,7 @@ megaraid_alloc_cmd_packets(adapter_t *adapter)
if (!raid_dev->una_mbox64) {
con_log(CL_ANN, (KERN_WARNING
- "megaraid: out of memory, %s %d\n", __FUNCTION__,
+ "megaraid: out of memory, %s %d\n", __func__,
__LINE__));
return -1;
}
@@ -1030,7 +1030,7 @@ megaraid_alloc_cmd_packets(adapter_t *adapter)
if (!adapter->ibuf) {
con_log(CL_ANN, (KERN_WARNING
- "megaraid: out of memory, %s %d\n", __FUNCTION__,
+ "megaraid: out of memory, %s %d\n", __func__,
__LINE__));
goto out_free_common_mbox;
@@ -1052,7 +1052,7 @@ megaraid_alloc_cmd_packets(adapter_t *adapter)
if (adapter->kscb_list == NULL) {
con_log(CL_ANN, (KERN_WARNING
- "megaraid: out of memory, %s %d\n", __FUNCTION__,
+ "megaraid: out of memory, %s %d\n", __func__,
__LINE__));
goto out_free_ibuf;
}
@@ -1060,7 +1060,7 @@ megaraid_alloc_cmd_packets(adapter_t *adapter)
// memory allocation for our command packets
if (megaraid_mbox_setup_dma_pools(adapter) != 0) {
con_log(CL_ANN, (KERN_WARNING
- "megaraid: out of memory, %s %d\n", __FUNCTION__,
+ "megaraid: out of memory, %s %d\n", __func__,
__LINE__));
goto out_free_scb_list;
}
@@ -2981,7 +2981,7 @@ megaraid_mbox_product_info(adapter_t *adapter)
if (pinfo == NULL) {
con_log(CL_ANN, (KERN_WARNING
- "megaraid: out of memory, %s %d\n", __FUNCTION__,
+ "megaraid: out of memory, %s %d\n", __func__,
__LINE__));
return -1;
@@ -3508,7 +3508,7 @@ megaraid_cmm_register(adapter_t *adapter)
if (adapter->uscb_list == NULL) {
con_log(CL_ANN, (KERN_WARNING
- "megaraid: out of memory, %s %d\n", __FUNCTION__,
+ "megaraid: out of memory, %s %d\n", __func__,
__LINE__));
return -1;
}
@@ -3879,7 +3879,7 @@ megaraid_sysfs_alloc_resources(adapter_t *adapter)
!raid_dev->sysfs_buffer) {
con_log(CL_ANN, (KERN_WARNING
- "megaraid: out of memory, %s %d\n", __FUNCTION__,
+ "megaraid: out of memory, %s %d\n", __func__,
__LINE__));
rval = -ENOMEM;
diff --git a/drivers/scsi/megaraid/megaraid_mm.c b/drivers/scsi/megaraid/megaraid_mm.c
index ac3b280c2a7..f680561d2c6 100644
--- a/drivers/scsi/megaraid/megaraid_mm.c
+++ b/drivers/scsi/megaraid/megaraid_mm.c
@@ -929,7 +929,7 @@ mraid_mm_register_adp(mraid_mmadp_t *lld_adp)
!adapter->pthru_dma_pool) {
con_log(CL_ANN, (KERN_WARNING
- "megaraid cmm: out of memory, %s %d\n", __FUNCTION__,
+ "megaraid cmm: out of memory, %s %d\n", __func__,
__LINE__));
rval = (-ENOMEM);
@@ -957,7 +957,7 @@ mraid_mm_register_adp(mraid_mmadp_t *lld_adp)
con_log(CL_ANN, (KERN_WARNING
"megaraid cmm: out of memory, %s %d\n",
- __FUNCTION__, __LINE__));
+ __func__, __LINE__));
rval = (-ENOMEM);
diff --git a/drivers/scsi/nsp32.c b/drivers/scsi/nsp32.c
index 7fed3537215..edf9fdb3cb3 100644
--- a/drivers/scsi/nsp32.c
+++ b/drivers/scsi/nsp32.c
@@ -299,9 +299,9 @@ static struct scsi_host_template nsp32_template = {
#else
# define NSP32_DEBUG_MASK 0xffffff
# define nsp32_msg(type, args...) \
- nsp32_message (__FUNCTION__, __LINE__, (type), args)
+ nsp32_message (__func__, __LINE__, (type), args)
# define nsp32_dbg(mask, args...) \
- nsp32_dmessage(__FUNCTION__, __LINE__, (mask), args)
+ nsp32_dmessage(__func__, __LINE__, (mask), args)
#endif
#define NSP32_DEBUG_QUEUECOMMAND BIT(0)
diff --git a/drivers/scsi/nsp32_debug.c b/drivers/scsi/nsp32_debug.c
index ef3c59cbcff..2fb3fb58858 100644
--- a/drivers/scsi/nsp32_debug.c
+++ b/drivers/scsi/nsp32_debug.c
@@ -88,7 +88,7 @@ static void print_commandk (unsigned char *command)
int i,s;
// printk(KERN_DEBUG);
print_opcodek(command[0]);
- /*printk(KERN_DEBUG "%s ", __FUNCTION__);*/
+ /*printk(KERN_DEBUG "%s ", __func__);*/
if ((command[0] >> 5) == 6 ||
(command[0] >> 5) == 7 ) {
s = 12; /* vender specific */
diff --git a/drivers/scsi/pcmcia/nsp_cs.c b/drivers/scsi/pcmcia/nsp_cs.c
index 5082ca3c687..a221b6ef9fa 100644
--- a/drivers/scsi/pcmcia/nsp_cs.c
+++ b/drivers/scsi/pcmcia/nsp_cs.c
@@ -107,9 +107,9 @@ static nsp_hw_data nsp_data_base; /* attach <-> detect glue */
#else
# define NSP_DEBUG_MASK 0xffffff
# define nsp_msg(type, args...) \
- nsp_cs_message (__FUNCTION__, __LINE__, (type), args)
+ nsp_cs_message (__func__, __LINE__, (type), args)
# define nsp_dbg(mask, args...) \
- nsp_cs_dmessage(__FUNCTION__, __LINE__, (mask), args)
+ nsp_cs_dmessage(__func__, __LINE__, (mask), args)
#endif
#define NSP_DEBUG_QUEUECOMMAND BIT(0)
diff --git a/drivers/scsi/pcmcia/nsp_debug.c b/drivers/scsi/pcmcia/nsp_debug.c
index 2f75fe6e35a..3c6ef64fcbf 100644
--- a/drivers/scsi/pcmcia/nsp_debug.c
+++ b/drivers/scsi/pcmcia/nsp_debug.c
@@ -90,7 +90,7 @@ static void print_commandk (unsigned char *command)
int i, s;
printk(KERN_DEBUG);
print_opcodek(command[0]);
- /*printk(KERN_DEBUG "%s ", __FUNCTION__);*/
+ /*printk(KERN_DEBUG "%s ", __func__);*/
if ((command[0] >> 5) == 6 ||
(command[0] >> 5) == 7 ) {
s = 12; /* vender specific */
diff --git a/drivers/scsi/ppa.c b/drivers/scsi/ppa.c
index f655ae320b4..8aa0bd987e2 100644
--- a/drivers/scsi/ppa.c
+++ b/drivers/scsi/ppa.c
@@ -171,7 +171,7 @@ static int device_check(ppa_struct *dev);
#if PPA_DEBUG > 0
#define ppa_fail(x,y) printk("ppa: ppa_fail(%i) from %s at line %d\n",\
- y, __FUNCTION__, __LINE__); ppa_fail_func(x,y);
+ y, __func__, __LINE__); ppa_fail_func(x,y);
static inline void ppa_fail_func(ppa_struct *dev, int error_code)
#else
static inline void ppa_fail(ppa_struct *dev, int error_code)
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index 3754ab87f89..37f9ba0cd79 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -1695,7 +1695,7 @@ qla1280_load_firmware_dma(struct scsi_qla_host *ha)
risc_code_size = *ql1280_board_tbl[ha->devnum].fwlen;
dprintk(1, "%s: DMA RISC code (%i) words\n",
- __FUNCTION__, risc_code_size);
+ __func__, risc_code_size);
num = 0;
while (risc_code_size > 0) {
@@ -1721,7 +1721,7 @@ qla1280_load_firmware_dma(struct scsi_qla_host *ha)
mb[7] = pci_dma_hi32(ha->request_dma) & 0xffff;
mb[6] = pci_dma_hi32(ha->request_dma) >> 16;
dprintk(2, "%s: op=%d 0x%p = 0x%4x,0x%4x,0x%4x,0x%4x\n",
- __FUNCTION__, mb[0],
+ __func__, mb[0],
(void *)(long)ha->request_dma,
mb[6], mb[7], mb[2], mb[3]);
err = qla1280_mailbox_command(ha, BIT_4 | BIT_3 | BIT_2 |
@@ -1753,10 +1753,10 @@ qla1280_load_firmware_dma(struct scsi_qla_host *ha)
if (tbuf[i] != sp[i] && warn++ < 10) {
printk(KERN_ERR "%s: FW compare error @ "
"byte(0x%x) loop#=%x\n",
- __FUNCTION__, i, num);
+ __func__, i, num);
printk(KERN_ERR "%s: FWbyte=%x "
"FWfromChip=%x\n",
- __FUNCTION__, sp[i], tbuf[i]);
+ __func__, sp[i], tbuf[i]);
/*break; */
}
}
@@ -1781,7 +1781,7 @@ qla1280_start_firmware(struct scsi_qla_host *ha)
int err;
dprintk(1, "%s: Verifying checksum of loaded RISC code.\n",
- __FUNCTION__);
+ __func__);
/* Verify checksum of loaded RISC code. */
mb[0] = MBC_VERIFY_CHECKSUM;
@@ -1794,7 +1794,7 @@ qla1280_start_firmware(struct scsi_qla_host *ha)
}
/* Start firmware execution. */
- dprintk(1, "%s: start firmware running.\n", __FUNCTION__);
+ dprintk(1, "%s: start firmware running.\n", __func__);
mb[0] = MBC_EXECUTE_FIRMWARE;
mb[1] = *ql1280_board_tbl[ha->devnum].fwstart;
err = qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]);
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 8dd88fc1244..a319a20ed44 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -8,6 +8,7 @@
#include <linux/kthread.h>
#include <linux/vmalloc.h>
+#include <linux/delay.h>
static int qla24xx_vport_disable(struct fc_vport *, bool);
@@ -20,18 +21,12 @@ qla2x00_sysfs_read_fw_dump(struct kobject *kobj,
{
struct scsi_qla_host *ha = shost_priv(dev_to_shost(container_of(kobj,
struct device, kobj)));
- char *rbuf = (char *)ha->fw_dump;
if (ha->fw_dump_reading == 0)
return 0;
- if (off > ha->fw_dump_len)
- return 0;
- if (off + count > ha->fw_dump_len)
- count = ha->fw_dump_len - off;
- memcpy(buf, &rbuf[off], count);
-
- return (count);
+ return memory_read_from_buffer(buf, count, &off, ha->fw_dump,
+ ha->fw_dump_len);
}
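The sysfs read handlers in this file (fw_dump here, then nvram, optrom and vpd below) are collapsed onto the memory_read_from_buffer() helper, which clamps the requested offset and count against the source buffer, copies the data and advances the file position. A rough open-coded equivalent follows, for illustration only; the real helper is the kernel library function of the same name.

#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>

/* Approximation of memory_read_from_buffer(to, count, ppos, from, available):
 * bounds-check against 'available', copy, and advance *ppos. */
static ssize_t example_read_from_buffer(void *to, size_t count, loff_t *ppos,
					const void *from, size_t available)
{
	loff_t pos = *ppos;

	if (pos < 0)
		return -EINVAL;
	if (pos >= available || !count)
		return 0;
	if (count > available - pos)
		count = available - pos;
	memcpy(to, from + pos, count);
	*ppos = pos + count;
	return count;
}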
static ssize_t
@@ -94,20 +89,13 @@ qla2x00_sysfs_read_nvram(struct kobject *kobj,
{
struct scsi_qla_host *ha = shost_priv(dev_to_shost(container_of(kobj,
struct device, kobj)));
- int size = ha->nvram_size;
- char *nvram_cache = ha->nvram;
- if (!capable(CAP_SYS_ADMIN) || off > size || count == 0)
+ if (!capable(CAP_SYS_ADMIN))
return 0;
- if (off + count > size) {
- size -= off;
- count = size;
- }
/* Read NVRAM data from cache. */
- memcpy(buf, &nvram_cache[off], count);
-
- return count;
+ return memory_read_from_buffer(buf, count, &off, ha->nvram,
+ ha->nvram_size);
}
static ssize_t
@@ -175,14 +163,9 @@ qla2x00_sysfs_read_optrom(struct kobject *kobj,
if (ha->optrom_state != QLA_SREADING)
return 0;
- if (off > ha->optrom_region_size)
- return 0;
- if (off + count > ha->optrom_region_size)
- count = ha->optrom_region_size - off;
-
- memcpy(buf, &ha->optrom_buffer[off], count);
- return count;
+ return memory_read_from_buffer(buf, count, &off, ha->optrom_buffer,
+ ha->optrom_region_size);
}
static ssize_t
@@ -374,20 +357,12 @@ qla2x00_sysfs_read_vpd(struct kobject *kobj,
{
struct scsi_qla_host *ha = shost_priv(dev_to_shost(container_of(kobj,
struct device, kobj)));
- int size = ha->vpd_size;
- char *vpd_cache = ha->vpd;
- if (!capable(CAP_SYS_ADMIN) || off > size || count == 0)
+ if (!capable(CAP_SYS_ADMIN))
return 0;
- if (off + count > size) {
- size -= off;
- count = size;
- }
/* Read NVRAM data from cache. */
- memcpy(buf, &vpd_cache[off], count);
-
- return count;
+ return memory_read_from_buffer(buf, count, &off, ha->vpd, ha->vpd_size);
}
static ssize_t
@@ -557,8 +532,10 @@ qla2x00_serial_num_show(struct device *dev, struct device_attribute *attr,
scsi_qla_host_t *ha = shost_priv(class_to_shost(dev));
uint32_t sn;
- if (IS_FWI2_CAPABLE(ha))
- return snprintf(buf, PAGE_SIZE, "\n");
+ if (IS_FWI2_CAPABLE(ha)) {
+ qla2xxx_get_vpd_field(ha, "SN", buf, PAGE_SIZE);
+ return snprintf(buf, PAGE_SIZE, "%s\n", buf);
+ }
sn = ((ha->serial0 & 0x1f) << 16) | (ha->serial2 << 8) | ha->serial1;
return snprintf(buf, PAGE_SIZE, "%c%05d\n", 'A' + sn / 100000,
@@ -809,6 +786,16 @@ qla2x00_optrom_fw_version_show(struct device *dev,
ha->fw_revision[3]);
}
+static ssize_t
+qla2x00_total_isp_aborts_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ scsi_qla_host_t *ha = shost_priv(class_to_shost(dev));
+
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ ha->qla_stats.total_isp_aborts);
+}
+
static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show, NULL);
static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL);
static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL);
@@ -831,6 +818,8 @@ static DEVICE_ATTR(optrom_fcode_version, S_IRUGO,
qla2x00_optrom_fcode_version_show, NULL);
static DEVICE_ATTR(optrom_fw_version, S_IRUGO, qla2x00_optrom_fw_version_show,
NULL);
+static DEVICE_ATTR(total_isp_aborts, S_IRUGO, qla2x00_total_isp_aborts_show,
+ NULL);
struct device_attribute *qla2x00_host_attrs[] = {
&dev_attr_driver_version,
@@ -849,6 +838,7 @@ struct device_attribute *qla2x00_host_attrs[] = {
&dev_attr_optrom_efi_version,
&dev_attr_optrom_fcode_version,
&dev_attr_optrom_fw_version,
+ &dev_attr_total_isp_aborts,
NULL,
};
@@ -972,26 +962,39 @@ qla2x00_get_starget_port_id(struct scsi_target *starget)
}
static void
-qla2x00_get_rport_loss_tmo(struct fc_rport *rport)
+qla2x00_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
{
- struct Scsi_Host *host = rport_to_shost(rport);
- scsi_qla_host_t *ha = shost_priv(host);
-
- rport->dev_loss_tmo = ha->port_down_retry_count + 5;
+ if (timeout)
+ rport->dev_loss_tmo = timeout;
+ else
+ rport->dev_loss_tmo = 1;
}
static void
-qla2x00_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
+qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport)
{
struct Scsi_Host *host = rport_to_shost(rport);
- scsi_qla_host_t *ha = shost_priv(host);
+ fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
+
+ qla2x00_abort_fcport_cmds(fcport);
+
+ /*
+ * Transport has effectively 'deleted' the rport, clear
+ * all local references.
+ */
+ spin_lock_irq(host->host_lock);
+ fcport->rport = NULL;
+ *((fc_port_t **)rport->dd_data) = NULL;
+ spin_unlock_irq(host->host_lock);
+}
- if (timeout)
- ha->port_down_retry_count = timeout;
- else
- ha->port_down_retry_count = 1;
+static void
+qla2x00_terminate_rport_io(struct fc_rport *rport)
+{
+ fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
- rport->dev_loss_tmo = ha->port_down_retry_count + 5;
+ qla2x00_abort_fcport_cmds(fcport);
+ scsi_target_unblock(&rport->dev);
}
static int
@@ -1045,6 +1048,7 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
pfc_host_stat->invalid_tx_word_count = stats->inval_xmit_word_cnt;
pfc_host_stat->invalid_crc_count = stats->inval_crc_cnt;
if (IS_FWI2_CAPABLE(ha)) {
+ pfc_host_stat->lip_count = stats->lip_cnt;
pfc_host_stat->tx_frames = stats->tx_frames;
pfc_host_stat->rx_frames = stats->rx_frames;
pfc_host_stat->dumped_frames = stats->dumped_frames;
@@ -1173,17 +1177,16 @@ vport_create_failed_2:
static int
qla24xx_vport_delete(struct fc_vport *fc_vport)
{
- scsi_qla_host_t *ha = shost_priv(fc_vport->shost);
scsi_qla_host_t *vha = fc_vport->dd_data;
+ scsi_qla_host_t *pha = to_qla_parent(vha);
+
+ while (test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags) ||
+ test_bit(FCPORT_UPDATE_NEEDED, &pha->dpc_flags))
+ msleep(1000);
qla24xx_disable_vp(vha);
qla24xx_deallocate_vp_id(vha);
- mutex_lock(&ha->vport_lock);
- ha->cur_vport_count--;
- clear_bit(vha->vp_idx, ha->vp_idx_map);
- mutex_unlock(&ha->vport_lock);
-
kfree(vha->node_name);
kfree(vha->port_name);
@@ -1248,11 +1251,12 @@ struct fc_function_template qla2xxx_transport_functions = {
.get_starget_port_id = qla2x00_get_starget_port_id,
.show_starget_port_id = 1,
- .get_rport_dev_loss_tmo = qla2x00_get_rport_loss_tmo,
.set_rport_dev_loss_tmo = qla2x00_set_rport_loss_tmo,
.show_rport_dev_loss_tmo = 1,
.issue_fc_host_lip = qla2x00_issue_lip,
+ .dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk,
+ .terminate_rport_io = qla2x00_terminate_rport_io,
.get_fc_host_stats = qla2x00_get_fc_host_stats,
.vport_create = qla24xx_vport_create,
@@ -1291,11 +1295,12 @@ struct fc_function_template qla2xxx_transport_vport_functions = {
.get_starget_port_id = qla2x00_get_starget_port_id,
.show_starget_port_id = 1,
- .get_rport_dev_loss_tmo = qla2x00_get_rport_loss_tmo,
.set_rport_dev_loss_tmo = qla2x00_set_rport_loss_tmo,
.show_rport_dev_loss_tmo = 1,
.issue_fc_host_lip = qla2x00_issue_lip,
+ .dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk,
+ .terminate_rport_io = qla2x00_terminate_rport_io,
.get_fc_host_stats = qla2x00_get_fc_host_stats,
};
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index cbef785765c..510ba64bc28 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -216,7 +216,7 @@ qla24xx_soft_reset(scsi_qla_host_t *ha)
static int
qla2xxx_dump_ram(scsi_qla_host_t *ha, uint32_t addr, uint16_t *ram,
- uint16_t ram_words, void **nxt)
+ uint32_t ram_words, void **nxt)
{
int rval;
uint32_t cnt, stat, timer, words, idx;
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 8dd600013bd..6da31ba9440 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -864,7 +864,8 @@ struct link_statistics {
uint32_t prim_seq_err_cnt;
uint32_t inval_xmit_word_cnt;
uint32_t inval_crc_cnt;
- uint32_t unused1[0x1b];
+ uint32_t lip_cnt;
+ uint32_t unused1[0x1a];
uint32_t tx_frames;
uint32_t rx_frames;
uint32_t dumped_frames;
@@ -1544,7 +1545,6 @@ typedef struct fc_port {
int login_retry;
atomic_t port_down_timer;
- spinlock_t rport_lock;
struct fc_rport *rport, *drport;
u32 supported_classes;
@@ -2155,6 +2155,10 @@ struct qla_chip_state_84xx {
uint32_t gold_fw_version;
};
+struct qla_statistics {
+ uint32_t total_isp_aborts;
+};
+
/*
* Linux Host Adapter structure
*/
@@ -2166,7 +2170,6 @@ typedef struct scsi_qla_host {
struct pci_dev *pdev;
unsigned long host_no;
- unsigned long instance;
volatile struct {
uint32_t init_done :1;
@@ -2515,7 +2518,7 @@ typedef struct scsi_qla_host {
uint8_t model_number[16+1];
#define BINZERO "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"
- char *model_desc;
+ char model_desc[80];
uint8_t adapter_id[16+1];
uint8_t *node_name;
@@ -2596,6 +2599,7 @@ typedef struct scsi_qla_host {
int cur_vport_count;
struct qla_chip_state_84xx *cs84xx;
+ struct qla_statistics qla_stats;
} scsi_qla_host_t;
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 9b4bebee687..0b156735e9a 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -62,7 +62,7 @@ extern int ql2xfdmienable;
extern int ql2xallocfwdump;
extern int ql2xextended_error_logging;
extern int ql2xqfullrampup;
-extern int num_hosts;
+extern int ql2xiidmaenable;
extern int qla2x00_loop_reset(scsi_qla_host_t *);
extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);
@@ -71,6 +71,8 @@ extern int qla2x00_post_aen_work(struct scsi_qla_host *, enum
extern int qla2x00_post_hwe_work(struct scsi_qla_host *, uint16_t , uint16_t,
uint16_t, uint16_t);
+extern void qla2x00_abort_fcport_cmds(fc_port_t *);
+
/*
* Global Functions in qla_mid.c source file.
*/
@@ -312,6 +314,7 @@ extern int qla2xxx_hw_event_log(scsi_qla_host_t *, uint16_t , uint16_t,
uint16_t, uint16_t);
extern void qla2xxx_get_flash_info(scsi_qla_host_t *);
+extern int qla2xxx_get_vpd_field(scsi_qla_host_t *, char *, char *, size_t);
/*
* Global Function Prototypes in qla_dbg.c source file.
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 4cb80b476c8..c2a4bfbcb05 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -1661,6 +1661,12 @@ qla2x00_fdmi_register(scsi_qla_host_t *ha)
{
int rval;
+ if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
+ DEBUG2(printk("scsi(%ld): FDMI unsupported on "
+ "ISP2100/ISP2200.\n", ha->host_no));
+ return QLA_SUCCESS;
+ }
+
rval = qla2x00_mgmt_svr_login(ha);
if (rval)
return rval;
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index bbbc5a632a1..601a6b29750 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -334,6 +334,8 @@ static int
qla2x00_isp_firmware(scsi_qla_host_t *ha)
{
int rval;
+ uint16_t loop_id, topo, sw_cap;
+ uint8_t domain, area, al_pa;
/* Assume loading risc code */
rval = QLA_FUNCTION_FAILED;
@@ -345,6 +347,11 @@ qla2x00_isp_firmware(scsi_qla_host_t *ha)
/* Verify checksum of loaded RISC code. */
rval = qla2x00_verify_checksum(ha, ha->fw_srisc_address);
+ if (rval == QLA_SUCCESS) {
+ /* And, verify we are not in ROM code. */
+ rval = qla2x00_get_adapter_id(ha, &loop_id, &al_pa,
+ &area, &domain, &topo, &sw_cap);
+ }
}
if (rval) {
@@ -722,7 +729,7 @@ qla24xx_chip_diag(scsi_qla_host_t *ha)
/* Perform RISC reset. */
qla24xx_reset_risc(ha);
- ha->fw_transfer_size = REQUEST_ENTRY_SIZE * 1024;
+ ha->fw_transfer_size = REQUEST_ENTRY_SIZE * ha->request_q_length;
rval = qla2x00_mbx_reg_test(ha);
if (rval) {
@@ -768,42 +775,16 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *ha)
mem_size = (ha->fw_memory_size - 0x100000 + 1) *
sizeof(uint32_t);
- /* Allocate memory for Extended Trace Buffer. */
- tc = dma_alloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma,
- GFP_KERNEL);
- if (!tc) {
- qla_printk(KERN_WARNING, ha, "Unable to allocate "
- "(%d KB) for EFT.\n", EFT_SIZE / 1024);
- goto cont_alloc;
- }
-
- memset(tc, 0, EFT_SIZE);
- rval = qla2x00_enable_eft_trace(ha, tc_dma, EFT_NUM_BUFFERS);
- if (rval) {
- qla_printk(KERN_WARNING, ha, "Unable to initialize "
- "EFT (%d).\n", rval);
- dma_free_coherent(&ha->pdev->dev, EFT_SIZE, tc,
- tc_dma);
- goto cont_alloc;
- }
-
- qla_printk(KERN_INFO, ha, "Allocated (%d KB) for EFT...\n",
- EFT_SIZE / 1024);
-
- eft_size = EFT_SIZE;
- ha->eft_dma = tc_dma;
- ha->eft = tc;
-
/* Allocate memory for Fibre Channel Event Buffer. */
if (!IS_QLA25XX(ha))
- goto cont_alloc;
+ goto try_eft;
tc = dma_alloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma,
GFP_KERNEL);
if (!tc) {
qla_printk(KERN_WARNING, ha, "Unable to allocate "
"(%d KB) for FCE.\n", FCE_SIZE / 1024);
- goto cont_alloc;
+ goto try_eft;
}
memset(tc, 0, FCE_SIZE);
@@ -815,7 +796,7 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *ha)
dma_free_coherent(&ha->pdev->dev, FCE_SIZE, tc,
tc_dma);
ha->flags.fce_enabled = 0;
- goto cont_alloc;
+ goto try_eft;
}
qla_printk(KERN_INFO, ha, "Allocated (%d KB) for FCE...\n",
@@ -825,6 +806,32 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *ha)
ha->flags.fce_enabled = 1;
ha->fce_dma = tc_dma;
ha->fce = tc;
+try_eft:
+ /* Allocate memory for Extended Trace Buffer. */
+ tc = dma_alloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma,
+ GFP_KERNEL);
+ if (!tc) {
+ qla_printk(KERN_WARNING, ha, "Unable to allocate "
+ "(%d KB) for EFT.\n", EFT_SIZE / 1024);
+ goto cont_alloc;
+ }
+
+ memset(tc, 0, EFT_SIZE);
+ rval = qla2x00_enable_eft_trace(ha, tc_dma, EFT_NUM_BUFFERS);
+ if (rval) {
+ qla_printk(KERN_WARNING, ha, "Unable to initialize "
+ "EFT (%d).\n", rval);
+ dma_free_coherent(&ha->pdev->dev, EFT_SIZE, tc,
+ tc_dma);
+ goto cont_alloc;
+ }
+
+ qla_printk(KERN_INFO, ha, "Allocated (%d KB) for EFT...\n",
+ EFT_SIZE / 1024);
+
+ eft_size = EFT_SIZE;
+ ha->eft_dma = tc_dma;
+ ha->eft = tc;
}
cont_alloc:
req_q_size = ha->request_q_length * sizeof(request_t);
@@ -1501,18 +1508,25 @@ qla2x00_set_model_info(scsi_qla_host_t *ha, uint8_t *model, size_t len, char *de
index = (ha->pdev->subsystem_device & 0xff);
if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
index < QLA_MODEL_NAMES)
- ha->model_desc = qla2x00_model_name[index * 2 + 1];
+ strncpy(ha->model_desc,
+ qla2x00_model_name[index * 2 + 1],
+ sizeof(ha->model_desc) - 1);
} else {
index = (ha->pdev->subsystem_device & 0xff);
if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
index < QLA_MODEL_NAMES) {
strcpy(ha->model_number,
qla2x00_model_name[index * 2]);
- ha->model_desc = qla2x00_model_name[index * 2 + 1];
+ strncpy(ha->model_desc,
+ qla2x00_model_name[index * 2 + 1],
+ sizeof(ha->model_desc) - 1);
} else {
strcpy(ha->model_number, def);
}
}
+ if (IS_FWI2_CAPABLE(ha))
+ qla2xxx_get_vpd_field(ha, "\x82", ha->model_desc,
+ sizeof(ha->model_desc));
}
/* On sparc systems, obtain port and node WWN from firmware
@@ -1864,12 +1878,11 @@ qla2x00_rport_del(void *data)
{
fc_port_t *fcport = data;
struct fc_rport *rport;
- unsigned long flags;
- spin_lock_irqsave(&fcport->rport_lock, flags);
+ spin_lock_irq(fcport->ha->host->host_lock);
rport = fcport->drport;
fcport->drport = NULL;
- spin_unlock_irqrestore(&fcport->rport_lock, flags);
+ spin_unlock_irq(fcport->ha->host->host_lock);
if (rport)
fc_remote_port_delete(rport);
}
@@ -1898,7 +1911,6 @@ qla2x00_alloc_fcport(scsi_qla_host_t *ha, gfp_t flags)
atomic_set(&fcport->state, FCS_UNCONFIGURED);
fcport->flags = FCF_RLC_SUPPORT;
fcport->supported_classes = FC_COS_UNSPECIFIED;
- spin_lock_init(&fcport->rport_lock);
return fcport;
}
@@ -2007,8 +2019,10 @@ qla2x00_configure_loop(scsi_qla_host_t *ha)
if (test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags)) {
if (test_bit(LOCAL_LOOP_UPDATE, &save_flags))
set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
- if (test_bit(RSCN_UPDATE, &save_flags))
+ if (test_bit(RSCN_UPDATE, &save_flags)) {
+ ha->flags.rscn_queue_overflow = 1;
set_bit(RSCN_UPDATE, &ha->dpc_flags);
+ }
}
return (rval);
@@ -2243,28 +2257,24 @@ qla2x00_reg_remote_port(scsi_qla_host_t *ha, fc_port_t *fcport)
{
struct fc_rport_identifiers rport_ids;
struct fc_rport *rport;
- unsigned long flags;
if (fcport->drport)
qla2x00_rport_del(fcport);
- if (fcport->rport)
- return;
rport_ids.node_name = wwn_to_u64(fcport->node_name);
rport_ids.port_name = wwn_to_u64(fcport->port_name);
rport_ids.port_id = fcport->d_id.b.domain << 16 |
fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa;
rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
- rport = fc_remote_port_add(ha->host, 0, &rport_ids);
+ fcport->rport = rport = fc_remote_port_add(ha->host, 0, &rport_ids);
if (!rport) {
qla_printk(KERN_WARNING, ha,
"Unable to allocate fc remote port!\n");
return;
}
- spin_lock_irqsave(&fcport->rport_lock, flags);
- fcport->rport = rport;
+ spin_lock_irq(fcport->ha->host->host_lock);
*((fc_port_t **)rport->dd_data) = fcport;
- spin_unlock_irqrestore(&fcport->rport_lock, flags);
+ spin_unlock_irq(fcport->ha->host->host_lock);
rport->supported_classes = fcport->supported_classes;
@@ -2565,7 +2575,8 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *ha, struct list_head *new_fcports)
} else if (qla2x00_gnn_id(ha, swl) != QLA_SUCCESS) {
kfree(swl);
swl = NULL;
- } else if (qla2x00_gfpn_id(ha, swl) == QLA_SUCCESS) {
+ } else if (ql2xiidmaenable &&
+ qla2x00_gfpn_id(ha, swl) == QLA_SUCCESS) {
qla2x00_gpsc(ha, swl);
}
}
@@ -3220,7 +3231,8 @@ qla2x00_update_fcports(scsi_qla_host_t *ha)
/* Go with deferred removal of rport references. */
list_for_each_entry(fcport, &ha->fcports, list)
- if (fcport->drport)
+ if (fcport->drport &&
+ atomic_read(&fcport->state) != FCS_UNCONFIGURED)
qla2x00_rport_del(fcport);
}
@@ -3243,6 +3255,7 @@ qla2x00_abort_isp(scsi_qla_host_t *ha)
if (ha->flags.online) {
ha->flags.online = 0;
clear_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
+ ha->qla_stats.total_isp_aborts++;
qla_printk(KERN_INFO, ha,
"Performing ISP error recovery - ha= %p.\n", ha);
@@ -3283,17 +3296,6 @@ qla2x00_abort_isp(scsi_qla_host_t *ha)
ha->isp_abort_cnt = 0;
clear_bit(ISP_ABORT_RETRY, &ha->dpc_flags);
- if (ha->eft) {
- memset(ha->eft, 0, EFT_SIZE);
- rval = qla2x00_enable_eft_trace(ha,
- ha->eft_dma, EFT_NUM_BUFFERS);
- if (rval) {
- qla_printk(KERN_WARNING, ha,
- "Unable to reinitialize EFT "
- "(%d).\n", rval);
- }
- }
-
if (ha->fce) {
ha->flags.fce_enabled = 1;
memset(ha->fce, 0,
@@ -3308,6 +3310,17 @@ qla2x00_abort_isp(scsi_qla_host_t *ha)
ha->flags.fce_enabled = 0;
}
}
+
+ if (ha->eft) {
+ memset(ha->eft, 0, EFT_SIZE);
+ rval = qla2x00_enable_eft_trace(ha,
+ ha->eft_dma, EFT_NUM_BUFFERS);
+ if (rval) {
+ qla_printk(KERN_WARNING, ha,
+ "Unable to reinitialize EFT "
+ "(%d).\n", rval);
+ }
+ }
} else { /* failed the ISP abort */
ha->flags.online = 1;
if (test_bit(ISP_ABORT_RETRY, &ha->dpc_flags)) {
@@ -4026,8 +4039,8 @@ qla2x00_try_to_stop_firmware(scsi_qla_host_t *ha)
ret = qla2x00_stop_firmware(ha);
for (retries = 5; ret != QLA_SUCCESS && ret != QLA_FUNCTION_TIMEOUT &&
retries ; retries--) {
- qla2x00_reset_chip(ha);
- if (qla2x00_chip_diag(ha) != QLA_SUCCESS)
+ ha->isp_ops->reset_chip(ha);
+ if (ha->isp_ops->chip_diag(ha) != QLA_SUCCESS)
continue;
if (qla2x00_setup_chip(ha) != QLA_SUCCESS)
continue;
@@ -4049,7 +4062,7 @@ qla24xx_configure_vhba(scsi_qla_host_t *ha)
rval = qla2x00_fw_ready(ha->parent);
if (rval == QLA_SUCCESS) {
clear_bit(RESET_MARKER_NEEDED, &ha->dpc_flags);
- qla2x00_marker(ha->parent, 0, 0, MK_SYNC_ALL);
+ qla2x00_marker(ha, 0, 0, MK_SYNC_ALL);
}
ha->flags.management_server_logged_in = 0;
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 5489d502467..d57669aa461 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -454,10 +454,11 @@ qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun,
{
int ret;
unsigned long flags = 0;
+ scsi_qla_host_t *pha = to_qla_parent(ha);
- spin_lock_irqsave(&ha->hardware_lock, flags);
+ spin_lock_irqsave(&pha->hardware_lock, flags);
ret = __qla2x00_marker(ha, loop_id, lun, type);
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ spin_unlock_irqrestore(&pha->hardware_lock, flags);
return (ret);
}
@@ -672,7 +673,7 @@ qla24xx_start_scsi(srb_t *sp)
{
int ret, nseg;
unsigned long flags;
- scsi_qla_host_t *ha;
+ scsi_qla_host_t *ha, *pha;
struct scsi_cmnd *cmd;
uint32_t *clr_ptr;
uint32_t index;
@@ -686,6 +687,7 @@ qla24xx_start_scsi(srb_t *sp)
/* Setup device pointers. */
ret = 0;
ha = sp->ha;
+ pha = to_qla_parent(ha);
reg = &ha->iobase->isp24;
cmd = sp->cmd;
/* So we know we haven't pci_map'ed anything yet */
@@ -700,7 +702,7 @@ qla24xx_start_scsi(srb_t *sp)
}
/* Acquire ring specific lock */
- spin_lock_irqsave(&ha->hardware_lock, flags);
+ spin_lock_irqsave(&pha->hardware_lock, flags);
/* Check for room in outstanding command list. */
handle = ha->current_outstanding_cmd;
@@ -795,14 +797,14 @@ qla24xx_start_scsi(srb_t *sp)
ha->response_ring_ptr->signature != RESPONSE_PROCESSED)
qla24xx_process_response_queue(ha);
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ spin_unlock_irqrestore(&pha->hardware_lock, flags);
return QLA_SUCCESS;
queuing_error:
if (tot_dsds)
scsi_dma_unmap(cmd);
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ spin_unlock_irqrestore(&pha->hardware_lock, flags);
return QLA_FUNCTION_FAILED;
}
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index ec63b79f900..874d802edb7 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -542,10 +542,6 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
break;
case MBA_PORT_UPDATE: /* Port database update */
- /* Only handle SCNs for our Vport index. */
- if (ha->parent && ha->vp_idx != (mb[3] & 0xff))
- break;
-
/*
	 * If PORT UPDATE is global (received LIP_OCCURED/LIP_RESET
* event etc. earlier indicating loop is down) then process
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 250d2f60439..bc90d6b8d0a 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -918,6 +918,8 @@ qla2x00_get_adapter_id(scsi_qla_host_t *ha, uint16_t *id, uint8_t *al_pa,
rval = qla2x00_mailbox_command(ha, mcp);
if (mcp->mb[0] == MBS_COMMAND_ERROR)
rval = QLA_COMMAND_ERROR;
+ else if (mcp->mb[0] == MBS_INVALID_COMMAND)
+ rval = QLA_INVALID_COMMAND;
/* Return data. */
*id = mcp->mb[1];
@@ -2161,17 +2163,18 @@ qla24xx_abort_command(scsi_qla_host_t *ha, srb_t *sp)
struct abort_entry_24xx *abt;
dma_addr_t abt_dma;
uint32_t handle;
+ scsi_qla_host_t *pha = to_qla_parent(ha);
DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
fcport = sp->fcport;
- spin_lock_irqsave(&ha->hardware_lock, flags);
+ spin_lock_irqsave(&pha->hardware_lock, flags);
for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) {
- if (ha->outstanding_cmds[handle] == sp)
+ if (pha->outstanding_cmds[handle] == sp)
break;
}
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ spin_unlock_irqrestore(&pha->hardware_lock, flags);
if (handle == MAX_OUTSTANDING_COMMANDS) {
/* Command not found. */
return QLA_FUNCTION_FAILED;
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index 62a3ad6e8ec..50baf6a1d67 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -43,6 +43,7 @@ qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
set_bit(vp_id, ha->vp_idx_map);
ha->num_vhosts++;
+ ha->cur_vport_count++;
vha->vp_idx = vp_id;
list_add_tail(&vha->vp_list, &ha->vp_list);
mutex_unlock(&ha->vport_lock);
@@ -58,6 +59,7 @@ qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
mutex_lock(&ha->vport_lock);
vp_id = vha->vp_idx;
ha->num_vhosts--;
+ ha->cur_vport_count--;
clear_bit(vp_id, ha->vp_idx_map);
list_del(&vha->vp_list);
mutex_unlock(&ha->vport_lock);
@@ -103,8 +105,8 @@ qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha)
"loop_id=0x%04x :%x\n",
vha->host_no, fcport->loop_id, fcport->vp_idx));
- atomic_set(&fcport->state, FCS_DEVICE_DEAD);
qla2x00_mark_device_lost(vha, fcport, 0, 0);
+ atomic_set(&fcport->state, FCS_UNCONFIGURED);
}
}
@@ -276,7 +278,8 @@ qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
clear_bit(RESET_ACTIVE, &vha->dpc_flags);
}
- if (test_and_clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
+ if (atomic_read(&vha->vp_state) == VP_ACTIVE &&
+ test_and_clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))) {
qla2x00_loop_resync(vha);
clear_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags);
@@ -390,7 +393,6 @@ qla24xx_create_vhost(struct fc_vport *fc_vport)
vha->parent = ha;
vha->fc_vport = fc_vport;
vha->device_flags = 0;
- vha->instance = num_hosts;
vha->vp_idx = qla24xx_allocate_vp_id(vha);
if (vha->vp_idx > ha->max_npiv_vports) {
DEBUG15(printk("scsi(%ld): Couldn't allocate vp_id.\n",
@@ -428,7 +430,7 @@ qla24xx_create_vhost(struct fc_vport *fc_vport)
host->max_cmd_len = MAX_CMDSZ;
host->max_channel = MAX_BUSES - 1;
host->max_lun = MAX_LUNS;
- host->unique_id = vha->instance;
+ host->unique_id = host->host_no;
host->max_id = MAX_TARGETS_2200;
host->transportt = qla2xxx_transport_vport_template;
@@ -436,12 +438,6 @@ qla24xx_create_vhost(struct fc_vport *fc_vport)
vha->host_no, vha));
vha->flags.init_done = 1;
- num_hosts++;
-
- mutex_lock(&ha->vport_lock);
- set_bit(vha->vp_idx, ha->vp_idx_map);
- ha->cur_vport_count++;
- mutex_unlock(&ha->vport_lock);
return vha;
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 48eaa3bb543..7c8af7ed2a5 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -27,7 +27,6 @@ char qla2x00_version_str[40];
*/
static struct kmem_cache *srb_cachep;
-int num_hosts;
int ql2xlogintimeout = 20;
module_param(ql2xlogintimeout, int, S_IRUGO|S_IRUSR);
MODULE_PARM_DESC(ql2xlogintimeout,
@@ -87,6 +86,13 @@ MODULE_PARM_DESC(ql2xqfullrampup,
"depth for a device after a queue-full condition has been "
"detected. Default is 120 seconds.");
+int ql2xiidmaenable=1;
+module_param(ql2xiidmaenable, int, S_IRUGO|S_IRUSR);
+MODULE_PARM_DESC(ql2xiidmaenable,
+	"Enables iIDMA settings. "
+ "Default is 1 - perform iIDMA. 0 - no iIDMA.");
+
+
/*
* SCSI host template entry points
*/
@@ -388,7 +394,7 @@ qla2x00_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
}
/* Close window on fcport/rport state-transitioning. */
- if (!*(fc_port_t **)rport->dd_data) {
+ if (fcport->drport) {
cmd->result = DID_IMM_RETRY << 16;
goto qc_fail_command;
}
@@ -443,7 +449,7 @@ qla24xx_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
int rval;
scsi_qla_host_t *pha = to_qla_parent(ha);
- if (unlikely(pci_channel_offline(ha->pdev))) {
+ if (unlikely(pci_channel_offline(pha->pdev))) {
cmd->result = DID_REQUEUE << 16;
goto qc24_fail_command;
}
@@ -455,7 +461,7 @@ qla24xx_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
}
/* Close window on fcport/rport state-transitioning. */
- if (!*(fc_port_t **)rport->dd_data) {
+ if (fcport->drport) {
cmd->result = DID_IMM_RETRY << 16;
goto qc24_fail_command;
}
@@ -617,6 +623,40 @@ qla2x00_wait_for_loop_ready(scsi_qla_host_t *ha)
return (return_status);
}
+void
+qla2x00_abort_fcport_cmds(fc_port_t *fcport)
+{
+ int cnt;
+ unsigned long flags;
+ srb_t *sp;
+ scsi_qla_host_t *ha = fcport->ha;
+ scsi_qla_host_t *pha = to_qla_parent(ha);
+
+ spin_lock_irqsave(&pha->hardware_lock, flags);
+ for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
+ sp = pha->outstanding_cmds[cnt];
+ if (!sp)
+ continue;
+ if (sp->fcport != fcport)
+ continue;
+
+ spin_unlock_irqrestore(&pha->hardware_lock, flags);
+ if (ha->isp_ops->abort_command(ha, sp)) {
+ DEBUG2(qla_printk(KERN_WARNING, ha,
+ "Abort failed -- %lx\n", sp->cmd->serial_number));
+ } else {
+ if (qla2x00_eh_wait_on_command(ha, sp->cmd) !=
+ QLA_SUCCESS)
+ DEBUG2(qla_printk(KERN_WARNING, ha,
+ "Abort failed while waiting -- %lx\n",
+ sp->cmd->serial_number));
+
+ }
+ spin_lock_irqsave(&pha->hardware_lock, flags);
+ }
+ spin_unlock_irqrestore(&pha->hardware_lock, flags);
+}
+
static void
qla2x00_block_error_handler(struct scsi_cmnd *cmnd)
{
@@ -1073,7 +1113,7 @@ qla2xxx_slave_configure(struct scsi_device *sdev)
else
scsi_deactivate_tcq(sdev, ha->max_q_depth);
- rport->dev_loss_tmo = ha->port_down_retry_count + 5;
+ rport->dev_loss_tmo = ha->port_down_retry_count;
return 0;
}
@@ -1629,9 +1669,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
}
host->can_queue = ha->request_q_length + 128;
- /* load the F/W, read paramaters, and init the H/W */
- ha->instance = num_hosts;
-
mutex_init(&ha->vport_lock);
init_completion(&ha->mbx_cmd_comp);
complete(&ha->mbx_cmd_comp);
@@ -1679,7 +1716,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
host->this_id = 255;
host->cmd_per_lun = 3;
- host->unique_id = ha->instance;
+ host->unique_id = host->host_no;
host->max_cmd_len = MAX_CMDSZ;
host->max_channel = MAX_BUSES - 1;
host->max_lun = MAX_LUNS;
@@ -1700,8 +1737,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->flags.init_done = 1;
ha->flags.online = 1;
- num_hosts++;
-
ret = scsi_add_host(host, &pdev->dev);
if (ret)
goto probe_failed;
@@ -1813,27 +1848,21 @@ static inline void
qla2x00_schedule_rport_del(struct scsi_qla_host *ha, fc_port_t *fcport,
int defer)
{
- unsigned long flags;
struct fc_rport *rport;
+ scsi_qla_host_t *pha = to_qla_parent(ha);
if (!fcport->rport)
return;
rport = fcport->rport;
if (defer) {
- spin_lock_irqsave(&fcport->rport_lock, flags);
+ spin_lock_irq(ha->host->host_lock);
fcport->drport = rport;
- fcport->rport = NULL;
- *(fc_port_t **)rport->dd_data = NULL;
- spin_unlock_irqrestore(&fcport->rport_lock, flags);
- set_bit(FCPORT_UPDATE_NEEDED, &ha->dpc_flags);
- } else {
- spin_lock_irqsave(&fcport->rport_lock, flags);
- fcport->rport = NULL;
- *(fc_port_t **)rport->dd_data = NULL;
- spin_unlock_irqrestore(&fcport->rport_lock, flags);
+ spin_unlock_irq(ha->host->host_lock);
+ set_bit(FCPORT_UPDATE_NEEDED, &pha->dpc_flags);
+ qla2xxx_wake_dpc(pha);
+ } else
fc_remote_port_delete(rport);
- }
}
/*
@@ -1903,7 +1932,7 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *ha, int defer)
scsi_qla_host_t *pha = to_qla_parent(ha);
list_for_each_entry(fcport, &pha->fcports, list) {
- if (ha->vp_idx != 0 && ha->vp_idx != fcport->vp_idx)
+ if (ha->vp_idx != fcport->vp_idx)
continue;
/*
* No point in marking the device as lost, if the device is
@@ -1911,17 +1940,10 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *ha, int defer)
*/
if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD)
continue;
- if (atomic_read(&fcport->state) == FCS_ONLINE) {
- if (defer)
- qla2x00_schedule_rport_del(ha, fcport, defer);
- else if (ha->vp_idx == fcport->vp_idx)
- qla2x00_schedule_rport_del(ha, fcport, defer);
- }
+ if (atomic_read(&fcport->state) == FCS_ONLINE)
+ qla2x00_schedule_rport_del(ha, fcport, defer);
atomic_set(&fcport->state, FCS_DEVICE_LOST);
}
-
- if (defer)
- qla2xxx_wake_dpc(ha);
}
/*
@@ -2156,7 +2178,7 @@ qla2x00_alloc_work(struct scsi_qla_host *ha, enum qla_work_type type,
static int
qla2x00_post_work(struct scsi_qla_host *ha, struct qla_work_evt *e, int locked)
{
- unsigned long flags;
+ unsigned long uninitialized_var(flags);
scsi_qla_host_t *pha = to_qla_parent(ha);
if (!locked)
@@ -2313,8 +2335,10 @@ qla2x00_do_dpc(void *data)
ha->host_no));
}
- if (test_and_clear_bit(FCPORT_UPDATE_NEEDED, &ha->dpc_flags))
+ if (test_bit(FCPORT_UPDATE_NEEDED, &ha->dpc_flags)) {
qla2x00_update_fcports(ha);
+ clear_bit(FCPORT_UPDATE_NEEDED, &ha->dpc_flags);
+ }
if (test_and_clear_bit(RESET_MARKER_NEEDED, &ha->dpc_flags) &&
(!(test_and_set_bit(RESET_ACTIVE, &ha->dpc_flags)))) {
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index 1728ab3ccb2..1bca7447493 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -869,11 +869,9 @@ qla24xx_write_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr,
uint32_t i;
uint32_t *dwptr;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
- unsigned long flags;
ret = QLA_SUCCESS;
- spin_lock_irqsave(&ha->hardware_lock, flags);
/* Enable flash write. */
WRT_REG_DWORD(&reg->ctrl_status,
RD_REG_DWORD(&reg->ctrl_status) | CSRX_FLASH_ENABLE);
@@ -907,7 +905,6 @@ qla24xx_write_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr,
WRT_REG_DWORD(&reg->ctrl_status,
RD_REG_DWORD(&reg->ctrl_status) & ~CSRX_FLASH_ENABLE);
RD_REG_DWORD(&reg->ctrl_status); /* PCI Posting. */
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
return ret;
}
@@ -2306,6 +2303,51 @@ qla24xx_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
}
static int
+qla2xxx_is_vpd_valid(uint8_t *pos, uint8_t *end)
+{
+ if (pos >= end || *pos != 0x82)
+ return 0;
+
+ pos += 3 + pos[1];
+ if (pos >= end || *pos != 0x90)
+ return 0;
+
+ pos += 3 + pos[1];
+ if (pos >= end || *pos != 0x78)
+ return 0;
+
+ return 1;
+}
+
+int
+qla2xxx_get_vpd_field(scsi_qla_host_t *ha, char *key, char *str, size_t size)
+{
+ uint8_t *pos = ha->vpd;
+ uint8_t *end = pos + ha->vpd_size;
+ int len = 0;
+
+ if (!IS_FWI2_CAPABLE(ha) || !qla2xxx_is_vpd_valid(pos, end))
+ return 0;
+
+ while (pos < end && *pos != 0x78) {
+ len = (*pos == 0x82) ? pos[1] : pos[2];
+
+ if (!strncmp(pos, key, strlen(key)))
+ break;
+
+ if (*pos != 0x90 && *pos != 0x91)
+ pos += len;
+
+ pos += 3;
+ }
+
+ if (pos < end - len && *pos != 0x78)
+ return snprintf(str, size, "%.*s", len, pos + 3);
+
+ return 0;
+}
+
+static int
qla2xxx_hw_event_store(scsi_qla_host_t *ha, uint32_t *fdata)
{
uint32_t d[2], faddr;
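
For context on the new qla2xxx_get_vpd_field() helper (used earlier with the "\x82" key to pull the model description): PCI VPD is a sequence of large-resource tags, each a tag byte, a 16-bit length and a payload, terminated by the 0x78 end tag. The stand-alone sketch below walks a made-up VPD image that way; it is only an illustration of the layout, not a copy of the driver's parser (which also special-cases the 0x90/0x91 read-only/read-write sections).

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Made-up VPD image: large-resource tags 0x82 (product string),
	 * 0x90 (read-only fields) and the 0x78 end marker. */
	uint8_t vpd[] = {
		0x82, 0x04, 0x00, 'Q', 'L', 'E', 'x',	/* tag, 16-bit len, data */
		0x90, 0x00, 0x00,			/* empty read-only section */
		0x78					/* end tag */
	};
	uint8_t *pos = vpd, *end = vpd + sizeof(vpd);

	while (pos < end && *pos != 0x78) {
		int len = pos[1] | (pos[2] << 8);	/* little-endian length */

		if (*pos == 0x82)
			printf("product: %.*s\n", len, (char *)(pos + 3));
		pos += 3 + len;				/* 3-byte header + payload */
	}
	return 0;
}
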
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index d058c8862b3..676c390db35 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "8.02.01-k4"
+#define QLA2XXX_VERSION "8.02.01-k6"
#define QLA_DRIVER_MAJOR_VER 8
#define QLA_DRIVER_MINOR_VER 2
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 5822dd59582..88bebb13bc5 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -46,6 +46,8 @@ MODULE_PARM_DESC(ql4xextended_error_logging,
int ql4_mod_unload = 0;
+#define QL4_DEF_QDEPTH 32
+
/*
* SCSI host template entry points
*/
@@ -1387,7 +1389,7 @@ static int qla4xxx_slave_alloc(struct scsi_device *sdev)
sdev->hostdata = ddb;
sdev->tagged_supported = 1;
- scsi_activate_tcq(sdev, sdev->host->can_queue);
+ scsi_activate_tcq(sdev, QL4_DEF_QDEPTH);
return 0;
}
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 36c92f961e1..ee6be596503 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -197,11 +197,43 @@ static void
scsi_pool_free_command(struct scsi_host_cmd_pool *pool,
struct scsi_cmnd *cmd)
{
+ if (cmd->prot_sdb)
+ kmem_cache_free(scsi_sdb_cache, cmd->prot_sdb);
+
kmem_cache_free(pool->sense_slab, cmd->sense_buffer);
kmem_cache_free(pool->cmd_slab, cmd);
}
/**
+ * scsi_host_alloc_command - internal function to allocate command
+ * @shost: SCSI host whose pool to allocate from
+ * @gfp_mask: mask for the allocation
+ *
+ * Returns a fully allocated command with sense buffer and protection
+ * data buffer (where applicable) or NULL on failure
+ */
+static struct scsi_cmnd *
+scsi_host_alloc_command(struct Scsi_Host *shost, gfp_t gfp_mask)
+{
+ struct scsi_cmnd *cmd;
+
+ cmd = scsi_pool_alloc_command(shost->cmd_pool, gfp_mask);
+ if (!cmd)
+ return NULL;
+
+ if (scsi_host_get_prot(shost) >= SHOST_DIX_TYPE0_PROTECTION) {
+ cmd->prot_sdb = kmem_cache_zalloc(scsi_sdb_cache, gfp_mask);
+
+ if (!cmd->prot_sdb) {
+ scsi_pool_free_command(shost->cmd_pool, cmd);
+ return NULL;
+ }
+ }
+
+ return cmd;
+}
+
+/**
* __scsi_get_command - Allocate a struct scsi_cmnd
* @shost: host to transmit command
* @gfp_mask: allocation mask
@@ -214,7 +246,7 @@ struct scsi_cmnd *__scsi_get_command(struct Scsi_Host *shost, gfp_t gfp_mask)
struct scsi_cmnd *cmd;
unsigned char *buf;
- cmd = scsi_pool_alloc_command(shost->cmd_pool, gfp_mask);
+ cmd = scsi_host_alloc_command(shost, gfp_mask);
if (unlikely(!cmd)) {
unsigned long flags;
@@ -457,7 +489,7 @@ int scsi_setup_command_freelist(struct Scsi_Host *shost)
/*
* Get one backup command for this host.
*/
- cmd = scsi_pool_alloc_command(shost->cmd_pool, gfp_mask);
+ cmd = scsi_host_alloc_command(shost, gfp_mask);
if (!cmd) {
scsi_put_host_cmd_pool(gfp_mask);
shost->cmd_pool = NULL;
@@ -902,11 +934,20 @@ void scsi_adjust_queue_depth(struct scsi_device *sdev, int tagged, int tags)
spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
- /* Check to see if the queue is managed by the block layer.
- * If it is, and we fail to adjust the depth, exit. */
- if (blk_queue_tagged(sdev->request_queue) &&
- blk_queue_resize_tags(sdev->request_queue, tags) != 0)
- goto out;
+ /*
+ * Check to see if the queue is managed by the block layer.
+ * If it is, and we fail to adjust the depth, exit.
+ *
+	 * Do not resize the tag map if it is a host-wide shared bqt,
+	 * because the size should be the host's can_queue. If there
+	 * is more IO than the LLD's can_queue (so there are not enough
+ * tags) request_fn's host queue ready check will handle it.
+ */
+ if (!sdev->host->bqt) {
+ if (blk_queue_tagged(sdev->request_queue) &&
+ blk_queue_resize_tags(sdev->request_queue, tags) != 0)
+ goto out;
+ }
sdev->queue_depth = tags;
switch (tagged) {
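
The bqt check above exists because some LLDs share one block-layer tag table across every device on the host. Below is a hedged, kernel-style sketch of how a driver opts into that mode; the callback name is made up, and it assumes the scsi_init_shared_tag_map()/scsi_activate_tcq() helpers from scsi_tcq.h.

#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>

/* Hypothetical LLD slave_alloc: size the shared map once, to the host's
 * can_queue, so per-LUN depth changes never need to resize it. */
static int example_slave_alloc(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;

	if (!shost->bqt &&
	    scsi_init_shared_tag_map(shost, shost->can_queue))
		return -ENOMEM;

	/* Per-device queue depth is still tunable; the shared map is not. */
	scsi_activate_tcq(sdev, shost->cmd_per_lun);
	return 0;
}
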
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 01d11a01ffb..27c633f5579 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -1753,7 +1753,7 @@ static struct sdebug_dev_info * devInfoReg(struct scsi_device * sdev)
open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
if (!open_devip) {
printk(KERN_ERR "%s: out of memory at line %d\n",
- __FUNCTION__, __LINE__);
+ __func__, __LINE__);
return NULL;
}
}
@@ -2656,7 +2656,7 @@ static int sdebug_add_adapter(void)
sdbg_host = kzalloc(sizeof(*sdbg_host),GFP_KERNEL);
if (NULL == sdbg_host) {
printk(KERN_ERR "%s: out of memory at line %d\n",
- __FUNCTION__, __LINE__);
+ __func__, __LINE__);
return -ENOMEM;
}
@@ -2667,7 +2667,7 @@ static int sdebug_add_adapter(void)
sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
if (!sdbg_devinfo) {
printk(KERN_ERR "%s: out of memory at line %d\n",
- __FUNCTION__, __LINE__);
+ __func__, __LINE__);
error = -ENOMEM;
goto clean;
}
@@ -2987,7 +2987,7 @@ static int sdebug_driver_probe(struct device * dev)
hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
if (NULL == hpnt) {
- printk(KERN_ERR "%s: scsi_register failed\n", __FUNCTION__);
+ printk(KERN_ERR "%s: scsi_register failed\n", __func__);
error = -ENODEV;
return error;
}
@@ -3002,7 +3002,7 @@ static int sdebug_driver_probe(struct device * dev)
error = scsi_add_host(hpnt, &sdbg_host->dev);
if (error) {
- printk(KERN_ERR "%s: scsi_add_host failed\n", __FUNCTION__);
+ printk(KERN_ERR "%s: scsi_add_host failed\n", __func__);
error = -ENODEV;
scsi_host_put(hpnt);
} else
@@ -3021,7 +3021,7 @@ static int sdebug_driver_remove(struct device * dev)
if (!sdbg_host) {
printk(KERN_ERR "%s: Unable to locate host info\n",
- __FUNCTION__);
+ __func__);
return -ENODEV;
}
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index a235802f298..4969e4ec75e 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -272,7 +272,7 @@ static void scsi_strcpy_devinfo(char *name, char *to, size_t to_length,
}
if (from_length > to_length)
printk(KERN_WARNING "%s: %s string '%s' is too long\n",
- __FUNCTION__, name, from);
+ __func__, name, from);
}
/**
@@ -298,7 +298,7 @@ static int scsi_dev_info_list_add(int compatible, char *vendor, char *model,
devinfo = kmalloc(sizeof(*devinfo), GFP_KERNEL);
if (!devinfo) {
- printk(KERN_ERR "%s: no memory\n", __FUNCTION__);
+ printk(KERN_ERR "%s: no memory\n", __func__);
return -ENOMEM;
}
@@ -363,7 +363,7 @@ static int scsi_dev_info_list_add_str(char *dev_list)
strflags = strsep(&next, next_check);
if (!model || !strflags) {
printk(KERN_ERR "%s: bad dev info string '%s' '%s'"
- " '%s'\n", __FUNCTION__, vendor, model,
+ " '%s'\n", __func__, vendor, model,
strflags);
res = -EINVAL;
} else
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 006a95916f7..880051c89bd 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -139,7 +139,7 @@ void scsi_add_timer(struct scsi_cmnd *scmd, int timeout,
scmd->eh_timeout.function = (void (*)(unsigned long)) complete;
SCSI_LOG_ERROR_RECOVERY(5, printk("%s: scmd: %p, time:"
- " %d, (%p)\n", __FUNCTION__,
+ " %d, (%p)\n", __func__,
scmd, timeout, complete));
add_timer(&scmd->eh_timeout);
@@ -163,7 +163,7 @@ int scsi_delete_timer(struct scsi_cmnd *scmd)
rtn = del_timer(&scmd->eh_timeout);
SCSI_LOG_ERROR_RECOVERY(5, printk("%s: scmd: %p,"
- " rtn: %d\n", __FUNCTION__,
+ " rtn: %d\n", __func__,
scmd, rtn));
scmd->eh_timeout.data = (unsigned long)NULL;
@@ -233,7 +233,7 @@ int scsi_block_when_processing_errors(struct scsi_device *sdev)
online = scsi_device_online(sdev);
- SCSI_LOG_ERROR_RECOVERY(5, printk("%s: rtn: %d\n", __FUNCTION__,
+ SCSI_LOG_ERROR_RECOVERY(5, printk("%s: rtn: %d\n", __func__,
online));
return online;
@@ -271,7 +271,7 @@ static inline void scsi_eh_prt_fail_stats(struct Scsi_Host *shost,
SCSI_LOG_ERROR_RECOVERY(3,
sdev_printk(KERN_INFO, sdev,
"%s: cmds failed: %d, cancel: %d\n",
- __FUNCTION__, cmd_failed,
+ __func__, cmd_failed,
cmd_cancel));
cmd_cancel = 0;
cmd_failed = 0;
@@ -344,6 +344,9 @@ static int scsi_check_sense(struct scsi_cmnd *scmd)
return /* soft_error */ SUCCESS;
case ABORTED_COMMAND:
+ if (sshdr.asc == 0x10) /* DIF */
+ return SUCCESS;
+
return NEEDS_RETRY;
case NOT_READY:
case UNIT_ATTENTION:
@@ -470,7 +473,7 @@ static void scsi_eh_done(struct scsi_cmnd *scmd)
SCSI_LOG_ERROR_RECOVERY(3,
printk("%s scmd: %p result: %x\n",
- __FUNCTION__, scmd, scmd->result));
+ __func__, scmd, scmd->result));
eh_action = scmd->device->host->eh_action;
if (eh_action)
@@ -487,7 +490,7 @@ static int scsi_try_host_reset(struct scsi_cmnd *scmd)
int rtn;
SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Snd Host RST\n",
- __FUNCTION__));
+ __func__));
if (!scmd->device->host->hostt->eh_host_reset_handler)
return FAILED;
@@ -516,7 +519,7 @@ static int scsi_try_bus_reset(struct scsi_cmnd *scmd)
int rtn;
SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Snd Bus RST\n",
- __FUNCTION__));
+ __func__));
if (!scmd->device->host->hostt->eh_bus_reset_handler)
return FAILED;
@@ -664,7 +667,10 @@ void scsi_eh_prep_cmnd(struct scsi_cmnd *scmd, struct scsi_eh_save *ses,
ses->sdb = scmd->sdb;
ses->next_rq = scmd->request->next_rq;
ses->result = scmd->result;
+ ses->underflow = scmd->underflow;
+ ses->prot_op = scmd->prot_op;
+ scmd->prot_op = SCSI_PROT_NORMAL;
scmd->cmnd = ses->eh_cmnd;
memset(scmd->cmnd, 0, BLK_MAX_CDB);
memset(&scmd->sdb, 0, sizeof(scmd->sdb));
@@ -722,6 +728,8 @@ void scsi_eh_restore_cmnd(struct scsi_cmnd* scmd, struct scsi_eh_save *ses)
scmd->sdb = ses->sdb;
scmd->request->next_rq = ses->next_rq;
scmd->result = ses->result;
+ scmd->underflow = ses->underflow;
+ scmd->prot_op = ses->prot_op;
}
EXPORT_SYMBOL(scsi_eh_restore_cmnd);
@@ -766,7 +774,7 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd,
SCSI_LOG_ERROR_RECOVERY(3,
printk("%s: scmd: %p, timeleft: %ld\n",
- __FUNCTION__, scmd, timeleft));
+ __func__, scmd, timeleft));
/*
* If there is time left scsi_eh_done got called, and we will
@@ -778,7 +786,7 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd,
rtn = scsi_eh_completed_normally(scmd);
SCSI_LOG_ERROR_RECOVERY(3,
printk("%s: scsi_eh_completed_normally %x\n",
- __FUNCTION__, rtn));
+ __func__, rtn));
switch (rtn) {
case SUCCESS:
@@ -913,7 +921,7 @@ retry_tur:
rtn = scsi_send_eh_cmnd(scmd, tur_command, 6, SENSE_TIMEOUT, 0);
SCSI_LOG_ERROR_RECOVERY(3, printk("%s: scmd %p rtn %x\n",
- __FUNCTION__, scmd, rtn));
+ __func__, scmd, rtn));
switch (rtn) {
case NEEDS_RETRY:
@@ -1296,7 +1304,7 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd)
if (!scsi_device_online(scmd->device)) {
SCSI_LOG_ERROR_RECOVERY(5, printk("%s: device offline - report"
" as SUCCESS\n",
- __FUNCTION__));
+ __func__));
return SUCCESS;
}
@@ -1511,7 +1519,7 @@ static void scsi_restart_operations(struct Scsi_Host *shost)
* ioctls to queued block devices.
*/
SCSI_LOG_ERROR_RECOVERY(3, printk("%s: waking up host to restart\n",
- __FUNCTION__));
+ __func__));
spin_lock_irqsave(shost->host_lock, flags);
if (scsi_host_set_state(shost, SHOST_RUNNING))
@@ -1835,7 +1843,7 @@ scsi_reset_provider(struct scsi_device *dev, int flag)
*/
SCSI_LOG_ERROR_RECOVERY(3,
printk("%s: waking up host to restart after TMF\n",
- __FUNCTION__));
+ __func__));
wake_up(&shost->host_wait);
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 88d1b5f44e5..ff5d56b3ee4 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -65,7 +65,7 @@ static struct scsi_host_sg_pool scsi_sg_pools[] = {
};
#undef SP
-static struct kmem_cache *scsi_sdb_cache;
+struct kmem_cache *scsi_sdb_cache;
static void scsi_run_queue(struct request_queue *q);
@@ -787,6 +787,9 @@ void scsi_release_buffers(struct scsi_cmnd *cmd)
kmem_cache_free(scsi_sdb_cache, bidi_sdb);
cmd->request->next_rq->special = NULL;
}
+
+ if (scsi_prot_sg_count(cmd))
+ scsi_free_sgtable(cmd->prot_sdb);
}
EXPORT_SYMBOL(scsi_release_buffers);
@@ -947,9 +950,14 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
* 6-byte command.
*/
scsi_requeue_command(q, cmd);
- return;
- } else {
+ } else if (sshdr.asc == 0x10) /* DIX */
+ scsi_end_request(cmd, -EIO, this_count, 0);
+ else
scsi_end_request(cmd, -EIO, this_count, 1);
+ return;
+ case ABORTED_COMMAND:
+ if (sshdr.asc == 0x10) { /* DIF */
+ scsi_end_request(cmd, -EIO, this_count, 0);
return;
}
break;
@@ -1072,6 +1080,26 @@ int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask)
goto err_exit;
}
+ if (blk_integrity_rq(cmd->request)) {
+ struct scsi_data_buffer *prot_sdb = cmd->prot_sdb;
+ int ivecs, count;
+
+ BUG_ON(prot_sdb == NULL);
+ ivecs = blk_rq_count_integrity_sg(cmd->request);
+
+ if (scsi_alloc_sgtable(prot_sdb, ivecs, gfp_mask)) {
+ error = BLKPREP_DEFER;
+ goto err_exit;
+ }
+
+ count = blk_rq_map_integrity_sg(cmd->request,
+ prot_sdb->table.sgl);
+ BUG_ON(unlikely(count > ivecs));
+
+ cmd->prot_sdb = prot_sdb;
+ cmd->prot_sdb->table.nents = count;
+ }
+
return BLKPREP_OK ;
err_exit:
@@ -1367,7 +1395,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
if (unlikely(cmd == NULL)) {
printk(KERN_CRIT "impossible request in %s.\n",
- __FUNCTION__);
+ __func__);
BUG();
}
@@ -1491,12 +1519,27 @@ static void scsi_request_fn(struct request_queue *q)
printk(KERN_CRIT "impossible request in %s.\n"
"please mail a stack trace to "
"linux-scsi@vger.kernel.org\n",
- __FUNCTION__);
+ __func__);
blk_dump_rq_flags(req, "foo");
BUG();
}
spin_lock(shost->host_lock);
+ /*
+ * We hit this when the driver is using a host wide
+ * tag map. For device level tag maps the queue_depth check
+ * in the device ready fn would prevent us from trying
+ * to allocate a tag. Since the map is a shared host resource
+ * we add the dev to the starved list so it eventually gets
+ * a run when a tag is freed.
+ */
+ if (blk_queue_tagged(q) && !blk_rq_tagged(req)) {
+ if (list_empty(&sdev->starved_entry))
+ list_add_tail(&sdev->starved_entry,
+ &shost->starved_list);
+ goto not_ready;
+ }
+
if (!scsi_host_queue_ready(q, shost, sdev))
goto not_ready;
if (scsi_target(sdev)->single_lun) {
@@ -2486,7 +2529,7 @@ void *scsi_kmap_atomic_sg(struct scatterlist *sgl, int sg_count,
if (unlikely(i == sg_count)) {
printk(KERN_ERR "%s: Bytes in sg: %zu, requested offset %zu, "
"elements %d\n",
- __FUNCTION__, sg_len, *offset, sg_count);
+ __func__, sg_len, *offset, sg_count);
WARN_ON(1);
return NULL;
}
diff --git a/drivers/scsi/scsi_netlink.c b/drivers/scsi/scsi_netlink.c
index 370c78cc1cb..ae7ed9a2266 100644
--- a/drivers/scsi/scsi_netlink.c
+++ b/drivers/scsi/scsi_netlink.c
@@ -55,7 +55,7 @@ scsi_nl_rcv_msg(struct sk_buff *skb)
if ((nlh->nlmsg_len < (sizeof(*nlh) + sizeof(*hdr))) ||
(skb->len < nlh->nlmsg_len)) {
printk(KERN_WARNING "%s: discarding partial skb\n",
- __FUNCTION__);
+ __func__);
return;
}
@@ -82,7 +82,7 @@ scsi_nl_rcv_msg(struct sk_buff *skb)
if (nlh->nlmsg_len < (sizeof(*nlh) + hdr->msglen)) {
printk(KERN_WARNING "%s: discarding partial message\n",
- __FUNCTION__);
+ __func__);
return;
}
@@ -139,7 +139,7 @@ scsi_netlink_init(void)
error = netlink_register_notifier(&scsi_netlink_notifier);
if (error) {
printk(KERN_ERR "%s: register of event handler failed - %d\n",
- __FUNCTION__, error);
+ __func__, error);
return;
}
@@ -148,7 +148,7 @@ scsi_netlink_init(void)
THIS_MODULE);
if (!scsi_nl_sock) {
		printk(KERN_ERR "%s: register of receive handler failed\n",
- __FUNCTION__);
+ __func__);
netlink_unregister_notifier(&scsi_netlink_notifier);
}
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index b33e72516ef..79f0f751120 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -77,6 +77,7 @@ extern void scsi_exit_queue(void);
struct request_queue;
struct request;
extern int scsi_prep_fn(struct request_queue *, struct request *);
+extern struct kmem_cache *scsi_sdb_cache;
/* scsi_proc.c */
#ifdef CONFIG_SCSI_PROC_FS
diff --git a/drivers/scsi/scsi_proc.c b/drivers/scsi/scsi_proc.c
index e4a0d2f9b35..c6a904a45bf 100644
--- a/drivers/scsi/scsi_proc.c
+++ b/drivers/scsi/scsi_proc.c
@@ -114,7 +114,7 @@ void scsi_proc_hostdir_add(struct scsi_host_template *sht)
sht->proc_dir = proc_mkdir(sht->proc_name, proc_scsi);
if (!sht->proc_dir)
printk(KERN_ERR "%s: proc_mkdir failed for %s\n",
- __FUNCTION__, sht->proc_name);
+ __func__, sht->proc_name);
else
sht->proc_dir->owner = sht->module;
}
@@ -157,7 +157,7 @@ void scsi_proc_host_add(struct Scsi_Host *shost)
sht->proc_dir, proc_scsi_read, shost);
if (!p) {
printk(KERN_ERR "%s: Failed to register host %d in"
- "%s\n", __FUNCTION__, shost->host_no,
+ "%s\n", __func__, shost->host_no,
sht->proc_name);
return;
}
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 196fe3af0d5..84b4879cff1 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -318,7 +318,7 @@ out_device_destroy:
put_device(&sdev->sdev_gendev);
out:
if (display_failure_msg)
- printk(ALLOC_FAILURE_MSG, __FUNCTION__);
+ printk(ALLOC_FAILURE_MSG, __func__);
return NULL;
}
@@ -404,7 +404,7 @@ static struct scsi_target *scsi_alloc_target(struct device *parent,
starget = kzalloc(size, GFP_KERNEL);
if (!starget) {
- printk(KERN_ERR "%s: allocation failure\n", __FUNCTION__);
+ printk(KERN_ERR "%s: allocation failure\n", __func__);
return NULL;
}
dev = &starget->dev;
@@ -1337,7 +1337,7 @@ static int scsi_report_lun_scan(struct scsi_target *starget, int bflags,
lun_data = kmalloc(length, GFP_ATOMIC |
(sdev->host->unchecked_isa_dma ? __GFP_DMA : 0));
if (!lun_data) {
- printk(ALLOC_FAILURE_MSG, __FUNCTION__);
+ printk(ALLOC_FAILURE_MSG, __func__);
goto out;
}
@@ -1649,7 +1649,7 @@ int scsi_scan_host_selected(struct Scsi_Host *shost, unsigned int channel,
{
SCSI_LOG_SCAN_BUS(3, shost_printk (KERN_INFO, shost,
"%s: <%u:%u:%u>\n",
- __FUNCTION__, channel, id, lun));
+ __func__, channel, id, lun));
if (((channel != SCAN_WILD_CARD) && (channel > shost->max_channel)) ||
((id != SCAN_WILD_CARD) && (id >= shost->max_id)) ||
@@ -1703,7 +1703,7 @@ static struct async_scan_data *scsi_prep_async_scan(struct Scsi_Host *shost)
return NULL;
if (shost->async_scan) {
- printk("%s called twice for host %d", __FUNCTION__,
+ printk("%s called twice for host %d", __func__,
shost->host_no);
dump_stack();
return NULL;
@@ -1757,9 +1757,10 @@ static void scsi_finish_async_scan(struct async_scan_data *data)
mutex_lock(&shost->scan_mutex);
if (!shost->async_scan) {
- printk("%s called twice for host %d", __FUNCTION__,
+ printk("%s called twice for host %d", __func__,
shost->host_no);
dump_stack();
+ mutex_unlock(&shost->scan_mutex);
return;
}
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index b6e56105977..ab3c71869be 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -249,6 +249,8 @@ shost_rd_attr(cmd_per_lun, "%hd\n");
shost_rd_attr(can_queue, "%hd\n");
shost_rd_attr(sg_tablesize, "%hu\n");
shost_rd_attr(unchecked_isa_dma, "%d\n");
+shost_rd_attr(prot_capabilities, "%u\n");
+shost_rd_attr(prot_guard_type, "%hd\n");
shost_rd_attr2(proc_name, hostt->proc_name, "%s\n");
static struct attribute *scsi_sysfs_shost_attrs[] = {
@@ -263,6 +265,8 @@ static struct attribute *scsi_sysfs_shost_attrs[] = {
&dev_attr_hstate.attr,
&dev_attr_supported_mode.attr,
&dev_attr_active_mode.attr,
+ &dev_attr_prot_capabilities.attr,
+ &dev_attr_prot_guard_type.attr,
NULL
};
diff --git a/drivers/scsi/scsi_tgt_priv.h b/drivers/scsi/scsi_tgt_priv.h
index cb92888948f..fe4c62177f7 100644
--- a/drivers/scsi/scsi_tgt_priv.h
+++ b/drivers/scsi/scsi_tgt_priv.h
@@ -6,7 +6,7 @@ struct task_struct;
/* tmp - will replace with SCSI logging stuff */
#define eprintk(fmt, args...) \
do { \
- printk("%s(%d) " fmt, __FUNCTION__, __LINE__, ##args); \
+ printk("%s(%d) " fmt, __func__, __LINE__, ##args); \
} while (0)
#define dprintk(fmt, args...)
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 5fd64e70029..56823fd1fb8 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -417,15 +417,16 @@ static int fc_host_setup(struct transport_container *tc, struct device *dev,
fc_host->next_vport_number = 0;
fc_host->npiv_vports_inuse = 0;
- snprintf(fc_host->work_q_name, KOBJ_NAME_LEN, "fc_wq_%d",
- shost->host_no);
+ snprintf(fc_host->work_q_name, sizeof(fc_host->work_q_name),
+ "fc_wq_%d", shost->host_no);
fc_host->work_q = create_singlethread_workqueue(
fc_host->work_q_name);
if (!fc_host->work_q)
return -ENOMEM;
- snprintf(fc_host->devloss_work_q_name, KOBJ_NAME_LEN, "fc_dl_%d",
- shost->host_no);
+ snprintf(fc_host->devloss_work_q_name,
+ sizeof(fc_host->devloss_work_q_name),
+ "fc_dl_%d", shost->host_no);
fc_host->devloss_work_q = create_singlethread_workqueue(
fc_host->devloss_work_q_name);
if (!fc_host->devloss_work_q) {
@@ -570,7 +571,7 @@ send_fail:
name = get_fc_host_event_code_name(event_code);
printk(KERN_WARNING
"%s: Dropped Event : host %d %s data 0x%08x - err %d\n",
- __FUNCTION__, shost->host_no,
+ __func__, shost->host_no,
(name) ? name : "<unknown>", event_data, err);
return;
}
@@ -643,7 +644,7 @@ send_vendor_fail_skb:
send_vendor_fail:
printk(KERN_WARNING
"%s: Dropped Event : host %d vendor_unique - err %d\n",
- __FUNCTION__, shost->host_no, err);
+ __func__, shost->host_no, err);
return;
}
EXPORT_SYMBOL(fc_host_post_vendor_event);
@@ -2463,7 +2464,7 @@ fc_rport_create(struct Scsi_Host *shost, int channel,
size = (sizeof(struct fc_rport) + fci->f->dd_fcrport_size);
rport = kzalloc(size, GFP_KERNEL);
if (unlikely(!rport)) {
- printk(KERN_ERR "%s: allocation failure\n", __FUNCTION__);
+ printk(KERN_ERR "%s: allocation failure\n", __func__);
return NULL;
}
@@ -3136,7 +3137,7 @@ fc_vport_create(struct Scsi_Host *shost, int channel, struct device *pdev,
size = (sizeof(struct fc_vport) + fci->f->dd_fcvport_size);
vport = kzalloc(size, GFP_KERNEL);
if (unlikely(!vport)) {
- printk(KERN_ERR "%s: allocation failure\n", __FUNCTION__);
+ printk(KERN_ERR "%s: allocation failure\n", __func__);
return -ENOMEM;
}
@@ -3200,7 +3201,7 @@ fc_vport_create(struct Scsi_Host *shost, int channel, struct device *pdev,
printk(KERN_ERR
"%s: Cannot create vport symlinks for "
"%s, err=%d\n",
- __FUNCTION__, dev->bus_id, error);
+ __func__, dev->bus_id, error);
}
spin_lock_irqsave(shost->host_lock, flags);
vport->flags &= ~FC_VPORT_CREATING;
@@ -3313,7 +3314,7 @@ fc_vport_sched_delete(struct work_struct *work)
if (stat)
dev_printk(KERN_ERR, vport->dev.parent,
"%s: %s could not be deleted created via "
- "shost%d channel %d - error %d\n", __FUNCTION__,
+ "shost%d channel %d - error %d\n", __func__,
vport->dev.bus_id, vport->shost->host_no,
vport->channel, stat);
}
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 3af7cbcc5c5..043c3921164 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -170,7 +170,7 @@ iscsi_create_endpoint(int dd_size)
int err;
for (id = 1; id < ISCSI_MAX_EPID; id++) {
- dev = class_find_device(&iscsi_endpoint_class, &id,
+ dev = class_find_device(&iscsi_endpoint_class, NULL, &id,
iscsi_match_epid);
if (!dev)
break;
@@ -222,7 +222,7 @@ struct iscsi_endpoint *iscsi_lookup_endpoint(u64 handle)
struct iscsi_endpoint *ep;
struct device *dev;
- dev = class_find_device(&iscsi_endpoint_class, &handle,
+ dev = class_find_device(&iscsi_endpoint_class, NULL, &handle,
iscsi_match_epid);
if (!dev)
return NULL;
@@ -247,8 +247,8 @@ static int iscsi_setup_host(struct transport_container *tc, struct device *dev,
atomic_set(&ihost->nr_scans, 0);
mutex_init(&ihost->mutex);
- snprintf(ihost->scan_workq_name, KOBJ_NAME_LEN, "iscsi_scan_%d",
- shost->host_no);
+ snprintf(ihost->scan_workq_name, sizeof(ihost->scan_workq_name),
+ "iscsi_scan_%d", shost->host_no);
ihost->scan_workq = create_singlethread_workqueue(
ihost->scan_workq_name);
if (!ihost->scan_workq)
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index f4461d35ffb..366609386be 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -779,7 +779,7 @@ static void sas_port_create_link(struct sas_port *port,
return;
err:
printk(KERN_ERR "%s: Cannot create port links, err=%d\n",
- __FUNCTION__, res);
+ __func__, res);
}
static void sas_port_delete_link(struct sas_port *port,
@@ -1029,7 +1029,7 @@ void sas_port_mark_backlink(struct sas_port *port)
return;
err:
printk(KERN_ERR "%s: Cannot create port backlink, err=%d\n",
- __FUNCTION__, res);
+ __func__, res);
}
EXPORT_SYMBOL(sas_port_mark_backlink);
diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c
index 75a64a6cae8..b29360ed0bd 100644
--- a/drivers/scsi/scsi_transport_spi.c
+++ b/drivers/scsi/scsi_transport_spi.c
@@ -366,12 +366,14 @@ spi_transport_rd_attr(rti, "%d\n");
spi_transport_rd_attr(pcomp_en, "%d\n");
spi_transport_rd_attr(hold_mcs, "%d\n");
-/* we only care about the first child device so we return 1 */
+/* we only care about the first child device that's a real SCSI device
+ * so we return 1 to terminate the iteration when we find it */
static int child_iter(struct device *dev, void *data)
{
- struct scsi_device *sdev = to_scsi_device(dev);
+ if (!scsi_is_sdev_device(dev))
+ return 0;
- spi_dv_device(sdev);
+ spi_dv_device(to_scsi_device(dev));
return 1;
}
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 0c63947d8a9..e5e7d785645 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -99,8 +99,7 @@ static void scsi_disk_release(struct device *cdev);
static void sd_print_sense_hdr(struct scsi_disk *, struct scsi_sense_hdr *);
static void sd_print_result(struct scsi_disk *, int);
-static DEFINE_IDR(sd_index_idr);
-static DEFINE_SPINLOCK(sd_index_lock);
+static DEFINE_IDA(sd_index_ida);
/* This semaphore is used to mediate the 0->1 reference get in the
* face of object destruction (i.e. we can't allow a get on an
@@ -234,6 +233,24 @@ sd_show_allow_restart(struct device *dev, struct device_attribute *attr,
return snprintf(buf, 40, "%d\n", sdkp->device->allow_restart);
}
+static ssize_t
+sd_show_protection_type(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_disk *sdkp = to_scsi_disk(dev);
+
+ return snprintf(buf, 20, "%u\n", sdkp->protection_type);
+}
+
+static ssize_t
+sd_show_app_tag_own(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_disk *sdkp = to_scsi_disk(dev);
+
+ return snprintf(buf, 20, "%u\n", sdkp->ATO);
+}
+
static struct device_attribute sd_disk_attrs[] = {
__ATTR(cache_type, S_IRUGO|S_IWUSR, sd_show_cache_type,
sd_store_cache_type),
@@ -242,6 +259,8 @@ static struct device_attribute sd_disk_attrs[] = {
sd_store_allow_restart),
__ATTR(manage_start_stop, S_IRUGO|S_IWUSR, sd_show_manage_start_stop,
sd_store_manage_start_stop),
+ __ATTR(protection_type, S_IRUGO, sd_show_protection_type, NULL),
+ __ATTR(app_tag_own, S_IRUGO, sd_show_app_tag_own, NULL),
__ATTR_NULL,
};
@@ -354,7 +373,9 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
struct scsi_cmnd *SCpnt;
struct scsi_device *sdp = q->queuedata;
struct gendisk *disk = rq->rq_disk;
+ struct scsi_disk *sdkp;
sector_t block = rq->sector;
+ sector_t threshold;
unsigned int this_count = rq->nr_sectors;
unsigned int timeout = sdp->timeout;
int ret;
@@ -370,6 +391,7 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
if (ret != BLKPREP_OK)
goto out;
SCpnt = rq->special;
+ sdkp = scsi_disk(disk);
/* from here on until we're complete, any goto out
* is used for a killable error condition */
@@ -401,13 +423,21 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
}
/*
- * Some devices (some sdcards for one) don't like it if the
- * last sector gets read in a larger then 1 sector read.
+ * Some SD card readers can't handle multi-sector accesses which touch
+ * the last one or two hardware sectors. Split accesses as needed.
*/
- if (unlikely(sdp->last_sector_bug &&
- rq->nr_sectors > sdp->sector_size / 512 &&
- block + this_count == get_capacity(disk)))
- this_count -= sdp->sector_size / 512;
+ threshold = get_capacity(disk) - SD_LAST_BUGGY_SECTORS *
+ (sdp->sector_size / 512);
+
+ if (unlikely(sdp->last_sector_bug && block + this_count > threshold)) {
+ if (block < threshold) {
+ /* Access up to the threshold but not beyond */
+ this_count = threshold - block;
+ } else {
+ /* Access only a single hardware sector */
+ this_count = sdp->sector_size / 512;
+ }
+ }
SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt, "block=%llu\n",
(unsigned long long)block));
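
A stand-alone sketch of the clamping arithmetic introduced above, with invented numbers: given 1024 blocks of capacity, 512-byte sectors and SD_LAST_BUGGY_SECTORS = 8, the threshold is 1016, so a 16-block request starting at block 1010 is trimmed to 6 blocks, and a request starting past the threshold is reduced to a single hardware sector.

#include <stdio.h>

int main(void)
{
	unsigned long long capacity = 1024, block = 1010, threshold;
	unsigned int this_count = 16, sector_size = 512;

	threshold = capacity - 8 * (sector_size / 512);	/* SD_LAST_BUGGY_SECTORS = 8 */

	if (block + this_count > threshold) {
		if (block < threshold)
			this_count = threshold - block;	/* stop at the threshold */
		else
			this_count = sector_size / 512;	/* lone hardware sector */
	}
	printf("threshold=%llu this_count=%u\n", threshold, this_count);
	return 0;
}
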
@@ -459,6 +489,11 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
}
SCpnt->cmnd[0] = WRITE_6;
SCpnt->sc_data_direction = DMA_TO_DEVICE;
+
+ if (blk_integrity_rq(rq) &&
+ sd_dif_prepare(rq, block, sdp->sector_size) == -EIO)
+ goto out;
+
} else if (rq_data_dir(rq) == READ) {
SCpnt->cmnd[0] = READ_6;
SCpnt->sc_data_direction = DMA_FROM_DEVICE;
@@ -473,8 +508,12 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
"writing" : "reading", this_count,
rq->nr_sectors));
- SCpnt->cmnd[1] = 0;
-
+ /* Set RDPROTECT/WRPROTECT if disk is formatted with DIF */
+ if (scsi_host_dif_capable(sdp->host, sdkp->protection_type))
+ SCpnt->cmnd[1] = 1 << 5;
+ else
+ SCpnt->cmnd[1] = 0;
+
if (block > 0xffffffff) {
SCpnt->cmnd[0] += READ_16 - READ_6;
SCpnt->cmnd[1] |= blk_fua_rq(rq) ? 0x8 : 0;
@@ -492,6 +531,7 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
SCpnt->cmnd[13] = (unsigned char) this_count & 0xff;
SCpnt->cmnd[14] = SCpnt->cmnd[15] = 0;
} else if ((this_count > 0xff) || (block > 0x1fffff) ||
+ scsi_device_protection(SCpnt->device) ||
SCpnt->device->use_10_for_rw) {
if (this_count > 0xffff)
this_count = 0xffff;
@@ -526,6 +566,10 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
}
SCpnt->sdb.length = this_count * sdp->sector_size;
+ /* If DIF or DIX is enabled, tell HBA how to handle request */
+ if (sdkp->protection_type || scsi_prot_sg_count(SCpnt))
+ sd_dif_op(SCpnt, sdkp->protection_type, scsi_prot_sg_count(SCpnt));
+
/*
* We shouldn't disconnect in the middle of a sector, so with a dumb
* host adapter, it's safe to assume that we can at least transfer
@@ -920,6 +964,48 @@ static struct block_device_operations sd_fops = {
.revalidate_disk = sd_revalidate_disk,
};
+static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd)
+{
+ u64 start_lba = scmd->request->sector;
+ u64 end_lba = scmd->request->sector + (scsi_bufflen(scmd) / 512);
+ u64 bad_lba;
+ int info_valid;
+
+ if (!blk_fs_request(scmd->request))
+ return 0;
+
+ info_valid = scsi_get_sense_info_fld(scmd->sense_buffer,
+ SCSI_SENSE_BUFFERSIZE,
+ &bad_lba);
+ if (!info_valid)
+ return 0;
+
+ if (scsi_bufflen(scmd) <= scmd->device->sector_size)
+ return 0;
+
+ if (scmd->device->sector_size < 512) {
+ /* only legitimate sector_size here is 256 */
+ start_lba <<= 1;
+ end_lba <<= 1;
+ } else {
+ /* be careful ... don't want any overflows */
+ u64 factor = scmd->device->sector_size / 512;
+ do_div(start_lba, factor);
+ do_div(end_lba, factor);
+ }
+
+ /* The bad lba was reported incorrectly, we have no idea where
+ * the error is.
+ */
+ if (bad_lba < start_lba || bad_lba >= end_lba)
+ return 0;
+
+ /* This computation should always be done in terms of
+ * the resolution of the device's medium.
+ */
+ return (bad_lba - start_lba) * scmd->device->sector_size;
+}
+
/**
* sd_done - bottom half handler: called when the lower level
* driver has completed (successfully or otherwise) a scsi command.
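
To make the good_bytes arithmetic in the new sd_completed_bytes() concrete, here is a stand-alone sketch with invented numbers: an 8-block (4096-byte) read at sector 1000 on a 512-byte-sector disk whose sense data reports the first bad LBA as 1005 completes 5 * 512 = 2560 good bytes.

#include <stdio.h>

int main(void)
{
	unsigned long long start_lba = 1000, bad_lba = 1005, end_lba;
	unsigned int bufflen = 4096, sector_size = 512, good_bytes = 0;

	end_lba = start_lba + bufflen / 512;		/* 1008 */

	/* Only trust bad_lba if it falls inside the transfer. */
	if (bad_lba >= start_lba && bad_lba < end_lba)
		good_bytes = (bad_lba - start_lba) * sector_size;

	printf("good_bytes=%u\n", good_bytes);		/* 2560 */
	return 0;
}
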
@@ -930,15 +1016,10 @@ static struct block_device_operations sd_fops = {
static int sd_done(struct scsi_cmnd *SCpnt)
{
int result = SCpnt->result;
- unsigned int xfer_size = scsi_bufflen(SCpnt);
- unsigned int good_bytes = result ? 0 : xfer_size;
- u64 start_lba = SCpnt->request->sector;
- u64 end_lba = SCpnt->request->sector + (xfer_size / 512);
- u64 bad_lba;
+ unsigned int good_bytes = result ? 0 : scsi_bufflen(SCpnt);
struct scsi_sense_hdr sshdr;
int sense_valid = 0;
int sense_deferred = 0;
- int info_valid;
if (result) {
sense_valid = scsi_command_normalize_sense(SCpnt, &sshdr);
@@ -963,36 +1044,7 @@ static int sd_done(struct scsi_cmnd *SCpnt)
switch (sshdr.sense_key) {
case HARDWARE_ERROR:
case MEDIUM_ERROR:
- if (!blk_fs_request(SCpnt->request))
- goto out;
- info_valid = scsi_get_sense_info_fld(SCpnt->sense_buffer,
- SCSI_SENSE_BUFFERSIZE,
- &bad_lba);
- if (!info_valid)
- goto out;
- if (xfer_size <= SCpnt->device->sector_size)
- goto out;
- if (SCpnt->device->sector_size < 512) {
- /* only legitimate sector_size here is 256 */
- start_lba <<= 1;
- end_lba <<= 1;
- } else {
- /* be careful ... don't want any overflows */
- u64 factor = SCpnt->device->sector_size / 512;
- do_div(start_lba, factor);
- do_div(end_lba, factor);
- }
-
- if (bad_lba < start_lba || bad_lba >= end_lba)
- /* the bad lba was reported incorrectly, we have
- * no idea where the error is
- */
- goto out;
-
- /* This computation should always be done in terms of
- * the resolution of the device's medium.
- */
- good_bytes = (bad_lba - start_lba)*SCpnt->device->sector_size;
+ good_bytes = sd_completed_bytes(SCpnt);
break;
case RECOVERED_ERROR:
case NO_SENSE:
@@ -1002,10 +1054,23 @@ static int sd_done(struct scsi_cmnd *SCpnt)
scsi_print_sense("sd", SCpnt);
SCpnt->result = 0;
memset(SCpnt->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
- good_bytes = xfer_size;
+ good_bytes = scsi_bufflen(SCpnt);
+ break;
+ case ABORTED_COMMAND:
+ if (sshdr.asc == 0x10) { /* DIF: Disk detected corruption */
+ scsi_print_result(SCpnt);
+ scsi_print_sense("sd", SCpnt);
+ good_bytes = sd_completed_bytes(SCpnt);
+ }
break;
case ILLEGAL_REQUEST:
- if (SCpnt->device->use_10_for_rw &&
+ if (sshdr.asc == 0x10) { /* DIX: HBA detected corruption */
+ scsi_print_result(SCpnt);
+ scsi_print_sense("sd", SCpnt);
+ good_bytes = sd_completed_bytes(SCpnt);
+ }
+ if (!scsi_device_protection(SCpnt->device) &&
+ SCpnt->device->use_10_for_rw &&
(SCpnt->cmnd[0] == READ_10 ||
SCpnt->cmnd[0] == WRITE_10))
SCpnt->device->use_10_for_rw = 0;
@@ -1018,6 +1083,9 @@ static int sd_done(struct scsi_cmnd *SCpnt)
break;
}
out:
+ if (rq_data_dir(SCpnt->request) == READ && scsi_prot_sg_count(SCpnt))
+ sd_dif_complete(SCpnt, good_bytes);
+
return good_bytes;
}
@@ -1165,6 +1233,49 @@ sd_spinup_disk(struct scsi_disk *sdkp)
}
}
+
+/*
+ * Determine whether disk supports Data Integrity Field.
+ */
+void sd_read_protection_type(struct scsi_disk *sdkp, unsigned char *buffer)
+{
+ struct scsi_device *sdp = sdkp->device;
+ u8 type;
+
+ if (scsi_device_protection(sdp) == 0 || (buffer[12] & 1) == 0)
+ type = 0;
+ else
+ type = ((buffer[12] >> 1) & 7) + 1; /* P_TYPE 0 = Type 1 */
+
+ switch (type) {
+ case SD_DIF_TYPE0_PROTECTION:
+ sdkp->protection_type = 0;
+ break;
+
+ case SD_DIF_TYPE1_PROTECTION:
+ case SD_DIF_TYPE3_PROTECTION:
+ sdkp->protection_type = type;
+ break;
+
+ case SD_DIF_TYPE2_PROTECTION:
+ sd_printk(KERN_ERR, sdkp, "formatted with DIF Type 2 " \
+ "protection which is currently unsupported. " \
+ "Disabling disk!\n");
+ goto disable;
+
+ default:
+ sd_printk(KERN_ERR, sdkp, "formatted with unknown " \
+ "protection type %d. Disabling disk!\n", type);
+ goto disable;
+ }
+
+ return;
+
+disable:
+ sdkp->protection_type = 0;
+ sdkp->capacity = 0;
+}
+
/*
* read disk capacity
*/
@@ -1174,7 +1285,8 @@ sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer)
unsigned char cmd[16];
int the_result, retries;
int sector_size = 0;
- int longrc = 0;
+ /* Force READ CAPACITY(16) when PROTECT=1 */
+ int longrc = scsi_device_protection(sdkp->device) ? 1 : 0;
struct scsi_sense_hdr sshdr;
int sense_valid = 0;
struct scsi_device *sdp = sdkp->device;
@@ -1186,8 +1298,8 @@ repeat:
memset((void *) cmd, 0, 16);
cmd[0] = SERVICE_ACTION_IN;
cmd[1] = SAI_READ_CAPACITY_16;
- cmd[13] = 12;
- memset((void *) buffer, 0, 12);
+ cmd[13] = 13;
+ memset((void *) buffer, 0, 13);
} else {
cmd[0] = READ_CAPACITY;
memset((void *) &cmd[1], 0, 9);
@@ -1195,7 +1307,7 @@ repeat:
}
the_result = scsi_execute_req(sdp, cmd, DMA_FROM_DEVICE,
- buffer, longrc ? 12 : 8, &sshdr,
+ buffer, longrc ? 13 : 8, &sshdr,
SD_TIMEOUT, SD_MAX_RETRIES);
if (media_not_present(sdkp, &sshdr))
@@ -1270,6 +1382,8 @@ repeat:
sector_size = (buffer[8] << 24) |
(buffer[9] << 16) | (buffer[10] << 8) | buffer[11];
+
+ sd_read_protection_type(sdkp, buffer);
}
/* Some devices return the total number of sectors, not the
@@ -1531,6 +1645,52 @@ defaults:
sdkp->DPOFUA = 0;
}
+/*
+ * The ATO bit indicates whether the DIF application tag is available
+ * for use by the operating system.
+ */
+void sd_read_app_tag_own(struct scsi_disk *sdkp, unsigned char *buffer)
+{
+ int res, offset;
+ struct scsi_device *sdp = sdkp->device;
+ struct scsi_mode_data data;
+ struct scsi_sense_hdr sshdr;
+
+ if (sdp->type != TYPE_DISK)
+ return;
+
+ if (sdkp->protection_type == 0)
+ return;
+
+ res = scsi_mode_sense(sdp, 1, 0x0a, buffer, 36, SD_TIMEOUT,
+ SD_MAX_RETRIES, &data, &sshdr);
+
+ if (!scsi_status_is_good(res) || !data.header_length ||
+ data.length < 6) {
+ sd_printk(KERN_WARNING, sdkp,
+ "getting Control mode page failed, assume no ATO\n");
+
+ if (scsi_sense_valid(&sshdr))
+ sd_print_sense_hdr(sdkp, &sshdr);
+
+ return;
+ }
+
+ offset = data.header_length + data.block_descriptor_length;
+
+ if ((buffer[offset] & 0x3f) != 0x0a) {
+ sd_printk(KERN_ERR, sdkp, "ATO Got wrong page\n");
+ return;
+ }
+
+ if ((buffer[offset + 5] & 0x80) == 0)
+ return;
+
+ sdkp->ATO = 1;
+
+ return;
+}
+
/**
* sd_revalidate_disk - called the first time a new disk is seen,
* performs disk spin up, read_capacity, etc.
@@ -1567,6 +1727,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
sdkp->write_prot = 0;
sdkp->WCE = 0;
sdkp->RCD = 0;
+ sdkp->ATO = 0;
sd_spinup_disk(sdkp);
@@ -1578,6 +1739,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
sd_read_capacity(sdkp, buffer);
sd_read_write_protect_flag(sdkp, buffer);
sd_read_cache_type(sdkp, buffer);
+ sd_read_app_tag_own(sdkp, buffer);
}
/*
@@ -1643,18 +1805,20 @@ static int sd_probe(struct device *dev)
if (!gd)
goto out_free;
- if (!idr_pre_get(&sd_index_idr, GFP_KERNEL))
- goto out_put;
+ do {
+ if (!ida_pre_get(&sd_index_ida, GFP_KERNEL))
+ goto out_put;
- spin_lock(&sd_index_lock);
- error = idr_get_new(&sd_index_idr, NULL, &index);
- spin_unlock(&sd_index_lock);
+ error = ida_get_new(&sd_index_ida, &index);
+ } while (error == -EAGAIN);
- if (index >= SD_MAX_DISKS)
- error = -EBUSY;
if (error)
goto out_put;
+ error = -EBUSY;
+ if (index >= SD_MAX_DISKS)
+ goto out_free_index;
+
sdkp->device = sdp;
sdkp->driver = &sd_template;
sdkp->disk = gd;
@@ -1675,7 +1839,7 @@ static int sd_probe(struct device *dev)
strncpy(sdkp->dev.bus_id, sdp->sdev_gendev.bus_id, BUS_ID_SIZE);
if (device_add(&sdkp->dev))
- goto out_put;
+ goto out_free_index;
get_device(&sdp->sdev_gendev);
@@ -1711,12 +1875,15 @@ static int sd_probe(struct device *dev)
dev_set_drvdata(dev, sdkp);
add_disk(gd);
+ sd_dif_config_host(sdkp);
sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n",
sdp->removable ? "removable " : "");
return 0;
+ out_free_index:
+ ida_remove(&sd_index_ida, index);
out_put:
put_disk(gd);
out_free:
@@ -1766,9 +1933,7 @@ static void scsi_disk_release(struct device *dev)
struct scsi_disk *sdkp = to_scsi_disk(dev);
struct gendisk *disk = sdkp->disk;
- spin_lock(&sd_index_lock);
- idr_remove(&sd_index_idr, sdkp->index);
- spin_unlock(&sd_index_lock);
+ ida_remove(&sd_index_ida, sdkp->index);
disk->private_data = NULL;
put_disk(disk);
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index 03a3d45cfa4..95b9f06534d 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -31,6 +31,12 @@
*/
#define SD_BUF_SIZE 512
+/*
+ * Number of sectors at the end of the device which must not be
+ * accessed with multi-sector requests when last_sector_bug is set
+ */
+#define SD_LAST_BUGGY_SECTORS 8
+
struct scsi_disk {
struct scsi_driver *driver; /* always &sd_template */
struct scsi_device *device;
@@ -41,7 +47,9 @@ struct scsi_disk {
u32 index;
u8 media_present;
u8 write_prot;
+ u8 protection_type; /* Data Integrity Field */
unsigned previous_state : 1;
+ unsigned ATO : 1; /* state of disk ATO bit */
unsigned WCE : 1; /* state of disk WCE bit */
unsigned RCD : 1; /* state of disk RCD bit, unused */
unsigned DPOFUA : 1; /* state of disk DPOFUA bit */
@@ -59,4 +67,50 @@ static inline struct scsi_disk *scsi_disk(struct gendisk *disk)
(sdsk)->disk->disk_name, ##a) : \
sdev_printk(prefix, (sdsk)->device, fmt, ##a)
+/*
+ * A DIF-capable target device can be formatted with different
+ * protection schemes. Currently 0 through 3 are defined:
+ *
+ * Type 0 is regular (unprotected) I/O
+ *
+ * Type 1 defines the contents of the guard and reference tags
+ *
+ * Type 2 defines the contents of the guard and reference tags and
+ * uses 32-byte commands to seed the latter
+ *
+ * Type 3 defines the contents of the guard tag only
+ */
+
+enum sd_dif_target_protection_types {
+ SD_DIF_TYPE0_PROTECTION = 0x0,
+ SD_DIF_TYPE1_PROTECTION = 0x1,
+ SD_DIF_TYPE2_PROTECTION = 0x2,
+ SD_DIF_TYPE3_PROTECTION = 0x3,
+};
+
+/*
+ * Data Integrity Field tuple.
+ */
+struct sd_dif_tuple {
+ __be16 guard_tag; /* Checksum */
+ __be16 app_tag; /* Opaque storage */
+ __be32 ref_tag; /* Target LBA or indirect LBA */
+};
+
+#if defined(CONFIG_BLK_DEV_INTEGRITY)
+
+extern void sd_dif_op(struct scsi_cmnd *, unsigned int, unsigned int);
+extern void sd_dif_config_host(struct scsi_disk *);
+extern int sd_dif_prepare(struct request *rq, sector_t, unsigned int);
+extern void sd_dif_complete(struct scsi_cmnd *, unsigned int);
+
+#else /* CONFIG_BLK_DEV_INTEGRITY */
+
+#define sd_dif_op(a, b, c) do { } while (0)
+#define sd_dif_config_host(a) do { } while (0)
+#define sd_dif_prepare(a, b, c) (0)
+#define sd_dif_complete(a, b) (0)
+
+#endif /* CONFIG_BLK_DEV_INTEGRITY */
+
#endif /* _SCSI_DISK_H */
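To make the tuple above concrete, here is a minimal standalone sketch (not driver code) that fills one tuple for a 512-byte sector the way a Type 1 format lays it out: the guard tag is the T10 CRC of the data, the ref tag is the low 32 bits of the LBA, and the app tag is left at zero. The bit-serial CRC assumes the T10-DIF polynomial 0x8BB7; the driver itself relies on crc_t10dif() from lib/, and the wire format is big-endian (host order is kept here for brevity).

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct dif_tuple {
        uint16_t guard_tag;     /* CRC of the sector data */
        uint16_t app_tag;       /* opaque; OS-owned when ATO=1 */
        uint32_t ref_tag;       /* low 32 bits of the target LBA (Type 1/2) */
};

/* Naive CRC16 with the T10-DIF polynomial 0x8BB7, init 0, no reflection. */
static uint16_t crc_t10dif_sketch(const uint8_t *buf, size_t len)
{
        uint16_t crc = 0;

        while (len--) {
                int bit;

                crc ^= (uint16_t)*buf++ << 8;
                for (bit = 0; bit < 8; bit++)
                        crc = (crc & 0x8000) ? (crc << 1) ^ 0x8bb7 : crc << 1;
        }
        return crc;
}

int main(void)
{
        uint8_t sector[512];
        struct dif_tuple t;
        uint64_t lba = 123456;

        memset(sector, 0xa5, sizeof(sector));
        t.guard_tag = crc_t10dif_sketch(sector, sizeof(sector));
        t.app_tag = 0;
        t.ref_tag = (uint32_t)(lba & 0xffffffff);
        printf("guard %04x app %04x ref %08x\n", t.guard_tag, t.app_tag, t.ref_tag);
        return 0;
}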
diff --git a/drivers/scsi/sd_dif.c b/drivers/scsi/sd_dif.c
new file mode 100644
index 00000000000..4d17f3d35aa
--- /dev/null
+++ b/drivers/scsi/sd_dif.c
@@ -0,0 +1,538 @@
+/*
+ * sd_dif.c - SCSI Data Integrity Field
+ *
+ * Copyright (C) 2007, 2008 Oracle Corporation
+ * Written by: Martin K. Petersen <martin.petersen@oracle.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
+ * USA.
+ *
+ */
+
+#include <linux/blkdev.h>
+#include <linux/crc-t10dif.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_dbg.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_driver.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_ioctl.h>
+#include <scsi/scsicam.h>
+
+#include <net/checksum.h>
+
+#include "sd.h"
+
+typedef __u16 (csum_fn) (void *, unsigned int);
+
+static __u16 sd_dif_crc_fn(void *data, unsigned int len)
+{
+ return cpu_to_be16(crc_t10dif(data, len));
+}
+
+static __u16 sd_dif_ip_fn(void *data, unsigned int len)
+{
+ return ip_compute_csum(data, len);
+}
+
+/*
+ * Type 1 and Type 2 protection use the same format: 16-bit guard tag,
+ * 16-bit app tag, 32-bit reference tag.
+ */
+static void sd_dif_type1_generate(struct blk_integrity_exchg *bix, csum_fn *fn)
+{
+ void *buf = bix->data_buf;
+ struct sd_dif_tuple *sdt = bix->prot_buf;
+ sector_t sector = bix->sector;
+ unsigned int i;
+
+ for (i = 0 ; i < bix->data_size ; i += bix->sector_size, sdt++) {
+ sdt->guard_tag = fn(buf, bix->sector_size);
+ sdt->ref_tag = cpu_to_be32(sector & 0xffffffff);
+ sdt->app_tag = 0;
+
+ buf += bix->sector_size;
+ sector++;
+ }
+}
+
+static void sd_dif_type1_generate_crc(struct blk_integrity_exchg *bix)
+{
+ sd_dif_type1_generate(bix, sd_dif_crc_fn);
+}
+
+static void sd_dif_type1_generate_ip(struct blk_integrity_exchg *bix)
+{
+ sd_dif_type1_generate(bix, sd_dif_ip_fn);
+}
+
+static int sd_dif_type1_verify(struct blk_integrity_exchg *bix, csum_fn *fn)
+{
+ void *buf = bix->data_buf;
+ struct sd_dif_tuple *sdt = bix->prot_buf;
+ sector_t sector = bix->sector;
+ unsigned int i;
+ __u16 csum;
+
+ for (i = 0 ; i < bix->data_size ; i += bix->sector_size, sdt++) {
+ /* Unwritten sectors */
+ if (sdt->app_tag == 0xffff)
+ return 0;
+
+ /* Bad ref tag received from disk */
+ if (sdt->ref_tag == 0xffffffff) {
+ printk(KERN_ERR
+ "%s: bad phys ref tag on sector %lu\n",
+ bix->disk_name, (unsigned long)sector);
+ return -EIO;
+ }
+
+ if (be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
+ printk(KERN_ERR
+ "%s: ref tag error on sector %lu (rcvd %u)\n",
+ bix->disk_name, (unsigned long)sector,
+ be32_to_cpu(sdt->ref_tag));
+ return -EIO;
+ }
+
+ csum = fn(buf, bix->sector_size);
+
+ if (sdt->guard_tag != csum) {
+ printk(KERN_ERR "%s: guard tag error on sector %lu " \
+ "(rcvd %04x, data %04x)\n", bix->disk_name,
+ (unsigned long)sector,
+ be16_to_cpu(sdt->guard_tag), be16_to_cpu(csum));
+ return -EIO;
+ }
+
+ buf += bix->sector_size;
+ sector++;
+ }
+
+ return 0;
+}
+
+static int sd_dif_type1_verify_crc(struct blk_integrity_exchg *bix)
+{
+ return sd_dif_type1_verify(bix, sd_dif_crc_fn);
+}
+
+static int sd_dif_type1_verify_ip(struct blk_integrity_exchg *bix)
+{
+ return sd_dif_type1_verify(bix, sd_dif_ip_fn);
+}
+
+/*
+ * Functions for interleaving and deinterleaving application tags
+ */
+static void sd_dif_type1_set_tag(void *prot, void *tag_buf, unsigned int sectors)
+{
+ struct sd_dif_tuple *sdt = prot;
+ char *tag = tag_buf;
+ unsigned int i, j;
+
+ for (i = 0, j = 0 ; i < sectors ; i++, j += 2, sdt++) {
+ sdt->app_tag = tag[j] << 8 | tag[j+1];
+ BUG_ON(sdt->app_tag == 0xffff);
+ }
+}
+
+static void sd_dif_type1_get_tag(void *prot, void *tag_buf, unsigned int sectors)
+{
+ struct sd_dif_tuple *sdt = prot;
+ char *tag = tag_buf;
+ unsigned int i, j;
+
+ for (i = 0, j = 0 ; i < sectors ; i++, j += 2, sdt++) {
+ tag[j] = (sdt->app_tag & 0xff00) >> 8;
+ tag[j+1] = sdt->app_tag & 0xff;
+ }
+}
+
+static struct blk_integrity dif_type1_integrity_crc = {
+ .name = "T10-DIF-TYPE1-CRC",
+ .generate_fn = sd_dif_type1_generate_crc,
+ .verify_fn = sd_dif_type1_verify_crc,
+ .get_tag_fn = sd_dif_type1_get_tag,
+ .set_tag_fn = sd_dif_type1_set_tag,
+ .tuple_size = sizeof(struct sd_dif_tuple),
+ .tag_size = 0,
+};
+
+static struct blk_integrity dif_type1_integrity_ip = {
+ .name = "T10-DIF-TYPE1-IP",
+ .generate_fn = sd_dif_type1_generate_ip,
+ .verify_fn = sd_dif_type1_verify_ip,
+ .get_tag_fn = sd_dif_type1_get_tag,
+ .set_tag_fn = sd_dif_type1_set_tag,
+ .tuple_size = sizeof(struct sd_dif_tuple),
+ .tag_size = 0,
+};
+
+
+/*
+ * Type 3 protection has a 16-bit guard tag and 16 + 32 bits of opaque
+ * tag space.
+ */
+static void sd_dif_type3_generate(struct blk_integrity_exchg *bix, csum_fn *fn)
+{
+ void *buf = bix->data_buf;
+ struct sd_dif_tuple *sdt = bix->prot_buf;
+ unsigned int i;
+
+ for (i = 0 ; i < bix->data_size ; i += bix->sector_size, sdt++) {
+ sdt->guard_tag = fn(buf, bix->sector_size);
+ sdt->ref_tag = 0;
+ sdt->app_tag = 0;
+
+ buf += bix->sector_size;
+ }
+}
+
+static void sd_dif_type3_generate_crc(struct blk_integrity_exchg *bix)
+{
+ sd_dif_type3_generate(bix, sd_dif_crc_fn);
+}
+
+static void sd_dif_type3_generate_ip(struct blk_integrity_exchg *bix)
+{
+ sd_dif_type3_generate(bix, sd_dif_ip_fn);
+}
+
+static int sd_dif_type3_verify(struct blk_integrity_exchg *bix, csum_fn *fn)
+{
+ void *buf = bix->data_buf;
+ struct sd_dif_tuple *sdt = bix->prot_buf;
+ sector_t sector = bix->sector;
+ unsigned int i;
+ __u16 csum;
+
+ for (i = 0 ; i < bix->data_size ; i += bix->sector_size, sdt++) {
+ /* Unwritten sectors */
+ if (sdt->app_tag == 0xffff && sdt->ref_tag == 0xffffffff)
+ return 0;
+
+ csum = fn(buf, bix->sector_size);
+
+ if (sdt->guard_tag != csum) {
+ printk(KERN_ERR "%s: guard tag error on sector %lu " \
+ "(rcvd %04x, data %04x)\n", bix->disk_name,
+ (unsigned long)sector,
+ be16_to_cpu(sdt->guard_tag), be16_to_cpu(csum));
+ return -EIO;
+ }
+
+ buf += bix->sector_size;
+ sector++;
+ }
+
+ return 0;
+}
+
+static int sd_dif_type3_verify_crc(struct blk_integrity_exchg *bix)
+{
+ return sd_dif_type3_verify(bix, sd_dif_crc_fn);
+}
+
+static int sd_dif_type3_verify_ip(struct blk_integrity_exchg *bix)
+{
+ return sd_dif_type3_verify(bix, sd_dif_ip_fn);
+}
+
+static void sd_dif_type3_set_tag(void *prot, void *tag_buf, unsigned int sectors)
+{
+ struct sd_dif_tuple *sdt = prot;
+ char *tag = tag_buf;
+ unsigned int i, j;
+
+ for (i = 0, j = 0 ; i < sectors ; i++, j += 6, sdt++) {
+ sdt->app_tag = tag[j] << 8 | tag[j+1];
+ sdt->ref_tag = tag[j+2] << 24 | tag[j+3] << 16 |
+ tag[j+4] << 8 | tag[j+5];
+ }
+}
+
+static void sd_dif_type3_get_tag(void *prot, void *tag_buf, unsigned int sectors)
+{
+ struct sd_dif_tuple *sdt = prot;
+ char *tag = tag_buf;
+ unsigned int i, j;
+
+ for (i = 0, j = 0 ; i < sectors ; i++, j += 6, sdt++) {
+ tag[j] = (sdt->app_tag & 0xff00) >> 8;
+ tag[j+1] = sdt->app_tag & 0xff;
+ tag[j+2] = (sdt->ref_tag & 0xff000000) >> 24;
+ tag[j+3] = (sdt->ref_tag & 0xff0000) >> 16;
+ tag[j+4] = (sdt->ref_tag & 0xff00) >> 8;
+ tag[j+5] = sdt->ref_tag & 0xff;
+ BUG_ON(sdt->app_tag == 0xffff || sdt->ref_tag == 0xffffffff);
+ }
+}
+
+static struct blk_integrity dif_type3_integrity_crc = {
+ .name = "T10-DIF-TYPE3-CRC",
+ .generate_fn = sd_dif_type3_generate_crc,
+ .verify_fn = sd_dif_type3_verify_crc,
+ .get_tag_fn = sd_dif_type3_get_tag,
+ .set_tag_fn = sd_dif_type3_set_tag,
+ .tuple_size = sizeof(struct sd_dif_tuple),
+ .tag_size = 0,
+};
+
+static struct blk_integrity dif_type3_integrity_ip = {
+ .name = "T10-DIF-TYPE3-IP",
+ .generate_fn = sd_dif_type3_generate_ip,
+ .verify_fn = sd_dif_type3_verify_ip,
+ .get_tag_fn = sd_dif_type3_get_tag,
+ .set_tag_fn = sd_dif_type3_set_tag,
+ .tuple_size = sizeof(struct sd_dif_tuple),
+ .tag_size = 0,
+};
+
+/*
+ * Configure exchange of protection information between OS and HBA.
+ */
+void sd_dif_config_host(struct scsi_disk *sdkp)
+{
+ struct scsi_device *sdp = sdkp->device;
+ struct gendisk *disk = sdkp->disk;
+ u8 type = sdkp->protection_type;
+
+ /* If this HBA doesn't support DIX, resort to normal I/O or DIF */
+ if (scsi_host_dix_capable(sdp->host, type) == 0) {
+
+ if (type == SD_DIF_TYPE0_PROTECTION)
+ return;
+
+ if (scsi_host_dif_capable(sdp->host, type) == 0) {
+ sd_printk(KERN_INFO, sdkp, "Type %d protection " \
+ "unsupported by HBA. Disabling DIF.\n", type);
+ sdkp->protection_type = 0;
+ return;
+ }
+
+ sd_printk(KERN_INFO, sdkp, "Enabling DIF Type %d protection\n",
+ type);
+
+ return;
+ }
+
+ /* Enable DMA of protection information */
+ if (scsi_host_get_guard(sdkp->device->host) & SHOST_DIX_GUARD_IP)
+ if (type == SD_DIF_TYPE3_PROTECTION)
+ blk_integrity_register(disk, &dif_type3_integrity_ip);
+ else
+ blk_integrity_register(disk, &dif_type1_integrity_ip);
+ else
+ if (type == SD_DIF_TYPE3_PROTECTION)
+ blk_integrity_register(disk, &dif_type3_integrity_crc);
+ else
+ blk_integrity_register(disk, &dif_type1_integrity_crc);
+
+ sd_printk(KERN_INFO, sdkp,
+ "Enabling %s integrity protection\n", disk->integrity->name);
+
+ /* Signal to block layer that we support sector tagging */
+ if (type && sdkp->ATO) {
+ if (type == SD_DIF_TYPE3_PROTECTION)
+ disk->integrity->tag_size = sizeof(u16) + sizeof(u32);
+ else
+ disk->integrity->tag_size = sizeof(u16);
+
+ sd_printk(KERN_INFO, sdkp, "DIF application tag size %u\n",
+ disk->integrity->tag_size);
+ }
+}
+
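The profile registered above is driven by what the HBA advertises. A rough sketch of the corresponding LLD side, assuming the scsi_host_set_prot()/scsi_host_set_guard() helpers and SHOST_* capability flags from the same protection infrastructure (the function name is hypothetical, not taken from any driver in this series):

#include <scsi/scsi_host.h>

static void example_hba_declare_protection(struct Scsi_Host *shost)
{
        /* DIF Type 1 on the wire, DIX Type 1 towards the host ... */
        scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION |
                                  SHOST_DIX_TYPE1_PROTECTION);
        /* ... with an IP-checksum guard, so sd_dif_config_host()
         * ends up registering the T10-DIF-TYPE1-IP profile. */
        scsi_host_set_guard(shost, SHOST_DIX_GUARD_IP);
}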
+/*
+ * DIF DMA operation magic decoder ring.
+ */
+void sd_dif_op(struct scsi_cmnd *scmd, unsigned int dif, unsigned int dix)
+{
+ int csum_convert, prot_op;
+
+ prot_op = 0;
+
+ /* Convert checksum? */
+ if (scsi_host_get_guard(scmd->device->host) != SHOST_DIX_GUARD_CRC)
+ csum_convert = 1;
+ else
+ csum_convert = 0;
+
+ switch (scmd->cmnd[0]) {
+ case READ_10:
+ case READ_12:
+ case READ_16:
+ if (dif && dix)
+ if (csum_convert)
+ prot_op = SCSI_PROT_READ_CONVERT;
+ else
+ prot_op = SCSI_PROT_READ_PASS;
+ else if (dif && !dix)
+ prot_op = SCSI_PROT_READ_STRIP;
+ else if (!dif && dix)
+ prot_op = SCSI_PROT_READ_INSERT;
+
+ break;
+
+ case WRITE_10:
+ case WRITE_12:
+ case WRITE_16:
+ if (dif && dix)
+ if (csum_convert)
+ prot_op = SCSI_PROT_WRITE_CONVERT;
+ else
+ prot_op = SCSI_PROT_WRITE_PASS;
+ else if (dif && !dix)
+ prot_op = SCSI_PROT_WRITE_INSERT;
+ else if (!dif && dix)
+ prot_op = SCSI_PROT_WRITE_STRIP;
+
+ break;
+ }
+
+ scsi_set_prot_op(scmd, prot_op);
+ scsi_set_prot_type(scmd, dif);
+}
+
+/*
+ * The virtual start sector is the one that was originally submitted
+ * by the block layer. Due to partitioning, MD/DM cloning, etc. the
+ * actual physical start sector is likely to be different. Remap
+ * protection information to match the physical LBA.
+ *
+ * From a protocol perspective there's a slight difference between
+ * Type 1 and 2. The latter uses 32-byte CDBs exclusively, and the
+ * reference tag is seeded in the CDB. This gives us the potential to
+ * avoid virt->phys remapping during write. However, at read time we
+ * don't know whether the virt sector is the same as when we wrote it
+ * (we could be reading from the real disk as opposed to the MD/DM
+ * device).  So we always remap Type 2, making it identical to Type 1.
+ *
+ * Type 3 does not have a reference tag so no remapping is required.
+ */
+int sd_dif_prepare(struct request *rq, sector_t hw_sector, unsigned int sector_sz)
+{
+ const int tuple_sz = sizeof(struct sd_dif_tuple);
+ struct bio *bio;
+ struct scsi_disk *sdkp;
+ struct sd_dif_tuple *sdt;
+ unsigned int i, j;
+ u32 phys, virt;
+
+ /* Already remapped? */
+ if (rq->cmd_flags & REQ_INTEGRITY)
+ return 0;
+
+ sdkp = rq->bio->bi_bdev->bd_disk->private_data;
+
+ if (sdkp->protection_type == SD_DIF_TYPE3_PROTECTION)
+ return 0;
+
+ rq->cmd_flags |= REQ_INTEGRITY;
+ phys = hw_sector & 0xffffffff;
+
+ __rq_for_each_bio(bio, rq) {
+ struct bio_vec *iv;
+
+ virt = bio->bi_integrity->bip_sector & 0xffffffff;
+
+ bip_for_each_vec(iv, bio->bi_integrity, i) {
+ sdt = kmap_atomic(iv->bv_page, KM_USER0)
+ + iv->bv_offset;
+
+ for (j = 0 ; j < iv->bv_len ; j += tuple_sz, sdt++) {
+
+ if (be32_to_cpu(sdt->ref_tag) != virt)
+ goto error;
+
+ sdt->ref_tag = cpu_to_be32(phys);
+ virt++;
+ phys++;
+ }
+
+ kunmap_atomic(sdt, KM_USER0);
+ }
+ }
+
+ return 0;
+
+error:
+ kunmap_atomic(sdt, KM_USER0);
+ sd_printk(KERN_ERR, sdkp, "%s: virt %u, phys %u, ref %u\n",
+ __func__, virt, phys, be32_to_cpu(sdt->ref_tag));
+
+ return -EIO;
+}
+
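The remap loop above, reduced to its essence: a hypothetical, self-contained helper operating on a flat array of host-order reference tags (the real code walks the bio integrity vectors and handles big-endian tuples):

#include <stdint.h>

static int remap_ref_tags(uint32_t *ref_tag, unsigned int nr_sectors,
                          uint32_t virt, uint32_t phys)
{
        unsigned int i;

        for (i = 0; i < nr_sectors; i++) {
                /* Tuples were generated against the virtual LBA ... */
                if (ref_tag[i] != virt + i)
                        return -1;
                /* ... and are rewritten to the physical LBA for the wire. */
                ref_tag[i] = phys + i;
        }
        return 0;
}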
+/*
+ * Remap physical sector values in the reference tag to the virtual
+ * values expected by the block layer.
+ */
+void sd_dif_complete(struct scsi_cmnd *scmd, unsigned int good_bytes)
+{
+ const int tuple_sz = sizeof(struct sd_dif_tuple);
+ struct scsi_disk *sdkp;
+ struct bio *bio;
+ struct sd_dif_tuple *sdt;
+ unsigned int i, j, sectors, sector_sz;
+ u32 phys, virt;
+
+ sdkp = scsi_disk(scmd->request->rq_disk);
+
+ if (sdkp->protection_type == SD_DIF_TYPE3_PROTECTION || good_bytes == 0)
+ return;
+
+ sector_sz = scmd->device->sector_size;
+ sectors = good_bytes / sector_sz;
+
+ phys = scmd->request->sector & 0xffffffff;
+ if (sector_sz == 4096)
+ phys >>= 3;
+
+ __rq_for_each_bio(bio, scmd->request) {
+ struct bio_vec *iv;
+
+ virt = bio->bi_integrity->bip_sector & 0xffffffff;
+
+ bip_for_each_vec(iv, bio->bi_integrity, i) {
+ sdt = kmap_atomic(iv->bv_page, KM_USER0)
+ + iv->bv_offset;
+
+ for (j = 0 ; j < iv->bv_len ; j += tuple_sz, sdt++) {
+
+ if (sectors == 0) {
+ kunmap_atomic(sdt, KM_USER0);
+ return;
+ }
+
+ if (be32_to_cpu(sdt->ref_tag) != phys &&
+ sdt->app_tag != 0xffff)
+ sdt->ref_tag = 0xffffffff; /* Bad ref */
+ else
+ sdt->ref_tag = cpu_to_be32(virt);
+
+ virt++;
+ phys++;
+ sectors--;
+ }
+
+ kunmap_atomic(sdt, KM_USER0);
+ }
+ }
+}
+
diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
index 0fe031f003e..1bcf3c33d7f 100644
--- a/drivers/scsi/ses.c
+++ b/drivers/scsi/ses.c
@@ -345,14 +345,14 @@ static int ses_enclosure_find_by_addr(struct enclosure_device *edev,
return 0;
}
-#define VPD_INQUIRY_SIZE 512
+#define VPD_INQUIRY_SIZE 36
static void ses_match_to_enclosure(struct enclosure_device *edev,
struct scsi_device *sdev)
{
unsigned char *buf = kmalloc(VPD_INQUIRY_SIZE, GFP_KERNEL);
unsigned char *desc;
- int len;
+ u16 vpd_len;
struct efd efd = {
.addr = 0,
};
@@ -372,9 +372,19 @@ static void ses_match_to_enclosure(struct enclosure_device *edev,
VPD_INQUIRY_SIZE, NULL, SES_TIMEOUT, SES_RETRIES))
goto free;
- len = (buf[2] << 8) + buf[3];
+ vpd_len = (buf[2] << 8) + buf[3];
+ kfree(buf);
+ buf = kmalloc(vpd_len, GFP_KERNEL);
+ if (!buf)
+ return;
+ cmd[3] = vpd_len >> 8;
+ cmd[4] = vpd_len & 0xff;
+ if (scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf,
+ vpd_len, NULL, SES_TIMEOUT, SES_RETRIES))
+ goto free;
+
desc = buf + 4;
- while (desc < buf + len) {
+ while (desc < buf + vpd_len) {
enum scsi_protocol proto = desc[0] >> 4;
u8 code_set = desc[0] & 0x0f;
u8 piv = desc[1] & 0x80;
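For context on the two-step read above: an INQUIRY VPD page reply starts with a 4-byte header whose bytes 2-3 give the length of the data that follows, so a short first read is enough to size the full-length request. A small sketch of that computation (the helper name is illustrative):

#include <stdint.h>

/* hdr: first 4 bytes of an INQUIRY VPD page reply. */
static unsigned int vpd_page_body_len(const uint8_t *hdr)
{
        return (hdr[2] << 8) | hdr[3];  /* bytes following the header */
}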
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index d3b8ebb8377..3d36270a8b4 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -1747,7 +1747,7 @@ st_map_user_pages(struct scatterlist *sgl, const unsigned int max_pages,
*/
flush_dcache_page(pages[i]);
/* ?? Is locking needed? I don't think so */
- /* if (TestSetPageLocked(pages[i]))
+ /* if (!trylock_page(pages[i]))
goto out_unlock; */
}
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 4684cc716aa..c2bb53e3d94 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -17,7 +17,7 @@
Last modified: 18-JAN-1998 Richard Gooch <rgooch@atnf.csiro.au> Devfs support
*/
-static const char *verstr = "20080224";
+static const char *verstr = "20080504";
#include <linux/module.h>
@@ -631,7 +631,7 @@ static int cross_eof(struct scsi_tape * STp, int forward)
/* Flush the write buffer (never need to write if variable blocksize). */
static int st_flush_write_buffer(struct scsi_tape * STp)
{
- int offset, transfer, blks;
+ int transfer, blks;
int result;
unsigned char cmd[MAX_COMMAND_SIZE];
struct st_request *SRpnt;
@@ -644,14 +644,10 @@ static int st_flush_write_buffer(struct scsi_tape * STp)
result = 0;
if (STp->dirty == 1) {
- offset = (STp->buffer)->buffer_bytes;
- transfer = ((offset + STp->block_size - 1) /
- STp->block_size) * STp->block_size;
+ transfer = STp->buffer->buffer_bytes;
DEBC(printk(ST_DEB_MSG "%s: Flushing %d bytes.\n",
tape_name(STp), transfer));
- memset((STp->buffer)->b_data + offset, 0, transfer - offset);
-
memset(cmd, 0, MAX_COMMAND_SIZE);
cmd[0] = WRITE_6;
cmd[1] = 1;
@@ -1670,6 +1666,7 @@ st_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
if (undone <= do_count) {
/* Only data from this write is not written */
count += undone;
+ b_point -= undone;
do_count -= undone;
if (STp->block_size)
blks = (transfer - undone) / STp->block_size;
diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c
index f308a030882..3790906a77d 100644
--- a/drivers/scsi/stex.c
+++ b/drivers/scsi/stex.c
@@ -467,7 +467,7 @@ stex_slave_alloc(struct scsi_device *sdev)
/* Cheat: usually extracted from Inquiry data */
sdev->tagged_supported = 1;
- scsi_activate_tcq(sdev, sdev->host->can_queue);
+ scsi_activate_tcq(sdev, ST_CMD_PER_LUN);
return 0;
}
diff --git a/drivers/scsi/sun_esp.c b/drivers/scsi/sun_esp.c
index 2c87db98cdf..f9cf7015136 100644
--- a/drivers/scsi/sun_esp.c
+++ b/drivers/scsi/sun_esp.c
@@ -7,6 +7,7 @@
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/module.h>
+#include <linux/mm.h>
#include <linux/init.h>
#include <asm/irq.h>
diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.c b/drivers/scsi/sym53c8xx_2/sym_hipd.c
index 22a6aae7869..98df1651404 100644
--- a/drivers/scsi/sym53c8xx_2/sym_hipd.c
+++ b/drivers/scsi/sym53c8xx_2/sym_hipd.c
@@ -5741,6 +5741,8 @@ void sym_hcb_free(struct sym_hcb *np)
for (target = 0; target < SYM_CONF_MAX_TARGET ; target++) {
tp = &np->target[target];
+ if (tp->luntbl)
+ sym_mfree_dma(tp->luntbl, 256, "LUNTBL");
#if SYM_CONF_MAX_LUN > 1
kfree(tp->lunmp);
#endif
diff --git a/drivers/scsi/tmscsim.c b/drivers/scsi/tmscsim.c
index 5b04ddfed26..1723d71cbf3 100644
--- a/drivers/scsi/tmscsim.c
+++ b/drivers/scsi/tmscsim.c
@@ -452,7 +452,7 @@ static int dc390_pci_map (struct dc390_srb* pSRB)
/* TODO: error handling */
if (pSRB->SGcount != 1)
error = 1;
- DEBUG1(printk("%s(): Mapped sense buffer %p at %x\n", __FUNCTION__, pcmd->sense_buffer, cmdp->saved_dma_handle));
+ DEBUG1(printk("%s(): Mapped sense buffer %p at %x\n", __func__, pcmd->sense_buffer, cmdp->saved_dma_handle));
/* Map SG list */
} else if (scsi_sg_count(pcmd)) {
int nseg;
@@ -466,7 +466,7 @@ static int dc390_pci_map (struct dc390_srb* pSRB)
if (nseg < 0)
error = 1;
DEBUG1(printk("%s(): Mapped SG %p with %d (%d) elements\n",\
- __FUNCTION__, scsi_sglist(pcmd), nseg, scsi_sg_count(pcmd)));
+ __func__, scsi_sglist(pcmd), nseg, scsi_sg_count(pcmd)));
/* Map single segment */
} else
pSRB->SGcount = 0;
@@ -483,11 +483,11 @@ static void dc390_pci_unmap (struct dc390_srb* pSRB)
if (pSRB->SRBFlag) {
pci_unmap_sg(pdev, &pSRB->Segmentx, 1, DMA_FROM_DEVICE);
- DEBUG1(printk("%s(): Unmapped sense buffer at %x\n", __FUNCTION__, cmdp->saved_dma_handle));
+ DEBUG1(printk("%s(): Unmapped sense buffer at %x\n", __func__, cmdp->saved_dma_handle));
} else {
scsi_dma_unmap(pcmd);
DEBUG1(printk("%s(): Unmapped SG at %p with %d elements\n",
- __FUNCTION__, scsi_sglist(pcmd), scsi_sg_count(pcmd)));
+ __func__, scsi_sglist(pcmd), scsi_sg_count(pcmd)));
}
}
diff --git a/drivers/scsi/wd7000.c b/drivers/scsi/wd7000.c
index c975c01b3a0..d4c13561f4a 100644
--- a/drivers/scsi/wd7000.c
+++ b/drivers/scsi/wd7000.c
@@ -148,7 +148,7 @@
*
* 2002/10/04 - Alan Cox <alan@redhat.com>
*
- * Use dev_id for interrupts, kill __FUNCTION__ pasting
+ * Use dev_id for interrupts, kill __func__ pasting
* Add a lock for the scb pool, clean up all other cli/sti usage stuff
* Use the adapter lock for the other places we had the cli's
*
@@ -640,12 +640,12 @@ static int __init wd7000_setup(char *str)
(void) get_options(str, ARRAY_SIZE(ints), ints);
if (wd7000_card_num >= NUM_CONFIGS) {
- printk(KERN_ERR "%s: Too many \"wd7000=\" configurations in " "command line!\n", __FUNCTION__);
+ printk(KERN_ERR "%s: Too many \"wd7000=\" configurations in " "command line!\n", __func__);
return 0;
}
if ((ints[0] < 3) || (ints[0] > 5)) {
- printk(KERN_ERR "%s: Error in command line! " "Usage: wd7000=<IRQ>,<DMA>,IO>[,<BUS_ON>" "[,<BUS_OFF>]]\n", __FUNCTION__);
+ printk(KERN_ERR "%s: Error in command line! " "Usage: wd7000=<IRQ>,<DMA>,IO>[,<BUS_ON>" "[,<BUS_OFF>]]\n", __func__);
} else {
for (i = 0; i < NUM_IRQS; i++)
if (ints[1] == wd7000_irq[i])
@@ -1642,7 +1642,7 @@ static int wd7000_biosparam(struct scsi_device *sdev,
ip[2] = info[2];
if (info[0] == 255)
- printk(KERN_INFO "%s: current partition table is " "using extended translation.\n", __FUNCTION__);
+ printk(KERN_INFO "%s: current partition table is " "using extended translation.\n", __func__);
}
}
diff --git a/drivers/scsi/zalon.c b/drivers/scsi/zalon.c
index 4b5f908d35c..3c4a300494a 100644
--- a/drivers/scsi/zalon.c
+++ b/drivers/scsi/zalon.c
@@ -68,11 +68,11 @@ lasi_scsi_clock(void * hpa, int defaultclock)
if (status == PDC_RET_OK) {
clock = (int) pdc_result[16];
} else {
- printk(KERN_WARNING "%s: pdc_iodc_read returned %d\n", __FUNCTION__, status);
+ printk(KERN_WARNING "%s: pdc_iodc_read returned %d\n", __func__, status);
clock = defaultclock;
}
- printk(KERN_DEBUG "%s: SCSI clock %d\n", __FUNCTION__, clock);
+ printk(KERN_DEBUG "%s: SCSI clock %d\n", __func__, clock);
return clock;
}
#endif
@@ -108,13 +108,13 @@ zalon_probe(struct parisc_device *dev)
*/
dev->irq = gsc_alloc_irq(&gsc_irq);
- printk(KERN_INFO "%s: Zalon version %d, IRQ %d\n", __FUNCTION__,
+ printk(KERN_INFO "%s: Zalon version %d, IRQ %d\n", __func__,
zalon_vers, dev->irq);
__raw_writel(gsc_irq.txn_addr | gsc_irq.txn_data, zalon + IO_MODULE_EIM);
if (zalon_vers == 0)
- printk(KERN_WARNING "%s: Zalon 1.1 or earlier\n", __FUNCTION__);
+ printk(KERN_WARNING "%s: Zalon 1.1 or earlier\n", __func__);
memset(&device, 0, sizeof(struct ncr_device));