path: root/drivers/block/cciss.c
author    Linus Torvalds <torvalds@linux-foundation.org>  2010-08-10 15:22:42 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2010-08-10 15:22:42 -0700
commit  2f9e825d3e0e2b407ae8f082de5c00afcf7378fb (patch)
tree    f8b3ee40674ce4acd5508a0a0bf52a30904caf6c /drivers/block/cciss.c
parent  7ae0dea900b027cd90e8a3e14deca9a19e17638b (diff)
parent  de75d60d5ea235e6e09f4962ab22541ce0fe176a (diff)
Merge branch 'for-2.6.36' of git://git.kernel.dk/linux-2.6-block
* 'for-2.6.36' of git://git.kernel.dk/linux-2.6-block: (149 commits)
  block: make sure that REQ_* types are seen even with CONFIG_BLOCK=n
  xen-blkfront: fix missing out label
  blkdev: fix blkdev_issue_zeroout return value
  block: update request stacking methods to support discards
  block: fix missing export of blk_types.h
  writeback: fix bad _bh spinlock nesting
  drbd: revert "delay probes", feature is being re-implemented differently
  drbd: Initialize all members of sync_conf to their defaults [Bugz 315]
  drbd: Disable delay probes for the upcomming release
  writeback: cleanup bdi_register
  writeback: add new tracepoints
  writeback: remove unnecessary init_timer call
  writeback: optimize periodic bdi thread wakeups
  writeback: prevent unnecessary bdi threads wakeups
  writeback: move bdi threads exiting logic to the forker thread
  writeback: restructure bdi forker loop a little
  writeback: move last_active to bdi
  writeback: do not remove bdi from bdi_list
  writeback: simplify bdi code a little
  writeback: do not lose wake-ups in bdi threads
  ...

Fixed up pretty trivial conflicts in drivers/block/virtio_blk.c and drivers/scsi/scsi_error.c as per Jens.
Diffstat (limited to 'drivers/block/cciss.c')
-rw-r--r--  drivers/block/cciss.c | 2165
1 file changed, 1281 insertions(+), 884 deletions(-)
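
For readers skimming the diff below: a minimal, self-contained sketch (not part of this patch; constants and helper names are copied from the cciss_tag_* helpers it adds) of how the command tag in Header.Tag.lower is packed once the direct-lookup helpers are in place. Bits 1:0 stay reserved for controller error reporting, bit 4 (DIRECT_LOOKUP_BIT) marks a tag that carries a command-pool index, and the index itself lives from bit 5 (DIRECT_LOOKUP_SHIFT) upward, so shifting right by 5 recovers it regardless of the low-bit flags.

#include <stdint.h>
#include <stdio.h>

/* Constants copied from the helpers added by this patch. */
#define DIRECT_LOOKUP_BIT   0x10
#define DIRECT_LOOKUP_SHIFT 5
#define CCISS_ERROR_BITS    0x03

/* Pack a command-pool index the way cciss_set_tag_index() and
 * cciss_mark_tag_indexed() do for Header.Tag.lower. */
static uint32_t pack_tag(uint32_t cmdindex)
{
	uint32_t tag = 0;

	tag |= cmdindex << DIRECT_LOOKUP_SHIFT;	/* cciss_set_tag_index() */
	tag |= DIRECT_LOOKUP_BIT;		/* cciss_mark_tag_indexed() */
	return tag;
}

int main(void)
{
	/* Pretend the controller echoed the tag back with an error bit set. */
	uint32_t raw_tag = pack_tag(42) | 0x01;

	if (raw_tag & DIRECT_LOOKUP_BIT)	/* cciss_tag_contains_index() */
		printf("cmdindex = %u\n", raw_tag >> DIRECT_LOOKUP_SHIFT);
	return 0;
}

Compiled on its own this prints "cmdindex = 42", mirroring how the interrupt path can map a raw tag straight back to h->cmd_pool + i without scanning the queue.
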
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index e1e7143ca1e..31064df1370 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -56,16 +56,14 @@
#include <linux/kthread.h>
#define CCISS_DRIVER_VERSION(maj,min,submin) ((maj<<16)|(min<<8)|(submin))
-#define DRIVER_NAME "HP CISS Driver (v 3.6.20)"
-#define DRIVER_VERSION CCISS_DRIVER_VERSION(3, 6, 20)
+#define DRIVER_NAME "HP CISS Driver (v 3.6.26)"
+#define DRIVER_VERSION CCISS_DRIVER_VERSION(3, 6, 26)
/* Embedded module documentation macros - see modules.h */
MODULE_AUTHOR("Hewlett-Packard Company");
MODULE_DESCRIPTION("Driver for HP Smart Array Controllers");
-MODULE_SUPPORTED_DEVICE("HP SA5i SA5i+ SA532 SA5300 SA5312 SA641 SA642 SA6400"
- " SA6i P600 P800 P400 P400i E200 E200i E500 P700m"
- " Smart Array G2 Series SAS/SATA Controllers");
-MODULE_VERSION("3.6.20");
+MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
+MODULE_VERSION("3.6.26");
MODULE_LICENSE("GPL");
static int cciss_allow_hpsa;
@@ -107,6 +105,11 @@ static const struct pci_device_id cciss_pci_device_id[] = {
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324A},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324B},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3250},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3251},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3252},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3253},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3254},
{0,}
};
@@ -146,6 +149,11 @@ static struct board_type products[] = {
{0x3249103C, "Smart Array P812", &SA5_access},
{0x324A103C, "Smart Array P712m", &SA5_access},
{0x324B103C, "Smart Array P711m", &SA5_access},
+ {0x3250103C, "Smart Array", &SA5_access},
+ {0x3251103C, "Smart Array", &SA5_access},
+ {0x3252103C, "Smart Array", &SA5_access},
+ {0x3253103C, "Smart Array", &SA5_access},
+ {0x3254103C, "Smart Array", &SA5_access},
};
/* How long to wait (in milliseconds) for board to go into simple mode */
@@ -167,9 +175,13 @@ static DEFINE_MUTEX(scan_mutex);
static LIST_HEAD(scan_q);
static void do_cciss_request(struct request_queue *q);
-static irqreturn_t do_cciss_intr(int irq, void *dev_id);
+static irqreturn_t do_cciss_intx(int irq, void *dev_id);
+static irqreturn_t do_cciss_msix_intr(int irq, void *dev_id);
static int cciss_open(struct block_device *bdev, fmode_t mode);
+static int cciss_unlocked_open(struct block_device *bdev, fmode_t mode);
static int cciss_release(struct gendisk *disk, fmode_t mode);
+static int do_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, unsigned long arg);
static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg);
static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo);
@@ -179,25 +191,23 @@ static int rebuild_lun_table(ctlr_info_t *h, int first_time, int via_ioctl);
static int deregister_disk(ctlr_info_t *h, int drv_index,
int clear_all, int via_ioctl);
-static void cciss_read_capacity(int ctlr, int logvol,
+static void cciss_read_capacity(ctlr_info_t *h, int logvol,
sector_t *total_size, unsigned int *block_size);
-static void cciss_read_capacity_16(int ctlr, int logvol,
+static void cciss_read_capacity_16(ctlr_info_t *h, int logvol,
sector_t *total_size, unsigned int *block_size);
-static void cciss_geometry_inquiry(int ctlr, int logvol,
+static void cciss_geometry_inquiry(ctlr_info_t *h, int logvol,
sector_t total_size,
unsigned int block_size, InquiryData_struct *inq_buff,
drive_info_struct *drv);
-static void __devinit cciss_interrupt_mode(ctlr_info_t *, struct pci_dev *,
- __u32);
+static void __devinit cciss_interrupt_mode(ctlr_info_t *);
static void start_io(ctlr_info_t *h);
-static int sendcmd_withirq(__u8 cmd, int ctlr, void *buff, size_t size,
+static int sendcmd_withirq(ctlr_info_t *h, __u8 cmd, void *buff, size_t size,
__u8 page_code, unsigned char scsi3addr[],
int cmd_type);
static int sendcmd_withirq_core(ctlr_info_t *h, CommandList_struct *c,
int attempt_retry);
static int process_sendcmd_error(ctlr_info_t *h, CommandList_struct *c);
-static void fail_all_cmds(unsigned long ctlr);
static int add_to_scan_list(struct ctlr_info *h);
static int scan_thread(void *data);
static int check_for_unit_attention(ctlr_info_t *h, CommandList_struct *c);
@@ -205,11 +215,23 @@ static void cciss_hba_release(struct device *dev);
static void cciss_device_release(struct device *dev);
static void cciss_free_gendisk(ctlr_info_t *h, int drv_index);
static void cciss_free_drive_info(ctlr_info_t *h, int drv_index);
+static inline u32 next_command(ctlr_info_t *h);
+static int __devinit cciss_find_cfg_addrs(struct pci_dev *pdev,
+ void __iomem *vaddr, u32 *cfg_base_addr, u64 *cfg_base_addr_index,
+ u64 *cfg_offset);
+static int __devinit cciss_pci_find_memory_BAR(struct pci_dev *pdev,
+ unsigned long *memory_bar);
+
+
+/* performant mode helper functions */
+static void calc_bucket_map(int *bucket, int num_buckets, int nsgs,
+ int *bucket_map);
+static void cciss_put_controller_into_performant_mode(ctlr_info_t *h);
#ifdef CONFIG_PROC_FS
-static void cciss_procinit(int i);
+static void cciss_procinit(ctlr_info_t *h);
#else
-static void cciss_procinit(int i)
+static void cciss_procinit(ctlr_info_t *h)
{
}
#endif /* CONFIG_PROC_FS */
@@ -221,9 +243,9 @@ static int cciss_compat_ioctl(struct block_device *, fmode_t,
static const struct block_device_operations cciss_fops = {
.owner = THIS_MODULE,
- .open = cciss_open,
+ .open = cciss_unlocked_open,
.release = cciss_release,
- .locked_ioctl = cciss_ioctl,
+ .ioctl = do_ioctl,
.getgeo = cciss_getgeo,
#ifdef CONFIG_COMPAT
.compat_ioctl = cciss_compat_ioctl,
@@ -231,6 +253,16 @@ static const struct block_device_operations cciss_fops = {
.revalidate_disk = cciss_revalidate,
};
+/* set_performant_mode: Modify the tag for cciss performant
+ * set bit 0 for pull model, bits 3-1 for block fetch
+ * register number
+ */
+static void set_performant_mode(ctlr_info_t *h, CommandList_struct *c)
+{
+ if (likely(h->transMethod == CFGTBL_Trans_Performant))
+ c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
+}
+
/*
* Enqueuing and dequeuing functions for cmdlists.
*/
@@ -257,6 +289,18 @@ static inline void removeQ(CommandList_struct *c)
hlist_del_init(&c->list);
}
+static void enqueue_cmd_and_start_io(ctlr_info_t *h,
+ CommandList_struct *c)
+{
+ unsigned long flags;
+ set_performant_mode(h, c);
+ spin_lock_irqsave(&h->lock, flags);
+ addQ(&h->reqQ, c);
+ h->Qdepth++;
+ start_io(h);
+ spin_unlock_irqrestore(&h->lock, flags);
+}
+
static void cciss_free_sg_chain_blocks(SGDescriptor_struct **cmd_sg_list,
int nr_cmds)
{
@@ -366,32 +410,31 @@ static void cciss_seq_show_header(struct seq_file *seq)
h->product_name,
(unsigned long)h->board_id,
h->firm_ver[0], h->firm_ver[1], h->firm_ver[2],
- h->firm_ver[3], (unsigned int)h->intr[SIMPLE_MODE_INT],
+ h->firm_ver[3], (unsigned int)h->intr[PERF_MODE_INT],
h->num_luns,
h->Qdepth, h->commands_outstanding,
h->maxQsinceinit, h->max_outstanding, h->maxSG);
#ifdef CONFIG_CISS_SCSI_TAPE
- cciss_seq_tape_report(seq, h->ctlr);
+ cciss_seq_tape_report(seq, h);
#endif /* CONFIG_CISS_SCSI_TAPE */
}
static void *cciss_seq_start(struct seq_file *seq, loff_t *pos)
{
ctlr_info_t *h = seq->private;
- unsigned ctlr = h->ctlr;
unsigned long flags;
/* prevent displaying bogus info during configuration
* or deconfiguration of a logical volume
*/
- spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
+ spin_lock_irqsave(&h->lock, flags);
if (h->busy_configuring) {
- spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
+ spin_unlock_irqrestore(&h->lock, flags);
return ERR_PTR(-EBUSY);
}
h->busy_configuring = 1;
- spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
+ spin_unlock_irqrestore(&h->lock, flags);
if (*pos == 0)
cciss_seq_show_header(seq);
@@ -499,7 +542,7 @@ cciss_proc_write(struct file *file, const char __user *buf,
struct seq_file *seq = file->private_data;
ctlr_info_t *h = seq->private;
- err = cciss_engage_scsi(h->ctlr);
+ err = cciss_engage_scsi(h);
if (err == 0)
err = length;
} else
@@ -522,7 +565,7 @@ static const struct file_operations cciss_proc_fops = {
.write = cciss_proc_write,
};
-static void __devinit cciss_procinit(int i)
+static void __devinit cciss_procinit(ctlr_info_t *h)
{
struct proc_dir_entry *pde;
@@ -530,9 +573,9 @@ static void __devinit cciss_procinit(int i)
proc_cciss = proc_mkdir("driver/cciss", NULL);
if (!proc_cciss)
return;
- pde = proc_create_data(hba[i]->devname, S_IWUSR | S_IRUSR | S_IRGRP |
+ pde = proc_create_data(h->devname, S_IWUSR | S_IRUSR | S_IRGRP |
S_IROTH, proc_cciss,
- &cciss_proc_fops, hba[i]);
+ &cciss_proc_fops, h);
}
#endif /* CONFIG_PROC_FS */
@@ -565,12 +608,12 @@ static ssize_t dev_show_unique_id(struct device *dev,
unsigned long flags;
int ret = 0;
- spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
+ spin_lock_irqsave(&h->lock, flags);
if (h->busy_configuring)
ret = -EBUSY;
else
memcpy(sn, drv->serial_no, sizeof(sn));
- spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
+ spin_unlock_irqrestore(&h->lock, flags);
if (ret)
return ret;
@@ -595,12 +638,12 @@ static ssize_t dev_show_vendor(struct device *dev,
unsigned long flags;
int ret = 0;
- spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
+ spin_lock_irqsave(&h->lock, flags);
if (h->busy_configuring)
ret = -EBUSY;
else
memcpy(vendor, drv->vendor, VENDOR_LEN + 1);
- spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
+ spin_unlock_irqrestore(&h->lock, flags);
if (ret)
return ret;
@@ -619,12 +662,12 @@ static ssize_t dev_show_model(struct device *dev,
unsigned long flags;
int ret = 0;
- spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
+ spin_lock_irqsave(&h->lock, flags);
if (h->busy_configuring)
ret = -EBUSY;
else
memcpy(model, drv->model, MODEL_LEN + 1);
- spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
+ spin_unlock_irqrestore(&h->lock, flags);
if (ret)
return ret;
@@ -643,12 +686,12 @@ static ssize_t dev_show_rev(struct device *dev,
unsigned long flags;
int ret = 0;
- spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
+ spin_lock_irqsave(&h->lock, flags);
if (h->busy_configuring)
ret = -EBUSY;
else
memcpy(rev, drv->rev, REV_LEN + 1);
- spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
+ spin_unlock_irqrestore(&h->lock, flags);
if (ret)
return ret;
@@ -665,17 +708,17 @@ static ssize_t cciss_show_lunid(struct device *dev,
unsigned long flags;
unsigned char lunid[8];
- spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
+ spin_lock_irqsave(&h->lock, flags);
if (h->busy_configuring) {
- spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
+ spin_unlock_irqrestore(&h->lock, flags);
return -EBUSY;
}
if (!drv->heads) {
- spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
+ spin_unlock_irqrestore(&h->lock, flags);
return -ENOTTY;
}
memcpy(lunid, drv->LunID, sizeof(lunid));
- spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
+ spin_unlock_irqrestore(&h->lock, flags);
return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
lunid[0], lunid[1], lunid[2], lunid[3],
lunid[4], lunid[5], lunid[6], lunid[7]);
@@ -690,13 +733,13 @@ static ssize_t cciss_show_raid_level(struct device *dev,
int raid;
unsigned long flags;
- spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
+ spin_lock_irqsave(&h->lock, flags);
if (h->busy_configuring) {
- spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
+ spin_unlock_irqrestore(&h->lock, flags);
return -EBUSY;
}
raid = drv->raid_level;
- spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
+ spin_unlock_irqrestore(&h->lock, flags);
if (raid < 0 || raid > RAID_UNKNOWN)
raid = RAID_UNKNOWN;
@@ -713,13 +756,13 @@ static ssize_t cciss_show_usage_count(struct device *dev,
unsigned long flags;
int count;
- spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
+ spin_lock_irqsave(&h->lock, flags);
if (h->busy_configuring) {
- spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
+ spin_unlock_irqrestore(&h->lock, flags);
return -EBUSY;
}
count = drv->usage_count;
- spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
+ spin_unlock_irqrestore(&h->lock, flags);
return snprintf(buf, 20, "%d\n", count);
}
static DEVICE_ATTR(usage_count, S_IRUGO, cciss_show_usage_count, NULL);
@@ -864,60 +907,70 @@ static void cciss_destroy_ld_sysfs_entry(struct ctlr_info *h, int drv_index,
/*
* For operations that cannot sleep, a command block is allocated at init,
* and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
- * which ones are free or in use. For operations that can wait for kmalloc
- * to possible sleep, this routine can be called with get_from_pool set to 0.
- * cmd_free() MUST be called with a got_from_pool set to 0 if cmd_alloc was.
+ * which ones are free or in use.
*/
-static CommandList_struct *cmd_alloc(ctlr_info_t *h, int get_from_pool)
+static CommandList_struct *cmd_alloc(ctlr_info_t *h)
{
CommandList_struct *c;
int i;
u64bit temp64;
dma_addr_t cmd_dma_handle, err_dma_handle;
- if (!get_from_pool) {
- c = (CommandList_struct *) pci_alloc_consistent(h->pdev,
- sizeof(CommandList_struct), &cmd_dma_handle);
- if (c == NULL)
+ do {
+ i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds);
+ if (i == h->nr_cmds)
return NULL;
- memset(c, 0, sizeof(CommandList_struct));
+ } while (test_and_set_bit(i & (BITS_PER_LONG - 1),
+ h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0);
+ c = h->cmd_pool + i;
+ memset(c, 0, sizeof(CommandList_struct));
+ cmd_dma_handle = h->cmd_pool_dhandle + i * sizeof(CommandList_struct);
+ c->err_info = h->errinfo_pool + i;
+ memset(c->err_info, 0, sizeof(ErrorInfo_struct));
+ err_dma_handle = h->errinfo_pool_dhandle
+ + i * sizeof(ErrorInfo_struct);
+ h->nr_allocs++;
- c->cmdindex = -1;
+ c->cmdindex = i;
- c->err_info = (ErrorInfo_struct *)
- pci_alloc_consistent(h->pdev, sizeof(ErrorInfo_struct),
- &err_dma_handle);
+ INIT_HLIST_NODE(&c->list);
+ c->busaddr = (__u32) cmd_dma_handle;
+ temp64.val = (__u64) err_dma_handle;
+ c->ErrDesc.Addr.lower = temp64.val32.lower;
+ c->ErrDesc.Addr.upper = temp64.val32.upper;
+ c->ErrDesc.Len = sizeof(ErrorInfo_struct);
- if (c->err_info == NULL) {
- pci_free_consistent(h->pdev,
- sizeof(CommandList_struct), c, cmd_dma_handle);
- return NULL;
- }
- memset(c->err_info, 0, sizeof(ErrorInfo_struct));
- } else { /* get it out of the controllers pool */
-
- do {
- i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds);
- if (i == h->nr_cmds)
- return NULL;
- } while (test_and_set_bit
- (i & (BITS_PER_LONG - 1),
- h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0);
-#ifdef CCISS_DEBUG
- printk(KERN_DEBUG "cciss: using command buffer %d\n", i);
-#endif
- c = h->cmd_pool + i;
- memset(c, 0, sizeof(CommandList_struct));
- cmd_dma_handle = h->cmd_pool_dhandle
- + i * sizeof(CommandList_struct);
- c->err_info = h->errinfo_pool + i;
- memset(c->err_info, 0, sizeof(ErrorInfo_struct));
- err_dma_handle = h->errinfo_pool_dhandle
- + i * sizeof(ErrorInfo_struct);
- h->nr_allocs++;
+ c->ctlr = h->ctlr;
+ return c;
+}
- c->cmdindex = i;
+/* allocate a command using pci_alloc_consistent, used for ioctls,
+ * etc., not for the main i/o path.
+ */
+static CommandList_struct *cmd_special_alloc(ctlr_info_t *h)
+{
+ CommandList_struct *c;
+ u64bit temp64;
+ dma_addr_t cmd_dma_handle, err_dma_handle;
+
+ c = (CommandList_struct *) pci_alloc_consistent(h->pdev,
+ sizeof(CommandList_struct), &cmd_dma_handle);
+ if (c == NULL)
+ return NULL;
+ memset(c, 0, sizeof(CommandList_struct));
+
+ c->cmdindex = -1;
+
+ c->err_info = (ErrorInfo_struct *)
+ pci_alloc_consistent(h->pdev, sizeof(ErrorInfo_struct),
+ &err_dma_handle);
+
+ if (c->err_info == NULL) {
+ pci_free_consistent(h->pdev,
+ sizeof(CommandList_struct), c, cmd_dma_handle);
+ return NULL;
}
+ memset(c->err_info, 0, sizeof(ErrorInfo_struct));
INIT_HLIST_NODE(&c->list);
c->busaddr = (__u32) cmd_dma_handle;
@@ -930,27 +983,26 @@ static CommandList_struct *cmd_alloc(ctlr_info_t *h, int get_from_pool)
return c;
}
-/*
- * Frees a command block that was previously allocated with cmd_alloc().
- */
-static void cmd_free(ctlr_info_t *h, CommandList_struct *c, int got_from_pool)
+static void cmd_free(ctlr_info_t *h, CommandList_struct *c)
{
int i;
+
+ i = c - h->cmd_pool;
+ clear_bit(i & (BITS_PER_LONG - 1),
+ h->cmd_pool_bits + (i / BITS_PER_LONG));
+ h->nr_frees++;
+}
+
+static void cmd_special_free(ctlr_info_t *h, CommandList_struct *c)
+{
u64bit temp64;
- if (!got_from_pool) {
- temp64.val32.lower = c->ErrDesc.Addr.lower;
- temp64.val32.upper = c->ErrDesc.Addr.upper;
- pci_free_consistent(h->pdev, sizeof(ErrorInfo_struct),
- c->err_info, (dma_addr_t) temp64.val);
- pci_free_consistent(h->pdev, sizeof(CommandList_struct),
- c, (dma_addr_t) c->busaddr);
- } else {
- i = c - h->cmd_pool;
- clear_bit(i & (BITS_PER_LONG - 1),
- h->cmd_pool_bits + (i / BITS_PER_LONG));
- h->nr_frees++;
- }
+ temp64.val32.lower = c->ErrDesc.Addr.lower;
+ temp64.val32.upper = c->ErrDesc.Addr.upper;
+ pci_free_consistent(h->pdev, sizeof(ErrorInfo_struct),
+ c->err_info, (dma_addr_t) temp64.val);
+ pci_free_consistent(h->pdev, sizeof(CommandList_struct),
+ c, (dma_addr_t) c->busaddr);
}
static inline ctlr_info_t *get_host(struct gendisk *disk)
@@ -968,13 +1020,10 @@ static inline drive_info_struct *get_drv(struct gendisk *disk)
*/
static int cciss_open(struct block_device *bdev, fmode_t mode)
{
- ctlr_info_t *host = get_host(bdev->bd_disk);
+ ctlr_info_t *h = get_host(bdev->bd_disk);
drive_info_struct *drv = get_drv(bdev->bd_disk);
-#ifdef CCISS_DEBUG
- printk(KERN_DEBUG "cciss_open %s\n", bdev->bd_disk->disk_name);
-#endif /* CCISS_DEBUG */
-
+ dev_dbg(&h->pdev->dev, "cciss_open %s\n", bdev->bd_disk->disk_name);
if (drv->busy_configuring)
return -EBUSY;
/*
@@ -1000,29 +1049,39 @@ static int cciss_open(struct block_device *bdev, fmode_t mode)
return -EPERM;
}
drv->usage_count++;
- host->usage_count++;
+ h->usage_count++;
return 0;
}
+static int cciss_unlocked_open(struct block_device *bdev, fmode_t mode)
+{
+ int ret;
+
+ lock_kernel();
+ ret = cciss_open(bdev, mode);
+ unlock_kernel();
+
+ return ret;
+}
+
/*
* Close. Sync first.
*/
static int cciss_release(struct gendisk *disk, fmode_t mode)
{
- ctlr_info_t *host = get_host(disk);
- drive_info_struct *drv = get_drv(disk);
-
-#ifdef CCISS_DEBUG
- printk(KERN_DEBUG "cciss_release %s\n", disk->disk_name);
-#endif /* CCISS_DEBUG */
+ ctlr_info_t *h;
+ drive_info_struct *drv;
+ lock_kernel();
+ h = get_host(disk);
+ drv = get_drv(disk);
+ dev_dbg(&h->pdev->dev, "cciss_release %s\n", disk->disk_name);
drv->usage_count--;
- host->usage_count--;
+ h->usage_count--;
+ unlock_kernel();
return 0;
}
-#ifdef CONFIG_COMPAT
-
static int do_ioctl(struct block_device *bdev, fmode_t mode,
unsigned cmd, unsigned long arg)
{
@@ -1033,6 +1092,8 @@ static int do_ioctl(struct block_device *bdev, fmode_t mode,
return ret;
}
+#ifdef CONFIG_COMPAT
+
static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
unsigned cmd, unsigned long arg);
static int cciss_ioctl32_big_passthru(struct block_device *bdev, fmode_t mode,
@@ -1163,11 +1224,11 @@ static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo)
return 0;
}
-static void check_ioctl_unit_attention(ctlr_info_t *host, CommandList_struct *c)
+static void check_ioctl_unit_attention(ctlr_info_t *h, CommandList_struct *c)
{
if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION)
- (void)check_for_unit_attention(host, c);
+ (void)check_for_unit_attention(h, c);
}
/*
* ioctl
@@ -1176,15 +1237,12 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
struct gendisk *disk = bdev->bd_disk;
- ctlr_info_t *host = get_host(disk);
+ ctlr_info_t *h = get_host(disk);
drive_info_struct *drv = get_drv(disk);
- int ctlr = host->ctlr;
void __user *argp = (void __user *)arg;
-#ifdef CCISS_DEBUG
- printk(KERN_DEBUG "cciss_ioctl: Called with cmd=%x %lx\n", cmd, arg);
-#endif /* CCISS_DEBUG */
-
+ dev_dbg(&h->pdev->dev, "cciss_ioctl: Called with cmd=%x %lx\n",
+ cmd, arg);
switch (cmd) {
case CCISS_GETPCIINFO:
{
@@ -1192,10 +1250,10 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
if (!arg)
return -EINVAL;
- pciinfo.domain = pci_domain_nr(host->pdev->bus);
- pciinfo.bus = host->pdev->bus->number;
- pciinfo.dev_fn = host->pdev->devfn;
- pciinfo.board_id = host->board_id;
+ pciinfo.domain = pci_domain_nr(h->pdev->bus);
+ pciinfo.bus = h->pdev->bus->number;
+ pciinfo.dev_fn = h->pdev->devfn;
+ pciinfo.board_id = h->board_id;
if (copy_to_user
(argp, &pciinfo, sizeof(cciss_pci_info_struct)))
return -EFAULT;
@@ -1207,9 +1265,9 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
if (!arg)
return -EINVAL;
intinfo.delay =
- readl(&host->cfgtable->HostWrite.CoalIntDelay);
+ readl(&h->cfgtable->HostWrite.CoalIntDelay);
intinfo.count =
- readl(&host->cfgtable->HostWrite.CoalIntCount);
+ readl(&h->cfgtable->HostWrite.CoalIntCount);
if (copy_to_user
(argp, &intinfo, sizeof(cciss_coalint_struct)))
return -EFAULT;
@@ -1229,26 +1287,23 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
(&intinfo, argp, sizeof(cciss_coalint_struct)))
return -EFAULT;
if ((intinfo.delay == 0) && (intinfo.count == 0))
- {
-// printk("cciss_ioctl: delay and count cannot be 0\n");
return -EINVAL;
- }
- spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
+ spin_lock_irqsave(&h->lock, flags);
/* Update the field, and then ring the doorbell */
writel(intinfo.delay,
- &(host->cfgtable->HostWrite.CoalIntDelay));
+ &(h->cfgtable->HostWrite.CoalIntDelay));
writel(intinfo.count,
- &(host->cfgtable->HostWrite.CoalIntCount));
- writel(CFGTBL_ChangeReq, host->vaddr + SA5_DOORBELL);
+ &(h->cfgtable->HostWrite.CoalIntCount));
+ writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) {
- if (!(readl(host->vaddr + SA5_DOORBELL)
+ if (!(readl(h->vaddr + SA5_DOORBELL)
& CFGTBL_ChangeReq))
break;
/* delay and try again */
udelay(1000);
}
- spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
+ spin_unlock_irqrestore(&h->lock, flags);
if (i >= MAX_IOCTL_CONFIG_WAIT)
return -EAGAIN;
return 0;
@@ -1262,7 +1317,7 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
return -EINVAL;
for (i = 0; i < 16; i++)
NodeName[i] =
- readb(&host->cfgtable->ServerName[i]);
+ readb(&h->cfgtable->ServerName[i]);
if (copy_to_user(argp, NodeName, sizeof(NodeName_type)))
return -EFAULT;
return 0;
@@ -1282,23 +1337,23 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
(NodeName, argp, sizeof(NodeName_type)))
return -EFAULT;
- spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
+ spin_lock_irqsave(&h->lock, flags);
/* Update the field, and then ring the doorbell */
for (i = 0; i < 16; i++)
writeb(NodeName[i],
- &host->cfgtable->ServerName[i]);
+ &h->cfgtable->ServerName[i]);
- writel(CFGTBL_ChangeReq, host->vaddr + SA5_DOORBELL);
+ writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) {
- if (!(readl(host->vaddr + SA5_DOORBELL)
+ if (!(readl(h->vaddr + SA5_DOORBELL)
& CFGTBL_ChangeReq))
break;
/* delay and try again */
udelay(1000);
}
- spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
+ spin_unlock_irqrestore(&h->lock, flags);
if (i >= MAX_IOCTL_CONFIG_WAIT)
return -EAGAIN;
return 0;
@@ -1310,7 +1365,7 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
if (!arg)
return -EINVAL;
- heartbeat = readl(&host->cfgtable->HeartBeat);
+ heartbeat = readl(&h->cfgtable->HeartBeat);
if (copy_to_user
(argp, &heartbeat, sizeof(Heartbeat_type)))
return -EFAULT;
@@ -1322,7 +1377,7 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
if (!arg)
return -EINVAL;
- BusTypes = readl(&host->cfgtable->BusTypes);
+ BusTypes = readl(&h->cfgtable->BusTypes);
if (copy_to_user
(argp, &BusTypes, sizeof(BusTypes_type)))
return -EFAULT;
@@ -1334,7 +1389,7 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
if (!arg)
return -EINVAL;
- memcpy(firmware, host->firm_ver, 4);
+ memcpy(firmware, h->firm_ver, 4);
if (copy_to_user
(argp, firmware, sizeof(FirmwareVer_type)))
@@ -1357,7 +1412,7 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
case CCISS_DEREGDISK:
case CCISS_REGNEWD:
case CCISS_REVALIDVOLS:
- return rebuild_lun_table(host, 0, 1);
+ return rebuild_lun_table(h, 0, 1);
case CCISS_GETLUNINFO:{
LogvolInfo_struct luninfo;
@@ -1377,7 +1432,6 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
CommandList_struct *c;
char *buff = NULL;
u64bit temp64;
- unsigned long flags;
DECLARE_COMPLETION_ONSTACK(wait);
if (!arg)
@@ -1413,7 +1467,8 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
} else {
memset(buff, 0, iocommand.buf_size);
}
- if ((c = cmd_alloc(host, 0)) == NULL) {
+ c = cmd_special_alloc(h);
+ if (!c) {
kfree(buff);
return -ENOMEM;
}
@@ -1439,7 +1494,7 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
/* Fill in the scatter gather information */
if (iocommand.buf_size > 0) {
- temp64.val = pci_map_single(host->pdev, buff,
+ temp64.val = pci_map_single(h->pdev, buff,
iocommand.buf_size,
PCI_DMA_BIDIRECTIONAL);
c->SG[0].Addr.lower = temp64.val32.lower;
@@ -1449,30 +1504,24 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
}
c->waiting = &wait;
- /* Put the request on the tail of the request queue */
- spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
- addQ(&host->reqQ, c);
- host->Qdepth++;
- start_io(host);
- spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
-
+ enqueue_cmd_and_start_io(h, c);
wait_for_completion(&wait);
/* unlock the buffers from DMA */
temp64.val32.lower = c->SG[0].Addr.lower;
temp64.val32.upper = c->SG[0].Addr.upper;
- pci_unmap_single(host->pdev, (dma_addr_t) temp64.val,
+ pci_unmap_single(h->pdev, (dma_addr_t) temp64.val,
iocommand.buf_size,
PCI_DMA_BIDIRECTIONAL);
- check_ioctl_unit_attention(host, c);
+ check_ioctl_unit_attention(h, c);
/* Copy the error information out */
iocommand.error_info = *(c->err_info);
if (copy_to_user
(argp, &iocommand, sizeof(IOCTL_Command_struct))) {
kfree(buff);
- cmd_free(host, c, 0);
+ cmd_special_free(h, c);
return -EFAULT;
}
@@ -1481,12 +1530,12 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
if (copy_to_user
(iocommand.buf, buff, iocommand.buf_size)) {
kfree(buff);
- cmd_free(host, c, 0);
+ cmd_special_free(h, c);
return -EFAULT;
}
}
kfree(buff);
- cmd_free(host, c, 0);
+ cmd_special_free(h, c);
return 0;
}
case CCISS_BIG_PASSTHRU:{
@@ -1495,7 +1544,6 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
unsigned char **buff = NULL;
int *buff_size = NULL;
u64bit temp64;
- unsigned long flags;
BYTE sg_used = 0;
int status = 0;
int i;
@@ -1569,7 +1617,8 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
data_ptr += sz;
sg_used++;
}
- if ((c = cmd_alloc(host, 0)) == NULL) {
+ c = cmd_special_alloc(h);
+ if (!c) {
status = -ENOMEM;
goto cleanup1;
}
@@ -1590,7 +1639,7 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
if (ioc->buf_size > 0) {
for (i = 0; i < sg_used; i++) {
temp64.val =
- pci_map_single(host->pdev, buff[i],
+ pci_map_single(h->pdev, buff[i],
buff_size[i],
PCI_DMA_BIDIRECTIONAL);
c->SG[i].Addr.lower =
@@ -1602,26 +1651,21 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
}
}
c->waiting = &wait;
- /* Put the request on the tail of the request queue */
- spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
- addQ(&host->reqQ, c);
- host->Qdepth++;
- start_io(host);
- spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
+ enqueue_cmd_and_start_io(h, c);
wait_for_completion(&wait);
/* unlock the buffers from DMA */
for (i = 0; i < sg_used; i++) {
temp64.val32.lower = c->SG[i].Addr.lower;
temp64.val32.upper = c->SG[i].Addr.upper;
- pci_unmap_single(host->pdev,
+ pci_unmap_single(h->pdev,
(dma_addr_t) temp64.val, buff_size[i],
PCI_DMA_BIDIRECTIONAL);
}
- check_ioctl_unit_attention(host, c);
+ check_ioctl_unit_attention(h, c);
/* Copy the error information out */
ioc->error_info = *(c->err_info);
if (copy_to_user(argp, ioc, sizeof(*ioc))) {
- cmd_free(host, c, 0);
+ cmd_special_free(h, c);
status = -EFAULT;
goto cleanup1;
}
@@ -1631,14 +1675,14 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
for (i = 0; i < sg_used; i++) {
if (copy_to_user
(ptr, buff[i], buff_size[i])) {
- cmd_free(host, c, 0);
+ cmd_special_free(h, c);
status = -EFAULT;
goto cleanup1;
}
ptr += buff_size[i];
}
}
- cmd_free(host, c, 0);
+ cmd_special_free(h, c);
status = 0;
cleanup1:
if (buff) {
@@ -1726,26 +1770,26 @@ static void cciss_check_queues(ctlr_info_t *h)
static void cciss_softirq_done(struct request *rq)
{
- CommandList_struct *cmd = rq->completion_data;
- ctlr_info_t *h = hba[cmd->ctlr];
- SGDescriptor_struct *curr_sg = cmd->SG;
- unsigned long flags;
+ CommandList_struct *c = rq->completion_data;
+ ctlr_info_t *h = hba[c->ctlr];
+ SGDescriptor_struct *curr_sg = c->SG;
u64bit temp64;
+ unsigned long flags;
int i, ddir;
int sg_index = 0;
- if (cmd->Request.Type.Direction == XFER_READ)
+ if (c->Request.Type.Direction == XFER_READ)
ddir = PCI_DMA_FROMDEVICE;
else
ddir = PCI_DMA_TODEVICE;
/* command did not need to be retried */
/* unmap the DMA mapping for all the scatter gather elements */
- for (i = 0; i < cmd->Header.SGList; i++) {
+ for (i = 0; i < c->Header.SGList; i++) {
if (curr_sg[sg_index].Ext == CCISS_SG_CHAIN) {
- cciss_unmap_sg_chain_block(h, cmd);
+ cciss_unmap_sg_chain_block(h, c);
/* Point to the next block */
- curr_sg = h->cmd_sg_list[cmd->cmdindex];
+ curr_sg = h->cmd_sg_list[c->cmdindex];
sg_index = 0;
}
temp64.val32.lower = curr_sg[sg_index].Addr.lower;
@@ -1755,18 +1799,16 @@ static void cciss_softirq_done(struct request *rq)
++sg_index;
}
-#ifdef CCISS_DEBUG
- printk("Done with %p\n", rq);
-#endif /* CCISS_DEBUG */
+ dev_dbg(&h->pdev->dev, "Done with %p\n", rq);
/* set the residual count for pc requests */
- if (blk_pc_request(rq))
- rq->resid_len = cmd->err_info->ResidualCnt;
+ if (rq->cmd_type == REQ_TYPE_BLOCK_PC)
+ rq->resid_len = c->err_info->ResidualCnt;
blk_end_request_all(rq, (rq->errors == 0) ? 0 : -EIO);
spin_lock_irqsave(&h->lock, flags);
- cmd_free(h, cmd, 1);
+ cmd_free(h, c);
cciss_check_queues(h);
spin_unlock_irqrestore(&h->lock, flags);
}
@@ -1782,7 +1824,7 @@ static inline void log_unit_to_scsi3addr(ctlr_info_t *h,
* via the inquiry page 0. Model, vendor, and rev are set to empty strings if
* they cannot be read.
*/
-static void cciss_get_device_descr(int ctlr, int logvol,
+static void cciss_get_device_descr(ctlr_info_t *h, int logvol,
char *vendor, char *model, char *rev)
{
int rc;
@@ -1797,8 +1839,8 @@ static void cciss_get_device_descr(int ctlr, int logvol,
if (!inq_buf)
return;
- log_unit_to_scsi3addr(hba[ctlr], scsi3addr, logvol);
- rc = sendcmd_withirq(CISS_INQUIRY, ctlr, inq_buf, sizeof(*inq_buf), 0,
+ log_unit_to_scsi3addr(h, scsi3addr, logvol);
+ rc = sendcmd_withirq(h, CISS_INQUIRY, inq_buf, sizeof(*inq_buf), 0,
scsi3addr, TYPE_CMD);
if (rc == IO_OK) {
memcpy(vendor, &inq_buf->data_byte[8], VENDOR_LEN);
@@ -1818,7 +1860,7 @@ static void cciss_get_device_descr(int ctlr, int logvol,
* number cannot be had, for whatever reason, 16 bytes of 0xff
* are returned instead.
*/
-static void cciss_get_serial_no(int ctlr, int logvol,
+static void cciss_get_serial_no(ctlr_info_t *h, int logvol,
unsigned char *serial_no, int buflen)
{
#define PAGE_83_INQ_BYTES 64
@@ -1833,8 +1875,8 @@ static void cciss_get_serial_no(int ctlr, int logvol,
if (!buf)
return;
memset(serial_no, 0, buflen);
- log_unit_to_scsi3addr(hba[ctlr], scsi3addr, logvol);
- rc = sendcmd_withirq(CISS_INQUIRY, ctlr, buf,
+ log_unit_to_scsi3addr(h, scsi3addr, logvol);
+ rc = sendcmd_withirq(h, CISS_INQUIRY, buf,
PAGE_83_INQ_BYTES, 0x83, scsi3addr, TYPE_CMD);
if (rc == IO_OK)
memcpy(serial_no, &buf[8], buflen);
@@ -1900,10 +1942,9 @@ init_queue_failure:
* is also the controller node. Any changes to disk 0 will show up on
* the next reboot.
*/
-static void cciss_update_drive_info(int ctlr, int drv_index, int first_time,
- int via_ioctl)
+static void cciss_update_drive_info(ctlr_info_t *h, int drv_index,
+ int first_time, int via_ioctl)
{
- ctlr_info_t *h = hba[ctlr];
struct gendisk *disk;
InquiryData_struct *inq_buff = NULL;
unsigned int block_size;
@@ -1920,16 +1961,16 @@ static void cciss_update_drive_info(int ctlr, int drv_index, int first_time,
/* testing to see if 16-byte CDBs are already being used */
if (h->cciss_read == CCISS_READ_16) {
- cciss_read_capacity_16(h->ctlr, drv_index,
+ cciss_read_capacity_16(h, drv_index,
&total_size, &block_size);
} else {
- cciss_read_capacity(ctlr, drv_index, &total_size, &block_size);
+ cciss_read_capacity(h, drv_index, &total_size, &block_size);
/* if read_capacity returns all F's this volume is >2TB */
/* in size so we switch to 16-byte CDB's for all */
/* read/write ops */
if (total_size == 0xFFFFFFFFULL) {
- cciss_read_capacity_16(ctlr, drv_index,
+ cciss_read_capacity_16(h, drv_index,
&total_size, &block_size);
h->cciss_read = CCISS_READ_16;
h->cciss_write = CCISS_WRITE_16;
@@ -1939,14 +1980,14 @@ static void cciss_update_drive_info(int ctlr, int drv_index, int first_time,
}
}
- cciss_geometry_inquiry(ctlr, drv_index, total_size, block_size,
+ cciss_geometry_inquiry(h, drv_index, total_size, block_size,
inq_buff, drvinfo);
drvinfo->block_size = block_size;
drvinfo->nr_blocks = total_size + 1;
- cciss_get_device_descr(ctlr, drv_index, drvinfo->vendor,
+ cciss_get_device_descr(h, drv_index, drvinfo->vendor,
drvinfo->model, drvinfo->rev);
- cciss_get_serial_no(ctlr, drv_index, drvinfo->serial_no,
+ cciss_get_serial_no(h, drv_index, drvinfo->serial_no,
sizeof(drvinfo->serial_no));
/* Save the lunid in case we deregister the disk, below. */
memcpy(drvinfo->LunID, h->drv[drv_index]->LunID,
@@ -1971,10 +2012,10 @@ static void cciss_update_drive_info(int ctlr, int drv_index, int first_time,
* (unless it's the first disk (for the controller node).
*/
if (h->drv[drv_index]->raid_level != -1 && drv_index != 0) {
- printk(KERN_WARNING "disk %d has changed.\n", drv_index);
- spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
+ dev_warn(&h->pdev->dev, "disk %d has changed.\n", drv_index);
+ spin_lock_irqsave(&h->lock, flags);
h->drv[drv_index]->busy_configuring = 1;
- spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
+ spin_unlock_irqrestore(&h->lock, flags);
/* deregister_disk sets h->drv[drv_index]->queue = NULL
* which keeps the interrupt handler from starting
@@ -2024,8 +2065,8 @@ static void cciss_update_drive_info(int ctlr, int drv_index, int first_time,
if (cciss_add_disk(h, disk, drv_index) != 0) {
cciss_free_gendisk(h, drv_index);
cciss_free_drive_info(h, drv_index);
- printk(KERN_WARNING "cciss:%d could not update "
- "disk %d\n", h->ctlr, drv_index);
+ dev_warn(&h->pdev->dev, "could not update disk %d\n",
+ drv_index);
--h->num_luns;
}
}
@@ -2035,7 +2076,7 @@ freeret:
kfree(drvinfo);
return;
mem_msg:
- printk(KERN_ERR "cciss: out of memory\n");
+ dev_err(&h->pdev->dev, "out of memory\n");
goto freeret;
}
@@ -2127,9 +2168,9 @@ static int cciss_add_gendisk(ctlr_info_t *h, unsigned char lunid[],
h->gendisk[drv_index] =
alloc_disk(1 << NWD_SHIFT);
if (!h->gendisk[drv_index]) {
- printk(KERN_ERR "cciss%d: could not "
- "allocate a new disk %d\n",
- h->ctlr, drv_index);
+ dev_err(&h->pdev->dev,
+ "could not allocate a new disk %d\n",
+ drv_index);
goto err_free_drive_info;
}
}
@@ -2180,8 +2221,7 @@ static void cciss_add_controller_node(ctlr_info_t *h)
cciss_free_gendisk(h, drv_index);
cciss_free_drive_info(h, drv_index);
error:
- printk(KERN_WARNING "cciss%d: could not "
- "add disk 0.\n", h->ctlr);
+ dev_warn(&h->pdev->dev, "could not add disk 0.\n");
return;
}
@@ -2196,7 +2236,6 @@ error:
static int rebuild_lun_table(ctlr_info_t *h, int first_time,
int via_ioctl)
{
- int ctlr = h->ctlr;
int num_luns;
ReportLunData_struct *ld_buff = NULL;
int return_code;
@@ -2211,27 +2250,27 @@ static int rebuild_lun_table(ctlr_info_t *h, int first_time,
return -EPERM;
/* Set busy_configuring flag for this operation */
- spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
+ spin_lock_irqsave(&h->lock, flags);
if (h->busy_configuring) {
- spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
+ spin_unlock_irqrestore(&h->lock, flags);
return -EBUSY;
}
h->busy_configuring = 1;
- spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
+ spin_unlock_irqrestore(&h->lock, flags);
ld_buff = kzalloc(sizeof(ReportLunData_struct), GFP_KERNEL);
if (ld_buff == NULL)
goto mem_msg;
- return_code = sendcmd_withirq(CISS_REPORT_LOG, ctlr, ld_buff,
+ return_code = sendcmd_withirq(h, CISS_REPORT_LOG, ld_buff,
sizeof(ReportLunData_struct),
0, CTLR_LUNID, TYPE_CMD);
if (return_code == IO_OK)
listlength = be32_to_cpu(*(__be32 *) ld_buff->LUNListLength);
else { /* reading number of logical volumes failed */
- printk(KERN_WARNING "cciss: report logical volume"
- " command failed\n");
+ dev_warn(&h->pdev->dev,
+ "report logical volume command failed\n");
listlength = 0;
goto freeret;
}
@@ -2239,7 +2278,7 @@ static int rebuild_lun_table(ctlr_info_t *h, int first_time,
num_luns = listlength / 8; /* 8 bytes per entry */
if (num_luns > CISS_MAX_LUN) {
num_luns = CISS_MAX_LUN;
- printk(KERN_WARNING "cciss: more luns configured"
+ dev_warn(&h->pdev->dev, "more luns configured"
" on controller than can be handled by"
" this driver.\n");
}
@@ -2270,9 +2309,9 @@ static int rebuild_lun_table(ctlr_info_t *h, int first_time,
}
if (!drv_found) {
/* Deregister it from the OS, it's gone. */
- spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
+ spin_lock_irqsave(&h->lock, flags);
h->drv[i]->busy_configuring = 1;
- spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
+ spin_unlock_irqrestore(&h->lock, flags);
return_code = deregister_disk(h, i, 1, via_ioctl);
if (h->drv[i] != NULL)
h->drv[i]->busy_configuring = 0;
@@ -2311,8 +2350,7 @@ static int rebuild_lun_table(ctlr_info_t *h, int first_time,
if (drv_index == -1)
goto freeret;
}
- cciss_update_drive_info(ctlr, drv_index, first_time,
- via_ioctl);
+ cciss_update_drive_info(h, drv_index, first_time, via_ioctl);
} /* end for */
freeret:
@@ -2324,7 +2362,7 @@ freeret:
*/
return -1;
mem_msg:
- printk(KERN_ERR "cciss: out of memory\n");
+ dev_err(&h->pdev->dev, "out of memory\n");
h->busy_configuring = 0;
goto freeret;
}
@@ -2444,11 +2482,10 @@ static int deregister_disk(ctlr_info_t *h, int drv_index,
return 0;
}
-static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff,
+static int fill_cmd(ctlr_info_t *h, CommandList_struct *c, __u8 cmd, void *buff,
size_t size, __u8 page_code, unsigned char *scsi3addr,
int cmd_type)
{
- ctlr_info_t *h = hba[ctlr];
u64bit buff_dma_handle;
int status = IO_OK;
@@ -2532,8 +2569,7 @@ static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff,
c->Request.Timeout = 0;
break;
default:
- printk(KERN_WARNING
- "cciss%d: Unknown Command 0x%c\n", ctlr, cmd);
+ dev_warn(&h->pdev->dev, "Unknown Command 0x%c\n", cmd);
return IO_ERROR;
}
} else if (cmd_type == TYPE_MSG) {
@@ -2565,13 +2601,12 @@ static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff,
c->Request.CDB[0] = cmd;
break;
default:
- printk(KERN_WARNING
- "cciss%d: unknown message type %d\n", ctlr, cmd);
+ dev_warn(&h->pdev->dev,
+ "unknown message type %d\n", cmd);
return IO_ERROR;
}
} else {
- printk(KERN_WARNING
- "cciss%d: unknown command type %d\n", ctlr, cmd_type);
+ dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type);
return IO_ERROR;
}
/* Fill in the scatter gather information */
@@ -2599,15 +2634,14 @@ static int check_target_status(ctlr_info_t *h, CommandList_struct *c)
default:
if (check_for_unit_attention(h, c))
return IO_NEEDS_RETRY;
- printk(KERN_WARNING "cciss%d: cmd 0x%02x "
+ dev_warn(&h->pdev->dev, "cmd 0x%02x "
"check condition, sense key = 0x%02x\n",
- h->ctlr, c->Request.CDB[0],
- c->err_info->SenseInfo[2]);
+ c->Request.CDB[0], c->err_info->SenseInfo[2]);
}
break;
default:
- printk(KERN_WARNING "cciss%d: cmd 0x%02x"
- "scsi status = 0x%02x\n", h->ctlr,
+ dev_warn(&h->pdev->dev, "cmd 0x%02x"
+ "scsi status = 0x%02x\n",
c->Request.CDB[0], c->err_info->ScsiStatus);
break;
}
@@ -2630,43 +2664,42 @@ static int process_sendcmd_error(ctlr_info_t *h, CommandList_struct *c)
/* expected for inquiry and report lun commands */
break;
case CMD_INVALID:
- printk(KERN_WARNING "cciss: cmd 0x%02x is "
+ dev_warn(&h->pdev->dev, "cmd 0x%02x is "
"reported invalid\n", c->Request.CDB[0]);
return_status = IO_ERROR;
break;
case CMD_PROTOCOL_ERR:
- printk(KERN_WARNING "cciss: cmd 0x%02x has "
- "protocol error \n", c->Request.CDB[0]);
+ dev_warn(&h->pdev->dev, "cmd 0x%02x has "
+ "protocol error\n", c->Request.CDB[0]);
return_status = IO_ERROR;
break;
case CMD_HARDWARE_ERR:
- printk(KERN_WARNING "cciss: cmd 0x%02x had "
+ dev_warn(&h->pdev->dev, "cmd 0x%02x had "
" hardware error\n", c->Request.CDB[0]);
return_status = IO_ERROR;
break;
case CMD_CONNECTION_LOST:
- printk(KERN_WARNING "cciss: cmd 0x%02x had "
+ dev_warn(&h->pdev->dev, "cmd 0x%02x had "
"connection lost\n", c->Request.CDB[0]);
return_status = IO_ERROR;
break;
case CMD_ABORTED:
- printk(KERN_WARNING "cciss: cmd 0x%02x was "
+ dev_warn(&h->pdev->dev, "cmd 0x%02x was "
"aborted\n", c->Request.CDB[0]);
return_status = IO_ERROR;
break;
case CMD_ABORT_FAILED:
- printk(KERN_WARNING "cciss: cmd 0x%02x reports "
+ dev_warn(&h->pdev->dev, "cmd 0x%02x reports "
"abort failed\n", c->Request.CDB[0]);
return_status = IO_ERROR;
break;
case CMD_UNSOLICITED_ABORT:
- printk(KERN_WARNING
- "cciss%d: unsolicited abort 0x%02x\n", h->ctlr,
+ dev_warn(&h->pdev->dev, "unsolicited abort 0x%02x\n",
c->Request.CDB[0]);
return_status = IO_NEEDS_RETRY;
break;
default:
- printk(KERN_WARNING "cciss: cmd 0x%02x returned "
+ dev_warn(&h->pdev->dev, "cmd 0x%02x returned "
"unknown status %x\n", c->Request.CDB[0],
c->err_info->CommandStatus);
return_status = IO_ERROR;
@@ -2679,17 +2712,11 @@ static int sendcmd_withirq_core(ctlr_info_t *h, CommandList_struct *c,
{
DECLARE_COMPLETION_ONSTACK(wait);
u64bit buff_dma_handle;
- unsigned long flags;
int return_status = IO_OK;
resend_cmd2:
c->waiting = &wait;
- /* Put the request on the tail of the queue and send it */
- spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
- addQ(&h->reqQ, c);
- h->Qdepth++;
- start_io(h);
- spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
+ enqueue_cmd_and_start_io(h, c);
wait_for_completion(&wait);
@@ -2700,7 +2727,7 @@ resend_cmd2:
if (return_status == IO_NEEDS_RETRY &&
c->retry_count < MAX_CMD_RETRIES) {
- printk(KERN_WARNING "cciss%d: retrying 0x%02x\n", h->ctlr,
+ dev_warn(&h->pdev->dev, "retrying 0x%02x\n",
c->Request.CDB[0]);
c->retry_count++;
/* erase the old error information */
@@ -2719,27 +2746,26 @@ command_done:
return return_status;
}
-static int sendcmd_withirq(__u8 cmd, int ctlr, void *buff, size_t size,
+static int sendcmd_withirq(ctlr_info_t *h, __u8 cmd, void *buff, size_t size,
__u8 page_code, unsigned char scsi3addr[],
int cmd_type)
{
- ctlr_info_t *h = hba[ctlr];
CommandList_struct *c;
int return_status;
- c = cmd_alloc(h, 0);
+ c = cmd_special_alloc(h);
if (!c)
return -ENOMEM;
- return_status = fill_cmd(c, cmd, ctlr, buff, size, page_code,
+ return_status = fill_cmd(h, c, cmd, buff, size, page_code,
scsi3addr, cmd_type);
if (return_status == IO_OK)
return_status = sendcmd_withirq_core(h, c, 1);
- cmd_free(h, c, 0);
+ cmd_special_free(h, c);
return return_status;
}
-static void cciss_geometry_inquiry(int ctlr, int logvol,
+static void cciss_geometry_inquiry(ctlr_info_t *h, int logvol,
sector_t total_size,
unsigned int block_size,
InquiryData_struct *inq_buff,
@@ -2750,13 +2776,13 @@ static void cciss_geometry_inquiry(int ctlr, int logvol,
unsigned char scsi3addr[8];
memset(inq_buff, 0, sizeof(InquiryData_struct));
- log_unit_to_scsi3addr(hba[ctlr], scsi3addr, logvol);
- return_code = sendcmd_withirq(CISS_INQUIRY, ctlr, inq_buff,
+ log_unit_to_scsi3addr(h, scsi3addr, logvol);
+ return_code = sendcmd_withirq(h, CISS_INQUIRY, inq_buff,
sizeof(*inq_buff), 0xC1, scsi3addr, TYPE_CMD);
if (return_code == IO_OK) {
if (inq_buff->data_byte[8] == 0xFF) {
- printk(KERN_WARNING
- "cciss: reading geometry failed, volume "
+ dev_warn(&h->pdev->dev,
+ "reading geometry failed, volume "
"does not support reading geometry\n");
drv->heads = 255;
drv->sectors = 32; /* Sectors per track */
@@ -2780,12 +2806,12 @@ static void cciss_geometry_inquiry(int ctlr, int logvol,
drv->cylinders = real_size;
}
} else { /* Get geometry failed */
- printk(KERN_WARNING "cciss: reading geometry failed\n");
+ dev_warn(&h->pdev->dev, "reading geometry failed\n");
}
}
static void
-cciss_read_capacity(int ctlr, int logvol, sector_t *total_size,
+cciss_read_capacity(ctlr_info_t *h, int logvol, sector_t *total_size,
unsigned int *block_size)
{
ReadCapdata_struct *buf;
@@ -2794,25 +2820,25 @@ cciss_read_capacity(int ctlr, int logvol, sector_t *total_size,
buf = kzalloc(sizeof(ReadCapdata_struct), GFP_KERNEL);
if (!buf) {
- printk(KERN_WARNING "cciss: out of memory\n");
+ dev_warn(&h->pdev->dev, "out of memory\n");
return;
}
- log_unit_to_scsi3addr(hba[ctlr], scsi3addr, logvol);
- return_code = sendcmd_withirq(CCISS_READ_CAPACITY, ctlr, buf,
+ log_unit_to_scsi3addr(h, scsi3addr, logvol);
+ return_code = sendcmd_withirq(h, CCISS_READ_CAPACITY, buf,
sizeof(ReadCapdata_struct), 0, scsi3addr, TYPE_CMD);
if (return_code == IO_OK) {
*total_size = be32_to_cpu(*(__be32 *) buf->total_size);
*block_size = be32_to_cpu(*(__be32 *) buf->block_size);
} else { /* read capacity command failed */
- printk(KERN_WARNING "cciss: read capacity failed\n");
+ dev_warn(&h->pdev->dev, "read capacity failed\n");
*total_size = 0;
*block_size = BLOCK_SIZE;
}
kfree(buf);
}
-static void cciss_read_capacity_16(int ctlr, int logvol,
+static void cciss_read_capacity_16(ctlr_info_t *h, int logvol,
sector_t *total_size, unsigned int *block_size)
{
ReadCapdata_struct_16 *buf;
@@ -2821,23 +2847,23 @@ static void cciss_read_capacity_16(int ctlr, int logvol,
buf = kzalloc(sizeof(ReadCapdata_struct_16), GFP_KERNEL);
if (!buf) {
- printk(KERN_WARNING "cciss: out of memory\n");
+ dev_warn(&h->pdev->dev, "out of memory\n");
return;
}
- log_unit_to_scsi3addr(hba[ctlr], scsi3addr, logvol);
- return_code = sendcmd_withirq(CCISS_READ_CAPACITY_16,
- ctlr, buf, sizeof(ReadCapdata_struct_16),
+ log_unit_to_scsi3addr(h, scsi3addr, logvol);
+ return_code = sendcmd_withirq(h, CCISS_READ_CAPACITY_16,
+ buf, sizeof(ReadCapdata_struct_16),
0, scsi3addr, TYPE_CMD);
if (return_code == IO_OK) {
*total_size = be64_to_cpu(*(__be64 *) buf->total_size);
*block_size = be32_to_cpu(*(__be32 *) buf->block_size);
} else { /* read capacity command failed */
- printk(KERN_WARNING "cciss: read capacity failed\n");
+ dev_warn(&h->pdev->dev, "read capacity failed\n");
*total_size = 0;
*block_size = BLOCK_SIZE;
}
- printk(KERN_INFO " blocks= %llu block_size= %d\n",
+ dev_info(&h->pdev->dev, " blocks= %llu block_size= %d\n",
(unsigned long long)*total_size+1, *block_size);
kfree(buf);
}
@@ -2865,17 +2891,17 @@ static int cciss_revalidate(struct gendisk *disk)
inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
if (inq_buff == NULL) {
- printk(KERN_WARNING "cciss: out of memory\n");
+ dev_warn(&h->pdev->dev, "out of memory\n");
return 1;
}
if (h->cciss_read == CCISS_READ_10) {
- cciss_read_capacity(h->ctlr, logvol,
+ cciss_read_capacity(h, logvol,
&total_size, &block_size);
} else {
- cciss_read_capacity_16(h->ctlr, logvol,
+ cciss_read_capacity_16(h, logvol,
&total_size, &block_size);
}
- cciss_geometry_inquiry(h->ctlr, logvol, total_size, block_size,
+ cciss_geometry_inquiry(h, logvol, total_size, block_size,
inq_buff, drv);
blk_queue_logical_block_size(drv->queue, drv->block_size);
@@ -2909,7 +2935,7 @@ static void start_io(ctlr_info_t *h)
c = hlist_entry(h->reqQ.first, CommandList_struct, list);
/* can't do anything if fifo is full */
if ((h->access.fifo_full(h))) {
- printk(KERN_WARNING "cciss: fifo full\n");
+ dev_warn(&h->pdev->dev, "fifo full\n");
break;
}
@@ -2925,7 +2951,7 @@ static void start_io(ctlr_info_t *h)
}
}
-/* Assumes that CCISS_LOCK(h->ctlr) is held. */
+/* Assumes that h->lock is held. */
/* Zeros out the error record and then resends the command back */
/* to the controller */
static inline void resend_cciss_cmd(ctlr_info_t *h, CommandList_struct *c)
@@ -2966,7 +2992,7 @@ static inline int evaluate_target_status(ctlr_info_t *h,
driver_byte = DRIVER_OK;
msg_byte = cmd->err_info->CommandStatus; /* correct? seems too device specific */
- if (blk_pc_request(cmd->rq))
+ if (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC)
host_byte = DID_PASSTHROUGH;
else
host_byte = DID_OK;
@@ -2975,8 +3001,8 @@ static inline int evaluate_target_status(ctlr_info_t *h,
host_byte, driver_byte);
if (cmd->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION) {
- if (!blk_pc_request(cmd->rq))
- printk(KERN_WARNING "cciss: cmd %p "
+ if (cmd->rq->cmd_type != REQ_TYPE_BLOCK_PC)
+ dev_warn(&h->pdev->dev, "cmd %p "
"has SCSI Status 0x%x\n",
cmd, cmd->err_info->ScsiStatus);
return error_value;
@@ -2985,17 +3011,19 @@ static inline int evaluate_target_status(ctlr_info_t *h,
/* check the sense key */
sense_key = 0xf & cmd->err_info->SenseInfo[2];
/* no status or recovered error */
- if (((sense_key == 0x0) || (sense_key == 0x1)) && !blk_pc_request(cmd->rq))
+ if (((sense_key == 0x0) || (sense_key == 0x1)) &&
+ (cmd->rq->cmd_type != REQ_TYPE_BLOCK_PC))
error_value = 0;
if (check_for_unit_attention(h, cmd)) {
- *retry_cmd = !blk_pc_request(cmd->rq);
+ *retry_cmd = !(cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC);
return 0;
}
- if (!blk_pc_request(cmd->rq)) { /* Not SG_IO or similar? */
+ /* Not SG_IO or similar? */
+ if (cmd->rq->cmd_type != REQ_TYPE_BLOCK_PC) {
if (error_value != 0)
- printk(KERN_WARNING "cciss: cmd %p has CHECK CONDITION"
+ dev_warn(&h->pdev->dev, "cmd %p has CHECK CONDITION"
" sense key = 0x%x\n", cmd, sense_key);
return error_value;
}
@@ -3035,90 +3063,97 @@ static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd,
rq->errors = evaluate_target_status(h, cmd, &retry_cmd);
break;
case CMD_DATA_UNDERRUN:
- if (blk_fs_request(cmd->rq)) {
- printk(KERN_WARNING "cciss: cmd %p has"
+ if (cmd->rq->cmd_type == REQ_TYPE_FS) {
+ dev_warn(&h->pdev->dev, "cmd %p has"
" completed with data underrun "
"reported\n", cmd);
cmd->rq->resid_len = cmd->err_info->ResidualCnt;
}
break;
case CMD_DATA_OVERRUN:
- if (blk_fs_request(cmd->rq))
- printk(KERN_WARNING "cciss: cmd %p has"
+ if (cmd->rq->cmd_type == REQ_TYPE_FS)
+ dev_warn(&h->pdev->dev, "cciss: cmd %p has"
" completed with data overrun "
"reported\n", cmd);
break;
case CMD_INVALID:
- printk(KERN_WARNING "cciss: cmd %p is "
+ dev_warn(&h->pdev->dev, "cciss: cmd %p is "
"reported invalid\n", cmd);
rq->errors = make_status_bytes(SAM_STAT_GOOD,
cmd->err_info->CommandStatus, DRIVER_OK,
- blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
+ (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
+ DID_PASSTHROUGH : DID_ERROR);
break;
case CMD_PROTOCOL_ERR:
- printk(KERN_WARNING "cciss: cmd %p has "
- "protocol error \n", cmd);
+ dev_warn(&h->pdev->dev, "cciss: cmd %p has "
+ "protocol error\n", cmd);
rq->errors = make_status_bytes(SAM_STAT_GOOD,
cmd->err_info->CommandStatus, DRIVER_OK,
- blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
+ (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
+ DID_PASSTHROUGH : DID_ERROR);
break;
case CMD_HARDWARE_ERR:
- printk(KERN_WARNING "cciss: cmd %p had "
+ dev_warn(&h->pdev->dev, "cciss: cmd %p had "
" hardware error\n", cmd);
rq->errors = make_status_bytes(SAM_STAT_GOOD,
cmd->err_info->CommandStatus, DRIVER_OK,
- blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
+ (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
+ DID_PASSTHROUGH : DID_ERROR);
break;
case CMD_CONNECTION_LOST:
- printk(KERN_WARNING "cciss: cmd %p had "
+ dev_warn(&h->pdev->dev, "cciss: cmd %p had "
"connection lost\n", cmd);
rq->errors = make_status_bytes(SAM_STAT_GOOD,
cmd->err_info->CommandStatus, DRIVER_OK,
- blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
+ (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
+ DID_PASSTHROUGH : DID_ERROR);
break;
case CMD_ABORTED:
- printk(KERN_WARNING "cciss: cmd %p was "
+ dev_warn(&h->pdev->dev, "cciss: cmd %p was "
"aborted\n", cmd);
rq->errors = make_status_bytes(SAM_STAT_GOOD,
cmd->err_info->CommandStatus, DRIVER_OK,
- blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ABORT);
+ (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
+ DID_PASSTHROUGH : DID_ABORT);
break;
case CMD_ABORT_FAILED:
- printk(KERN_WARNING "cciss: cmd %p reports "
+ dev_warn(&h->pdev->dev, "cciss: cmd %p reports "
"abort failed\n", cmd);
rq->errors = make_status_bytes(SAM_STAT_GOOD,
cmd->err_info->CommandStatus, DRIVER_OK,
- blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
+ (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
+ DID_PASSTHROUGH : DID_ERROR);
break;
case CMD_UNSOLICITED_ABORT:
- printk(KERN_WARNING "cciss%d: unsolicited "
+ dev_warn(&h->pdev->dev, "cciss%d: unsolicited "
"abort %p\n", h->ctlr, cmd);
if (cmd->retry_count < MAX_CMD_RETRIES) {
retry_cmd = 1;
- printk(KERN_WARNING
- "cciss%d: retrying %p\n", h->ctlr, cmd);
+ dev_warn(&h->pdev->dev, "retrying %p\n", cmd);
cmd->retry_count++;
} else
- printk(KERN_WARNING
- "cciss%d: %p retried too "
- "many times\n", h->ctlr, cmd);
+ dev_warn(&h->pdev->dev,
+ "%p retried too many times\n", cmd);
rq->errors = make_status_bytes(SAM_STAT_GOOD,
cmd->err_info->CommandStatus, DRIVER_OK,
- blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ABORT);
+ (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
+ DID_PASSTHROUGH : DID_ABORT);
break;
case CMD_TIMEOUT:
- printk(KERN_WARNING "cciss: cmd %p timedout\n", cmd);
+ dev_warn(&h->pdev->dev, "cmd %p timedout\n", cmd);
rq->errors = make_status_bytes(SAM_STAT_GOOD,
cmd->err_info->CommandStatus, DRIVER_OK,
- blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
+ (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
+ DID_PASSTHROUGH : DID_ERROR);
break;
default:
- printk(KERN_WARNING "cciss: cmd %p returned "
+ dev_warn(&h->pdev->dev, "cmd %p returned "
"unknown status %x\n", cmd,
cmd->err_info->CommandStatus);
rq->errors = make_status_bytes(SAM_STAT_GOOD,
cmd->err_info->CommandStatus, DRIVER_OK,
- blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
+ (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
+ DID_PASSTHROUGH : DID_ERROR);
}
after_error_processing:
@@ -3132,6 +3167,34 @@ after_error_processing:
blk_complete_request(cmd->rq);
}
+static inline u32 cciss_tag_contains_index(u32 tag)
+{
+#define DIRECT_LOOKUP_BIT 0x10
+ return tag & DIRECT_LOOKUP_BIT;
+}
+
+static inline u32 cciss_tag_to_index(u32 tag)
+{
+#define DIRECT_LOOKUP_SHIFT 5
+ return tag >> DIRECT_LOOKUP_SHIFT;
+}
+
+static inline u32 cciss_tag_discard_error_bits(u32 tag)
+{
+#define CCISS_ERROR_BITS 0x03
+ return tag & ~CCISS_ERROR_BITS;
+}
+
+static inline void cciss_mark_tag_indexed(u32 *tag)
+{
+ *tag |= DIRECT_LOOKUP_BIT;
+}
+
+static inline void cciss_set_tag_index(u32 *tag, u32 index)
+{
+ *tag |= (index << DIRECT_LOOKUP_SHIFT);
+}
+
/*
* Get a request and submit it to the controller.
*/
@@ -3163,7 +3226,8 @@ static void do_cciss_request(struct request_queue *q)
BUG_ON(creq->nr_phys_segments > h->maxsgentries);
- if ((c = cmd_alloc(h, 1)) == NULL)
+ c = cmd_alloc(h);
+ if (!c)
goto full;
blk_start_request(creq);
@@ -3180,8 +3244,8 @@ static void do_cciss_request(struct request_queue *q)
/* got command from pool, so use the command block index instead */
/* for direct lookups. */
/* The first 2 bits are reserved for controller error reporting. */
- c->Header.Tag.lower = (c->cmdindex << 3);
- c->Header.Tag.lower |= 0x04; /* flag for direct lookup. */
+ cciss_set_tag_index(&c->Header.Tag.lower, c->cmdindex);
+ cciss_mark_tag_indexed(&c->Header.Tag.lower);
memcpy(&c->Header.LUN, drv->LunID, sizeof(drv->LunID));
c->Request.CDBLen = 10; /* 12 byte commands not in FW yet; */
c->Request.Type.Type = TYPE_CMD; /* It is a command. */
@@ -3192,11 +3256,8 @@ static void do_cciss_request(struct request_queue *q)
c->Request.CDB[0] =
(rq_data_dir(creq) == READ) ? h->cciss_read : h->cciss_write;
start_blk = blk_rq_pos(creq);
-#ifdef CCISS_DEBUG
- printk(KERN_DEBUG "ciss: sector =%d nr_sectors=%d\n",
+ dev_dbg(&h->pdev->dev, "sector =%d nr_sectors=%d\n",
(int)blk_rq_pos(creq), (int)blk_rq_sectors(creq));
-#endif /* CCISS_DEBUG */
-
sg_init_table(tmp_sg, h->maxsgentries);
seg = blk_rq_map_sg(q, creq, tmp_sg);
@@ -3236,17 +3297,18 @@ static void do_cciss_request(struct request_queue *q)
if (seg > h->maxSG)
h->maxSG = seg;
-#ifdef CCISS_DEBUG
- printk(KERN_DEBUG "cciss: Submitting %ld sectors in %d segments "
+ dev_dbg(&h->pdev->dev, "Submitting %u sectors in %d segments "
"chained[%d]\n",
blk_rq_sectors(creq), seg, chained);
-#endif /* CCISS_DEBUG */
- c->Header.SGList = c->Header.SGTotal = seg + chained;
- if (seg > h->max_cmd_sgentries)
+ c->Header.SGTotal = seg + chained;
+ if (seg <= h->max_cmd_sgentries)
+ c->Header.SGList = c->Header.SGTotal;
+ else
c->Header.SGList = h->max_cmd_sgentries;
+ set_performant_mode(h, c);
- if (likely(blk_fs_request(creq))) {
+ if (likely(creq->cmd_type == REQ_TYPE_FS)) {
if(h->cciss_read == CCISS_READ_10) {
c->Request.CDB[1] = 0;
c->Request.CDB[2] = (start_blk >> 24) & 0xff; /* MSB */
@@ -3276,11 +3338,12 @@ static void do_cciss_request(struct request_queue *q)
c->Request.CDB[13]= blk_rq_sectors(creq) & 0xff;
c->Request.CDB[14] = c->Request.CDB[15] = 0;
}
- } else if (blk_pc_request(creq)) {
+ } else if (creq->cmd_type == REQ_TYPE_BLOCK_PC) {
c->Request.CDBLen = creq->cmd_len;
memcpy(c->Request.CDB, creq->cmd, BLK_MAX_CDB);
} else {
- printk(KERN_WARNING "cciss%d: bad request type %d\n", h->ctlr, creq->cmd_type);
+ dev_warn(&h->pdev->dev, "bad request type %d\n",
+ creq->cmd_type);
BUG();
}
@@ -3313,72 +3376,131 @@ static inline int interrupt_pending(ctlr_info_t *h)
static inline long interrupt_not_for_us(ctlr_info_t *h)
{
- return (((h->access.intr_pending(h) == 0) ||
- (h->interrupts_enabled == 0)));
+ return ((h->access.intr_pending(h) == 0) ||
+ (h->interrupts_enabled == 0));
}
-static irqreturn_t do_cciss_intr(int irq, void *dev_id)
+static inline int bad_tag(ctlr_info_t *h, u32 tag_index,
+ u32 raw_tag)
{
- ctlr_info_t *h = dev_id;
+ if (unlikely(tag_index >= h->nr_cmds)) {
+ dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag);
+ return 1;
+ }
+ return 0;
+}
+
+static inline void finish_cmd(ctlr_info_t *h, CommandList_struct *c,
+ u32 raw_tag)
+{
+ removeQ(c);
+ if (likely(c->cmd_type == CMD_RWREQ))
+ complete_command(h, c, 0);
+ else if (c->cmd_type == CMD_IOCTL_PEND)
+ complete(c->waiting);
+#ifdef CONFIG_CISS_SCSI_TAPE
+ else if (c->cmd_type == CMD_SCSI)
+ complete_scsi_command(c, 0, raw_tag);
+#endif
+}
+
+static inline u32 next_command(ctlr_info_t *h)
+{
+ u32 a;
+
+ if (unlikely(h->transMethod != CFGTBL_Trans_Performant))
+ return h->access.command_completed(h);
+
+ if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
+ a = *(h->reply_pool_head); /* Next cmd in ring buffer */
+ (h->reply_pool_head)++;
+ h->commands_outstanding--;
+ } else {
+ a = FIFO_EMPTY;
+ }
+ /* Check for wraparound */
+ if (h->reply_pool_head == (h->reply_pool + h->max_commands)) {
+ h->reply_pool_head = h->reply_pool;
+ h->reply_pool_wraparound ^= 1;
+ }
+ return a;
+}
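The performant-mode branch of next_command() relies on a toggle convention: the controller writes completions into reply_pool with the low bit flipped on each pass around the ring, and the host compares that bit with its own reply_pool_wraparound flag to tell new entries from stale ones. A small userspace sketch of just that handshake (ring size and tag values are made up; the commands_outstanding bookkeeping is omitted):

#include <stdint.h>
#include <stdio.h>

#define RING_SIZE  4
#define FIFO_EMPTY 0xffffffffU  /* stand-in for the driver's sentinel */

static uint64_t reply_pool[RING_SIZE];  /* zeroed, as the driver requires */
static uint64_t *reply_head = reply_pool;
static unsigned wraparound = 1;         /* spec: init to 1, as above */

/* Consumer side: mirrors the performant-mode branch of next_command(). */
static uint32_t next_completion(void)
{
        uint32_t tag;

        if ((*reply_head & 1) != wraparound)
                return FIFO_EMPTY;      /* not yet written on this pass */
        tag = (uint32_t)*reply_head;
        reply_head++;
        if (reply_head == reply_pool + RING_SIZE) {
                reply_head = reply_pool;        /* wrapped: flip the toggle */
                wraparound ^= 1;
        }
        return tag;
}

int main(void)
{
        uint32_t t;

        /* "Controller" posts two completions; low bit set marks pass 1. */
        reply_pool[0] = 0x55;
        reply_pool[1] = 0x91;

        while ((t = next_completion()) != FIFO_EMPTY)
                printf("completed raw tag 0x%02x\n", t);
        return 0;
}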
+
+/* process completion of an indexed ("direct lookup") command */
+static inline u32 process_indexed_cmd(ctlr_info_t *h, u32 raw_tag)
+{
+ u32 tag_index;
CommandList_struct *c;
+
+ tag_index = cciss_tag_to_index(raw_tag);
+ if (bad_tag(h, tag_index, raw_tag))
+ return next_command(h);
+ c = h->cmd_pool + tag_index;
+ finish_cmd(h, c, raw_tag);
+ return next_command(h);
+}
+
+/* process completion of a non-indexed command */
+static inline u32 process_nonindexed_cmd(ctlr_info_t *h, u32 raw_tag)
+{
+ u32 tag;
+ CommandList_struct *c = NULL;
+ struct hlist_node *tmp;
+ __u32 busaddr_masked, tag_masked;
+
+ tag = cciss_tag_discard_error_bits(raw_tag);
+ hlist_for_each_entry(c, tmp, &h->cmpQ, list) {
+ busaddr_masked = cciss_tag_discard_error_bits(c->busaddr);
+ tag_masked = cciss_tag_discard_error_bits(tag);
+ if (busaddr_masked == tag_masked) {
+ finish_cmd(h, c, raw_tag);
+ return next_command(h);
+ }
+ }
+ bad_tag(h, h->nr_cmds + 1, raw_tag);
+ return next_command(h);
+}
+
+static irqreturn_t do_cciss_intx(int irq, void *dev_id)
+{
+ ctlr_info_t *h = dev_id;
unsigned long flags;
- __u32 a, a1, a2;
+ u32 raw_tag;
if (interrupt_not_for_us(h))
return IRQ_NONE;
- /*
- * If there are completed commands in the completion queue,
- * we had better do something about it.
- */
- spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
+ spin_lock_irqsave(&h->lock, flags);
while (interrupt_pending(h)) {
- while ((a = get_next_completion(h)) != FIFO_EMPTY) {
- a1 = a;
- if ((a & 0x04)) {
- a2 = (a >> 3);
- if (a2 >= h->nr_cmds) {
- printk(KERN_WARNING
- "cciss: controller cciss%d failed, stopping.\n",
- h->ctlr);
- spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
- fail_all_cmds(h->ctlr);
- return IRQ_HANDLED;
- }
-
- c = h->cmd_pool + a2;
- a = c->busaddr;
-
- } else {
- struct hlist_node *tmp;
-
- a &= ~3;
- c = NULL;
- hlist_for_each_entry(c, tmp, &h->cmpQ, list) {
- if (c->busaddr == a)
- break;
- }
- }
- /*
- * If we've found the command, take it off the
- * completion Q and free it
- */
- if (c && c->busaddr == a) {
- removeQ(c);
- if (c->cmd_type == CMD_RWREQ) {
- complete_command(h, c, 0);
- } else if (c->cmd_type == CMD_IOCTL_PEND) {
- complete(c->waiting);
- }
-# ifdef CONFIG_CISS_SCSI_TAPE
- else if (c->cmd_type == CMD_SCSI)
- complete_scsi_command(c, 0, a1);
-# endif
- continue;
- }
+ raw_tag = get_next_completion(h);
+ while (raw_tag != FIFO_EMPTY) {
+ if (cciss_tag_contains_index(raw_tag))
+ raw_tag = process_indexed_cmd(h, raw_tag);
+ else
+ raw_tag = process_nonindexed_cmd(h, raw_tag);
}
}
+ spin_unlock_irqrestore(&h->lock, flags);
+ return IRQ_HANDLED;
+}
- spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
+/* Add a second interrupt handler for MSI/MSI-X mode. In this mode we never
+ * check the interrupt pending register because it is not set.
+ */
+static irqreturn_t do_cciss_msix_intr(int irq, void *dev_id)
+{
+ ctlr_info_t *h = dev_id;
+ unsigned long flags;
+ u32 raw_tag;
+
+ spin_lock_irqsave(&h->lock, flags);
+ raw_tag = get_next_completion(h);
+ while (raw_tag != FIFO_EMPTY) {
+ if (cciss_tag_contains_index(raw_tag))
+ raw_tag = process_indexed_cmd(h, raw_tag);
+ else
+ raw_tag = process_nonindexed_cmd(h, raw_tag);
+ }
+ spin_unlock_irqrestore(&h->lock, flags);
return IRQ_HANDLED;
}
@@ -3510,18 +3632,17 @@ static int check_for_unit_attention(ctlr_info_t *h, CommandList_struct *c)
switch (c->err_info->SenseInfo[12]) {
case STATE_CHANGED:
- printk(KERN_WARNING "cciss%d: a state change "
- "detected, command retried\n", h->ctlr);
+ dev_warn(&h->pdev->dev, "a state change "
+ "detected, command retried\n");
return 1;
break;
case LUN_FAILED:
- printk(KERN_WARNING "cciss%d: LUN failure "
- "detected, action required\n", h->ctlr);
+ dev_warn(&h->pdev->dev, "LUN failure "
+ "detected, action required\n");
return 1;
break;
case REPORT_LUNS_CHANGED:
- printk(KERN_WARNING "cciss%d: report LUN data "
- "changed\n", h->ctlr);
+ dev_warn(&h->pdev->dev, "report LUN data changed\n");
/*
* Here, we could call add_to_scan_list and wake up the scan thread,
* except that it's quite likely that we will get more than one
@@ -3541,19 +3662,18 @@ static int check_for_unit_attention(ctlr_info_t *h, CommandList_struct *c)
return 1;
break;
case POWER_OR_RESET:
- printk(KERN_WARNING "cciss%d: a power on "
- "or device reset detected\n", h->ctlr);
+ dev_warn(&h->pdev->dev,
+ "a power on or device reset detected\n");
return 1;
break;
case UNIT_ATTENTION_CLEARED:
- printk(KERN_WARNING "cciss%d: unit attention "
- "cleared by another initiator\n", h->ctlr);
+ dev_warn(&h->pdev->dev,
+ "unit attention cleared by another initiator\n");
return 1;
break;
default:
- printk(KERN_WARNING "cciss%d: unknown "
- "unit attention detected\n", h->ctlr);
- return 1;
+ dev_warn(&h->pdev->dev, "unknown unit attention detected\n");
+ return 1;
}
}
@@ -3562,39 +3682,41 @@ static int check_for_unit_attention(ctlr_info_t *h, CommandList_struct *c)
* the io functions.
* This is for debug only.
*/
-#ifdef CCISS_DEBUG
-static void print_cfg_table(CfgTable_struct *tb)
+static void print_cfg_table(ctlr_info_t *h)
{
int i;
char temp_name[17];
+ CfgTable_struct *tb = h->cfgtable;
- printk("Controller Configuration information\n");
- printk("------------------------------------\n");
+ dev_dbg(&h->pdev->dev, "Controller Configuration information\n");
+ dev_dbg(&h->pdev->dev, "------------------------------------\n");
for (i = 0; i < 4; i++)
temp_name[i] = readb(&(tb->Signature[i]));
temp_name[4] = '\0';
- printk(" Signature = %s\n", temp_name);
- printk(" Spec Number = %d\n", readl(&(tb->SpecValence)));
- printk(" Transport methods supported = 0x%x\n",
+ dev_dbg(&h->pdev->dev, " Signature = %s\n", temp_name);
+ dev_dbg(&h->pdev->dev, " Spec Number = %d\n",
+ readl(&(tb->SpecValence)));
+ dev_dbg(&h->pdev->dev, " Transport methods supported = 0x%x\n",
readl(&(tb->TransportSupport)));
- printk(" Transport methods active = 0x%x\n",
+ dev_dbg(&h->pdev->dev, " Transport methods active = 0x%x\n",
readl(&(tb->TransportActive)));
- printk(" Requested transport Method = 0x%x\n",
+ dev_dbg(&h->pdev->dev, " Requested transport Method = 0x%x\n",
readl(&(tb->HostWrite.TransportRequest)));
- printk(" Coalesce Interrupt Delay = 0x%x\n",
+ dev_dbg(&h->pdev->dev, " Coalesce Interrupt Delay = 0x%x\n",
readl(&(tb->HostWrite.CoalIntDelay)));
- printk(" Coalesce Interrupt Count = 0x%x\n",
+ dev_dbg(&h->pdev->dev, " Coalesce Interrupt Count = 0x%x\n",
readl(&(tb->HostWrite.CoalIntCount)));
- printk(" Max outstanding commands = 0x%d\n",
+ dev_dbg(&h->pdev->dev, " Max outstanding commands = 0x%d\n",
readl(&(tb->CmdsOutMax)));
- printk(" Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
+ dev_dbg(&h->pdev->dev, " Bus Types = 0x%x\n",
+ readl(&(tb->BusTypes)));
for (i = 0; i < 16; i++)
temp_name[i] = readb(&(tb->ServerName[i]));
temp_name[16] = '\0';
- printk(" Server Name = %s\n", temp_name);
- printk(" Heartbeat Counter = 0x%x\n\n\n", readl(&(tb->HeartBeat)));
+ dev_dbg(&h->pdev->dev, " Server Name = %s\n", temp_name);
+ dev_dbg(&h->pdev->dev, " Heartbeat Counter = 0x%x\n\n\n",
+ readl(&(tb->HeartBeat)));
}
-#endif /* CCISS_DEBUG */
static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
{
@@ -3618,7 +3740,7 @@ static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
offset += 8;
break;
default: /* reserved in PCI 2.2 */
- printk(KERN_WARNING
+ dev_warn(&pdev->dev,
"Base address is invalid\n");
return -1;
break;
@@ -3630,12 +3752,182 @@ static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
return -1;
}
+/* Fill in bucket_map[], given nsgs (the max number of
+ * scatter gather elements supported) and bucket[],
+ * which is an array of 8 integers. The bucket[] array
+ * contains 8 different DMA transfer sizes (in 16
+ * byte increments) which the controller uses to fetch
+ * commands. This function fills in bucket_map[], which
+ * maps a given number of scatter gather elements to one of
+ * the 8 DMA transfer sizes. The point of it is to allow the
+ * controller to only do as much DMA as needed to fetch the
+ * command, with the DMA transfer size encoded in the lower
+ * bits of the command address.
+ */
+static void calc_bucket_map(int bucket[], int num_buckets,
+ int nsgs, int *bucket_map)
+{
+ int i, j, b, size;
+
+ /* even a command with 0 SGs requires 4 blocks */
+#define MINIMUM_TRANSFER_BLOCKS 4
+#define NUM_BUCKETS 8
+ /* Note, bucket_map must have nsgs+1 entries. */
+ for (i = 0; i <= nsgs; i++) {
+ /* Compute size of a command with i SG entries */
+ size = i + MINIMUM_TRANSFER_BLOCKS;
+ b = num_buckets; /* Assume the biggest bucket */
+ /* Find the bucket that is just big enough */
+ for (j = 0; j < 8; j++) {
+ if (bucket[j] >= size) {
+ b = j;
+ break;
+ }
+ }
+ /* for a command with i SG entries, use bucket b. */
+ bucket_map[i] = b;
+ }
+}
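The mapping the comment above describes is easiest to see by running the same loop outside the driver. The sketch below is a userspace copy of calc_bucket_map() fed with the bft[] sizes used later in cciss_enter_performant_mode(); MAXSGENTRIES is assumed to be 32 here, so the last bucket becomes 36:

#include <stdio.h>

#define MINIMUM_TRANSFER_BLOCKS 4       /* same constant as in the driver */

/* Userspace copy of the loop above; behaviour matches calc_bucket_map(). */
static void calc_bucket_map(int bucket[], int num_buckets, int nsgs,
                            int *bucket_map)
{
        int i, j, b, size;

        for (i = 0; i <= nsgs; i++) {
                size = i + MINIMUM_TRANSFER_BLOCKS;
                b = num_buckets;        /* assume the biggest bucket */
                for (j = 0; j < num_buckets; j++) {
                        if (bucket[j] >= size) {
                                b = j;  /* first bucket big enough */
                                break;
                        }
                }
                bucket_map[i] = b;
        }
}

int main(void)
{
        /* bft[] as programmed into BlockFetch0..7 below; 36 assumes
         * MAXSGENTRIES == 32, so MAXSGENTRIES + 4 == 36. */
        int bft[8] = { 5, 6, 8, 10, 12, 20, 28, 36 };
        int map[33];                    /* nsgs + 1 entries */
        int i;

        calc_bucket_map(bft, 8, 32, map);
        for (i = 0; i <= 32; i += 8)
                printf("%2d SG entries -> bucket %d (%2d blocks of 16 bytes)\n",
                       i, map[i], bft[map[i]]);
        return 0;
}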
+
+static void __devinit cciss_wait_for_mode_change_ack(ctlr_info_t *h)
+{
+ int i;
+
+ /* under certain very rare conditions, this can take awhile.
+ * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
+ * as we enter this code.) */
+ for (i = 0; i < MAX_CONFIG_WAIT; i++) {
+ if (!(readl(h->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
+ break;
+ msleep(10);
+ }
+}
+
+static __devinit void cciss_enter_performant_mode(ctlr_info_t *h)
+{
+ /* This is a bit complicated. There are 8 registers on
+ * the controller which we write to to tell it 8 different
+ * sizes of commands which there may be. It's a way of
+ * reducing the DMA done to fetch each command. Encoded into
+ * each command's tag are 3 bits which communicate to the controller
+ * which of the eight sizes that command fits within. The size of
+ * each command depends on how many scatter gather entries there are.
+ * Each SG entry requires 16 bytes. The eight registers are programmed
+ * with the number of 16-byte blocks a command of that size requires.
+ * The smallest command possible requires 5 such 16 byte blocks.
+ * The largest command possible requires MAXSGENTRIES + 4 16-byte
+ * blocks. Note, this only extends to the SG entries contained
+ * within the command block, and does not extend to chained blocks
+ * of SG elements. bft[] contains the eight values we write to
+ * the registers. They are not evenly distributed, but have more
+ * sizes for small commands, and fewer sizes for larger commands.
+ */
+ __u32 trans_offset;
+ int bft[8] = { 5, 6, 8, 10, 12, 20, 28, MAXSGENTRIES + 4};
+ /*
+ * 5 = 1 s/g entry or 4k
+ * 6 = 2 s/g entry or 8k
+ * 8 = 4 s/g entry or 16k
+ * 10 = 6 s/g entry or 24k
+ */
+ unsigned long register_value;
+ BUILD_BUG_ON(28 > MAXSGENTRIES + 4);
+
+ h->reply_pool_wraparound = 1; /* spec: init to 1 */
+
+ /* Controller spec: zero out this buffer. */
+ memset(h->reply_pool, 0, h->max_commands * sizeof(__u64));
+ h->reply_pool_head = h->reply_pool;
+
+ trans_offset = readl(&(h->cfgtable->TransMethodOffset));
+ calc_bucket_map(bft, ARRAY_SIZE(bft), h->maxsgentries,
+ h->blockFetchTable);
+ writel(bft[0], &h->transtable->BlockFetch0);
+ writel(bft[1], &h->transtable->BlockFetch1);
+ writel(bft[2], &h->transtable->BlockFetch2);
+ writel(bft[3], &h->transtable->BlockFetch3);
+ writel(bft[4], &h->transtable->BlockFetch4);
+ writel(bft[5], &h->transtable->BlockFetch5);
+ writel(bft[6], &h->transtable->BlockFetch6);
+ writel(bft[7], &h->transtable->BlockFetch7);
+
+ /* size of controller ring buffer */
+ writel(h->max_commands, &h->transtable->RepQSize);
+ writel(1, &h->transtable->RepQCount);
+ writel(0, &h->transtable->RepQCtrAddrLow32);
+ writel(0, &h->transtable->RepQCtrAddrHigh32);
+ writel(h->reply_pool_dhandle, &h->transtable->RepQAddr0Low32);
+ writel(0, &h->transtable->RepQAddr0High32);
+ writel(CFGTBL_Trans_Performant,
+ &(h->cfgtable->HostWrite.TransportRequest));
+
+ writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
+ cciss_wait_for_mode_change_ack(h);
+ register_value = readl(&(h->cfgtable->TransportActive));
+ if (!(register_value & CFGTBL_Trans_Performant))
+ dev_warn(&h->pdev->dev, "cciss: unable to get board into"
+ " performant mode\n");
+}
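For a concrete reading of the bft[] table just programmed into BlockFetch0..7: each value is a count of 16-byte blocks the controller will DMA when fetching a command of that class, and, following the comment's 4k-per-SG-entry arithmetic, it also bounds the transfer size that class can describe. A small standalone tabulation (again assuming MAXSGENTRIES + 4 == 36):

#include <stdio.h>

int main(void)
{
        /* Same values written to BlockFetch0..7 above; 36 assumes
         * MAXSGENTRIES + 4 == 36. */
        int bft[8] = { 5, 6, 8, 10, 12, 20, 28, 36 };
        int i;

        for (i = 0; i < 8; i++) {
                int sg = bft[i] - 4;    /* MINIMUM_TRANSFER_BLOCKS = 4 */

                printf("bucket %d: DMA %3d bytes of command, up to %2d SG "
                       "entr%s (~%3d KiB at 4 KiB per entry)\n",
                       i, bft[i] * 16, sg, sg == 1 ? "y" : "ies", sg * 4);
        }
        return 0;
}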
+
+static void __devinit cciss_put_controller_into_performant_mode(ctlr_info_t *h)
+{
+ __u32 trans_support;
+
+ dev_dbg(&h->pdev->dev, "Trying to put board into Performant mode\n");
+ /* Attempt to put controller into performant mode if supported */
+ /* Does board support performant mode? */
+ trans_support = readl(&(h->cfgtable->TransportSupport));
+ if (!(trans_support & PERFORMANT_MODE))
+ return;
+
+ dev_dbg(&h->pdev->dev, "Placing controller into performant mode\n");
+ /* Performant mode demands commands on a 32 byte boundary
+ * pci_alloc_consistent aligns on page boundaries already.
+ * Just need to check if divisible by 32
+ */
+ if ((sizeof(CommandList_struct) % 32) != 0) {
+ dev_warn(&h->pdev->dev, "%s %d %s\n",
+ "cciss info: command size[",
+ (int)sizeof(CommandList_struct),
+ "] not divisible by 32, no performant mode..\n");
+ return;
+ }
+
+ /* Performant mode ring buffer and supporting data structures */
+ h->reply_pool = (__u64 *)pci_alloc_consistent(
+ h->pdev, h->max_commands * sizeof(__u64),
+ &(h->reply_pool_dhandle));
+
+ /* Need a block fetch table for performant mode */
+ h->blockFetchTable = kmalloc(((h->maxsgentries+1) *
+ sizeof(__u32)), GFP_KERNEL);
+
+ if ((h->reply_pool == NULL) || (h->blockFetchTable == NULL))
+ goto clean_up;
+
+ cciss_enter_performant_mode(h);
+
+ /* Change the access methods to the performant access methods */
+ h->access = SA5_performant_access;
+ h->transMethod = CFGTBL_Trans_Performant;
+
+ return;
+clean_up:
+ kfree(h->blockFetchTable);
+ if (h->reply_pool)
+ pci_free_consistent(h->pdev,
+ h->max_commands * sizeof(__u64),
+ h->reply_pool,
+ h->reply_pool_dhandle);
+ return;
+
+} /* cciss_put_controller_into_performant_mode */
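The divisibility test above is the whole host-side gate for performant mode: commands are carved contiguously out of cmd_pool, so every element must itself be a multiple of 32 bytes for each command address to stay 32-byte aligned and leave the low tag bits free. A standalone sketch of the same guard using a stand-in structure (the real CommandList_struct is defined in the cciss headers; the 96-byte figure below is purely an example):

#include <stdio.h>
#include <stddef.h>

/* Stand-in only: the real CommandList_struct lives in the cciss headers.
 * 96 is divisible by 32, so this fake layout would allow performant mode. */
struct fake_command {
        unsigned char payload[96];
};

int main(void)
{
        size_t sz = sizeof(struct fake_command);

        if (sz % 32 != 0)
                printf("command size %zu not divisible by 32; "
                       "performant mode must be skipped\n", sz);
        else
                printf("command size %zu keeps every pool element "
                       "32-byte aligned; performant mode is possible\n", sz);
        return 0;
}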
+
/* If MSI/MSI-X is supported by the kernel we will try to enable it on
* controllers that are capable. If not, we use IO-APIC mode.
*/
-static void __devinit cciss_interrupt_mode(ctlr_info_t *c,
- struct pci_dev *pdev, __u32 board_id)
+static void __devinit cciss_interrupt_mode(ctlr_info_t *h)
{
#ifdef CONFIG_PCI_MSI
int err;
@@ -3644,268 +3936,283 @@ static void __devinit cciss_interrupt_mode(ctlr_info_t *c,
};
/* Some boards advertise MSI but don't really support it */
- if ((board_id == 0x40700E11) ||
- (board_id == 0x40800E11) ||
- (board_id == 0x40820E11) || (board_id == 0x40830E11))
+ if ((h->board_id == 0x40700E11) || (h->board_id == 0x40800E11) ||
+ (h->board_id == 0x40820E11) || (h->board_id == 0x40830E11))
goto default_int_mode;
- if (pci_find_capability(pdev, PCI_CAP_ID_MSIX)) {
- err = pci_enable_msix(pdev, cciss_msix_entries, 4);
+ if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) {
+ err = pci_enable_msix(h->pdev, cciss_msix_entries, 4);
if (!err) {
- c->intr[0] = cciss_msix_entries[0].vector;
- c->intr[1] = cciss_msix_entries[1].vector;
- c->intr[2] = cciss_msix_entries[2].vector;
- c->intr[3] = cciss_msix_entries[3].vector;
- c->msix_vector = 1;
+ h->intr[0] = cciss_msix_entries[0].vector;
+ h->intr[1] = cciss_msix_entries[1].vector;
+ h->intr[2] = cciss_msix_entries[2].vector;
+ h->intr[3] = cciss_msix_entries[3].vector;
+ h->msix_vector = 1;
return;
}
if (err > 0) {
- printk(KERN_WARNING "cciss: only %d MSI-X vectors "
- "available\n", err);
+ dev_warn(&h->pdev->dev,
+ "only %d MSI-X vectors available\n", err);
goto default_int_mode;
} else {
- printk(KERN_WARNING "cciss: MSI-X init failed %d\n",
- err);
+ dev_warn(&h->pdev->dev,
+ "MSI-X init failed %d\n", err);
goto default_int_mode;
}
}
- if (pci_find_capability(pdev, PCI_CAP_ID_MSI)) {
- if (!pci_enable_msi(pdev)) {
- c->msi_vector = 1;
- } else {
- printk(KERN_WARNING "cciss: MSI init failed\n");
- }
+ if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) {
+ if (!pci_enable_msi(h->pdev))
+ h->msi_vector = 1;
+ else
+ dev_warn(&h->pdev->dev, "MSI init failed\n");
}
default_int_mode:
#endif /* CONFIG_PCI_MSI */
/* if we get here we're going to use the default interrupt mode */
- c->intr[SIMPLE_MODE_INT] = pdev->irq;
+ h->intr[PERF_MODE_INT] = h->pdev->irq;
return;
}
-static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
+static int __devinit cciss_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
{
- ushort subsystem_vendor_id, subsystem_device_id, command;
- __u32 board_id, scratchpad = 0;
- __u64 cfg_offset;
- __u32 cfg_base_addr;
- __u64 cfg_base_addr_index;
- int i, prod_index, err;
+ int i;
+ u32 subsystem_vendor_id, subsystem_device_id;
subsystem_vendor_id = pdev->subsystem_vendor;
subsystem_device_id = pdev->subsystem_device;
- board_id = (((__u32) (subsystem_device_id << 16) & 0xffff0000) |
- subsystem_vendor_id);
+ *board_id = ((subsystem_device_id << 16) & 0xffff0000) |
+ subsystem_vendor_id;
for (i = 0; i < ARRAY_SIZE(products); i++) {
/* Stand aside for hpsa driver on request */
if (cciss_allow_hpsa && products[i].board_id == HPSA_BOUNDARY)
return -ENODEV;
- if (board_id == products[i].board_id)
- break;
- }
- prod_index = i;
- if (prod_index == ARRAY_SIZE(products)) {
- dev_warn(&pdev->dev,
- "unrecognized board ID: 0x%08lx, ignoring.\n",
- (unsigned long) board_id);
- return -ENODEV;
+ if (*board_id == products[i].board_id)
+ return i;
}
+ dev_warn(&pdev->dev, "unrecognized board ID: 0x%08x, ignoring.\n",
+ *board_id);
+ return -ENODEV;
+}
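cciss_lookup_board_id() builds the 32-bit board ID by placing the PCI subsystem device ID in the upper half and the subsystem vendor ID in the lower half. A tiny standalone illustration of that packing (the device ID used here is only an example):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t subsystem_vendor_id = 0x103C;  /* Hewlett-Packard */
        uint32_t subsystem_device_id = 0x3241;  /* example subsystem device */
        uint32_t board_id;

        board_id = ((subsystem_device_id << 16) & 0xffff0000) |
                        subsystem_vendor_id;

        /* Produces the "0xNNNN103C" style keys used in the products[] table. */
        printf("board_id = 0x%08X\n", (unsigned)board_id);
        return 0;
}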
- /* check to see if controller has been disabled */
- /* BEFORE trying to enable it */
- (void)pci_read_config_word(pdev, PCI_COMMAND, &command);
- if (!(command & 0x02)) {
- printk(KERN_WARNING
- "cciss: controller appears to be disabled\n");
- return -ENODEV;
- }
+static inline bool cciss_board_disabled(ctlr_info_t *h)
+{
+ u16 command;
- err = pci_enable_device(pdev);
- if (err) {
- printk(KERN_ERR "cciss: Unable to Enable PCI device\n");
- return err;
- }
+ (void) pci_read_config_word(h->pdev, PCI_COMMAND, &command);
+ return ((command & PCI_COMMAND_MEMORY) == 0);
+}
- err = pci_request_regions(pdev, "cciss");
- if (err) {
- printk(KERN_ERR "cciss: Cannot obtain PCI resources, "
- "aborting\n");
- return err;
- }
+static int __devinit cciss_pci_find_memory_BAR(struct pci_dev *pdev,
+ unsigned long *memory_bar)
+{
+ int i;
-#ifdef CCISS_DEBUG
- printk("command = %x\n", command);
- printk("irq = %x\n", pdev->irq);
- printk("board_id = %x\n", board_id);
-#endif /* CCISS_DEBUG */
+ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
+ if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
+ /* addressing mode bits already removed */
+ *memory_bar = pci_resource_start(pdev, i);
+ dev_dbg(&pdev->dev, "memory BAR = %lx\n",
+ *memory_bar);
+ return 0;
+ }
+ dev_warn(&pdev->dev, "no memory BAR found\n");
+ return -ENODEV;
+}
-/* If the kernel supports MSI/MSI-X we will try to enable that functionality,
- * else we use the IO-APIC interrupt assigned to us by system ROM.
- */
- cciss_interrupt_mode(c, pdev, board_id);
+static int __devinit cciss_wait_for_board_ready(ctlr_info_t *h)
+{
+ int i;
+ u32 scratchpad;
- /* find the memory BAR */
- for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
- if (pci_resource_flags(pdev, i) & IORESOURCE_MEM)
- break;
- }
- if (i == DEVICE_COUNT_RESOURCE) {
- printk(KERN_WARNING "cciss: No memory BAR found\n");
- err = -ENODEV;
- goto err_out_free_res;
+ for (i = 0; i < CCISS_BOARD_READY_ITERATIONS; i++) {
+ scratchpad = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
+ if (scratchpad == CCISS_FIRMWARE_READY)
+ return 0;
+ msleep(CCISS_BOARD_READY_POLL_INTERVAL_MSECS);
}
+ dev_warn(&h->pdev->dev, "board not ready, timed out.\n");
+ return -ENODEV;
+}
- c->paddr = pci_resource_start(pdev, i); /* addressing mode bits
- * already removed
- */
+static int __devinit cciss_find_cfg_addrs(struct pci_dev *pdev,
+ void __iomem *vaddr, u32 *cfg_base_addr, u64 *cfg_base_addr_index,
+ u64 *cfg_offset)
+{
+ *cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET);
+ *cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET);
+ *cfg_base_addr &= (u32) 0x0000ffff;
+ *cfg_base_addr_index = find_PCI_BAR_index(pdev, *cfg_base_addr);
+ if (*cfg_base_addr_index == -1) {
+ dev_warn(&pdev->dev, "cannot find cfg_base_addr_index, "
+ "*cfg_base_addr = 0x%08x\n", *cfg_base_addr);
+ return -ENODEV;
+ }
+ return 0;
+}
-#ifdef CCISS_DEBUG
- printk("address 0 = %lx\n", c->paddr);
-#endif /* CCISS_DEBUG */
- c->vaddr = remap_pci_mem(c->paddr, 0x250);
+static int __devinit cciss_find_cfgtables(ctlr_info_t *h)
+{
+ u64 cfg_offset;
+ u32 cfg_base_addr;
+ u64 cfg_base_addr_index;
+ u32 trans_offset;
+ int rc;
- /* Wait for the board to become ready. (PCI hotplug needs this.)
- * We poll for up to 120 secs, once per 100ms. */
- for (i = 0; i < 1200; i++) {
- scratchpad = readl(c->vaddr + SA5_SCRATCHPAD_OFFSET);
- if (scratchpad == CCISS_FIRMWARE_READY)
- break;
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(msecs_to_jiffies(100)); /* wait 100ms */
- }
- if (scratchpad != CCISS_FIRMWARE_READY) {
- printk(KERN_WARNING "cciss: Board not ready. Timed out.\n");
- err = -ENODEV;
- goto err_out_free_res;
- }
+ rc = cciss_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
+ &cfg_base_addr_index, &cfg_offset);
+ if (rc)
+ return rc;
+ h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev,
+ cfg_base_addr_index) + cfg_offset, sizeof(h->cfgtable));
+ if (!h->cfgtable)
+ return -ENOMEM;
+ /* Find performant mode table. */
+ trans_offset = readl(&h->cfgtable->TransMethodOffset);
+ h->transtable = remap_pci_mem(pci_resource_start(h->pdev,
+ cfg_base_addr_index)+cfg_offset+trans_offset,
+ sizeof(*h->transtable));
+ if (!h->transtable)
+ return -ENOMEM;
+ return 0;
+}
- /* get the address index number */
- cfg_base_addr = readl(c->vaddr + SA5_CTCFG_OFFSET);
- cfg_base_addr &= (__u32) 0x0000ffff;
-#ifdef CCISS_DEBUG
- printk("cfg base address = %x\n", cfg_base_addr);
-#endif /* CCISS_DEBUG */
- cfg_base_addr_index = find_PCI_BAR_index(pdev, cfg_base_addr);
-#ifdef CCISS_DEBUG
- printk("cfg base address index = %llx\n",
- (unsigned long long)cfg_base_addr_index);
-#endif /* CCISS_DEBUG */
- if (cfg_base_addr_index == -1) {
- printk(KERN_WARNING "cciss: Cannot find cfg_base_addr_index\n");
- err = -ENODEV;
- goto err_out_free_res;
+static void __devinit cciss_get_max_perf_mode_cmds(struct ctlr_info *h)
+{
+ h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands));
+ if (h->max_commands < 16) {
+ dev_warn(&h->pdev->dev, "Controller reports "
+ "max supported commands of %d, an obvious lie. "
+ "Using 16. Ensure that firmware is up to date.\n",
+ h->max_commands);
+ h->max_commands = 16;
}
+}
- cfg_offset = readl(c->vaddr + SA5_CTMEM_OFFSET);
-#ifdef CCISS_DEBUG
- printk("cfg offset = %llx\n", (unsigned long long)cfg_offset);
-#endif /* CCISS_DEBUG */
- c->cfgtable = remap_pci_mem(pci_resource_start(pdev,
- cfg_base_addr_index) +
- cfg_offset, sizeof(CfgTable_struct));
- c->board_id = board_id;
-
-#ifdef CCISS_DEBUG
- print_cfg_table(c->cfgtable);
-#endif /* CCISS_DEBUG */
-
- /* Some controllers support Zero Memory Raid (ZMR).
- * When configured in ZMR mode the number of supported
- * commands drops to 64. So instead of just setting an
- * arbitrary value we make the driver a little smarter.
- * We read the config table to tell us how many commands
- * are supported on the controller then subtract 4 to
- * leave a little room for ioctl calls.
- */
- c->max_commands = readl(&(c->cfgtable->CmdsOutMax));
- c->maxsgentries = readl(&(c->cfgtable->MaxSGElements));
-
+/* Interrogate the hardware for some limits:
+ * max commands, max SG elements without chaining, and with chaining,
+ * SG chain block size, etc.
+ */
+static void __devinit cciss_find_board_params(ctlr_info_t *h)
+{
+ cciss_get_max_perf_mode_cmds(h);
+ h->nr_cmds = h->max_commands - 4; /* Allow room for some ioctls */
+ h->maxsgentries = readl(&(h->cfgtable->MaxSGElements));
/*
- * Limit native command to 32 s/g elements to save dma'able memory.
+ * Limit in-command s/g elements to 32 to save dma'able memory.
 * However spec says if 0, use 31
*/
-
- c->max_cmd_sgentries = 31;
- if (c->maxsgentries > 512) {
- c->max_cmd_sgentries = 32;
- c->chainsize = c->maxsgentries - c->max_cmd_sgentries + 1;
- c->maxsgentries -= 1; /* account for chain pointer */
+ h->max_cmd_sgentries = 31;
+ if (h->maxsgentries > 512) {
+ h->max_cmd_sgentries = 32;
+ h->chainsize = h->maxsgentries - h->max_cmd_sgentries + 1;
+ h->maxsgentries--; /* save one for chain pointer */
} else {
- c->maxsgentries = 31; /* Default to traditional value */
- c->chainsize = 0; /* traditional */
+ h->maxsgentries = 31; /* default to traditional values */
+ h->chainsize = 0;
}
+}
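To see what cciss_find_board_params() ends up with on a controller that supports SG chaining, here is a hedged standalone rerun of the same arithmetic; the 1024-element MaxSGElements reading is only an example, not a figure from any particular board:

#include <stdio.h>

int main(void)
{
        int maxsgentries = 1024;        /* example MaxSGElements reading */
        int max_cmd_sgentries = 31;
        int chainsize = 0;

        if (maxsgentries > 512) {
                max_cmd_sgentries = 32;         /* in-command limit */
                chainsize = maxsgentries - max_cmd_sgentries + 1;
                maxsgentries--;         /* one entry becomes the chain pointer */
        } else {
                maxsgentries = 31;      /* traditional default */
                chainsize = 0;
        }

        printf("maxsgentries=%d max_cmd_sgentries=%d chainsize=%d\n",
               maxsgentries, max_cmd_sgentries, chainsize);
        return 0;
}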
- c->product_name = products[prod_index].product_name;
- c->access = *(products[prod_index].access);
- c->nr_cmds = c->max_commands - 4;
- if ((readb(&c->cfgtable->Signature[0]) != 'C') ||
- (readb(&c->cfgtable->Signature[1]) != 'I') ||
- (readb(&c->cfgtable->Signature[2]) != 'S') ||
- (readb(&c->cfgtable->Signature[3]) != 'S')) {
- printk("Does not appear to be a valid CISS config table\n");
- err = -ENODEV;
- goto err_out_free_res;
+static inline bool CISS_signature_present(ctlr_info_t *h)
+{
+ if ((readb(&h->cfgtable->Signature[0]) != 'C') ||
+ (readb(&h->cfgtable->Signature[1]) != 'I') ||
+ (readb(&h->cfgtable->Signature[2]) != 'S') ||
+ (readb(&h->cfgtable->Signature[3]) != 'S')) {
+ dev_warn(&h->pdev->dev, "not a valid CISS config table\n");
+ return false;
}
+ return true;
+}
+
+/* Need to enable prefetch in the SCSI core for 6400 in x86 */
+static inline void cciss_enable_scsi_prefetch(ctlr_info_t *h)
+{
#ifdef CONFIG_X86
- {
- /* Need to enable prefetch in the SCSI core for 6400 in x86 */
- __u32 prefetch;
- prefetch = readl(&(c->cfgtable->SCSI_Prefetch));
- prefetch |= 0x100;
- writel(prefetch, &(c->cfgtable->SCSI_Prefetch));
- }
+ u32 prefetch;
+
+ prefetch = readl(&(h->cfgtable->SCSI_Prefetch));
+ prefetch |= 0x100;
+ writel(prefetch, &(h->cfgtable->SCSI_Prefetch));
#endif
+}
- /* Disabling DMA prefetch and refetch for the P600.
- * An ASIC bug may result in accesses to invalid memory addresses.
- * We've disabled prefetch for some time now. Testing with XEN
- * kernels revealed a bug in the refetch if dom0 resides on a P600.
- */
- if(board_id == 0x3225103C) {
- __u32 dma_prefetch;
- __u32 dma_refetch;
- dma_prefetch = readl(c->vaddr + I2O_DMA1_CFG);
- dma_prefetch |= 0x8000;
- writel(dma_prefetch, c->vaddr + I2O_DMA1_CFG);
- pci_read_config_dword(pdev, PCI_COMMAND_PARITY, &dma_refetch);
- dma_refetch |= 0x1;
- pci_write_config_dword(pdev, PCI_COMMAND_PARITY, dma_refetch);
+/* Disable DMA prefetch for the P600. Otherwise an ASIC bug may result
+ * in a prefetch beyond physical memory.
+ */
+static inline void cciss_p600_dma_prefetch_quirk(ctlr_info_t *h)
+{
+ u32 dma_prefetch;
+ __u32 dma_refetch;
+
+ if (h->board_id != 0x3225103C)
+ return;
+ dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG);
+ dma_prefetch |= 0x8000;
+ writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
+ pci_read_config_dword(h->pdev, PCI_COMMAND_PARITY, &dma_refetch);
+ dma_refetch |= 0x1;
+ pci_write_config_dword(h->pdev, PCI_COMMAND_PARITY, dma_refetch);
+}
+
+static int __devinit cciss_pci_init(ctlr_info_t *h)
+{
+ int prod_index, err;
+
+ prod_index = cciss_lookup_board_id(h->pdev, &h->board_id);
+ if (prod_index < 0)
+ return -ENODEV;
+ h->product_name = products[prod_index].product_name;
+ h->access = *(products[prod_index].access);
+
+ if (cciss_board_disabled(h)) {
+ dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
+ return -ENODEV;
+ }
+ err = pci_enable_device(h->pdev);
+ if (err) {
+ dev_warn(&h->pdev->dev, "Unable to Enable PCI device\n");
+ return err;
}
-#ifdef CCISS_DEBUG
- printk("Trying to put board into Simple mode\n");
-#endif /* CCISS_DEBUG */
- c->max_commands = readl(&(c->cfgtable->CmdsOutMax));
- /* Update the field, and then ring the doorbell */
- writel(CFGTBL_Trans_Simple, &(c->cfgtable->HostWrite.TransportRequest));
- writel(CFGTBL_ChangeReq, c->vaddr + SA5_DOORBELL);
+ err = pci_request_regions(h->pdev, "cciss");
+ if (err) {
+ dev_warn(&h->pdev->dev,
+ "Cannot obtain PCI resources, aborting\n");
+ return err;
+ }
- /* under certain very rare conditions, this can take awhile.
- * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
- * as we enter this code.) */
- for (i = 0; i < MAX_CONFIG_WAIT; i++) {
- if (!(readl(c->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
- break;
- /* delay and try again */
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(msecs_to_jiffies(1));
+ dev_dbg(&h->pdev->dev, "irq = %x\n", h->pdev->irq);
+ dev_dbg(&h->pdev->dev, "board_id = %x\n", h->board_id);
+
+/* If the kernel supports MSI/MSI-X we will try to enable that functionality,
+ * else we use the IO-APIC interrupt assigned to us by system ROM.
+ */
+ cciss_interrupt_mode(h);
+ err = cciss_pci_find_memory_BAR(h->pdev, &h->paddr);
+ if (err)
+ goto err_out_free_res;
+ h->vaddr = remap_pci_mem(h->paddr, 0x250);
+ if (!h->vaddr) {
+ err = -ENOMEM;
+ goto err_out_free_res;
}
+ err = cciss_wait_for_board_ready(h);
+ if (err)
+ goto err_out_free_res;
+ err = cciss_find_cfgtables(h);
+ if (err)
+ goto err_out_free_res;
+ print_cfg_table(h);
+ cciss_find_board_params(h);
-#ifdef CCISS_DEBUG
- printk(KERN_DEBUG "I counter got to %d %x\n", i,
- readl(c->vaddr + SA5_DOORBELL));
-#endif /* CCISS_DEBUG */
-#ifdef CCISS_DEBUG
- print_cfg_table(c->cfgtable);
-#endif /* CCISS_DEBUG */
-
- if (!(readl(&(c->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) {
- printk(KERN_WARNING "cciss: unable to get board into"
- " simple mode\n");
+ if (!CISS_signature_present(h)) {
err = -ENODEV;
goto err_out_free_res;
}
+ cciss_enable_scsi_prefetch(h);
+ cciss_p600_dma_prefetch_quirk(h);
+ cciss_put_controller_into_performant_mode(h);
return 0;
err_out_free_res:
@@ -3913,42 +4220,47 @@ err_out_free_res:
* Deliberately omit pci_disable_device(): it does something nasty to
* Smart Array controllers that pci_enable_device does not undo
*/
- pci_release_regions(pdev);
+ if (h->transtable)
+ iounmap(h->transtable);
+ if (h->cfgtable)
+ iounmap(h->cfgtable);
+ if (h->vaddr)
+ iounmap(h->vaddr);
+ pci_release_regions(h->pdev);
return err;
}
/* Function to find the first free pointer into our hba[] array
* Returns -1 if no free entries are left.
*/
-static int alloc_cciss_hba(void)
+static int alloc_cciss_hba(struct pci_dev *pdev)
{
int i;
for (i = 0; i < MAX_CTLR; i++) {
if (!hba[i]) {
- ctlr_info_t *p;
+ ctlr_info_t *h;
- p = kzalloc(sizeof(ctlr_info_t), GFP_KERNEL);
- if (!p)
+ h = kzalloc(sizeof(ctlr_info_t), GFP_KERNEL);
+ if (!h)
goto Enomem;
- hba[i] = p;
+ hba[i] = h;
return i;
}
}
- printk(KERN_WARNING "cciss: This driver supports a maximum"
+ dev_warn(&pdev->dev, "This driver supports a maximum"
" of %d controllers.\n", MAX_CTLR);
return -1;
Enomem:
- printk(KERN_ERR "cciss: out of memory.\n");
+ dev_warn(&pdev->dev, "out of memory.\n");
return -1;
}
-static void free_hba(int n)
+static void free_hba(ctlr_info_t *h)
{
- ctlr_info_t *h = hba[n];
int i;
- hba[n] = NULL;
+ hba[h->ctlr] = NULL;
for (i = 0; i < h->highest_lun + 1; i++)
if (h->gendisk[i] != NULL)
put_disk(h->gendisk[i]);
@@ -4028,7 +4340,8 @@ static __devinit int cciss_message(struct pci_dev *pdev, unsigned char opcode, u
/* we leak the DMA buffer here ... no choice since the controller could
still complete the command. */
if (i == 10) {
- printk(KERN_ERR "cciss: controller message %02x:%02x timed out\n",
+ dev_err(&pdev->dev,
+ "controller message %02x:%02x timed out\n",
opcode, type);
return -ETIMEDOUT;
}
@@ -4036,12 +4349,12 @@ static __devinit int cciss_message(struct pci_dev *pdev, unsigned char opcode, u
pci_free_consistent(pdev, cmd_sz, cmd, paddr64);
if (tag & 2) {
- printk(KERN_ERR "cciss: controller message %02x:%02x failed\n",
+ dev_err(&pdev->dev, "controller message %02x:%02x failed\n",
opcode, type);
return -EIO;
}
- printk(KERN_INFO "cciss: controller message %02x:%02x succeeded\n",
+ dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n",
opcode, type);
return 0;
}
@@ -4062,7 +4375,7 @@ static __devinit int cciss_reset_msi(struct pci_dev *pdev)
if (pos) {
pci_read_config_word(pdev, msi_control_reg(pos), &control);
if (control & PCI_MSI_FLAGS_ENABLE) {
- printk(KERN_INFO "cciss: resetting MSI\n");
+ dev_info(&pdev->dev, "resetting MSI\n");
pci_write_config_word(pdev, msi_control_reg(pos), control & ~PCI_MSI_FLAGS_ENABLE);
}
}
@@ -4071,7 +4384,7 @@ static __devinit int cciss_reset_msi(struct pci_dev *pdev)
if (pos) {
pci_read_config_word(pdev, msi_control_reg(pos), &control);
if (control & PCI_MSIX_FLAGS_ENABLE) {
- printk(KERN_INFO "cciss: resetting MSI-X\n");
+ dev_info(&pdev->dev, "resetting MSI-X\n");
pci_write_config_word(pdev, msi_control_reg(pos), control & ~PCI_MSIX_FLAGS_ENABLE);
}
}
@@ -4079,68 +4392,144 @@ static __devinit int cciss_reset_msi(struct pci_dev *pdev)
return 0;
}
-/* This does a hard reset of the controller using PCI power management
- * states. */
-static __devinit int cciss_hard_reset_controller(struct pci_dev *pdev)
+static int cciss_controller_hard_reset(struct pci_dev *pdev,
+ void * __iomem vaddr, bool use_doorbell)
{
- u16 pmcsr, saved_config_space[32];
- int i, pos;
+ u16 pmcsr;
+ int pos;
- printk(KERN_INFO "cciss: using PCI PM to reset controller\n");
+ if (use_doorbell) {
+ /* For everything after the P600, the PCI power state method
+ * of resetting the controller doesn't work, so we have this
+ * other way using the doorbell register.
+ */
+ dev_info(&pdev->dev, "using doorbell to reset controller\n");
+ writel(DOORBELL_CTLR_RESET, vaddr + SA5_DOORBELL);
+ msleep(1000);
+ } else { /* Try to do it the PCI power state way */
+
+ /* Quoting from the Open CISS Specification: "The Power
+ * Management Control/Status Register (CSR) controls the power
+ * state of the device. The normal operating state is D0,
+ * CSR=00h. The software off state is D3, CSR=03h. To reset
+ * the controller, place the interface device in D3 then to D0,
+ * this causes a secondary PCI reset which will reset the
+ * controller." */
+
+ pos = pci_find_capability(pdev, PCI_CAP_ID_PM);
+ if (pos == 0) {
+ dev_err(&pdev->dev,
+ "cciss_controller_hard_reset: "
+ "PCI PM not supported\n");
+ return -ENODEV;
+ }
+ dev_info(&pdev->dev, "using PCI PM to reset controller\n");
+ /* enter the D3hot power management state */
+ pci_read_config_word(pdev, pos + PCI_PM_CTRL, &pmcsr);
+ pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
+ pmcsr |= PCI_D3hot;
+ pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
- /* This is very nearly the same thing as
+ msleep(500);
- pci_save_state(pci_dev);
- pci_set_power_state(pci_dev, PCI_D3hot);
- pci_set_power_state(pci_dev, PCI_D0);
- pci_restore_state(pci_dev);
+ /* enter the D0 power management state */
+ pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
+ pmcsr |= PCI_D0;
+ pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
- but we can't use these nice canned kernel routines on
- kexec, because they also check the MSI/MSI-X state in PCI
- configuration space and do the wrong thing when it is
- set/cleared. Also, the pci_save/restore_state functions
- violate the ordering requirements for restoring the
- configuration space from the CCISS document (see the
- comment below). So we roll our own .... */
+ msleep(500);
+ }
+ return 0;
+}
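The PCI-PM branch above follows the quoted Open CISS wording directly: the PowerState field is the low two bits of the PM control/status register, so the reset amounts to writing 03h (D3hot) and then 00h (D0) into that field. A minimal standalone sketch of just that bit manipulation (constants written out literally here; the driver uses the kernel's PCI_PM_CTRL_STATE_MASK / PCI_D3hot / PCI_D0 definitions):

#include <stdint.h>
#include <stdio.h>

#define PM_CTRL_STATE_MASK 0x0003   /* PowerState field, CSR bits 1:0 */
#define D3HOT              0x0003
#define D0                 0x0000

static uint16_t set_power_state(uint16_t pmcsr, uint16_t state)
{
        pmcsr &= ~PM_CTRL_STATE_MASK;   /* clear the PowerState field */
        pmcsr |= state;                 /* select the new state */
        return pmcsr;
}

int main(void)
{
        uint16_t pmcsr = 0x0008;        /* example CSR with other bits set */

        pmcsr = set_power_state(pmcsr, D3HOT);
        printf("after D3hot write: 0x%04x\n", pmcsr);   /* 0x000b */
        pmcsr = set_power_state(pmcsr, D0);
        printf("after D0 write:    0x%04x\n", pmcsr);   /* 0x0008 */
        return 0;
}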
- for (i = 0; i < 32; i++)
- pci_read_config_word(pdev, 2*i, &saved_config_space[i]);
+/* This does a hard reset of the controller using PCI power management
+ * states or using the doorbell register. */
+static __devinit int cciss_kdump_hard_reset_controller(struct pci_dev *pdev)
+{
+ u16 saved_config_space[32];
+ u64 cfg_offset;
+ u32 cfg_base_addr;
+ u64 cfg_base_addr_index;
+ void __iomem *vaddr;
+ unsigned long paddr;
+ u32 misc_fw_support, active_transport;
+ int rc, i;
+ CfgTable_struct __iomem *cfgtable;
+ bool use_doorbell;
+ u32 board_id;
+
+ /* For controllers as old as the P600, this is very nearly
+ * the same thing as
+ *
+ * pci_save_state(pci_dev);
+ * pci_set_power_state(pci_dev, PCI_D3hot);
+ * pci_set_power_state(pci_dev, PCI_D0);
+ * pci_restore_state(pci_dev);
+ *
+ * but we can't use these nice canned kernel routines on
+ * kexec, because they also check the MSI/MSI-X state in PCI
+ * configuration space and do the wrong thing when it is
+ * set/cleared. Also, the pci_save/restore_state functions
+ * violate the ordering requirements for restoring the
+ * configuration space from the CCISS document (see the
+ * comment below). So we roll our own ....
+ *
+ * For controllers newer than the P600, the pci power state
+ * method of resetting doesn't work so we have another way
+ * using the doorbell register.
+ */
- pos = pci_find_capability(pdev, PCI_CAP_ID_PM);
- if (pos == 0) {
- printk(KERN_ERR "cciss_reset_controller: PCI PM not supported\n");
+ /* Exclude 640x boards. These are two pci devices in one slot
+ * which share a battery backed cache module. One controls the
+ * cache, the other accesses the cache through the one that controls
+ * it. If we reset the one controlling the cache, the other will
+ * likely not be happy. Just forbid resetting this conjoined mess.
+ */
+ cciss_lookup_board_id(pdev, &board_id);
+ if (board_id == 0x409C0E11 || board_id == 0x409D0E11) {
+ dev_warn(&pdev->dev, "Cannot reset Smart Array 640x "
+ "due to shared cache module.");
return -ENODEV;
}
- /* Quoting from the Open CISS Specification: "The Power
- * Management Control/Status Register (CSR) controls the power
- * state of the device. The normal operating state is D0,
- * CSR=00h. The software off state is D3, CSR=03h. To reset
- * the controller, place the interface device in D3 then to
- * D0, this causes a secondary PCI reset which will reset the
- * controller." */
+ for (i = 0; i < 32; i++)
+ pci_read_config_word(pdev, 2*i, &saved_config_space[i]);
- /* enter the D3hot power management state */
- pci_read_config_word(pdev, pos + PCI_PM_CTRL, &pmcsr);
- pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
- pmcsr |= PCI_D3hot;
- pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
+ /* find the first memory BAR, so we can find the cfg table */
+ rc = cciss_pci_find_memory_BAR(pdev, &paddr);
+ if (rc)
+ return rc;
+ vaddr = remap_pci_mem(paddr, 0x250);
+ if (!vaddr)
+ return -ENOMEM;
- schedule_timeout_uninterruptible(HZ >> 1);
+ /* find cfgtable in order to check if reset via doorbell is supported */
+ rc = cciss_find_cfg_addrs(pdev, vaddr, &cfg_base_addr,
+ &cfg_base_addr_index, &cfg_offset);
+ if (rc)
+ goto unmap_vaddr;
+ cfgtable = remap_pci_mem(pci_resource_start(pdev,
+ cfg_base_addr_index) + cfg_offset, sizeof(*cfgtable));
+ if (!cfgtable) {
+ rc = -ENOMEM;
+ goto unmap_vaddr;
+ }
- /* enter the D0 power management state */
- pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
- pmcsr |= PCI_D0;
- pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
+ /* If reset via doorbell register is supported, use that. */
+ misc_fw_support = readl(&cfgtable->misc_fw_support);
+ use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
- schedule_timeout_uninterruptible(HZ >> 1);
+ rc = cciss_controller_hard_reset(pdev, vaddr, use_doorbell);
+ if (rc)
+ goto unmap_cfgtable;
/* Restore the PCI configuration space. The Open CISS
* Specification says, "Restore the PCI Configuration
* Registers, offsets 00h through 60h. It is important to
* restore the command register, 16-bits at offset 04h,
* last. Do not restore the configuration status register,
- * 16-bits at offset 06h." Note that the offset is 2*i. */
+ * 16-bits at offset 06h." Note that the offset is 2*i.
+ */
for (i = 0; i < 32; i++) {
if (i == 2 || i == 3)
continue;
@@ -4149,6 +4538,63 @@ static __devinit int cciss_hard_reset_controller(struct pci_dev *pdev)
wmb();
pci_write_config_word(pdev, 4, saved_config_space[2]);
+ /* Some devices (notably the HP Smart Array 5i Controller)
+ need a little pause here */
+ msleep(CCISS_POST_RESET_PAUSE_MSECS);
+
+ /* Controller should be in simple mode at this point. If it's not,
+ * it means we're on one of those controllers which doesn't support
+ * the doorbell reset method and on which the PCI power management reset
+ * method doesn't work (P800, for example.)
+ * In those cases, don't try to proceed, as it generally doesn't work.
+ */
+ active_transport = readl(&cfgtable->TransportActive);
+ if (active_transport & PERFORMANT_MODE) {
+ dev_warn(&pdev->dev, "Unable to successfully reset controller,"
+ " Ignoring controller.\n");
+ rc = -ENODEV;
+ }
+
+unmap_cfgtable:
+ iounmap(cfgtable);
+
+unmap_vaddr:
+ iounmap(vaddr);
+ return rc;
+}
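To make the offset arithmetic in the restore loop concrete: saved_config_space[] was filled with 16-bit reads at offset 2*i, so index 2 is the command register (offset 04h, restored last above) and index 3 is the status register (offset 06h, never restored). A trivial standalone mapping of the first few indices:

#include <stdio.h>

int main(void)
{
        int i;

        /* saved_config_space[i] holds the 16-bit word at config offset 2*i:
         * index 2 -> command register (04h, written last),
         * index 3 -> status register (06h, skipped entirely). */
        for (i = 0; i < 4; i++)
                printf("saved_config_space[%d] <-> PCI config offset 0x%02x\n",
                       i, 2 * i);
        return 0;
}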
+
+static __devinit int cciss_init_reset_devices(struct pci_dev *pdev)
+{
+ int rc, i;
+
+ if (!reset_devices)
+ return 0;
+
+ /* Reset the controller with a PCI power-cycle or via doorbell */
+ rc = cciss_kdump_hard_reset_controller(pdev);
+
+ /* -ENOTSUPP here means we cannot reset the controller
+ * but it's already (and still) up and running in
+ * "performant mode". Or, it might be 640x, which can't reset
+ * due to concerns about shared bbwc between 6402/6404 pair.
+ */
+ if (rc == -ENOTSUPP)
+ return 0; /* just try to do the kdump anyhow. */
+ if (rc)
+ return -ENODEV;
+ if (cciss_reset_msi(pdev))
+ return -ENODEV;
+
+ /* Now try to get the controller to respond to a no-op */
+ for (i = 0; i < CCISS_POST_RESET_NOOP_RETRIES; i++) {
+ if (cciss_noop(pdev) == 0)
+ break;
+ else
+ dev_warn(&pdev->dev, "no-op failed%s\n",
+ (i < CCISS_POST_RESET_NOOP_RETRIES - 1 ?
+ "; re-trying" : ""));
+ msleep(CCISS_POST_RESET_NOOP_INTERVAL_MSECS);
+ }
return 0;
}
@@ -4166,46 +4612,31 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
int rc;
int dac, return_code;
InquiryData_struct *inq_buff;
+ ctlr_info_t *h;
- if (reset_devices) {
- /* Reset the controller with a PCI power-cycle */
- if (cciss_hard_reset_controller(pdev) || cciss_reset_msi(pdev))
- return -ENODEV;
-
- /* Now try to get the controller to respond to a no-op. Some
- devices (notably the HP Smart Array 5i Controller) need
- up to 30 seconds to respond. */
- for (i=0; i<30; i++) {
- if (cciss_noop(pdev) == 0)
- break;
-
- schedule_timeout_uninterruptible(HZ);
- }
- if (i == 30) {
- printk(KERN_ERR "cciss: controller seems dead\n");
- return -EBUSY;
- }
- }
-
- i = alloc_cciss_hba();
+ rc = cciss_init_reset_devices(pdev);
+ if (rc)
+ return rc;
+ i = alloc_cciss_hba(pdev);
if (i < 0)
return -1;
- hba[i]->busy_initializing = 1;
- INIT_HLIST_HEAD(&hba[i]->cmpQ);
- INIT_HLIST_HEAD(&hba[i]->reqQ);
- mutex_init(&hba[i]->busy_shutting_down);
+ h = hba[i];
+ h->pdev = pdev;
+ h->busy_initializing = 1;
+ INIT_HLIST_HEAD(&h->cmpQ);
+ INIT_HLIST_HEAD(&h->reqQ);
+ mutex_init(&h->busy_shutting_down);
- if (cciss_pci_init(hba[i], pdev) != 0)
+ if (cciss_pci_init(h) != 0)
goto clean_no_release_regions;
- sprintf(hba[i]->devname, "cciss%d", i);
- hba[i]->ctlr = i;
- hba[i]->pdev = pdev;
+ sprintf(h->devname, "cciss%d", i);
+ h->ctlr = i;
- init_completion(&hba[i]->scan_wait);
+ init_completion(&h->scan_wait);
- if (cciss_create_hba_sysfs_entry(hba[i]))
+ if (cciss_create_hba_sysfs_entry(h))
goto clean0;
/* configure PCI DMA stuff */
@@ -4214,7 +4645,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))
dac = 0;
else {
- printk(KERN_ERR "cciss: no suitable DMA available\n");
+ dev_err(&h->pdev->dev, "no suitable DMA available\n");
goto clean1;
}
@@ -4224,151 +4655,161 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
* 8 controller support.
*/
if (i < MAX_CTLR_ORIG)
- hba[i]->major = COMPAQ_CISS_MAJOR + i;
- rc = register_blkdev(hba[i]->major, hba[i]->devname);
+ h->major = COMPAQ_CISS_MAJOR + i;
+ rc = register_blkdev(h->major, h->devname);
if (rc == -EBUSY || rc == -EINVAL) {
- printk(KERN_ERR
- "cciss: Unable to get major number %d for %s "
- "on hba %d\n", hba[i]->major, hba[i]->devname, i);
+ dev_err(&h->pdev->dev,
+ "Unable to get major number %d for %s "
+ "on hba %d\n", h->major, h->devname, i);
goto clean1;
} else {
if (i >= MAX_CTLR_ORIG)
- hba[i]->major = rc;
+ h->major = rc;
}
/* make sure the board interrupts are off */
- hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_OFF);
- if (request_irq(hba[i]->intr[SIMPLE_MODE_INT], do_cciss_intr,
- IRQF_DISABLED | IRQF_SHARED, hba[i]->devname, hba[i])) {
- printk(KERN_ERR "cciss: Unable to get irq %d for %s\n",
- hba[i]->intr[SIMPLE_MODE_INT], hba[i]->devname);
- goto clean2;
+ h->access.set_intr_mask(h, CCISS_INTR_OFF);
+ if (h->msi_vector || h->msix_vector) {
+ if (request_irq(h->intr[PERF_MODE_INT],
+ do_cciss_msix_intr,
+ IRQF_DISABLED, h->devname, h)) {
+ dev_err(&h->pdev->dev, "Unable to get irq %d for %s\n",
+ h->intr[PERF_MODE_INT], h->devname);
+ goto clean2;
+ }
+ } else {
+ if (request_irq(h->intr[PERF_MODE_INT], do_cciss_intx,
+ IRQF_DISABLED, h->devname, h)) {
+ dev_err(&h->pdev->dev, "Unable to get irq %d for %s\n",
+ h->intr[PERF_MODE_INT], h->devname);
+ goto clean2;
+ }
}
- printk(KERN_INFO "%s: <0x%x> at PCI %s IRQ %d%s using DAC\n",
- hba[i]->devname, pdev->device, pci_name(pdev),
- hba[i]->intr[SIMPLE_MODE_INT], dac ? "" : " not");
+ dev_info(&h->pdev->dev, "%s: <0x%x> at PCI %s IRQ %d%s using DAC\n",
+ h->devname, pdev->device, pci_name(pdev),
+ h->intr[PERF_MODE_INT], dac ? "" : " not");
- hba[i]->cmd_pool_bits =
- kmalloc(DIV_ROUND_UP(hba[i]->nr_cmds, BITS_PER_LONG)
+ h->cmd_pool_bits =
+ kmalloc(DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG)
* sizeof(unsigned long), GFP_KERNEL);
- hba[i]->cmd_pool = (CommandList_struct *)
- pci_alloc_consistent(hba[i]->pdev,
- hba[i]->nr_cmds * sizeof(CommandList_struct),
- &(hba[i]->cmd_pool_dhandle));
- hba[i]->errinfo_pool = (ErrorInfo_struct *)
- pci_alloc_consistent(hba[i]->pdev,
- hba[i]->nr_cmds * sizeof(ErrorInfo_struct),
- &(hba[i]->errinfo_pool_dhandle));
- if ((hba[i]->cmd_pool_bits == NULL)
- || (hba[i]->cmd_pool == NULL)
- || (hba[i]->errinfo_pool == NULL)) {
- printk(KERN_ERR "cciss: out of memory");
+ h->cmd_pool = (CommandList_struct *)
+ pci_alloc_consistent(h->pdev,
+ h->nr_cmds * sizeof(CommandList_struct),
+ &(h->cmd_pool_dhandle));
+ h->errinfo_pool = (ErrorInfo_struct *)
+ pci_alloc_consistent(h->pdev,
+ h->nr_cmds * sizeof(ErrorInfo_struct),
+ &(h->errinfo_pool_dhandle));
+ if ((h->cmd_pool_bits == NULL)
+ || (h->cmd_pool == NULL)
+ || (h->errinfo_pool == NULL)) {
+ dev_err(&h->pdev->dev, "out of memory");
goto clean4;
}
/* Need space for temp scatter list */
- hba[i]->scatter_list = kmalloc(hba[i]->max_commands *
+ h->scatter_list = kmalloc(h->max_commands *
sizeof(struct scatterlist *),
GFP_KERNEL);
- for (k = 0; k < hba[i]->nr_cmds; k++) {
- hba[i]->scatter_list[k] = kmalloc(sizeof(struct scatterlist) *
- hba[i]->maxsgentries,
+ for (k = 0; k < h->nr_cmds; k++) {
+ h->scatter_list[k] = kmalloc(sizeof(struct scatterlist) *
+ h->maxsgentries,
GFP_KERNEL);
- if (hba[i]->scatter_list[k] == NULL) {
- printk(KERN_ERR "cciss%d: could not allocate "
- "s/g lists\n", i);
+ if (h->scatter_list[k] == NULL) {
+ dev_err(&h->pdev->dev,
+ "could not allocate s/g lists\n");
goto clean4;
}
}
- hba[i]->cmd_sg_list = cciss_allocate_sg_chain_blocks(hba[i],
- hba[i]->chainsize, hba[i]->nr_cmds);
- if (!hba[i]->cmd_sg_list && hba[i]->chainsize > 0)
+ h->cmd_sg_list = cciss_allocate_sg_chain_blocks(h,
+ h->chainsize, h->nr_cmds);
+ if (!h->cmd_sg_list && h->chainsize > 0)
goto clean4;
- spin_lock_init(&hba[i]->lock);
+ spin_lock_init(&h->lock);
/* Initialize the pdev driver private data.
- have it point to hba[i]. */
- pci_set_drvdata(pdev, hba[i]);
+ have it point to h. */
+ pci_set_drvdata(pdev, h);
/* command and error info recs zeroed out before
they are used */
- memset(hba[i]->cmd_pool_bits, 0,
- DIV_ROUND_UP(hba[i]->nr_cmds, BITS_PER_LONG)
+ memset(h->cmd_pool_bits, 0,
+ DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG)
* sizeof(unsigned long));
- hba[i]->num_luns = 0;
- hba[i]->highest_lun = -1;
+ h->num_luns = 0;
+ h->highest_lun = -1;
for (j = 0; j < CISS_MAX_LUN; j++) {
- hba[i]->drv[j] = NULL;
- hba[i]->gendisk[j] = NULL;
+ h->drv[j] = NULL;
+ h->gendisk[j] = NULL;
}
- cciss_scsi_setup(i);
+ cciss_scsi_setup(h);
/* Turn the interrupts on so we can service requests */
- hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON);
+ h->access.set_intr_mask(h, CCISS_INTR_ON);
/* Get the firmware version */
inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
if (inq_buff == NULL) {
- printk(KERN_ERR "cciss: out of memory\n");
+ dev_err(&h->pdev->dev, "out of memory\n");
goto clean4;
}
- return_code = sendcmd_withirq(CISS_INQUIRY, i, inq_buff,
+ return_code = sendcmd_withirq(h, CISS_INQUIRY, inq_buff,
sizeof(InquiryData_struct), 0, CTLR_LUNID, TYPE_CMD);
if (return_code == IO_OK) {
- hba[i]->firm_ver[0] = inq_buff->data_byte[32];
- hba[i]->firm_ver[1] = inq_buff->data_byte[33];
- hba[i]->firm_ver[2] = inq_buff->data_byte[34];
- hba[i]->firm_ver[3] = inq_buff->data_byte[35];
+ h->firm_ver[0] = inq_buff->data_byte[32];
+ h->firm_ver[1] = inq_buff->data_byte[33];
+ h->firm_ver[2] = inq_buff->data_byte[34];
+ h->firm_ver[3] = inq_buff->data_byte[35];
} else { /* send command failed */
- printk(KERN_WARNING "cciss: unable to determine firmware"
+ dev_warn(&h->pdev->dev, "unable to determine firmware"
" version of controller\n");
}
kfree(inq_buff);
- cciss_procinit(i);
+ cciss_procinit(h);
- hba[i]->cciss_max_sectors = 8192;
+ h->cciss_max_sectors = 8192;
- rebuild_lun_table(hba[i], 1, 0);
- hba[i]->busy_initializing = 0;
+ rebuild_lun_table(h, 1, 0);
+ h->busy_initializing = 0;
return 1;
clean4:
- kfree(hba[i]->cmd_pool_bits);
+ kfree(h->cmd_pool_bits);
/* Free up sg elements */
- for (k = 0; k < hba[i]->nr_cmds; k++)
- kfree(hba[i]->scatter_list[k]);
- kfree(hba[i]->scatter_list);
- cciss_free_sg_chain_blocks(hba[i]->cmd_sg_list, hba[i]->nr_cmds);
- if (hba[i]->cmd_pool)
- pci_free_consistent(hba[i]->pdev,
- hba[i]->nr_cmds * sizeof(CommandList_struct),
- hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
- if (hba[i]->errinfo_pool)
- pci_free_consistent(hba[i]->pdev,
- hba[i]->nr_cmds * sizeof(ErrorInfo_struct),
- hba[i]->errinfo_pool,
- hba[i]->errinfo_pool_dhandle);
- free_irq(hba[i]->intr[SIMPLE_MODE_INT], hba[i]);
+ for (k = 0; k < h->nr_cmds; k++)
+ kfree(h->scatter_list[k]);
+ kfree(h->scatter_list);
+ cciss_free_sg_chain_blocks(h->cmd_sg_list, h->nr_cmds);
+ if (h->cmd_pool)
+ pci_free_consistent(h->pdev,
+ h->nr_cmds * sizeof(CommandList_struct),
+ h->cmd_pool, h->cmd_pool_dhandle);
+ if (h->errinfo_pool)
+ pci_free_consistent(h->pdev,
+ h->nr_cmds * sizeof(ErrorInfo_struct),
+ h->errinfo_pool,
+ h->errinfo_pool_dhandle);
+ free_irq(h->intr[PERF_MODE_INT], h);
clean2:
- unregister_blkdev(hba[i]->major, hba[i]->devname);
+ unregister_blkdev(h->major, h->devname);
clean1:
- cciss_destroy_hba_sysfs_entry(hba[i]);
+ cciss_destroy_hba_sysfs_entry(h);
clean0:
pci_release_regions(pdev);
clean_no_release_regions:
- hba[i]->busy_initializing = 0;
+ h->busy_initializing = 0;
/*
* Deliberately omit pci_disable_device(): it does something nasty to
* Smart Array controllers that pci_enable_device does not undo
*/
pci_set_drvdata(pdev, NULL);
- free_hba(i);
+ free_hba(h);
return -1;
}
@@ -4381,55 +4822,51 @@ static void cciss_shutdown(struct pci_dev *pdev)
h = pci_get_drvdata(pdev);
flush_buf = kzalloc(4, GFP_KERNEL);
if (!flush_buf) {
- printk(KERN_WARNING
- "cciss:%d cache not flushed, out of memory.\n",
- h->ctlr);
+ dev_warn(&h->pdev->dev, "cache not flushed, out of memory.\n");
return;
}
/* write all data in the battery backed cache to disk */
memset(flush_buf, 0, 4);
- return_code = sendcmd_withirq(CCISS_CACHE_FLUSH, h->ctlr, flush_buf,
+ return_code = sendcmd_withirq(h, CCISS_CACHE_FLUSH, flush_buf,
4, 0, CTLR_LUNID, TYPE_CMD);
kfree(flush_buf);
if (return_code != IO_OK)
- printk(KERN_WARNING "cciss%d: Error flushing cache\n",
- h->ctlr);
+ dev_warn(&h->pdev->dev, "Error flushing cache\n");
h->access.set_intr_mask(h, CCISS_INTR_OFF);
- free_irq(h->intr[2], h);
+ free_irq(h->intr[PERF_MODE_INT], h);
}
static void __devexit cciss_remove_one(struct pci_dev *pdev)
{
- ctlr_info_t *tmp_ptr;
+ ctlr_info_t *h;
int i, j;
if (pci_get_drvdata(pdev) == NULL) {
- printk(KERN_ERR "cciss: Unable to remove device \n");
+ dev_err(&pdev->dev, "Unable to remove device\n");
return;
}
- tmp_ptr = pci_get_drvdata(pdev);
- i = tmp_ptr->ctlr;
+ h = pci_get_drvdata(pdev);
+ i = h->ctlr;
if (hba[i] == NULL) {
- printk(KERN_ERR "cciss: device appears to "
- "already be removed \n");
+ dev_err(&pdev->dev, "device appears to already be removed\n");
return;
}
- mutex_lock(&hba[i]->busy_shutting_down);
+ mutex_lock(&h->busy_shutting_down);
- remove_from_scan_list(hba[i]);
- remove_proc_entry(hba[i]->devname, proc_cciss);
- unregister_blkdev(hba[i]->major, hba[i]->devname);
+ remove_from_scan_list(h);
+ remove_proc_entry(h->devname, proc_cciss);
+ unregister_blkdev(h->major, h->devname);
/* remove it from the disk list */
for (j = 0; j < CISS_MAX_LUN; j++) {
- struct gendisk *disk = hba[i]->gendisk[j];
+ struct gendisk *disk = h->gendisk[j];
if (disk) {
struct request_queue *q = disk->queue;
if (disk->flags & GENHD_FL_UP) {
- cciss_destroy_ld_sysfs_entry(hba[i], j, 1);
+ cciss_destroy_ld_sysfs_entry(h, j, 1);
del_gendisk(disk);
}
if (q)
@@ -4438,39 +4875,41 @@ static void __devexit cciss_remove_one(struct pci_dev *pdev)
}
#ifdef CONFIG_CISS_SCSI_TAPE
- cciss_unregister_scsi(i); /* unhook from SCSI subsystem */
+ cciss_unregister_scsi(h); /* unhook from SCSI subsystem */
#endif
cciss_shutdown(pdev);
#ifdef CONFIG_PCI_MSI
- if (hba[i]->msix_vector)
- pci_disable_msix(hba[i]->pdev);
- else if (hba[i]->msi_vector)
- pci_disable_msi(hba[i]->pdev);
+ if (h->msix_vector)
+ pci_disable_msix(h->pdev);
+ else if (h->msi_vector)
+ pci_disable_msi(h->pdev);
#endif /* CONFIG_PCI_MSI */
- iounmap(hba[i]->vaddr);
+ iounmap(h->transtable);
+ iounmap(h->cfgtable);
+ iounmap(h->vaddr);
- pci_free_consistent(hba[i]->pdev, hba[i]->nr_cmds * sizeof(CommandList_struct),
- hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
- pci_free_consistent(hba[i]->pdev, hba[i]->nr_cmds * sizeof(ErrorInfo_struct),
- hba[i]->errinfo_pool, hba[i]->errinfo_pool_dhandle);
- kfree(hba[i]->cmd_pool_bits);
+ pci_free_consistent(h->pdev, h->nr_cmds * sizeof(CommandList_struct),
+ h->cmd_pool, h->cmd_pool_dhandle);
+ pci_free_consistent(h->pdev, h->nr_cmds * sizeof(ErrorInfo_struct),
+ h->errinfo_pool, h->errinfo_pool_dhandle);
+ kfree(h->cmd_pool_bits);
/* Free up sg elements */
- for (j = 0; j < hba[i]->nr_cmds; j++)
- kfree(hba[i]->scatter_list[j]);
- kfree(hba[i]->scatter_list);
- cciss_free_sg_chain_blocks(hba[i]->cmd_sg_list, hba[i]->nr_cmds);
+ for (j = 0; j < h->nr_cmds; j++)
+ kfree(h->scatter_list[j]);
+ kfree(h->scatter_list);
+ cciss_free_sg_chain_blocks(h->cmd_sg_list, h->nr_cmds);
/*
* Deliberately omit pci_disable_device(): it does something nasty to
* Smart Array controllers that pci_enable_device does not undo
*/
pci_release_regions(pdev);
pci_set_drvdata(pdev, NULL);
- cciss_destroy_hba_sysfs_entry(hba[i]);
- mutex_unlock(&hba[i]->busy_shutting_down);
- free_hba(i);
+ cciss_destroy_hba_sysfs_entry(h);
+ mutex_unlock(&h->busy_shutting_down);
+ free_hba(h);
}
static struct pci_driver cciss_pci_driver = {
@@ -4495,7 +4934,6 @@ static int __init cciss_init(void)
* array of them, the size must be a multiple of 8 bytes.
*/
BUILD_BUG_ON(sizeof(CommandList_struct) % COMMANDLIST_ALIGNMENT);
-
printk(KERN_INFO DRIVER_NAME "\n");
err = bus_register(&cciss_bus_type);
@@ -4532,8 +4970,8 @@ static void __exit cciss_cleanup(void)
/* double check that all controller entries have been removed */
for (i = 0; i < MAX_CTLR; i++) {
if (hba[i] != NULL) {
- printk(KERN_WARNING "cciss: had to remove"
- " controller %d\n", i);
+ dev_warn(&hba[i]->pdev->dev,
+ "had to remove controller\n");
cciss_remove_one(hba[i]->pdev);
}
}
@@ -4542,46 +4980,5 @@ static void __exit cciss_cleanup(void)
bus_unregister(&cciss_bus_type);
}
-static void fail_all_cmds(unsigned long ctlr)
-{
- /* If we get here, the board is apparently dead. */
- ctlr_info_t *h = hba[ctlr];
- CommandList_struct *c;
- unsigned long flags;
-
- printk(KERN_WARNING "cciss%d: controller not responding.\n", h->ctlr);
- h->alive = 0; /* the controller apparently died... */
-
- spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
-
- pci_disable_device(h->pdev); /* Make sure it is really dead. */
-
- /* move everything off the request queue onto the completed queue */
- while (!hlist_empty(&h->reqQ)) {
- c = hlist_entry(h->reqQ.first, CommandList_struct, list);
- removeQ(c);
- h->Qdepth--;
- addQ(&h->cmpQ, c);
- }
-
- /* Now, fail everything on the completed queue with a HW error */
- while (!hlist_empty(&h->cmpQ)) {
- c = hlist_entry(h->cmpQ.first, CommandList_struct, list);
- removeQ(c);
- if (c->cmd_type != CMD_MSG_STALE)
- c->err_info->CommandStatus = CMD_HARDWARE_ERR;
- if (c->cmd_type == CMD_RWREQ) {
- complete_command(h, c, 0);
- } else if (c->cmd_type == CMD_IOCTL_PEND)
- complete(c->waiting);
-#ifdef CONFIG_CISS_SCSI_TAPE
- else if (c->cmd_type == CMD_SCSI)
- complete_scsi_command(c, 0, 0);
-#endif
- }
- spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
- return;
-}
-
module_init(cciss_init);
module_exit(cciss_cleanup);
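For context on the module_init/module_exit lines that close the file: a PCI block-driver module of this general shape registers a struct pci_driver at init time and unregisters it on exit. The skeleton below is a hedged sketch with hypothetical names (demo_pci_driver, demo_probe, demo_remove, demo_ids), not the cciss code itself:

#include <linux/module.h>
#include <linux/pci.h>

static const struct pci_device_id demo_ids[] = {
	{ PCI_DEVICE(0x103C, 0x3230) },	/* example vendor/device pair */
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, demo_ids);

static int demo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	/* real drivers map BARs, request IRQs, register disks, etc. */
	return pci_enable_device(pdev);
}

static void demo_remove(struct pci_dev *pdev)
{
	/* generic teardown; cciss itself deliberately skips this call */
	pci_disable_device(pdev);
}

static struct pci_driver demo_pci_driver = {
	.name		= "demo",
	.id_table	= demo_ids,
	.probe		= demo_probe,
	.remove		= demo_remove,
};

static int __init demo_init(void)
{
	return pci_register_driver(&demo_pci_driver);
}

static void __exit demo_exit(void)
{
	pci_unregister_driver(&demo_pci_driver);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");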