author		Keith Busch <keith.busch@intel.com>	2012-07-27 13:57:23 -0400
committer	Matthew Wilcox <matthew.r.wilcox@intel.com>	2012-07-27 13:57:23 -0400
commit		a0cadb85b8b758608ae0759151e29de7581c6731
tree		3fc3c95f4cef33866a1da81daa86906bd5fa762c
parent		8fc23e032debd682f5ba9fc524a5846c10d2c522
NVMe: Do not set IO queue depth beyond device max

Set the depth for IO queues to the device's maximum supported queue
entries if the requested depth exceeds the device's capabilities.

Signed-off-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Matthew Wilcox <matthew.r.wilcox@intel.com>
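
For readers following the change, a minimal stand-alone sketch of the clamping
arithmetic is below. It assumes CAP.MQES sits in bits 15:0 of the controller
capability register and is 0's based (hence the + 1 in the patch); the
NVME_Q_DEPTH value of 1024 is used only for illustration.

/*
 * Sketch of the depth clamp added to nvme_setup_io_queues() by this patch.
 * Assumption: MQES is bits 15:0 of CAP and is 0's based, so the device
 * maximum is MQES + 1.
 */
#include <stdint.h>
#include <stdio.h>

#define NVME_Q_DEPTH	1024	/* illustrative requested default */

static int clamp_q_depth(uint64_t cap)
{
	int dev_max = (int)(cap & 0xffff) + 1;	/* NVME_CAP_MQES(cap) + 1 */

	return dev_max < NVME_Q_DEPTH ? dev_max : NVME_Q_DEPTH;
}

int main(void)
{
	/* A device reporting MQES = 255 supports 256 entries per queue,
	 * so the requested 1024 is reduced to 256. */
	printf("q_depth = %d\n", clamp_q_depth(0xff));
	return 0;
}

The patch performs the same comparison with min_t() on the value read via
readq(&dev->bar->cap).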
Diffstat (limited to 'drivers/block/nvme.c')
-rw-r--r--	drivers/block/nvme.c	| 10 ++++++----
1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/drivers/block/nvme.c b/drivers/block/nvme.c
index 11951fa11a9..af1ef39bd6b 100644
--- a/drivers/block/nvme.c
+++ b/drivers/block/nvme.c
@@ -893,7 +893,8 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
 							int depth, int vector)
 {
 	struct device *dmadev = &dev->pci_dev->dev;
-	unsigned extra = (depth / 8) + (depth * sizeof(struct nvme_cmd_info));
+	unsigned extra = DIV_ROUND_UP(depth, 8) + (depth *
+						sizeof(struct nvme_cmd_info));
 	struct nvme_queue *nvmeq = kzalloc(sizeof(*nvmeq) + extra, GFP_KERNEL);
 	if (!nvmeq)
 		return NULL;
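
The hunk above also stops truncating the per-queue bookkeeping size: depth / 8
drops up to seven entries' worth of bits when depth is not a multiple of 8,
while DIV_ROUND_UP(depth, 8) always rounds up. A small sketch of the
difference, with DIV_ROUND_UP expanded as defined in <linux/kernel.h> and an
illustrative depth value:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int depth = 62;	/* not a multiple of 8 */

	printf("depth / 8             = %u\n", depth / 8);		/* 7 */
	printf("DIV_ROUND_UP(depth, 8) = %u\n", DIV_ROUND_UP(depth, 8));	/* 8 */
	return 0;
}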
@@ -1391,7 +1392,7 @@ static int set_queue_count(struct nvme_dev *dev, int count)
 
 static int __devinit nvme_setup_io_queues(struct nvme_dev *dev)
 {
-	int result, cpu, i, nr_io_queues, db_bar_size;
+	int result, cpu, i, nr_io_queues, db_bar_size, q_depth;
 
 	nr_io_queues = num_online_cpus();
 	result = set_queue_count(dev, nr_io_queues);
@@ -1437,9 +1438,10 @@ static int __devinit nvme_setup_io_queues(struct nvme_dev *dev)
 		cpu = cpumask_next(cpu, cpu_online_mask);
 	}
 
+	q_depth = min_t(int, NVME_CAP_MQES(readq(&dev->bar->cap)) + 1,
+								NVME_Q_DEPTH);
 	for (i = 0; i < nr_io_queues; i++) {
-		dev->queues[i + 1] = nvme_create_queue(dev, i + 1,
-								NVME_Q_DEPTH, i);
+		dev->queues[i + 1] = nvme_create_queue(dev, i + 1, q_depth, i);
 		if (IS_ERR(dev->queues[i + 1]))
 			return PTR_ERR(dev->queues[i + 1]);
 		dev->queue_count++;
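
A note on the min_t() call in the last hunk: NVME_CAP_MQES(readq(&dev->bar->cap)) + 1
is a 64-bit expression while NVME_Q_DEPTH is a plain int, and the kernel's min()
warns when its operands have different types, so min_t(int, ...) converts both
sides to int before comparing. A simplified user-space stand-in (not the kernel
macro itself, with illustrative values):

#include <stdint.h>
#include <stdio.h>

/* Rough shape of min_t(int, x, y): convert both operands, then compare. */
static int min_t_int(int x, int y)
{
	return x < y ? x : y;
}

int main(void)
{
	uint64_t dev_max = 256;		/* e.g. NVME_CAP_MQES(cap) + 1 */
	int requested = 1024;		/* e.g. NVME_Q_DEPTH */

	printf("q_depth = %d\n", min_t_int((int)dev_max, requested));
	return 0;
}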