author    | Jens Axboe <axboe@fb.com> | 2014-06-05 15:21:56 -0600
committer | Jens Axboe <axboe@fb.com> | 2014-06-06 08:04:46 -0600
commit    | a4391c6465d9c978fd4bded12e34bdde3f5458f0
tree      | fc5d01ebc746cec35f0d4628f05349b1d87dd8a8 /block/blk-mq.c
parent    | f27b087b81b70513b8c61ec20596c868f7b93474
blk-mq: bump max tag depth to 10K tags
For some scsi-mq cases, the tag map can be huge. So increase the
max number of tags we support.
Additionally, don't fail with EINVAL if a user requests too many
tags. Warn that the tag depth has been adjusted down, and store
the new value inside the tag_set passed in.
Signed-off-by: Jens Axboe <axboe@fb.com>
Diffstat (limited to 'block/blk-mq.c')
-rw-r--r-- | block/blk-mq.c | 13
1 file changed, 12 insertions(+), 1 deletion(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 4e4cd620805..a6ee74e2795 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1967,13 +1967,19 @@ static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
 	return NOTIFY_OK;
 }
 
+/*
+ * Alloc a tag set to be associated with one or more request queues.
+ * May fail with EINVAL for various error conditions. May adjust the
+ * requested depth down, if if it too large. In that case, the set
+ * value will be stored in set->queue_depth.
+ */
 int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
 {
 	int i;
 
 	if (!set->nr_hw_queues)
 		return -EINVAL;
-	if (!set->queue_depth || set->queue_depth > BLK_MQ_MAX_DEPTH)
+	if (!set->queue_depth)
 		return -EINVAL;
 	if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
 		return -EINVAL;
@@ -1981,6 +1987,11 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
 	if (!set->nr_hw_queues || !set->ops->queue_rq || !set->ops->map_queue)
 		return -EINVAL;
 
+	if (set->queue_depth > BLK_MQ_MAX_DEPTH) {
+		pr_info("blk-mq: reduced tag depth to %u\n",
+			BLK_MQ_MAX_DEPTH);
+		set->queue_depth = BLK_MQ_MAX_DEPTH;
+	}
+
 	set->tags = kmalloc_node(set->nr_hw_queues *
				 sizeof(struct blk_mq_tags *),
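
For illustration, here is a minimal, hypothetical caller-side sketch (not part of this commit) of how a driver would observe the new behaviour. The names example_mq_ops, example_tag_set and example_setup_tags are invented for this sketch, and a real driver would also have to supply .queue_rq/.map_queue, cmd_size and the rest of its tag_set setup; the point is only that an oversized queue_depth no longer returns -EINVAL but is clamped and written back into the set.

	#include <linux/blk-mq.h>
	#include <linux/printk.h>

	/* A real driver fills in .queue_rq and .map_queue here. */
	static struct blk_mq_ops example_mq_ops;

	static struct blk_mq_tag_set example_tag_set;

	static int example_setup_tags(void)
	{
		int ret;

		example_tag_set.ops          = &example_mq_ops;
		example_tag_set.nr_hw_queues = 1;
		example_tag_set.queue_depth  = 64 * 1024;	/* above BLK_MQ_MAX_DEPTH */
		example_tag_set.numa_node    = NUMA_NO_NODE;

		ret = blk_mq_alloc_tag_set(&example_tag_set);
		if (ret)
			return ret;

		/*
		 * With this change the core logs "blk-mq: reduced tag depth to ..."
		 * and stores the clamped depth back in the set, so the driver can
		 * read the value it actually got.
		 */
		pr_info("example: using tag depth %u\n", example_tag_set.queue_depth);
		return 0;
	}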