-rw-r--r--   block/blk-mq.c             | 26
-rw-r--r--   drivers/block/null_blk.c   | 28
-rw-r--r--   drivers/block/virtio_blk.c |  2
-rw-r--r--   include/linux/blk-mq.h     | 10
4 files changed, 6 insertions, 60 deletions
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 5cc4b871cb1..f27fe44230c 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1335,21 +1335,6 @@ struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, const int cpu)
 }
 EXPORT_SYMBOL(blk_mq_map_queue);
 
-struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *set,
-						   unsigned int hctx_index,
-						   int node)
-{
-	return kzalloc_node(sizeof(struct blk_mq_hw_ctx), GFP_KERNEL, node);
-}
-EXPORT_SYMBOL(blk_mq_alloc_single_hw_queue);
-
-void blk_mq_free_single_hw_queue(struct blk_mq_hw_ctx *hctx,
-				 unsigned int hctx_index)
-{
-	kfree(hctx);
-}
-EXPORT_SYMBOL(blk_mq_free_single_hw_queue);
-
 static void blk_mq_free_rq_map(struct blk_mq_tag_set *set,
 		struct blk_mq_tags *tags, unsigned int hctx_idx)
 {
@@ -1590,7 +1575,7 @@ static void blk_mq_free_hw_queues(struct request_queue *q,
 
 	queue_for_each_hw_ctx(q, hctx, i) {
 		free_cpumask_var(hctx->cpumask);
-		set->ops->free_hctx(hctx, i);
+		kfree(hctx);
 	}
 }
 
@@ -1811,7 +1796,8 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
 	for (i = 0; i < set->nr_hw_queues; i++) {
 		int node = blk_mq_hw_queue_to_node(map, i);
 
-		hctxs[i] = set->ops->alloc_hctx(set, i, node);
+		hctxs[i] = kzalloc_node(sizeof(struct blk_mq_hw_ctx),
+					GFP_KERNEL, node);
 		if (!hctxs[i])
 			goto err_hctxs;
 
@@ -1898,7 +1884,7 @@ err_hctxs:
 		if (!hctxs[i])
 			break;
 		free_cpumask_var(hctxs[i]->cpumask);
-		set->ops->free_hctx(hctxs[i], i);
+		kfree(hctxs[i]);
 	}
 err_map:
 	kfree(hctxs);
@@ -1983,9 +1969,7 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
 	if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
 		return -EINVAL;
 
-	if (!set->nr_hw_queues ||
-	    !set->ops->queue_rq || !set->ops->map_queue ||
-	    !set->ops->alloc_hctx || !set->ops->free_hctx)
+	if (!set->nr_hw_queues || !set->ops->queue_rq || !set->ops->map_queue)
 		return -EINVAL;
 
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index 4d33c8c25fb..b40af63a547 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -321,18 +321,6 @@ static int null_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq)
 	return BLK_MQ_RQ_QUEUE_OK;
 }
 
-static struct blk_mq_hw_ctx *null_alloc_hctx(struct blk_mq_tag_set *set,
-					     unsigned int hctx_index,
-					     int node)
-{
-	return kzalloc_node(sizeof(struct blk_mq_hw_ctx), GFP_KERNEL, node);
-}
-
-static void null_free_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_index)
-{
-	kfree(hctx);
-}
-
 static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
 {
 	BUG_ON(!nullb);
@@ -360,17 +348,6 @@ static struct blk_mq_ops null_mq_ops = {
 	.map_queue	= blk_mq_map_queue,
 	.init_hctx	= null_init_hctx,
 	.complete	= null_softirq_done_fn,
-	.alloc_hctx	= blk_mq_alloc_single_hw_queue,
-	.free_hctx	= blk_mq_free_single_hw_queue,
-};
-
-static struct blk_mq_ops null_mq_ops_pernode = {
-	.queue_rq	= null_queue_rq,
-	.map_queue	= blk_mq_map_queue,
-	.init_hctx	= null_init_hctx,
-	.complete	= null_softirq_done_fn,
-	.alloc_hctx	= null_alloc_hctx,
-	.free_hctx	= null_free_hctx,
 };
 
 static void null_del_dev(struct nullb *nullb)
@@ -496,10 +473,7 @@ static int null_add_dev(void)
 		goto out_free_nullb;
 
 	if (queue_mode == NULL_Q_MQ) {
-		if (use_per_node_hctx)
-			nullb->tag_set.ops = &null_mq_ops_pernode;
-		else
-			nullb->tag_set.ops = &null_mq_ops;
+		nullb->tag_set.ops = &null_mq_ops;
 		nullb->tag_set.nr_hw_queues = submit_queues;
 		nullb->tag_set.queue_depth = hw_queue_depth;
 		nullb->tag_set.numa_node = home_node;
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 7a51f065edc..16c21c0cb14 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -497,8 +497,6 @@ static int virtblk_init_request(void *data, struct request *rq,
 static struct blk_mq_ops virtio_mq_ops = {
 	.queue_rq	= virtio_queue_rq,
 	.map_queue	= blk_mq_map_queue,
-	.alloc_hctx	= blk_mq_alloc_single_hw_queue,
-	.free_hctx	= blk_mq_free_single_hw_queue,
 	.complete	= virtblk_request_done,
 	.init_request	= virtblk_init_request,
 };
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 2bd82f39912..91dfb75ce39 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -79,9 +79,6 @@ struct blk_mq_tag_set {
 
 typedef int (queue_rq_fn)(struct blk_mq_hw_ctx *, struct request *);
 typedef struct blk_mq_hw_ctx *(map_queue_fn)(struct request_queue *, const int);
-typedef struct blk_mq_hw_ctx *(alloc_hctx_fn)(struct blk_mq_tag_set *,
-		unsigned int, int);
-typedef void (free_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
 typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int);
 typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
 typedef int (init_request_fn)(void *, struct request *, unsigned int,
@@ -108,12 +105,6 @@ struct blk_mq_ops {
 	softirq_done_fn		*complete;
 
 	/*
-	 * Override for hctx allocations (should probably go)
-	 */
-	alloc_hctx_fn		*alloc_hctx;
-	free_hctx_fn		*free_hctx;
-
-	/*
 	 * Called when the block layer side of a hardware queue has been
 	 * set up, allowing the driver to allocate/init matching structures.
 	 * Ditto for exit/teardown.
@@ -166,7 +157,6 @@ struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag);
 struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index);
 struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *,
 		unsigned int, int);
-void blk_mq_free_single_hw_queue(struct blk_mq_hw_ctx *, unsigned int);
 
 void blk_mq_end_io(struct request *rq, int error);
 void __blk_mq_end_io(struct request *rq, int error);
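
Note (illustration, not part of the patch): with this change a driver no longer supplies hctx allocation hooks; the block core allocates every hardware context itself with kzalloc_node() on the node returned by blk_mq_hw_queue_to_node(). The sketch below shows roughly what a minimal driver-side setup looks like once alloc_hctx/free_hctx are gone. It is modeled on null_mq_ops from the diff above; example_queue_rq, example_mq_ops, example_tag_set and the numeric values are hypothetical, only the field names and helpers come from the code shown here.

#include <linux/blk-mq.h>

/* Hypothetical queue_rq handler; a real driver submits the request here. */
static int example_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq)
{
	blk_mq_end_io(rq, 0);		/* complete immediately, as null_blk does */
	return BLK_MQ_RQ_QUEUE_OK;
}

/* No .alloc_hctx / .free_hctx: the core now handles hctx allocation. */
static struct blk_mq_ops example_mq_ops = {
	.queue_rq	= example_queue_rq,
	.map_queue	= blk_mq_map_queue,	/* default mapping helper */
};

static struct blk_mq_tag_set example_tag_set = {
	.ops		= &example_mq_ops,
	.nr_hw_queues	= 1,			/* hypothetical values */
	.queue_depth	= 64,
	.numa_node	= NUMA_NO_NODE,
};

After the patch, blk_mq_alloc_tag_set() only validates nr_hw_queues, queue_rq and map_queue (see the last blk-mq.c hunk), so a tag set like the one above is sufficient for registration.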