From: Yu Kuai <yukuai3@xxxxxxxxxx>

This helper only supports allocating the default number of requests; add
a new parameter so that callers can pass a specific number of requests.
This prepares for fixing a potential deadlock when nr_requests grows.

Signed-off-by: Yu Kuai <yukuai3@xxxxxxxxxx>
Reviewed-by: Nilay Shroff <nilay@xxxxxxxxxxxxx>
---
 block/blk-mq-sched.c | 14 +++++---------
 block/blk-mq-sched.h |  2 +-
 block/blk-mq.h       | 11 +++++++++++
 block/elevator.c     |  3 ++-
 4 files changed, 19 insertions(+), 11 deletions(-)

diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index e2ce4a28e6c9..d06bb137a743 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -454,7 +454,7 @@ void blk_mq_free_sched_tags_batch(struct xarray *et_table,
 }
 
 struct elevator_tags *blk_mq_alloc_sched_tags(struct blk_mq_tag_set *set,
-		unsigned int nr_hw_queues)
+		unsigned int nr_hw_queues, unsigned int nr_requests)
 {
 	unsigned int nr_tags;
 	int i;
@@ -470,13 +470,8 @@ struct elevator_tags *blk_mq_alloc_sched_tags(struct blk_mq_tag_set *set,
 			nr_tags * sizeof(struct blk_mq_tags *), gfp);
 	if (!et)
 		return NULL;
-	/*
-	 * Default to double of smaller one between hw queue_depth and
-	 * 128, since we don't split into sync/async like the old code
-	 * did. Additionally, this is a per-hw queue depth.
-	 */
-	et->nr_requests = 2 * min_t(unsigned int, set->queue_depth,
-				    BLKDEV_DEFAULT_RQ);
+
+	et->nr_requests = nr_requests;
 	et->nr_hw_queues = nr_hw_queues;
 
 	if (blk_mq_is_shared_tags(set->flags)) {
@@ -521,7 +516,8 @@ int blk_mq_alloc_sched_tags_batch(struct xarray *et_table,
 		 * concurrently.
 		 */
 		if (q->elevator) {
-			et = blk_mq_alloc_sched_tags(set, nr_hw_queues);
+			et = blk_mq_alloc_sched_tags(set, nr_hw_queues,
+					blk_mq_default_nr_requests(set));
 			if (!et)
 				goto out_unwind;
 			if (xa_insert(et_table, q->id, et, gfp))
diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h
index fe83187f41db..8e21a6b1415d 100644
--- a/block/blk-mq-sched.h
+++ b/block/blk-mq-sched.h
@@ -24,7 +24,7 @@ void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e);
 void blk_mq_sched_free_rqs(struct request_queue *q);
 
 struct elevator_tags *blk_mq_alloc_sched_tags(struct blk_mq_tag_set *set,
-		unsigned int nr_hw_queues);
+		unsigned int nr_hw_queues, unsigned int nr_requests);
 int blk_mq_alloc_sched_tags_batch(struct xarray *et_table,
 		struct blk_mq_tag_set *set, unsigned int nr_hw_queues);
 void blk_mq_free_sched_tags(struct elevator_tags *et,
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 5d42c7d3a952..3a1d4c37d1bc 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -109,6 +109,17 @@ static inline struct blk_mq_hw_ctx *blk_mq_map_queue(blk_opf_t opf,
 	return ctx->hctxs[blk_mq_get_hctx_type(opf)];
 }
 
+/*
+ * Default to double of smaller one between hw queue_depth and
+ * 128, since we don't split into sync/async like the old code
+ * did. Additionally, this is a per-hw queue depth.
+ */
+static inline unsigned int blk_mq_default_nr_requests(
+		struct blk_mq_tag_set *set)
+{
+	return 2 * min_t(unsigned int, set->queue_depth, BLKDEV_DEFAULT_RQ);
+}
+
 /*
  * sysfs helpers
  */
diff --git a/block/elevator.c b/block/elevator.c
index fe96c6f4753c..e2ebfbf107b3 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -669,7 +669,8 @@ static int elevator_change(struct request_queue *q, struct elv_change_ctx *ctx)
 	lockdep_assert_held(&set->update_nr_hwq_lock);
 
 	if (strncmp(ctx->name, "none", 4)) {
-		ctx->et = blk_mq_alloc_sched_tags(set, set->nr_hw_queues);
+		ctx->et = blk_mq_alloc_sched_tags(set, set->nr_hw_queues,
+				blk_mq_default_nr_requests(set));
 		if (!ctx->et)
 			return -ENOMEM;
 	}
-- 
2.39.2
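For reference, a standalone sketch of the arithmetic that the new
blk_mq_default_nr_requests() helper factors out. This is illustration-only
userspace code, not kernel code, and it assumes BLKDEV_DEFAULT_RQ is 128
as defined in include/linux/blkdev.h:

#include <stdio.h>

/* Assumption: mirrors BLKDEV_DEFAULT_RQ from include/linux/blkdev.h. */
#define BLKDEV_DEFAULT_RQ	128

/* Double the smaller of the hw queue depth and 128 (a per-hw-queue depth). */
static unsigned int default_nr_requests(unsigned int queue_depth)
{
	unsigned int depth = queue_depth < BLKDEV_DEFAULT_RQ ?
			queue_depth : BLKDEV_DEFAULT_RQ;

	return 2 * depth;
}

int main(void)
{
	printf("%u\n", default_nr_requests(32));   /* shallow queue: 2 * 32 = 64 */
	printf("%u\n", default_nr_requests(1024)); /* deep queue, capped: 2 * 128 = 256 */
	return 0;
}

Passing blk_mq_default_nr_requests(set) at the existing call sites keeps the
current behavior unchanged; later patches can pass a different nr_requests.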