From: Yu Kuai <yukuai3@xxxxxxxxxx>

queue_requests_store() is the only caller of blk_mq_update_nr_requests(),
and blk_mq_update_nr_requests() is the only caller of
blk_mq_tag_update_depth(); however, all of them contain checks on the
nr_requests value supplied by the user.

Make the code cleaner by moving all the checks into the top-level
function:

1) nr_requests > reserved tags;
2) if there is an elevator, 4 <= nr_requests <= 2048 (MAX_SCHED_RQ);
3) if the elevator is none, 4 <= nr_requests <= tag_set->queue_depth;

Note that case 2 is the only case where tags can grow, and hence the
only case where -ENOMEM might be returned.

Signed-off-by: Yu Kuai <yukuai3@xxxxxxxxxx>
---
 block/blk-mq-tag.c | 16 +---------------
 block/blk-mq.c     | 13 ++++---------
 block/blk-mq.h     |  2 +-
 block/blk-sysfs.c  | 13 +++++++++++++
 4 files changed, 19 insertions(+), 25 deletions(-)

diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index d880c50629d6..7613a9889eb1 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -584,14 +584,10 @@ void blk_mq_free_tags(struct blk_mq_tags *tags)
 }
 
 int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
-		struct blk_mq_tags **tagsptr, unsigned int tdepth,
-		bool can_grow)
+		struct blk_mq_tags **tagsptr, unsigned int tdepth)
 {
 	struct blk_mq_tags *tags = *tagsptr;
 
-	if (tdepth <= tags->nr_reserved_tags)
-		return -EINVAL;
-
 	/*
 	 * If we are allowed to grow beyond the original size, allocate
 	 * a new set of tags before freeing the old one.
@@ -600,16 +596,6 @@ int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
 		struct blk_mq_tag_set *set = hctx->queue->tag_set;
 		struct blk_mq_tags *new;
 
-		if (!can_grow)
-			return -EINVAL;
-
-		/*
-		 * We need some sort of upper limit, set it high enough that
-		 * no valid use cases should require more.
-		 */
-		if (tdepth > MAX_SCHED_RQ)
-			return -EINVAL;
-
 		/*
 		 * Only the sbitmap needs resizing since we allocated the max
 		 * initially.
diff --git a/block/blk-mq.c b/block/blk-mq.c
index ea2995d4a917..048d6b2cffe6 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -4924,9 +4924,6 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
 	unsigned long i;
 	int ret = 0;
 
-	if (q->nr_requests == nr)
-		return 0;
-
 	blk_mq_quiesce_queue(q);
 
 	queue_for_each_hw_ctx(q, hctx, i) {
@@ -4936,13 +4933,11 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
 		 * If we're using an MQ scheduler, just update the scheduler
 		 * queue depth. This is similar to what the old code would do.
 		 */
-		if (hctx->sched_tags) {
+		if (hctx->sched_tags)
 			ret = blk_mq_tag_update_depth(hctx, &hctx->sched_tags,
-					nr, true);
-		} else {
-			ret = blk_mq_tag_update_depth(hctx, &hctx->tags, nr,
-					false);
-		}
+					nr);
+		else
+			ret = blk_mq_tag_update_depth(hctx, &hctx->tags, nr);
 		if (ret)
 			goto out;
 	}
diff --git a/block/blk-mq.h b/block/blk-mq.h
index affb2e14b56e..2b3ade60c90b 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -171,7 +171,7 @@ void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx,
 		unsigned int tag);
 void blk_mq_put_tags(struct blk_mq_tags *tags, int *tag_array, int nr_tags);
 int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
-		struct blk_mq_tags **tags, unsigned int depth, bool can_grow);
+		struct blk_mq_tags **tags, unsigned int depth);
 void blk_mq_tag_resize_shared_tags(struct blk_mq_tag_set *set, unsigned int size);
 void blk_mq_tag_update_sched_shared_tags(struct request_queue *q);
 
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 1086f7b9da28..f3d08edcc34f 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -75,12 +75,25 @@ queue_requests_store(struct gendisk *disk, const char *page, size_t count)
 
 	memflags = blk_mq_freeze_queue(q);
 	mutex_lock(&q->elevator_lock);
+
+	if (q->nr_requests == nr)
+		goto unlock;
+
 	if (nr < BLKDEV_MIN_RQ)
 		nr = BLKDEV_MIN_RQ;
 
+	if (nr <= q->tag_set->reserved_tags ||
+	    (q->elevator && nr > MAX_SCHED_RQ) ||
+	    (!q->elevator && nr > q->tag_set->queue_depth)) {
+		ret = -EINVAL;
+		goto unlock;
+	}
+
 	err = blk_mq_update_nr_requests(disk->queue, nr);
 	if (err)
 		ret = err;
+
+unlock:
 	mutex_unlock(&q->elevator_lock);
 	blk_mq_unfreeze_queue(q, memflags);
 	return ret;
-- 
2.39.2
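
For reference, the consolidated checks can be exercised in isolation with
the small user-space sketch below. It mirrors the three rules listed in
the commit message; the struct layout, the helper name and the return
convention are simplified stand-ins for the kernel types, not part of
this patch:

/*
 * Minimal user-space sketch of the nr_requests validation that this
 * patch consolidates into queue_requests_store().  The constants match
 * the kernel values; "fake_tag_set" is an illustrative stand-in for
 * struct blk_mq_tag_set.
 */
#include <stdbool.h>
#include <stdio.h>

#define BLKDEV_MIN_RQ	4
#define MAX_SCHED_RQ	2048	/* 16 * BLKDEV_DEFAULT_RQ in the kernel */

struct fake_tag_set {
	unsigned int reserved_tags;
	unsigned int queue_depth;
};

/* Returns 0 if @nr is acceptable, -1 (standing in for -EINVAL) if not. */
static int validate_nr_requests(const struct fake_tag_set *set,
				bool has_elevator, unsigned int nr)
{
	if (nr < BLKDEV_MIN_RQ)			/* clamped up, not rejected */
		nr = BLKDEV_MIN_RQ;

	if (nr <= set->reserved_tags)		/* rule 1 */
		return -1;
	if (has_elevator && nr > MAX_SCHED_RQ)	/* rule 2 */
		return -1;
	if (!has_elevator && nr > set->queue_depth)	/* rule 3 */
		return -1;
	return 0;
}

int main(void)
{
	struct fake_tag_set set = { .reserved_tags = 1, .queue_depth = 64 };

	/* 128 > queue_depth: only acceptable when an elevator is present. */
	printf("%d\n", validate_nr_requests(&set, true, 128));	/* 0 */
	printf("%d\n", validate_nr_requests(&set, false, 128));	/* -1 */
	return 0;
}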