The sched debugfs entries share the same lifetime as the scheduler's
kobject and are covered by the same lock (the elevator lock), so move
the sched debugfs register/unregister into elv_register_queue() and
elv_unregister_queue().

blk_mq_debugfs_register() then no longer needs to register the sched
debugfs entries.

Signed-off-by: Ming Lei <ming.lei@xxxxxxxxxx>
---
 block/blk-mq-debugfs.c | 12 ------------
 block/blk-mq-sched.c   |  7 ++-----
 block/elevator.c       |  6 ++++++
 block/elevator.h       |  3 +++
 4 files changed, 11 insertions(+), 17 deletions(-)

diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c
index 3421b5521fe2..c308699ded58 100644
--- a/block/blk-mq-debugfs.c
+++ b/block/blk-mq-debugfs.c
@@ -624,22 +624,10 @@ void blk_mq_debugfs_register(struct request_queue *q)
 
 	debugfs_create_files(q->debugfs_dir, q, blk_mq_debugfs_queue_attrs);
 
-	/*
-	 * blk_mq_init_sched() attempted to do this already, but q->debugfs_dir
-	 * didn't exist yet (because we don't know what to name the directory
-	 * until the queue is registered to a gendisk).
-	 */
-	if (q->elevator && !q->sched_debugfs_dir)
-		blk_mq_debugfs_register_sched(q);
-
-	/* Similarly, blk_mq_init_hctx() couldn't do this previously. */
 	queue_for_each_hw_ctx(q, hctx, i) {
 		if (!hctx->debugfs_dir)
 			blk_mq_debugfs_register_hctx(q, hctx);
-		if (q->elevator && !hctx->sched_debugfs_dir)
-			blk_mq_debugfs_register_sched_hctx(q, hctx);
 	}
-
 	if (q->rq_qos) {
 		struct rq_qos *rqos = q->rq_qos;
 
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index f66abaa25430..14552c58c4e8 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -436,7 +436,7 @@ static int blk_mq_init_sched_shared_tags(struct request_queue *queue)
 	return 0;
 }
 
-static void blk_mq_sched_reg_debugfs(struct request_queue *q)
+void blk_mq_sched_reg_debugfs(struct request_queue *q)
 {
 	struct blk_mq_hw_ctx *hctx;
 	unsigned long i;
@@ -448,7 +448,7 @@ static void blk_mq_sched_reg_debugfs(struct request_queue *q)
 	mutex_unlock(&q->debugfs_mutex);
 }
 
-static void blk_mq_sched_unreg_debugfs(struct request_queue *q)
+void blk_mq_sched_unreg_debugfs(struct request_queue *q)
 {
 	struct blk_mq_hw_ctx *hctx;
 	unsigned long i;
@@ -505,7 +505,6 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
 			}
 		}
 	}
-	blk_mq_sched_reg_debugfs(q);
 
 	return 0;
 
@@ -544,8 +543,6 @@ void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e)
 	unsigned long i;
 	unsigned int flags = 0;
 
-	blk_mq_sched_unreg_debugfs(q);
-
 	queue_for_each_hw_ctx(q, hctx, i) {
 		if (e->type->ops.exit_hctx && hctx->sched_data) {
 			e->type->ops.exit_hctx(hctx, i);
diff --git a/block/elevator.c b/block/elevator.c
index 5051a98dc08c..cf48613c6e62 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -459,6 +459,9 @@ int elv_register_queue(struct request_queue *q, bool uevent)
 
 	lockdep_assert_held(&q->elevator_lock);
 
+	if (test_bit(ELEVATOR_FLAG_REGISTERED, &e->flags))
+		return 0;
+
 	error = kobject_add(&e->kobj, &q->disk->queue_kobj, "iosched");
 	if (!error) {
 		const struct elv_fs_entry *attr = e->type->elevator_attrs;
@@ -472,6 +475,7 @@ int elv_register_queue(struct request_queue *q, bool uevent)
 		if (uevent)
 			kobject_uevent(&e->kobj, KOBJ_ADD);
 
+		blk_mq_sched_reg_debugfs(q);
 		set_bit(ELEVATOR_FLAG_REGISTERED, &e->flags);
 	}
 	return error;
@@ -486,6 +490,8 @@ void elv_unregister_queue(struct request_queue *q)
 	if (e && test_and_clear_bit(ELEVATOR_FLAG_REGISTERED, &e->flags)) {
 		kobject_uevent(&e->kobj, KOBJ_REMOVE);
 		kobject_del(&e->kobj);
+
+		blk_mq_sched_unreg_debugfs(q);
 	}
 }
 
diff --git a/block/elevator.h b/block/elevator.h
index e4e44dfac503..80ff9b28a66f 100644
--- a/block/elevator.h
+++ b/block/elevator.h
@@ -182,4 +182,7 @@ extern struct request *elv_rb_find(struct rb_root *, sector_t);
 #define rq_entry_fifo(ptr)	list_entry((ptr), struct request, queuelist)
 #define rq_fifo_clear(rq)	list_del_init(&(rq)->queuelist)
 
+void blk_mq_sched_reg_debugfs(struct request_queue *q);
+void blk_mq_sched_unreg_debugfs(struct request_queue *q);
+
 #endif /* _ELEVATOR_H */
-- 
2.47.0
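
The idea the patch relies on can be sketched outside the kernel: create and
destroy the debug entries from the same register/unregister paths, under the
same lock, that manage the object they describe, and make registration
idempotent. Below is a minimal userspace analogue of that shape; every name
in it is made up for illustration and is not a kernel API.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/*
 * Illustrative analogue only: the debug entries live and die with the
 * object, inside the same critical section, so their lifetimes can never
 * diverge and no second registration path is needed.
 */
struct fake_elevator {
        pthread_mutex_t lock;           /* plays the role of the elevator lock */
        bool registered;                /* plays the role of a REGISTERED flag */
        bool debug_registered;
};

static void fake_debug_register(struct fake_elevator *e)
{
        e->debug_registered = true;
        printf("debug entries created\n");
}

static void fake_debug_unregister(struct fake_elevator *e)
{
        e->debug_registered = false;
        printf("debug entries removed\n");
}

/* Register the object and, in the same critical section, its debug entries. */
static int fake_register(struct fake_elevator *e)
{
        pthread_mutex_lock(&e->lock);
        if (!e->registered) {           /* mirrors the early return when already registered */
                e->registered = true;
                fake_debug_register(e);
        }
        pthread_mutex_unlock(&e->lock);
        return 0;
}

/* Tear the debug entries down from the very same unregister path. */
static void fake_unregister(struct fake_elevator *e)
{
        pthread_mutex_lock(&e->lock);
        if (e->registered) {
                e->registered = false;
                fake_debug_unregister(e);
        }
        pthread_mutex_unlock(&e->lock);
}

int main(void)
{
        struct fake_elevator e = { .lock = PTHREAD_MUTEX_INITIALIZER };

        fake_register(&e);
        fake_register(&e);              /* second call is a no-op */
        fake_unregister(&e);
        return 0;
}

With this shape there is no window where the debug entries exist without a
registered object, which is the property the patch gets by moving the sched
debugfs calls into elv_register_queue()/elv_unregister_queue().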