Refactor the read/readahead completion logic into two new functions,
iomap_readfolio_complete() and iomap_readfolio_submit(). This helps make
the iomap read/readahead code generic in preparation for moving it out
of CONFIG_BLOCK scope.

Signed-off-by: Joanne Koong <joannelkoong@xxxxxxxxx>
---
 fs/iomap/buffered-io.c | 27 ++++++++++++++++-----------
 1 file changed, 16 insertions(+), 11 deletions(-)

diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index 4b173aad04ed..f2bfb3e17bb0 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -447,6 +447,20 @@ static int iomap_readpage_iter(struct iomap_iter *iter,
 	return iomap_iter_advance(iter, &length);
 }
 
+static void iomap_readfolio_submit(const struct iomap_readpage_ctx *ctx)
+{
+	if (ctx->bio)
+		submit_bio(ctx->bio);
+}
+
+static void iomap_readfolio_complete(const struct iomap_readpage_ctx *ctx)
+{
+	iomap_readfolio_submit(ctx);
+
+	if (ctx->cur_folio && !ctx->folio_unlocked)
+		folio_unlock(ctx->cur_folio);
+}
+
 static int iomap_read_folio_iter(struct iomap_iter *iter,
 		struct iomap_readpage_ctx *ctx)
 {
@@ -478,13 +492,7 @@ int iomap_read_folio(struct folio *folio, const struct iomap_ops *ops)
 	while ((ret = iomap_iter(&iter, ops)) > 0)
 		iter.status = iomap_read_folio_iter(&iter, &ctx);
 
-	if (ctx.bio) {
-		submit_bio(ctx.bio);
-		WARN_ON_ONCE(!ctx.folio_unlocked);
-	} else {
-		WARN_ON_ONCE(ctx.folio_unlocked);
-		folio_unlock(folio);
-	}
+	iomap_readfolio_complete(&ctx);
 
 	/*
 	 * Just like mpage_readahead and block_read_full_folio, we always
@@ -550,10 +558,7 @@ void iomap_readahead(struct readahead_control *rac, const struct iomap_ops *ops)
 	while (iomap_iter(&iter, ops) > 0)
 		iter.status = iomap_readahead_iter(&iter, &ctx);
 
-	if (ctx.bio)
-		submit_bio(ctx.bio);
-	if (ctx.cur_folio && !ctx.folio_unlocked)
-		folio_unlock(ctx.cur_folio);
+	iomap_readfolio_complete(&ctx);
 }
 EXPORT_SYMBOL_GPL(iomap_readahead);
-- 
2.47.3