Move the iomap_readpage_iter() async bio read logic into a separate helper function. This is needed to make iomap read/readahead more generically usable, especially for filesystems that do not require CONFIG_BLOCK. Rename iomap_read_folio_range() to iomap_read_folio_range_sync() to differentiate between the synchronous and asynchronous bio folio read calls. Signed-off-by: Joanne Koong <joannelkoong@xxxxxxxxx> --- fs/iomap/buffered-io.c | 68 ++++++++++++++++++++++++------------------ 1 file changed, 39 insertions(+), 29 deletions(-) diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c index fd827398afd2..f8bdb2428819 100644 --- a/fs/iomap/buffered-io.c +++ b/fs/iomap/buffered-io.c @@ -357,36 +357,15 @@ struct iomap_readpage_ctx { struct readahead_control *rac; }; -static int iomap_readpage_iter(struct iomap_iter *iter, - struct iomap_readpage_ctx *ctx) +static void iomap_read_folio_range_async(const struct iomap_iter *iter, + struct iomap_readpage_ctx *ctx, loff_t pos, size_t plen) { + struct folio *folio = ctx->cur_folio; const struct iomap *iomap = &iter->iomap; - loff_t pos = iter->pos; + struct iomap_folio_state *ifs = folio->private; + size_t poff = offset_in_folio(folio, pos); loff_t length = iomap_length(iter); - struct folio *folio = ctx->cur_folio; - struct iomap_folio_state *ifs; - size_t poff, plen; sector_t sector; - int ret; - - if (iomap->type == IOMAP_INLINE) { - ret = iomap_read_inline_data(iter, folio); - if (ret) - return ret; - return iomap_iter_advance(iter, &length); - } - - /* zero post-eof blocks as the page may be mapped */ - ifs = ifs_alloc(iter->inode, folio, iter->flags); - iomap_adjust_read_range(iter->inode, folio, &pos, length, &poff, &plen); - if (plen == 0) - goto done; - - if (iomap_block_needs_zeroing(iter, pos)) { - folio_zero_range(folio, poff, plen); - iomap_set_range_uptodate(folio, poff, plen); - goto done; - } ctx->cur_folio_in_bio = true; if (ifs) { @@ -425,6 +404,37 @@ static int iomap_readpage_iter(struct iomap_iter 
*iter, ctx->bio->bi_end_io = iomap_read_end_io; bio_add_folio_nofail(ctx->bio, folio, plen, poff); } +} + +static int iomap_readpage_iter(struct iomap_iter *iter, + struct iomap_readpage_ctx *ctx) +{ + const struct iomap *iomap = &iter->iomap; + loff_t pos = iter->pos; + loff_t length = iomap_length(iter); + struct folio *folio = ctx->cur_folio; + size_t poff, plen; + int ret; + + if (iomap->type == IOMAP_INLINE) { + ret = iomap_read_inline_data(iter, folio); + if (ret) + return ret; + return iomap_iter_advance(iter, &length); + } + + /* zero post-eof blocks as the page may be mapped */ + ifs_alloc(iter->inode, folio, iter->flags); + iomap_adjust_read_range(iter->inode, folio, &pos, length, &poff, &plen); + if (plen == 0) + goto done; + + if (iomap_block_needs_zeroing(iter, pos)) { + folio_zero_range(folio, poff, plen); + iomap_set_range_uptodate(folio, poff, plen); + } else { + iomap_read_folio_range_async(iter, ctx, pos, plen); + } done: /* @@ -549,7 +559,7 @@ void iomap_readahead(struct readahead_control *rac, const struct iomap_ops *ops) } EXPORT_SYMBOL_GPL(iomap_readahead); -static int iomap_read_folio_range(const struct iomap_iter *iter, +static int iomap_read_folio_range_sync(const struct iomap_iter *iter, struct folio *folio, loff_t pos, size_t len) { const struct iomap *srcmap = iomap_iter_srcmap(iter); @@ -562,7 +572,7 @@ static int iomap_read_folio_range(const struct iomap_iter *iter, return submit_bio_wait(&bio); } #else -static int iomap_read_folio_range(const struct iomap_iter *iter, +static int iomap_read_folio_range_sync(const struct iomap_iter *iter, struct folio *folio, loff_t pos, size_t len) { WARN_ON_ONCE(1); @@ -739,7 +749,7 @@ static int __iomap_write_begin(const struct iomap_iter *iter, status = write_ops->read_folio_range(iter, folio, block_start, plen); else - status = iomap_read_folio_range(iter, + status = iomap_read_folio_range_sync(iter, folio, block_start, plen); if (status) return status; -- 2.47.3