Move ifs read_bytes_pending addition logic into a separate helper, iomap_start_folio_read(), which will be needed later on by user-provided read callbacks (not yet added) for read/readahead. This is the counterpart to the already-existing iomap_finish_folio_read(). Signed-off-by: Joanne Koong <joannelkoong@xxxxxxxxx> --- fs/iomap/buffered-io.c | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c index a3a9b6146c2f..6a9f9a9e591f 100644 --- a/fs/iomap/buffered-io.c +++ b/fs/iomap/buffered-io.c @@ -324,6 +324,17 @@ struct iomap_readfolio_ctx { }; #ifdef CONFIG_BLOCK +static void iomap_start_folio_read(struct folio *folio, size_t len) +{ + struct iomap_folio_state *ifs = folio->private; + + if (ifs) { + spin_lock_irq(&ifs->state_lock); + ifs->read_bytes_pending += len; + spin_unlock_irq(&ifs->state_lock); + } +} + static void iomap_finish_folio_read(struct folio *folio, size_t off, size_t len, int error) { @@ -361,18 +372,13 @@ static void iomap_read_folio_range_async(struct iomap_iter *iter, { struct folio *folio = ctx->cur_folio; const struct iomap *iomap = &iter->iomap; - struct iomap_folio_state *ifs = folio->private; size_t poff = offset_in_folio(folio, pos); loff_t length = iomap_length(iter); struct bio *bio = iter->private; sector_t sector; ctx->folio_unlocked = true; - if (ifs) { - spin_lock_irq(&ifs->state_lock); - ifs->read_bytes_pending += plen; - spin_unlock_irq(&ifs->state_lock); - } + iomap_start_folio_read(folio, plen); sector = iomap_sector(iomap, pos); if (!bio || bio_end_sector(bio) != sector || -- 2.47.3