On Mon, Jul 28, 2025 at 1:31 PM Andrey Albershteyn <aalbersh@xxxxxxxxxx> wrote:
>
> From: Andrey Albershteyn <aalbersh@xxxxxxxxxx>
>
> Add iomap_writepages_unbound() without limit in form of EOF. XFS
> will use this to write metadata (fs-verity Merkle tree) in range far
> beyond EOF.
>
> Signed-off-by: Andrey Albershteyn <aalbersh@xxxxxxxxxx>
> ---
>  fs/iomap/buffered-io.c | 51 +++++++++++++++++++++++++++++++++++++++-----------
>  include/linux/iomap.h  |  3 +++
>  2 files changed, 43 insertions(+), 11 deletions(-)
>
> diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
> index 3729391a18f3..7bef232254a3 100644
> --- a/fs/iomap/buffered-io.c
> +++ b/fs/iomap/buffered-io.c
> @@ -1881,18 +1881,10 @@ static int iomap_writepage_map(struct iomap_writepage_ctx *wpc,
> 	int error = 0;
> 	u32 rlen;
>
> -	WARN_ON_ONCE(!folio_test_locked(folio));
> -	WARN_ON_ONCE(folio_test_dirty(folio));
> -	WARN_ON_ONCE(folio_test_writeback(folio));
> -
> -	trace_iomap_writepage(inode, pos, folio_size(folio));
> -
> -	if (!iomap_writepage_handle_eof(folio, inode, &end_pos)) {
> -		folio_unlock(folio);
> -		return 0;
> -	}
> 	WARN_ON_ONCE(end_pos <= pos);
>
> +	trace_iomap_writepage(inode, pos, folio_size(folio));
> +
> 	if (i_blocks_per_folio(inode, folio) > 1) {
> 		if (!ifs) {
> 			ifs = ifs_alloc(inode, folio, 0);
> @@ -1956,6 +1948,23 @@ static int iomap_writepage_map(struct iomap_writepage_ctx *wpc,
> 	return error;
>  }
>
> +/* Map pages bound by EOF */
> +static int iomap_writepage_map_eof(struct iomap_writepage_ctx *wpc,
> +		struct writeback_control *wbc, struct folio *folio)
> +{
> +	int error;
> +	struct inode *inode = folio->mapping->host;
> +	u64 end_pos = folio_pos(folio) + folio_size(folio);
> +
> +	if (!iomap_writepage_handle_eof(folio, inode, &end_pos)) {
> +		folio_unlock(folio);
> +		return 0;
> +	}
> +
> +	error = iomap_writepage_map(wpc, wbc, folio);
> +	return error;
> +}
> +
>  int
>  iomap_writepages(struct address_space *mapping, struct writeback_control *wbc,
> 		struct iomap_writepage_ctx *wpc,
> @@ -1972,9 +1981,29 @@ iomap_writepages(struct address_space *mapping, struct writeback_control *wbc,
> 			PF_MEMALLOC))
> 		return -EIO;
>
> +	wpc->ops = ops;
> +	while ((folio = writeback_iter(mapping, wbc, folio, &error))) {
> +		WARN_ON_ONCE(!folio_test_locked(folio));
> +		WARN_ON_ONCE(folio_test_dirty(folio));
> +		WARN_ON_ONCE(folio_test_writeback(folio));
> +
> +		error = iomap_writepage_map_eof(wpc, wbc, folio);
> +	}
> +	return iomap_submit_ioend(wpc, error);
> +}
> +EXPORT_SYMBOL_GPL(iomap_writepages);
> +
> +int
> +iomap_writepages_unbound(struct address_space *mapping, struct writeback_control *wbc,
> +		struct iomap_writepage_ctx *wpc,
> +		const struct iomap_writeback_ops *ops)
> +{
> +	struct folio *folio = NULL;
> +	int error;
> +
> 	wpc->ops = ops;
> 	while ((folio = writeback_iter(mapping, wbc, folio, &error)))
> 		error = iomap_writepage_map(wpc, wbc, folio);
> 	return iomap_submit_ioend(wpc, error);
>  }
> -EXPORT_SYMBOL_GPL(iomap_writepages);
> +EXPORT_SYMBOL_GPL(iomap_writepages_unbound);
> diff --git a/include/linux/iomap.h b/include/linux/iomap.h
> index 522644d62f30..4a0b5ebb79e9 100644
> --- a/include/linux/iomap.h
> +++ b/include/linux/iomap.h
> @@ -464,6 +464,9 @@ void iomap_sort_ioends(struct list_head *ioend_list);
>  int iomap_writepages(struct address_space *mapping,
> 		struct writeback_control *wbc, struct iomap_writepage_ctx *wpc,
> 		const struct iomap_writeback_ops *ops);
> +int iomap_writepages_unbound(struct address_space *mapping,
> +		struct writeback_control *wbc, struct iomap_writepage_ctx *wpc,
> +		const struct iomap_writeback_ops *ops);

Just curious: instead of adding a new API for this, would an "unbound"
bitfield in the iomap_writepage_ctx struct suffice? AFAICT the logic in
the two paths is identical except for the iomap_writepage_handle_eof()
call and some WARN_ONs; if those get gated behind the bitfield check,
the result is logically the same, but IMO the code flow is more
straightforward to follow. Something like the sketch at the bottom of
this mail is what I have in mind. But maybe I'm missing some reason why
this wouldn't work?

>  /*
>   * Flags for direct I/O ->end_io:
>
> --
> 2.50.0
>
>
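To be concrete, this is roughly what I was picturing. Completely
untested, and the "unbound" field name is just a placeholder I made up
for illustration, not anything that exists today:

	/* new flag in struct iomap_writepage_ctx */
	unsigned int		unbound : 1;	/* don't clamp writeback at EOF */

	static int iomap_writepage_map_eof(struct iomap_writepage_ctx *wpc,
			struct writeback_control *wbc, struct folio *folio)
	{
		struct inode *inode = folio->mapping->host;
		u64 end_pos = folio_pos(folio) + folio_size(folio);

		/* Only clamp/skip at EOF for regular, bounded writeback. */
		if (!wpc->unbound &&
		    !iomap_writepage_handle_eof(folio, inode, &end_pos)) {
			folio_unlock(folio);
			return 0;
		}

		return iomap_writepage_map(wpc, wbc, folio);
	}

Then XFS would set wpc->unbound before calling plain iomap_writepages()
for the Merkle tree writeback, and iomap_writepages_unbound() with its
duplicated loop would go away. The WARN_ON_ONCE checks could probably
stay unconditional in the shared loop.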