On Fri, Aug 29, 2025 at 04:56:25PM -0700, Joanne Koong wrote:
> Read folio data into the page cache using iomap. This gives us granular
> uptodate tracking for large folios, which optimizes how much data needs
> to be read in. If some portions of the folio are already uptodate (eg
> through a prior write), we only need to read in the non-uptodate
> portions.
> 
> Signed-off-by: Joanne Koong <joannelkoong@xxxxxxxxx>

Looks fine to me,
Reviewed-by: "Darrick J. Wong" <djwong@xxxxxxxxxx>

--D

> ---
>  fs/fuse/file.c | 72 ++++++++++++++++++++++++++++++++++----------------
>  1 file changed, 49 insertions(+), 23 deletions(-)
> 
> diff --git a/fs/fuse/file.c b/fs/fuse/file.c
> index 5525a4520b0f..bdfb13cdee4b 100644
> --- a/fs/fuse/file.c
> +++ b/fs/fuse/file.c
> @@ -828,22 +828,62 @@ static int fuse_do_readfolio(struct file *file, struct folio *folio,
>  	return 0;
>  }
>  
> +static int fuse_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
> +			    unsigned int flags, struct iomap *iomap,
> +			    struct iomap *srcmap)
> +{
> +	iomap->type = IOMAP_MAPPED;
> +	iomap->length = length;
> +	iomap->offset = offset;
> +	return 0;
> +}
> +
> +static const struct iomap_ops fuse_iomap_ops = {
> +	.iomap_begin = fuse_iomap_begin,
> +};
> +
> +struct fuse_fill_read_data {
> +	struct file *file;
> +};
> +
> +static int fuse_iomap_read_folio_range_async(const struct iomap_iter *iter,
> +					     struct folio *folio, loff_t pos,
> +					     size_t len)
> +{
> +	struct fuse_fill_read_data *data = iter->private;
> +	struct file *file = data->file;
> +	size_t off = offset_in_folio(folio, pos);
> +	int ret;
> +
> +	/*
> +	 * for non-readahead read requests, do reads synchronously since
> +	 * it's not guaranteed that the server can handle out-of-order reads
> +	 */
> +	iomap_start_folio_read(folio, len);
> +	ret = fuse_do_readfolio(file, folio, off, len);
> +	iomap_finish_folio_read(folio, off, len, ret);
> +	return ret;
> +}
> +
> +static const struct iomap_read_ops fuse_iomap_read_ops = {
> +	.read_folio_range = fuse_iomap_read_folio_range_async,
> +};
> +
>  static int fuse_read_folio(struct file *file, struct folio *folio)
>  {
>  	struct inode *inode = folio->mapping->host;
> +	struct fuse_fill_read_data data = {
> +		.file = file,
> +	};
>  	int err;
>  
> -	err = -EIO;
> -	if (fuse_is_bad(inode))
> -		goto out;
> -
> -	err = fuse_do_readfolio(file, folio, 0, folio_size(folio));
> -	if (!err)
> -		folio_mark_uptodate(folio);
> +	if (fuse_is_bad(inode)) {
> +		folio_unlock(folio);
> +		return -EIO;
> +	}
>  
> +	err = iomap_read_folio(folio, &fuse_iomap_ops, &fuse_iomap_read_ops, &data);
>  	fuse_invalidate_atime(inode);
> - out:
> -	folio_unlock(folio);
>  	return err;
>  }
>  
> @@ -1394,20 +1434,6 @@ static const struct iomap_write_ops fuse_iomap_write_ops = {
>  	.read_folio_range = fuse_iomap_read_folio_range,
>  };
>  
> -static int fuse_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
> -			    unsigned int flags, struct iomap *iomap,
> -			    struct iomap *srcmap)
> -{
> -	iomap->type = IOMAP_MAPPED;
> -	iomap->length = length;
> -	iomap->offset = offset;
> -	return 0;
> -}
> -
> -static const struct iomap_ops fuse_iomap_ops = {
> -	.iomap_begin = fuse_iomap_begin,
> -};
> -
>  static ssize_t fuse_cache_write_iter(struct kiocb *iocb, struct iov_iter *from)
>  {
>  	struct file *file = iocb->ki_filp;
> -- 
> 2.47.3
> 
> 
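
[Editorial aside, not part of the patch or thread: a minimal user-space sketch of the idea the commit message describes, namely per-block uptodate tracking so a read only fetches the ranges that a prior write did not already populate. All names here (toy_folio, toy_read_folio, toy_issue_read) are made up for illustration and are not the real folio or iomap APIs.]

/*
 * Toy model: a "folio" split into fixed-size blocks with one uptodate
 * bit per block.  A write marks its blocks uptodate; a later read only
 * "issues I/O" for the contiguous runs of blocks that are still stale.
 */
#include <stdbool.h>
#include <stdio.h>

#define FOLIO_SIZE	4096
#define BLOCK_SIZE	1024
#define NR_BLOCKS	(FOLIO_SIZE / BLOCK_SIZE)

struct toy_folio {
	bool uptodate[NR_BLOCKS];	/* per-block uptodate bits */
};

/* Pretend to issue I/O for one contiguous non-uptodate block range. */
static void toy_issue_read(struct toy_folio *f, int first, int last)
{
	printf("read blocks %d-%d (%d bytes)\n",
	       first, last, (last - first + 1) * BLOCK_SIZE);
	for (int i = first; i <= last; i++)
		f->uptodate[i] = true;
}

/* Read in only the blocks that are not already uptodate. */
static void toy_read_folio(struct toy_folio *f)
{
	int start = -1;

	for (int i = 0; i < NR_BLOCKS; i++) {
		if (!f->uptodate[i]) {
			if (start < 0)
				start = i;
		} else if (start >= 0) {
			toy_issue_read(f, start, i - 1);
			start = -1;
		}
	}
	if (start >= 0)
		toy_issue_read(f, start, NR_BLOCKS - 1);
}

int main(void)
{
	struct toy_folio f = { 0 };

	/* A prior write made block 1 uptodate ... */
	f.uptodate[1] = true;

	/* ... so the read only fetches block 0 and blocks 2-3. */
	toy_read_folio(&f);
	return 0;
}

With the whole folio stale the sketch issues one read for blocks 0-3; after the partial write it issues two smaller reads, which is the saving the commit message is after for large folios.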