Re: [PATCH v1 04/16] iomap: use iomap_iter->private for stashing read/readahead bio

On Fri, Aug 29, 2025 at 04:56:15PM -0700, Joanne Koong wrote:
> Use the iomap_iter->private field for stashing any read/readahead bios
> instead of defining the bio as part of the iomap_readpage_ctx struct.
> This makes the read/readahead interface more generic. Some filesystems
> that will be using iomap for read/readahead may not have CONFIG_BLOCK
> set.

Sorry, but I don't like abusing iomap_iter::private because (a) it's a
void pointer, which invites type-unsafe shenanigans; and (b) ->private
exists to store private data for the iomap caller, not for iomap
itself.
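
For reference, the iteration state looks roughly like this (paraphrased
from include/linux/iomap.h; the exact field list varies by kernel
version, so treat this as a sketch):

struct iomap_iter {
	struct inode		*inode;
	loff_t			pos;
	u64			len;
	int			status;
	unsigned		flags;
	struct iomap		iomap;
	struct iomap		srcmap;
	/* opaque cookie for the filesystem's ->iomap_begin/end */
	void			*private;
};

Core iomap code is supposed to treat ->private as opaque, so having the
core stash its own bio in there inverts that contract.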

> Signed-off-by: Joanne Koong <joannelkoong@xxxxxxxxx>
> ---
>  fs/iomap/buffered-io.c | 49 +++++++++++++++++++++---------------------
>  1 file changed, 25 insertions(+), 24 deletions(-)
> 
> diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
> index f2bfb3e17bb0..9db233a4a82c 100644
> --- a/fs/iomap/buffered-io.c
> +++ b/fs/iomap/buffered-io.c
> @@ -353,11 +353,10 @@ static void iomap_read_end_io(struct bio *bio)
>  struct iomap_readpage_ctx {
>  	struct folio		*cur_folio;
>  	bool			folio_unlocked;
> -	struct bio		*bio;

Does this work if you do:

#ifdef CONFIG_BLOCK
	struct bio		*bio;
#endif

Hm?  Possibly with a forward declaration of struct bio to shut the
compiler up?
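
Untested sketch of the whole thing; with the member under the ifdef the
forward declaration is probably not even needed, since struct bio is
fully defined whenever the field actually exists:

struct bio;	/* only in case the compiler grumbles when !CONFIG_BLOCK */

struct iomap_readpage_ctx {
	struct folio		*cur_folio;
	bool			folio_unlocked;
#ifdef CONFIG_BLOCK
	struct bio		*bio;
#endif
	struct readahead_control *rac;
};

The CONFIG_BLOCK-only alloc/submit helpers would get compiled out along
with it, and the bio stays out of iomap_iter entirely.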

--D

>  	struct readahead_control *rac;
>  };
>  
> -static void iomap_read_folio_range_async(const struct iomap_iter *iter,
> +static void iomap_read_folio_range_async(struct iomap_iter *iter,
>  		struct iomap_readpage_ctx *ctx, loff_t pos, size_t plen)
>  {
>  	struct folio *folio = ctx->cur_folio;
> @@ -365,6 +364,7 @@ static void iomap_read_folio_range_async(const struct iomap_iter *iter,
>  	struct iomap_folio_state *ifs = folio->private;
>  	size_t poff = offset_in_folio(folio, pos);
>  	loff_t length = iomap_length(iter);
> +	struct bio *bio = iter->private;
>  	sector_t sector;
>  
>  	ctx->folio_unlocked = true;
> @@ -375,34 +375,32 @@ static void iomap_read_folio_range_async(const struct iomap_iter *iter,
>  	}
>  
>  	sector = iomap_sector(iomap, pos);
> -	if (!ctx->bio ||
> -	    bio_end_sector(ctx->bio) != sector ||
> -	    !bio_add_folio(ctx->bio, folio, plen, poff)) {
> +	if (!bio || bio_end_sector(bio) != sector ||
> +	    !bio_add_folio(bio, folio, plen, poff)) {
>  		gfp_t gfp = mapping_gfp_constraint(folio->mapping, GFP_KERNEL);
>  		gfp_t orig_gfp = gfp;
>  		unsigned int nr_vecs = DIV_ROUND_UP(length, PAGE_SIZE);
>  
> -		if (ctx->bio)
> -			submit_bio(ctx->bio);
> +		if (bio)
> +			submit_bio(bio);
>  
>  		if (ctx->rac) /* same as readahead_gfp_mask */
>  			gfp |= __GFP_NORETRY | __GFP_NOWARN;
> -		ctx->bio = bio_alloc(iomap->bdev, bio_max_segs(nr_vecs),
> -				     REQ_OP_READ, gfp);
> +		bio = bio_alloc(iomap->bdev, bio_max_segs(nr_vecs),
> +				REQ_OP_READ, gfp);
>  		/*
>  		 * If the bio_alloc fails, try it again for a single page to
>  		 * avoid having to deal with partial page reads.  This emulates
>  		 * what do_mpage_read_folio does.
>  		 */
> -		if (!ctx->bio) {
> -			ctx->bio = bio_alloc(iomap->bdev, 1, REQ_OP_READ,
> -					     orig_gfp);
> -		}
> +		if (!bio)
> +			bio = bio_alloc(iomap->bdev, 1, REQ_OP_READ, orig_gfp);
> +		iter->private = bio;
>  		if (ctx->rac)
> -			ctx->bio->bi_opf |= REQ_RAHEAD;
> -		ctx->bio->bi_iter.bi_sector = sector;
> -		ctx->bio->bi_end_io = iomap_read_end_io;
> -		bio_add_folio_nofail(ctx->bio, folio, plen, poff);
> +			bio->bi_opf |= REQ_RAHEAD;
> +		bio->bi_iter.bi_sector = sector;
> +		bio->bi_end_io = iomap_read_end_io;
> +		bio_add_folio_nofail(bio, folio, plen, poff);
>  	}
>  }
>  
> @@ -447,15 +445,18 @@ static int iomap_readpage_iter(struct iomap_iter *iter,
>  	return iomap_iter_advance(iter, &length);
>  }
>  
> -static void iomap_readfolio_submit(const struct iomap_readpage_ctx *ctx)
> +static void iomap_readfolio_submit(const struct iomap_iter *iter)
>  {
> -	if (ctx->bio)
> -		submit_bio(ctx->bio);
> +	struct bio *bio = iter->private;
> +
> +	if (bio)
> +		submit_bio(bio);
>  }
>  
> -static void iomap_readfolio_complete(const struct iomap_readpage_ctx *ctx)
> +static void iomap_readfolio_complete(const struct iomap_iter *iter,
> +		const struct iomap_readpage_ctx *ctx)
>  {
> -	iomap_readfolio_submit(ctx);
> +	iomap_readfolio_submit(iter);
>  
>  	if (ctx->cur_folio && !ctx->folio_unlocked)
>  		folio_unlock(ctx->cur_folio);
> @@ -492,7 +493,7 @@ int iomap_read_folio(struct folio *folio, const struct iomap_ops *ops)
>  	while ((ret = iomap_iter(&iter, ops)) > 0)
>  		iter.status = iomap_read_folio_iter(&iter, &ctx);
>  
> -	iomap_readfolio_complete(&ctx);
> +	iomap_readfolio_complete(&iter, &ctx);
>  
>  	/*
>  	 * Just like mpage_readahead and block_read_full_folio, we always
> @@ -558,7 +559,7 @@ void iomap_readahead(struct readahead_control *rac, const struct iomap_ops *ops)
>  	while (iomap_iter(&iter, ops) > 0)
>  		iter.status = iomap_readahead_iter(&iter, &ctx);
>  
> -	iomap_readfolio_complete(&ctx);
> +	iomap_readfolio_complete(&iter, &ctx);
>  }
>  EXPORT_SYMBOL_GPL(iomap_readahead);
>  
> -- 
> 2.47.3
> 
> 