Move the following code into the only caller of disk_zone_wplug_add_bio():
- bio->bi_opf &= ~REQ_NOWAIT
- zwplug->flags |= BLK_ZONE_WPLUG_PLUGGED
- The disk_zone_wplug_schedule_bio_work() call.

No functionality has been changed. This patch prepares for zoned write
pipelining by removing from disk_zone_wplug_add_bio() the code that does
not apply to all BIO processing cases once zoned writes are pipelined.

Signed-off-by: Bart Van Assche <bvanassche@xxxxxxx>
---
 block/blk-zoned.c | 34 ++++++++++++++--------------------
 1 file changed, 14 insertions(+), 20 deletions(-)

diff --git a/block/blk-zoned.c b/block/blk-zoned.c
index 089f106fd82e..2a85e3b7b081 100644
--- a/block/blk-zoned.c
+++ b/block/blk-zoned.c
@@ -778,8 +778,6 @@ static inline void disk_zone_wplug_add_bio(struct gendisk *disk,
 					   struct blk_zone_wplug *zwplug,
 					   struct bio *bio, unsigned int nr_segs)
 {
-	bool schedule_bio_work = false;
-
 	/*
 	 * Grab an extra reference on the BIO request queue usage counter.
 	 * This reference will be reused to submit a request for the BIO for
@@ -795,16 +793,6 @@ static inline void disk_zone_wplug_add_bio(struct gendisk *disk,
 	 */
 	bio_clear_polled(bio);
 
-	/*
-	 * REQ_NOWAIT BIOs are always handled using the zone write plug BIO
-	 * work, which can block. So clear the REQ_NOWAIT flag and schedule the
-	 * work if this is the first BIO we are plugging.
-	 */
-	if (bio->bi_opf & REQ_NOWAIT) {
-		schedule_bio_work = !(zwplug->flags & BLK_ZONE_WPLUG_PLUGGED);
-		bio->bi_opf &= ~REQ_NOWAIT;
-	}
-
 	/*
 	 * Reuse the poll cookie field to store the number of segments when
 	 * split to the hardware limits.
@@ -818,11 +806,6 @@ static inline void disk_zone_wplug_add_bio(struct gendisk *disk,
 	 * at the tail of the list to preserve the sequential write order.
 	 */
 	bio_list_add(&zwplug->bio_list, bio);
-
-	zwplug->flags |= BLK_ZONE_WPLUG_PLUGGED;
-
-	if (schedule_bio_work)
-		disk_zone_wplug_schedule_bio_work(disk, zwplug);
 }
 
 /*
@@ -987,6 +970,7 @@ static bool blk_zone_wplug_handle_write(struct bio *bio, unsigned int nr_segs)
 {
 	struct gendisk *disk = bio->bi_bdev->bd_disk;
 	sector_t sector = bio->bi_iter.bi_sector;
+	bool schedule_bio_work = false;
 	struct blk_zone_wplug *zwplug;
 	gfp_t gfp_mask = GFP_NOIO;
 	unsigned long flags;
@@ -1031,13 +1015,17 @@ static bool blk_zone_wplug_handle_write(struct bio *bio, unsigned int nr_segs)
 
 	/* If the zone is already plugged, add the BIO to the plug BIO list. */
 	if (zwplug->flags & BLK_ZONE_WPLUG_PLUGGED)
-		goto plug;
+		goto queue_bio;
 	/*
 	 * Do the same for REQ_NOWAIT BIOs to ensure that we will not see a
 	 * BLK_STS_AGAIN failure if we let the BIO execute.
 	 */
-	if (bio->bi_opf & REQ_NOWAIT)
-		goto plug;
+	if (bio->bi_opf & REQ_NOWAIT) {
+		bio->bi_opf &= ~REQ_NOWAIT;
+		if (!(zwplug->flags & BLK_ZONE_WPLUG_PLUGGED))
+			goto plug;
+		goto queue_bio;
+	}
 
 	if (!blk_zone_wplug_prepare_bio(zwplug, bio)) {
 		spin_unlock_irqrestore(&zwplug->lock, flags);
@@ -1053,7 +1041,13 @@ static bool blk_zone_wplug_handle_write(struct bio *bio, unsigned int nr_segs)
 	return false;
 
 plug:
+	zwplug->flags |= BLK_ZONE_WPLUG_PLUGGED;
+	schedule_bio_work = true;
+
+queue_bio:
 	disk_zone_wplug_add_bio(disk, zwplug, bio, nr_segs);
+	if (schedule_bio_work)
+		disk_zone_wplug_schedule_bio_work(disk, zwplug);
 
 	spin_unlock_irqrestore(&zwplug->lock, flags);
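
Note (not part of the patch): the hunks above only move where the plug flag is
set and where the BIO work gets scheduled; the add-bio helper now does nothing
but queue the BIO. As a rough illustration of the resulting plug/queue_bio
control flow in blk_zone_wplug_handle_write(), here is a minimal, user-space
sketch. The flag and field names loosely mirror the kernel code, but the toy_*
types and helpers and main() are made up for illustration and are not kernel
APIs.

#include <stdbool.h>
#include <stdio.h>

#define TOY_REQ_NOWAIT			(1u << 0)
#define TOY_ZONE_WPLUG_PLUGGED		(1u << 0)

struct toy_bio { unsigned int bi_opf; };
struct toy_zwplug { unsigned int flags; int queued; };

/* Mirrors the reduced disk_zone_wplug_add_bio(): it only queues the BIO. */
static void toy_wplug_add_bio(struct toy_zwplug *zwplug, struct toy_bio *bio)
{
	(void)bio;
	zwplug->queued++;
}

static void toy_schedule_bio_work(struct toy_zwplug *zwplug)
{
	printf("schedule bio work (queued=%d)\n", zwplug->queued);
}

/* Mirrors the plug/queue_bio paths of the post-patch handler. */
static void toy_handle_write(struct toy_zwplug *zwplug, struct toy_bio *bio)
{
	bool schedule_bio_work = false;

	/* Zone already plugged: just queue the BIO. */
	if (zwplug->flags & TOY_ZONE_WPLUG_PLUGGED)
		goto queue_bio;

	/* REQ_NOWAIT BIOs are handed to the (blocking) BIO work. */
	if (bio->bi_opf & TOY_REQ_NOWAIT) {
		bio->bi_opf &= ~TOY_REQ_NOWAIT;
		if (!(zwplug->flags & TOY_ZONE_WPLUG_PLUGGED))
			goto plug;
		goto queue_bio;
	}

	/* Otherwise the BIO would be prepared and issued directly. */
	printf("issue bio directly\n");
	return;

plug:
	/* The caller, not the add-bio helper, now plugs the zone ... */
	zwplug->flags |= TOY_ZONE_WPLUG_PLUGGED;
	schedule_bio_work = true;

queue_bio:
	toy_wplug_add_bio(zwplug, bio);
	/* ... and schedules the BIO work only when it plugged the zone. */
	if (schedule_bio_work)
		toy_schedule_bio_work(zwplug);
}

int main(void)
{
	struct toy_zwplug zwplug = { 0, 0 };
	struct toy_bio nowait = { TOY_REQ_NOWAIT };
	struct toy_bio plain = { 0 };

	toy_handle_write(&zwplug, &nowait);	/* plugs the zone + schedules work */
	toy_handle_write(&zwplug, &plain);	/* zone already plugged: just queue */
	return 0;
}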