From: Keith Busch <kbusch@xxxxxxxxxx> This will make it easier to add different sources of the bvec table, like for upcoming integrity support, rather than assuming use of the bio's bi_io_vec. It also makes iterating "special" payloads more consistent with iterating normal payloads. Signed-off-by: Keith Busch <kbusch@xxxxxxxxxx> --- block/blk-mq-dma.c | 30 ++++++++++++++++-------------- include/linux/blk-mq-dma.h | 1 + 2 files changed, 17 insertions(+), 14 deletions(-) diff --git a/block/blk-mq-dma.c b/block/blk-mq-dma.c index 61fbdb715220f..08ce66175a7a3 100644 --- a/block/blk-mq-dma.c +++ b/block/blk-mq-dma.c @@ -10,23 +10,17 @@ static bool blk_map_iter_next(struct request *req, struct blk_map_iter *iter) unsigned int max_size; struct bio_vec bv; - if (req->rq_flags & RQF_SPECIAL_PAYLOAD) { - if (!iter->bio) - return false; - iter->paddr = bvec_phys(&req->special_vec); - iter->len = req->special_vec.bv_len; - iter->bio = NULL; - return true; - } - if (!iter->iter.bi_size) return false; - bv = mp_bvec_iter_bvec(iter->bio->bi_io_vec, iter->iter); + bv = mp_bvec_iter_bvec(iter->bvec, iter->iter); iter->paddr = bvec_phys(&bv); max_size = get_max_segment_size(&req->q->limits, iter->paddr, UINT_MAX); bv.bv_len = min(bv.bv_len, max_size); - bio_advance_iter_single(iter->bio, &iter->iter, bv.bv_len); + bvec_iter_advance_single(iter->bvec, &iter->iter, bv.bv_len); + + if (req->rq_flags & RQF_SPECIAL_PAYLOAD) + return true; /* * If we are entirely done with this bi_io_vec entry, check if the next @@ -40,15 +34,16 @@ static bool blk_map_iter_next(struct request *req, struct blk_map_iter *iter) if (!iter->iter.bi_size) { iter->bio = iter->bio->bi_next; iter->iter = iter->bio->bi_iter; + iter->bvec = iter->bio->bi_io_vec; } - next = mp_bvec_iter_bvec(iter->bio->bi_io_vec, iter->iter); + next = mp_bvec_iter_bvec(iter->bvec, iter->iter); if (bv.bv_len + next.bv_len > max_size || !biovec_phys_mergeable(req->q, &bv, &next)) break; bv.bv_len += next.bv_len; - 
bio_advance_iter_single(iter->bio, &iter->iter, next.bv_len); + bvec_iter_advance_single(iter->bvec, &iter->iter, next.bv_len); } iter->len = bv.bv_len; @@ -151,6 +146,11 @@ bool blk_rq_dma_map_iter_start(struct request *req, struct device *dma_dev, memset(&iter->p2pdma, 0, sizeof(iter->p2pdma)); iter->status = BLK_STS_OK; + if (req->rq_flags & RQF_SPECIAL_PAYLOAD) + iter->iter.bvec = &req->special_vec; + else + iter->iter.bvec = req->bio->bi_io_vec; + /* * Grab the first segment ASAP because we'll need it to check for P2P * transfers. @@ -244,8 +244,10 @@ int __blk_rq_map_sg(struct request *rq, struct scatterlist *sglist, int nsegs = 0; /* the internal flush request may not have bio attached */ - if (bio) + if (bio) { iter.iter = bio->bi_iter; + iter.bvec = bio->bi_io_vec; + } while (blk_map_iter_next(rq, &iter)) { *last_sg = blk_next_sg(last_sg, sglist); diff --git a/include/linux/blk-mq-dma.h b/include/linux/blk-mq-dma.h index 1e5988afdb978..c82f880dee914 100644 --- a/include/linux/blk-mq-dma.h +++ b/include/linux/blk-mq-dma.h @@ -8,6 +8,7 @@ struct blk_map_iter { phys_addr_t paddr; u32 len; + struct bio_vec *bvec; struct bvec_iter iter; struct bio *bio; }; -- 2.47.3