From: Keith Busch <kbusch@xxxxxxxxxx>

In preparation for dma mapping integrity metadata, move the common dma
setup to a helper.

Signed-off-by: Keith Busch <kbusch@xxxxxxxxxx>
---
 block/blk-mq-dma.c | 60 +++++++++++++++++++++++++---------------------
 1 file changed, 33 insertions(+), 27 deletions(-)

diff --git a/block/blk-mq-dma.c b/block/blk-mq-dma.c
index 988c27667df67..bc694fecb39dc 100644
--- a/block/blk-mq-dma.c
+++ b/block/blk-mq-dma.c
@@ -135,36 +135,12 @@ static struct blk_map_iter blk_rq_map_iter(struct request *rq)
 	};
 }
 
-/**
- * blk_rq_dma_map_iter_start - map the first DMA segment for a request
- * @req:	request to map
- * @dma_dev:	device to map to
- * @state:	DMA IOVA state
- * @iter:	block layer DMA iterator
- *
- * Start DMA mapping @req to @dma_dev. @state and @iter are provided by the
- * caller and don't need to be initialized. @state needs to be stored for use
- * at unmap time, @iter is only needed at map time.
- *
- * Returns %false if there is no segment to map, including due to an error, or
- * %true ft it did map a segment.
- *
- * If a segment was mapped, the DMA address for it is returned in @iter.addr and
- * the length in @iter.len. If no segment was mapped the status code is
- * returned in @iter.status.
- *
- * The caller can call blk_rq_dma_map_coalesce() to check if further segments
- * need to be mapped after this, or go straight to blk_rq_dma_map_iter_next()
- * to try to map the following segments.
- */
-bool blk_rq_dma_map_iter_start(struct request *req, struct device *dma_dev,
-		struct dma_iova_state *state, struct blk_dma_iter *iter)
+static bool blk_dma_map_iter_start(struct request *req, struct device *dma_dev,
+		struct dma_iova_state *state, struct blk_dma_iter *iter,
+		unsigned int total_len)
 {
-	unsigned int total_len = blk_rq_payload_bytes(req);
-
 	memset(&iter->p2pdma, 0, sizeof(iter->p2pdma));
 	iter->status = BLK_STS_OK;
-	iter->iter = blk_rq_map_iter(req);
 
 	/*
 	 * Grab the first segment ASAP because we'll need it to check for P2P
@@ -194,6 +170,36 @@ bool blk_rq_dma_map_iter_start(struct request *req, struct device *dma_dev,
 		return blk_rq_dma_map_iova(req, dma_dev, state, iter);
 	return blk_dma_map_direct(req, dma_dev, iter);
 }
+
+/**
+ * blk_rq_dma_map_iter_start - map the first DMA segment for a request
+ * @req:	request to map
+ * @dma_dev:	device to map to
+ * @state:	DMA IOVA state
+ * @iter:	block layer DMA iterator
+ *
+ * Start DMA mapping @req to @dma_dev. @state and @iter are provided by the
+ * caller and don't need to be initialized. @state needs to be stored for use
+ * at unmap time, @iter is only needed at map time.
+ *
+ * Returns %false if there is no segment to map, including due to an error, or
+ * %true ft it did map a segment.
+ *
+ * If a segment was mapped, the DMA address for it is returned in @iter.addr and
+ * the length in @iter.len. If no segment was mapped the status code is
+ * returned in @iter.status.
+ *
+ * The caller can call blk_rq_dma_map_coalesce() to check if further segments
+ * need to be mapped after this, or go straight to blk_rq_dma_map_iter_next()
+ * to try to map the following segments.
+ */
+bool blk_rq_dma_map_iter_start(struct request *req, struct device *dma_dev,
+		struct dma_iova_state *state, struct blk_dma_iter *iter)
+{
+	iter->iter = blk_rq_map_iter(req);
+	return blk_dma_map_iter_start(req, dma_dev, state, iter,
+			blk_rq_payload_bytes(req));
+}
 EXPORT_SYMBOL_GPL(blk_rq_dma_map_iter_start);
 
 /**
-- 
2.47.3
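
[Editor's note, not part of the patch: a minimal sketch of the caller pattern the
kernel-doc above describes, assuming the iterator API as it exists in current
mainline. demo_dma_map_rq() and demo_queue_segment() are hypothetical names used
only for illustration.]

/*
 * Illustrative sketch: walk the DMA segments of a request with the
 * blk_rq_dma_map_iter_* helpers that the patch refactors internally.
 */
#include <linux/blk-mq.h>
#include <linux/blk-mq-dma.h>

static void demo_queue_segment(dma_addr_t addr, u32 len)
{
	/* A real driver would add addr/len to its transfer descriptor here. */
}

static blk_status_t demo_dma_map_rq(struct request *req,
		struct device *dma_dev, struct dma_iova_state *state)
{
	struct blk_dma_iter iter;

	/* Map the first segment; on failure the reason is in iter.status. */
	if (!blk_rq_dma_map_iter_start(req, dma_dev, state, &iter))
		return iter.status;

	do {
		/* iter.addr/iter.len describe the segment just mapped. */
		demo_queue_segment(iter.addr, iter.len);
	} while (blk_rq_dma_map_iter_next(req, dma_dev, state, &iter));

	/* Still BLK_STS_OK when the iterator simply ran out of segments. */
	return iter.status;
}

[As the kernel-doc notes, a caller can also check blk_rq_dma_map_coalesce(state)
after the first segment to detect that the whole request was coalesced into a
single IOVA range and skip the loop.]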