ublk_get_req_ref() and ublk_put_req_ref() currently call
ublk_need_req_ref(ubq) to check whether the ublk device features
require reference counting of its requests. However, all callers
already know that reference counting is required:
- __ublk_check_and_get_req() is only called from
  ublk_check_and_get_req() if user copy is enabled, and from
  ublk_register_io_buf() if zero copy is enabled
- ublk_io_release() is only called for requests registered by
  ublk_register_io_buf(), which requires zero copy
- ublk_ch_read_iter() and ublk_ch_write_iter() only call
  ublk_put_req_ref() if ublk_check_and_get_req() succeeded, which
  requires user copy to be enabled

So drop the ublk_need_req_ref() check and the ubq argument in
ublk_get_req_ref() and ublk_put_req_ref().

Signed-off-by: Caleb Sander Mateos <csander@xxxxxxxxxxxxxxx>
---
 drivers/block/ublk_drv.c | 35 +++++++++++------------------------
 1 file changed, 11 insertions(+), 24 deletions(-)

diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index 199028f36ec8..ebc56681eb68 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -697,28 +697,19 @@ static inline void ublk_init_req_ref(const struct ublk_queue *ubq,
 {
 	if (ublk_need_req_ref(ubq))
 		refcount_set(&io->ref, UBLK_REFCOUNT_INIT);
 }
 
-static inline bool ublk_get_req_ref(const struct ublk_queue *ubq,
-				    struct ublk_io *io)
+static inline bool ublk_get_req_ref(struct ublk_io *io)
 {
-	if (ublk_need_req_ref(ubq))
-		return refcount_inc_not_zero(&io->ref);
-
-	return true;
+	return refcount_inc_not_zero(&io->ref);
 }
 
-static inline void ublk_put_req_ref(const struct ublk_queue *ubq,
-				    struct ublk_io *io, struct request *req)
+static inline void ublk_put_req_ref(struct ublk_io *io, struct request *req)
 {
-	if (ublk_need_req_ref(ubq)) {
-		if (refcount_dec_and_test(&io->ref))
-			__ublk_complete_rq(req);
-	} else {
+	if (refcount_dec_and_test(&io->ref))
 		__ublk_complete_rq(req);
-	}
 }
 
 static inline void ublk_sub_req_ref(struct ublk_io *io, struct request *req)
 {
 	unsigned sub_refs = UBLK_REFCOUNT_INIT - io->task_registered_buffers;
@@ -2019,11 +2010,11 @@ static void ublk_io_release(void *priv)
 	 * but unregistered on task. Or after UBLK_IO_COMMIT_AND_FETCH_REQ.
 	 */
 	if (current == io->task && io->task_registered_buffers)
 		io->task_registered_buffers--;
 	else
-		ublk_put_req_ref(ubq, io, rq);
+		ublk_put_req_ref(io, rq);
 }
 
 static int ublk_register_io_buf(struct io_uring_cmd *cmd,
 				const struct ublk_queue *ubq,
 				struct ublk_io *io,
@@ -2041,11 +2032,11 @@ static int ublk_register_io_buf(struct io_uring_cmd *cmd,
 		return -EINVAL;
 
 	ret = io_buffer_register_bvec(cmd, req, ublk_io_release, index,
 				      issue_flags);
 	if (ret) {
-		ublk_put_req_ref(ubq, io, req);
+		ublk_put_req_ref(io, req);
 		return ret;
 	}
 
 	return 0;
 }
@@ -2338,11 +2329,11 @@ static inline struct request *__ublk_check_and_get_req(struct ublk_device *ub,
 	 */
 	req = blk_mq_tag_to_rq(ub->tag_set.tags[ubq->q_id], tag);
 	if (!req)
 		return NULL;
 
-	if (!ublk_get_req_ref(ubq, io))
+	if (!ublk_get_req_ref(io))
 		return NULL;
 
 	if (unlikely(!blk_mq_request_started(req) || req->tag != tag))
 		goto fail_put;
 
@@ -2352,11 +2343,11 @@ static inline struct request *__ublk_check_and_get_req(struct ublk_device *ub,
 	if (offset > blk_rq_bytes(req))
 		goto fail_put;
 
 	return req;
 fail_put:
-	ublk_put_req_ref(ubq, io, req);
+	ublk_put_req_ref(io, req);
 	return NULL;
 }
 
 static inline int ublk_ch_uring_cmd_local(struct io_uring_cmd *cmd,
 					  unsigned int issue_flags)
@@ -2468,48 +2459,44 @@ static struct request *ublk_check_and_get_req(struct kiocb *iocb,
 		goto fail;
 
 	*off = buf_off;
 	return req;
 fail:
-	ublk_put_req_ref(ubq, *io, req);
+	ublk_put_req_ref(*io, req);
 	return ERR_PTR(-EACCES);
 }
 
 static ssize_t ublk_ch_read_iter(struct kiocb *iocb, struct iov_iter *to)
 {
-	struct ublk_queue *ubq;
 	struct request *req;
 	struct ublk_io *io;
 	size_t buf_off;
 	size_t ret;
 
 	req = ublk_check_and_get_req(iocb, to, &buf_off, ITER_DEST, &io);
 	if (IS_ERR(req))
 		return PTR_ERR(req);
 
 	ret = ublk_copy_user_pages(req, buf_off, to, ITER_DEST);
-	ubq = req->mq_hctx->driver_data;
-	ublk_put_req_ref(ubq, io, req);
+	ublk_put_req_ref(io, req);
 
 	return ret;
 }
 
 static ssize_t ublk_ch_write_iter(struct kiocb *iocb, struct iov_iter *from)
 {
-	struct ublk_queue *ubq;
 	struct request *req;
 	struct ublk_io *io;
 	size_t buf_off;
 	size_t ret;
 
 	req = ublk_check_and_get_req(iocb, from, &buf_off, ITER_SOURCE, &io);
 	if (IS_ERR(req))
 		return PTR_ERR(req);
 
 	ret = ublk_copy_user_pages(req, buf_off, from, ITER_SOURCE);
-	ubq = req->mq_hctx->driver_data;
-	ublk_put_req_ref(ubq, io, req);
+	ublk_put_req_ref(io, req);
 
 	return ret;
 }
 
 static const struct file_operations ublk_ch_fops = {
-- 
2.45.2
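
P.S. For quick reference, here is how the two helpers read with this
change applied (assembled from the hunks above, not a separate change).
Both reduce to plain refcount operations, since every caller has
already established that the request is reference-counted:

static inline bool ublk_get_req_ref(struct ublk_io *io)
{
	/* Fails (returns false) if the request has already dropped to zero */
	return refcount_inc_not_zero(&io->ref);
}

static inline void ublk_put_req_ref(struct ublk_io *io, struct request *req)
{
	/* Last reference completes the request */
	if (refcount_dec_and_test(&io->ref))
		__ublk_complete_rq(req);
}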