Commit 79525b51acc1 ("io_uring: fix nvme's 32b cqes on mixed cq") split
out a separate io_uring_cmd_done32() helper for ->uring_cmd()
implementations that return 32-byte CQEs. The res2 value passed to
io_uring_cmd_done() is now unused because __io_uring_cmd_done() ignores
it when is_cqe32 is passed as false. So drop the parameter from
io_uring_cmd_done() to simplify the callers and clarify that it's not
possible to return an extra value beyond the 32-bit CQE result.

Signed-off-by: Caleb Sander Mateos <csander@xxxxxxxxxxxxxxx>
---
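For reviewers, a minimal sketch of the two completion paths after this
change. foo_cmd_complete()/foo_cmd_complete32() are made-up names for
illustration; only the io_uring_cmd_done() and io_uring_cmd_done32()
signatures are taken from this patch:

	/* helpers from include/linux/io_uring/cmd.h */

	/* Common case: the completion carries only the 32-bit result. */
	static void foo_cmd_complete(struct io_uring_cmd *cmd, int res,
				     unsigned int issue_flags)
	{
		/* no res2 argument: a second value cannot be returned */
		io_uring_cmd_done(cmd, res, issue_flags);
	}

	/* ->uring_cmd() implementations posting 32-byte CQEs keep res2. */
	static void foo_cmd_complete32(struct io_uring_cmd *cmd, int res,
				       u64 res2, unsigned int issue_flags)
	{
		/* res2 is carried in the extra fields of the 32-byte CQE */
		io_uring_cmd_done32(cmd, res, res2, issue_flags);
	}

A driver that needs to return a second value must opt into 32-byte CQEs
and use the done32 variant.
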
 block/ioctl.c                | 2 +-
 drivers/block/ublk_drv.c     | 6 +++---
 fs/btrfs/ioctl.c             | 2 +-
 fs/fuse/dev_uring.c          | 8 ++++----
 include/linux/io_uring/cmd.h | 4 ++--
 5 files changed, 11 insertions(+), 11 deletions(-)

diff --git a/block/ioctl.c b/block/ioctl.c
index f7b0006ca45d..c9ea8e53871e 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -774,11 +774,11 @@ static void blk_cmd_complete(struct io_uring_cmd *cmd, unsigned int issue_flags)
 	struct blk_iou_cmd *bic = io_uring_cmd_to_pdu(cmd, struct blk_iou_cmd);
 
 	if (bic->res == -EAGAIN && bic->nowait)
 		io_uring_cmd_issue_blocking(cmd);
 	else
-		io_uring_cmd_done(cmd, bic->res, 0, issue_flags);
+		io_uring_cmd_done(cmd, bic->res, issue_flags);
 }
 
 static void bio_cmd_bio_end_io(struct bio *bio)
 {
 	struct io_uring_cmd *cmd = bio->bi_private;
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index 99abd67b708b..48c409d1e1bb 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -1186,11 +1186,11 @@ static void ublk_complete_io_cmd(struct ublk_io *io, struct request *req,
 				 int res, unsigned issue_flags)
 {
 	struct io_uring_cmd *cmd = __ublk_prep_compl_io_cmd(io, req);
 
 	/* tell ublksrv one io request is coming */
-	io_uring_cmd_done(cmd, res, 0, issue_flags);
+	io_uring_cmd_done(cmd, res, issue_flags);
 }
 
 #define UBLK_REQUEUE_DELAY_MS	3
 
 static inline void __ublk_abort_rq(struct ublk_queue *ubq,
@@ -1803,11 +1803,11 @@ static void ublk_cancel_cmd(struct ublk_queue *ubq, unsigned tag,
 	if (!done)
 		io->flags |= UBLK_IO_FLAG_CANCELED;
 	spin_unlock(&ubq->cancel_lock);
 
 	if (!done)
-		io_uring_cmd_done(io->cmd, UBLK_IO_RES_ABORT, 0, issue_flags);
+		io_uring_cmd_done(io->cmd, UBLK_IO_RES_ABORT, issue_flags);
 }
 
 /*
  * The ublk char device won't be closed when calling cancel fn, so both
  * ublk device and queue are guaranteed to be live
@@ -2450,11 +2450,11 @@ static void ublk_ch_uring_cmd_cb(struct io_uring_cmd *cmd,
 		unsigned int issue_flags)
 {
 	int ret = ublk_ch_uring_cmd_local(cmd, issue_flags);
 
 	if (ret != -EIOCBQUEUED)
-		io_uring_cmd_done(cmd, ret, 0, issue_flags);
+		io_uring_cmd_done(cmd, ret, issue_flags);
 }
 
 static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
 {
 	if (unlikely(issue_flags & IO_URING_F_CANCEL)) {
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 7e13de2bdcbf..168d84421a78 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -4683,11 +4683,11 @@ static void btrfs_uring_read_finished(struct io_uring_cmd *cmd, unsigned int iss
 out:
 	btrfs_unlock_extent(io_tree, priv->start, priv->lockend,
 			    &priv->cached_state);
 	btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
 
-	io_uring_cmd_done(cmd, ret, 0, issue_flags);
+	io_uring_cmd_done(cmd, ret, issue_flags);
 	add_rchar(current, ret);
 
 	for (index = 0; index < priv->nr_pages; index++)
 		__free_page(priv->pages[index]);
 
diff --git a/fs/fuse/dev_uring.c b/fs/fuse/dev_uring.c
index 249b210becb1..a30c44234a4e 100644
--- a/fs/fuse/dev_uring.c
+++ b/fs/fuse/dev_uring.c
@@ -349,11 +349,11 @@ static void fuse_uring_entry_teardown(struct fuse_ring_ent *ent)
 	list_move(&ent->list, &queue->ent_released);
 	ent->state = FRRS_RELEASED;
 	spin_unlock(&queue->lock);
 
 	if (cmd)
-		io_uring_cmd_done(cmd, -ENOTCONN, 0, IO_URING_F_UNLOCKED);
+		io_uring_cmd_done(cmd, -ENOTCONN, IO_URING_F_UNLOCKED);
 
 	if (req)
 		fuse_uring_stop_fuse_req_end(req);
 }
 
@@ -516,11 +516,11 @@ static void fuse_uring_cancel(struct io_uring_cmd *cmd,
 	}
 	spin_unlock(&queue->lock);
 
 	if (need_cmd_done) {
 		/* no queue lock to avoid lock order issues */
-		io_uring_cmd_done(cmd, -ENOTCONN, 0, issue_flags);
+		io_uring_cmd_done(cmd, -ENOTCONN, issue_flags);
 	}
 }
 
 static void fuse_uring_prepare_cancel(struct io_uring_cmd *cmd, int issue_flags,
 				      struct fuse_ring_ent *ring_ent)
@@ -731,11 +731,11 @@ static int fuse_uring_send_next_to_ring(struct fuse_ring_ent *ent,
 	ent->cmd = NULL;
 	ent->state = FRRS_USERSPACE;
 	list_move_tail(&ent->list, &queue->ent_in_userspace);
 	spin_unlock(&queue->lock);
 
-	io_uring_cmd_done(cmd, 0, 0, issue_flags);
+	io_uring_cmd_done(cmd, 0, issue_flags);
 	return 0;
 }
 
 /*
  * Make a ring entry available for fuse_req assignment
@@ -1198,11 +1198,11 @@ static void fuse_uring_send(struct fuse_ring_ent *ent, struct io_uring_cmd *cmd,
 	ent->state = FRRS_USERSPACE;
 	list_move_tail(&ent->list, &queue->ent_in_userspace);
 	ent->cmd = NULL;
 	spin_unlock(&queue->lock);
 
-	io_uring_cmd_done(cmd, ret, 0, issue_flags);
+	io_uring_cmd_done(cmd, ret, issue_flags);
 }
 
 /*
  * This prepares and sends the ring request in fuse-uring task context.
  * User buffers are not mapped yet - the application does not have permission
diff --git a/include/linux/io_uring/cmd.h b/include/linux/io_uring/cmd.h
index 02d50f08f668..7509025b4071 100644
--- a/include/linux/io_uring/cmd.h
+++ b/include/linux/io_uring/cmd.h
@@ -158,13 +158,13 @@ static inline void *io_uring_cmd_ctx_handle(struct io_uring_cmd *cmd)
 {
 	return cmd_to_io_kiocb(cmd)->ctx;
 }
 
 static inline void io_uring_cmd_done(struct io_uring_cmd *ioucmd, s32 ret,
-				     u64 res2, unsigned issue_flags)
+				     unsigned issue_flags)
 {
-	return __io_uring_cmd_done(ioucmd, ret, res2, issue_flags, false);
+	return __io_uring_cmd_done(ioucmd, ret, 0, issue_flags, false);
 }
 
 static inline void io_uring_cmd_done32(struct io_uring_cmd *ioucmd, s32 ret,
 				       u64 res2, unsigned issue_flags)
 {
-- 
2.45.2