On Mon, Apr 14, 2025 at 02:39:33PM -0600, Jens Axboe wrote:
> On 4/14/25 1:58 PM, Uday Shankar wrote:
> > +static int ublk_fetch(struct io_uring_cmd *cmd, struct ublk_device *ub,
> > +		       struct ublk_queue *ubq, struct ublk_io *io,
> > +		       const struct ublksrv_io_cmd *ub_cmd,
> > +		       unsigned int issue_flags)
> > +{
> > +	int ret = 0;
> > +
> > +	if (issue_flags & IO_URING_F_NONBLOCK)
> > +		return -EAGAIN;
> > +
> > +	mutex_lock(&ub->mutex);
> 
> This looks like overkill, if we can trylock the mutex that should surely
> be fine? And I would imagine succeed most of the time, hence making the
> inline/fastpath fine with F_NONBLOCK?

Yeah, makes sense. How about this?

diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index cdb1543fa4a9..bf4a88cb1413 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -1832,8 +1832,8 @@ static void ublk_nosrv_work(struct work_struct *work)
 
 /* device can only be started after all IOs are ready */
 static void ublk_mark_io_ready(struct ublk_device *ub, struct ublk_queue *ubq)
+	__must_hold(&ub->mutex)
 {
-	mutex_lock(&ub->mutex);
 	ubq->nr_io_ready++;
 	if (ublk_queue_ready(ubq)) {
 		ubq->ubq_daemon = current;
@@ -1845,7 +1845,6 @@ static void ublk_mark_io_ready(struct ublk_device *ub, struct ublk_queue *ubq)
 	}
 	if (ub->nr_queues_ready == ub->dev_info.nr_hw_queues)
 		complete_all(&ub->completion);
-	mutex_unlock(&ub->mutex);
 }
 
 static void ublk_handle_need_get_data(struct ublk_device *ub, int q_id,
@@ -1929,6 +1928,57 @@ static int ublk_unregister_io_buf(struct io_uring_cmd *cmd,
 	return io_buffer_unregister_bvec(cmd, index, issue_flags);
 }
 
+static int ublk_fetch(struct io_uring_cmd *cmd, struct ublk_device *ub,
+		      struct ublk_queue *ubq, struct ublk_io *io,
+		      const struct ublksrv_io_cmd *ub_cmd,
+		      unsigned int issue_flags)
+{
+	int ret = 0;
+
+	if (!mutex_trylock(&ub->mutex)) {
+		if (issue_flags & IO_URING_F_NONBLOCK)
+			return -EAGAIN;
+		else
+			mutex_lock(&ub->mutex);
+	}
+
+	/* UBLK_IO_FETCH_REQ is only allowed before queue is setup */
+	if (ublk_queue_ready(ubq)) {
+		ret = -EBUSY;
+		goto out;
+	}
+
+	/* allow each command to be FETCHed at most once */
+	if (io->flags & UBLK_IO_FLAG_ACTIVE) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	WARN_ON_ONCE(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV);
+
+	if (ublk_need_map_io(ubq)) {
+		/*
+		 * FETCH_RQ has to provide IO buffer if NEED GET
+		 * DATA is not enabled
+		 */
+		if (!ub_cmd->addr && !ublk_need_get_data(ubq)) {
+			ret = -EINVAL;
+			goto out;
+		}
+	} else if (ub_cmd->addr) {
+		/* User copy requires addr to be unset */
+		ret = -EINVAL;
+		goto out;
+	}
+
+	ublk_fill_io_cmd(io, cmd, ub_cmd->addr);
+	ublk_mark_io_ready(ub, ubq);
+
+out:
+	mutex_unlock(&ub->mutex);
+	return ret;
+}
+
 static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
 			       unsigned int issue_flags,
 			       const struct ublksrv_io_cmd *ub_cmd)
@@ -1985,34 +2035,7 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
 	case UBLK_IO_UNREGISTER_IO_BUF:
 		return ublk_unregister_io_buf(cmd, ub_cmd->addr, issue_flags);
 	case UBLK_IO_FETCH_REQ:
-		/* UBLK_IO_FETCH_REQ is only allowed before queue is setup */
-		if (ublk_queue_ready(ubq)) {
-			ret = -EBUSY;
-			goto out;
-		}
-		/*
-		 * The io is being handled by server, so COMMIT_RQ is expected
-		 * instead of FETCH_REQ
-		 */
-		if (io->flags & UBLK_IO_FLAG_OWNED_BY_SRV)
-			goto out;
-
-		if (ublk_need_map_io(ubq)) {
-			/*
-			 * FETCH_RQ has to provide IO buffer if NEED GET
-			 * DATA is not enabled
-			 */
-			if (!ub_cmd->addr && !ublk_need_get_data(ubq))
-				goto out;
-		} else if (ub_cmd->addr) {
-			/* User copy requires addr to be unset */
-			ret = -EINVAL;
-			goto out;
-		}
-
-		ublk_fill_io_cmd(io, cmd, ub_cmd->addr);
-		ublk_mark_io_ready(ub, ubq);
-		break;
+		return ublk_fetch(cmd, ub, ubq, io, ub_cmd, issue_flags);
 	case UBLK_IO_COMMIT_AND_FETCH_REQ:
 		req = blk_mq_tag_to_rq(ub->tag_set.tags[ub_cmd->q_id], tag);
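
For anyone reading along, the trylock-first idea can be shown in isolation as a
minimal userspace sketch. This is not ublk or kernel code; it uses pthreads
instead of the kernel mutex API, and the names fetch() and do_fetch_locked()
are made up for illustration:

/*
 * Fast path: take the lock without sleeping when it is uncontended.
 * If it is contended, a nonblocking caller gets -EAGAIN (so it can be
 * retried from a context that may sleep); a blocking caller just waits.
 */
#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static int do_fetch_locked(void)
{
	/* stand-in for the work done while holding the mutex */
	return 0;
}

static int fetch(bool nonblock)
{
	int ret;

	if (pthread_mutex_trylock(&lock) != 0) {
		/* contended: nonblocking callers bail out and retry later */
		if (nonblock)
			return -EAGAIN;
		pthread_mutex_lock(&lock);
	}

	ret = do_fetch_locked();
	pthread_mutex_unlock(&lock);
	return ret;
}

int main(void)
{
	printf("nonblock: %d\n", fetch(true));
	printf("blocking: %d\n", fetch(false));
	return 0;
}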