Re: [RFC PATCH 5/7] ublk: prepare for supporting to register request buffer automatically

On Mon, Apr 28, 2025 at 2:45 AM Ming Lei <ming.lei@xxxxxxxxxx> wrote:
>
> UBLK_F_SUPPORT_ZERO_COPY requires the ublk server to issue an explicit
> buffer register/unregister uring_cmd for each IO. This is not only
> inefficient, but also introduces a dependency between the buffer consumer
> and the buffer register/unregister uring_cmds; see
> tools/testing/selftests/ublk/stripe.c, in which backing file IO has to be
> issued one by one via IOSQE_IO_LINK.
>
> Prepare for adding the UBLK_F_AUTO_BUF_REG feature, which addresses the
> existing zero-copy limitation:
>
> - register the request buffer automatically to the ublk uring_cmd's
>   io_uring context before delivering the io command to the ublk server
>
> - unregister the request buffer automatically from the ublk uring_cmd's
>   io_uring context when completing the request
>
> - io_uring will unregister the buffer automatically when the ring is
>   exiting, so we needn't worry about an unexpected exit
>
> To use this feature, the ublk server has to create one sparse buffer table.
>
> Signed-off-by: Ming Lei <ming.lei@xxxxxxxxxx>
> ---
>  drivers/block/ublk_drv.c | 85 +++++++++++++++++++++++++++++++++++-----
>  1 file changed, 76 insertions(+), 9 deletions(-)
>
> diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
> index 9cd331d12fa6..1fd20e481a60 100644
> --- a/drivers/block/ublk_drv.c
> +++ b/drivers/block/ublk_drv.c
> @@ -133,6 +133,14 @@ struct ublk_uring_cmd_pdu {
>   */
>  #define UBLK_IO_FLAG_NEED_GET_DATA 0x08
>
> +/*
> + * request buffer is registered automatically, so we have to unregister it
> + * before completing this request.
> + *
> + * io_uring will unregister buffer automatically for us during exiting.
> + */
> +#define UBLK_IO_FLAG_AUTO_BUF_REG      0x10
> +
>  /* atomic RW with ubq->cancel_lock */
>  #define UBLK_IO_FLAG_CANCELED  0x80000000
>
> @@ -205,6 +213,7 @@ struct ublk_params_header {
>         __u32   types;
>  };
>
> +static void ublk_io_release(void *priv);
>  static void ublk_stop_dev_unlocked(struct ublk_device *ub);
>  static void ublk_abort_queue(struct ublk_device *ub, struct ublk_queue *ubq);
>  static inline struct request *__ublk_check_and_get_req(struct ublk_device *ub,
> @@ -615,6 +624,11 @@ static inline bool ublk_support_zero_copy(const struct ublk_queue *ubq)
>         return ubq->flags & UBLK_F_SUPPORT_ZERO_COPY;
>  }
>
> +static inline bool ublk_support_auto_buf_reg(const struct ublk_queue *ubq)
> +{
> +       return false;
> +}
> +
>  static inline bool ublk_support_user_copy(const struct ublk_queue *ubq)
>  {
>         return ubq->flags & UBLK_F_USER_COPY;
> @@ -622,7 +636,8 @@ static inline bool ublk_support_user_copy(const struct ublk_queue *ubq)
>
>  static inline bool ublk_need_map_io(const struct ublk_queue *ubq)
>  {
> -       return !ublk_support_user_copy(ubq) && !ublk_support_zero_copy(ubq);
> +       return !ublk_support_user_copy(ubq) && !ublk_support_zero_copy(ubq) &&
> +               !ublk_support_auto_buf_reg(ubq);
>  }
>
>  static inline bool ublk_need_req_ref(const struct ublk_queue *ubq)
> @@ -633,17 +648,22 @@ static inline bool ublk_need_req_ref(const struct ublk_queue *ubq)
>          *
>          * for zero copy, request buffer need to be registered to io_uring
>          * buffer table, so reference is needed
> +        *
> +        * For auto buffer register, ublk server still may issue
> +        * UBLK_IO_COMMIT_AND_FETCH_REQ before one registered buffer is used up,
> +        * so reference is required too.
>          */
> -       return ublk_support_user_copy(ubq) || ublk_support_zero_copy(ubq);
> +       return ublk_support_user_copy(ubq) || ublk_support_zero_copy(ubq) ||
> +               ublk_support_auto_buf_reg(ubq);
>  }
>
>  static inline void ublk_init_req_ref(const struct ublk_queue *ubq,
> -               struct request *req)
> +               struct request *req, int init_ref)
>  {
>         if (ublk_need_req_ref(ubq)) {
>                 struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);
>
> -               refcount_set(&data->ref, 1);
> +               refcount_set(&data->ref, init_ref);
>         }
>  }
>
> @@ -1157,6 +1177,37 @@ static inline void __ublk_abort_rq(struct ublk_queue *ubq,
>                 blk_mq_end_request(rq, BLK_STS_IOERR);
>  }
>
> +static bool ublk_auto_buf_reg(struct ublk_queue *ubq, struct request *req,
> +                             struct ublk_io *io, unsigned int issue_flags)
> +{
> +       struct io_buf_data data = {
> +               .rq    = req,
> +               .index = req->tag,

It feels a bit misleading to specify a value here that is always
overwritten by ublk_init_auto_buf_reg() in the next patch. Can you
just omit the field from the initialization here? Same comment for
ublk_auto_buf_unreg().
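
Something like this, perhaps (just a sketch, assuming the next patch's
ublk_init_auto_buf_reg() fills in .index):

        struct io_buf_data data = {
                .rq      = req,
                .release = ublk_io_release,
        };

and similarly the .index initializer could be dropped from the
io_buf_data in ublk_auto_buf_unreg().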

> +               .release = ublk_io_release,
> +       };
> +       int ret;
> +
> +       /* one extra reference is dropped by ublk_io_release */
> +       ublk_init_req_ref(ubq, req, 2);

I think the ublk_need_req_ref(ubq) check in ublk_init_req_ref() is not
needed here, since this is only called when
ublk_support_auto_buf_reg(ubq) is true. Maybe just inline the
refcount_set() here? Then you could drop the ubq argument to
ublk_auto_buf_reg(), and ublk_init_req_ref() wouldn't need to be
modified.
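
For example (a rough, untested sketch combining this with the earlier
suggestion to drop the .index initialization; io_buf_data and
io_buffer_register_bvec() are as introduced earlier in this series):

static bool ublk_auto_buf_reg(struct request *req, struct ublk_io *io,
                              unsigned int issue_flags)
{
        struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);
        struct io_buf_data buf = {
                .rq      = req,
                .release = ublk_io_release,
        };
        int ret;

        /*
         * Callers only reach here when ublk_support_auto_buf_reg() is
         * true, so ublk_need_req_ref() is implied and we can set the
         * refcount directly. One extra reference is dropped by
         * ublk_io_release().
         */
        refcount_set(&data->ref, 2);

        ret = io_buffer_register_bvec(io->cmd, &buf, issue_flags);
        if (ret) {
                blk_mq_end_request(req, BLK_STS_IOERR);
                return false;
        }
        io->flags |= UBLK_IO_FLAG_AUTO_BUF_REG;
        return true;
}

Then ublk_prep_buf_reg() would call ublk_auto_buf_reg(req, io,
issue_flags), and ublk_init_req_ref() could keep its original
single-reference signature.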

Best,
Caleb

> +       ret = io_buffer_register_bvec(io->cmd, &data, issue_flags);
> +       if (ret) {
> +               blk_mq_end_request(req, BLK_STS_IOERR);
> +               return false;
> +       }
> +       io->flags |= UBLK_IO_FLAG_AUTO_BUF_REG;
> +       return true;
> +}
> +
> +static bool ublk_prep_buf_reg(struct ublk_queue *ubq, struct request *req,
> +                             struct ublk_io *io, unsigned int issue_flags)
> +{
> +       if (ublk_support_auto_buf_reg(ubq) && ublk_rq_has_data(req))
> +               return ublk_auto_buf_reg(ubq, req, io, issue_flags);
> +
> +       ublk_init_req_ref(ubq, req, 1);
> +       return true;
> +}
> +
>  static void ublk_start_io(struct ublk_queue *ubq, struct request *req,
>                           struct ublk_io *io)
>  {
> @@ -1181,8 +1232,6 @@ static void ublk_start_io(struct ublk_queue *ubq, struct request *req,
>                 ublk_get_iod(ubq, req->tag)->nr_sectors =
>                         mapped_bytes >> 9;
>         }
> -
> -       ublk_init_req_ref(ubq, req);
>  }
>
>  static void ublk_dispatch_req(struct ublk_queue *ubq,
> @@ -1225,7 +1274,9 @@ static void ublk_dispatch_req(struct ublk_queue *ubq,
>         }
>
>         ublk_start_io(ubq, req, io);
> -       ublk_complete_io_cmd(io, req, UBLK_IO_RES_OK, issue_flags);
> +
> +       if (ublk_prep_buf_reg(ubq, req, io, issue_flags))
> +               ublk_complete_io_cmd(io, req, UBLK_IO_RES_OK, issue_flags);
>  }
>
>  static void ublk_cmd_tw_cb(struct io_uring_cmd *cmd,
> @@ -2007,9 +2058,21 @@ static int ublk_fetch(struct io_uring_cmd *cmd, struct ublk_queue *ubq,
>         return ret;
>  }
>
> +static void ublk_auto_buf_unreg(struct ublk_io *io, struct io_uring_cmd *cmd,
> +                               struct request *req, unsigned int issue_flags)
> +{
> +       struct io_buf_data data = {
> +               .index = req->tag,
> +       };
> +
> +       WARN_ON_ONCE(io_buffer_unregister_bvec(cmd, &data, issue_flags));
> +       io->flags &= ~UBLK_IO_FLAG_AUTO_BUF_REG;
> +}
> +
>  static int ublk_commit_and_fetch(const struct ublk_queue *ubq,
>                                  struct ublk_io *io, struct io_uring_cmd *cmd,
> -                                const struct ublksrv_io_cmd *ub_cmd)
> +                                const struct ublksrv_io_cmd *ub_cmd,
> +                                unsigned int issue_flags)
>  {
>         struct request *req;
>
> @@ -2033,6 +2096,9 @@ static int ublk_commit_and_fetch(const struct ublk_queue *ubq,
>                 return -EINVAL;
>         }
>
> +       if (io->flags & UBLK_IO_FLAG_AUTO_BUF_REG)
> +               ublk_auto_buf_unreg(io, cmd, req, issue_flags);
> +
>         ublk_fill_io_cmd(io, cmd, ub_cmd->addr);
>
>         /* now this cmd slot is owned by ublk driver */
> @@ -2065,6 +2131,7 @@ static void ublk_get_data(struct ublk_queue *ubq, struct ublk_io *io)
>                         ublk_get_iod(ubq, req->tag)->addr);
>
>         ublk_start_io(ubq, req, io);
> +       ublk_init_req_ref(ubq, req, 1);
>  }
>
>  static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
> @@ -2124,7 +2191,7 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
>                         goto out;
>                 break;
>         case UBLK_IO_COMMIT_AND_FETCH_REQ:
> -               ret = ublk_commit_and_fetch(ubq, io, cmd, ub_cmd);
> +               ret = ublk_commit_and_fetch(ubq, io, cmd, ub_cmd, issue_flags);
>                 if (ret)
>                         goto out;
>                 break;
> --
> 2.47.0
>




