Re: [PATCH 5/9] ublk: move device reset into ublk_ch_release()

On Mon, Apr 14, 2025 at 02:29:48PM -0600, Uday Shankar wrote:
> On Mon, Apr 14, 2025 at 07:25:46PM +0800, Ming Lei wrote:
> > ublk_ch_release() is called after the ublk char device is closed, when
> > all uring_cmds are done, so it is perfectly fine to move ublk device
> > reset from ublk_ctrl_start_recovery() to ublk_ch_release().
> > 
> > This avoids holding the exiting daemon's task_struct for too long.
> 
> Nice, I had noticed this leak too, where we keep the task struct ref
> until the new daemon comes around. Thanks for the fix!
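
For context, a heavily simplified sketch of the lifetime in question; the
function names follow the driver, but the bodies and call sites here are
illustrative assumptions, not the actual implementation:

#include <linux/sched/task.h>

/* FETCH_REQ path: the first uring_cmd on a queue pins the daemon task */
static void ublk_pin_daemon(struct ublk_queue *ubq)
{
	ubq->ubq_daemon = get_task_struct(current);
}

/*
 * Before this patch, the matching put_task_struct() only ran in
 * ublk_ctrl_start_recovery(), so a dead daemon's task_struct stayed
 * pinned until a new daemon arrived.  With this patch, the put happens
 * in ublk_queue_reinit() via ublk_ch_release(), i.e. as soon as the
 * char device is closed.
 */
static void ublk_unpin_daemon(struct ublk_queue *ubq)
{
	if (ubq->ubq_daemon)
		put_task_struct(ubq->ubq_daemon);
	ubq->ubq_daemon = NULL;
}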
> 
> > 
> > However, resetting the following ublk IO flags has to be deferred
> > until the ublk io_uring queues are ready:
> > 
> > - ubq->canceling
> > 
> > Needed for requeuing IO when ublk_nosrv_dev_should_queue_io() holds,
> > before the device is recovered
> > 
> > - ubq->fail_io
> > 
> > Needed for failing IO when UBLK_F_USER_RECOVERY_FAIL_IO is set, before
> > the device is recovered
> > 
> > - ublk_io->flags
> > 
> > Needed to prevent use of io->cmd
> > 
> > With this approach, recovery is simplified a lot.
> > 
> > Signed-off-by: Ming Lei <ming.lei@xxxxxxxxxx>
> > ---
> >  drivers/block/ublk_drv.c | 121 +++++++++++++++++++++++----------------
> >  1 file changed, 72 insertions(+), 49 deletions(-)
> > 
> > diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
> > index e0213222e3cf..b68bd4172fa8 100644
> > --- a/drivers/block/ublk_drv.c
> > +++ b/drivers/block/ublk_drv.c
> > @@ -1074,7 +1074,7 @@ static inline struct ublk_uring_cmd_pdu *ublk_get_uring_cmd_pdu(
> >  
> >  static inline bool ubq_daemon_is_dying(struct ublk_queue *ubq)
> >  {
> > -	return ubq->ubq_daemon->flags & PF_EXITING;
> > +	return !ubq->ubq_daemon || ubq->ubq_daemon->flags & PF_EXITING;
> >  }
> >  
> >  /* todo: handle partial completion */
> > @@ -1470,6 +1470,37 @@ static const struct blk_mq_ops ublk_mq_ops = {
> >  	.timeout	= ublk_timeout,
> >  };
> >  
> > +static void ublk_queue_reinit(struct ublk_device *ub, struct ublk_queue *ubq)
> > +{
> > +	int i;
> > +
> > +	/* All old ioucmds have to be completed */
> > +	ubq->nr_io_ready = 0;
> > +
> > +	/*
> > +	 * old daemon is PF_EXITING, put it now
> > +	 *
> > +	 * It could be NULL in case of closing a quiesced device.
> > +	 */
> > +	if (ubq->ubq_daemon)
> > +		put_task_struct(ubq->ubq_daemon);
> > +	/* We have to reset it to NULL, otherwise ub won't accept new FETCH_REQ */
> > +	ubq->ubq_daemon = NULL;
> > +	ubq->timeout = false;
> > +
> > +	for (i = 0; i < ubq->q_depth; i++) {
> > +		struct ublk_io *io = &ubq->ios[i];
> > +
> > +		/*
> > +		 * UBLK_IO_FLAG_CANCELED is kept to avoid touching
> > +		 * io->cmd
> > +		 */
> > +		io->flags &= UBLK_IO_FLAG_CANCELED;
> > +		io->cmd = NULL;
> > +		io->addr = 0;
> > +	}
> > +}
> > +
> >  static int ublk_ch_open(struct inode *inode, struct file *filp)
> >  {
> >  	struct ublk_device *ub = container_of(inode->i_cdev,
> > @@ -1481,10 +1512,26 @@ static int ublk_ch_open(struct inode *inode, struct file *filp)
> >  	return 0;
> >  }
> >  
> > +static void ublk_reset_ch_dev(struct ublk_device *ub)
> > +{
> > +	int i;
> > +
> > +	for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
> > +		ublk_queue_reinit(ub, ublk_get_queue(ub, i));
> > +
> > +	/* set to NULL, otherwise new ubq_daemon cannot mmap the io_cmd_buf */
> > +	ub->mm = NULL;
> > +	ub->nr_queues_ready = 0;
> > +	ub->nr_privileged_daemon = 0;
> > +}
> > +
> >  static int ublk_ch_release(struct inode *inode, struct file *filp)
> >  {
> >  	struct ublk_device *ub = filp->private_data;
> >  
> > +	/* all uring_cmds have been done, reset device & ubq */
> > +	ublk_reset_ch_dev(ub);
> > +
> >  	clear_bit(UB_STATE_OPEN, &ub->state);
> >  	return 0;
> >  }
> > @@ -1831,6 +1878,24 @@ static void ublk_nosrv_work(struct work_struct *work)
> >  	ublk_cancel_dev(ub);
> >  }
> >  
> > +/* reset ublk io_uring queue & io flags */
> > +static void ublk_reset_io_flags(struct ublk_device *ub)
> > +{
> > +	int i, j;
> > +
> > +	for (i = 0; i < ub->dev_info.nr_hw_queues; i++) {
> > +		struct ublk_queue *ubq = ublk_get_queue(ub, i);
> > +
> > +		/* UBLK_IO_FLAG_CANCELED can be cleared now */
> > +		spin_lock(&ubq->cancel_lock);
> 
> Do we need this? I think at this point there shouldn't be any concurrent
> activity we need to protect against.

Yeah, the lock isn't necessary, but taking it here actually has
documentation benefit.
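
To illustrate, the hunk is trimmed above, so the following body is only a
guess at the shape, not the actual patch; but clearing the flags with the
lock held makes the locking rule explicit even though nothing can race
here:

		/*
		 * Illustrative guess at the trimmed body: no concurrent
		 * canceling can be in flight at this point, so the lock
		 * is not needed for correctness, but it documents that
		 * ubq->canceling and UBLK_IO_FLAG_CANCELED are otherwise
		 * only touched under cancel_lock.
		 */
		spin_lock(&ubq->cancel_lock);
		ubq->canceling = false;
		for (j = 0; j < ubq->q_depth; j++)
			ubq->ios[j].flags &= ~UBLK_IO_FLAG_CANCELED;
		spin_unlock(&ubq->cancel_lock);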


Thanks,
Ming




