[PATCH 2/3] md: replace ->openers with ->active

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



Currently md only checks ->openers when stopping an array, but some users
can still access the mddev through the sysfs interfaces. ->active is
incremented whenever an mddev is accessed, which happens in mddev_get()
under all_mddevs_lock. The callers are md_open, md_seq_show,
md_attr_show/store, md_notify_reboot and md_exit.

->openers is only incremented in md_open, where mddev_get is called as
well. So we can replace ->openers with ->active. This guarantees that no
one can access the array once MD_CLOSING is set.

At the same time, ->open_mutex is replaced with all_mddevs_lock. Though
all_mddevs_lock is a global lock, the only place that checks ->active and
sets MD_CLOSING is the ioctl path, so this doesn't affect performance.

Signed-off-by: Xiao Ni <xni@xxxxxxxxxx>
---
 drivers/md/md.c | 47 +++++++++++++++++------------------------------
 drivers/md/md.h | 11 -----------
 2 files changed, 17 insertions(+), 41 deletions(-)

diff --git a/drivers/md/md.c b/drivers/md/md.c
index e14253433c49..4ca3d04ce13f 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -523,18 +523,27 @@ void mddev_resume(struct mddev *mddev)
 EXPORT_SYMBOL_GPL(mddev_resume);
 
 /* sync bdev before setting device to readonly or stopping raid*/
-static int mddev_set_closing_and_sync_blockdev(struct mddev *mddev, int opener_num)
+static int mddev_set_closing_and_sync_blockdev(struct mddev *mddev)
 {
-	mutex_lock(&mddev->open_mutex);
-	if (mddev->pers && atomic_read(&mddev->openers) > opener_num) {
-		mutex_unlock(&mddev->open_mutex);
+	spin_lock(&all_mddevs_lock);
+
+	/*
+	 * Both callers increment ->active (via mddev_get) before
+	 * calling this function, so the array can't be stopped
+	 * when ->active is bigger than 1.
+	 */
+	if (mddev->pers && atomic_read(&mddev->active) > 1) {
+
+		spin_unlock(&all_mddevs_lock);
 		return -EBUSY;
 	}
+
 	if (test_and_set_bit(MD_CLOSING, &mddev->flags)) {
-		mutex_unlock(&mddev->open_mutex);
+		spin_unlock(&all_mddevs_lock);
 		return -EBUSY;
 	}
-	mutex_unlock(&mddev->open_mutex);
+
+	spin_unlock(&all_mddevs_lock);
 
 	sync_blockdev(mddev->gendisk->part0);
 	return 0;
@@ -663,7 +672,6 @@ int mddev_init(struct mddev *mddev)
 	/* We want to start with the refcount at zero */
 	percpu_ref_put(&mddev->writes_pending);
 
-	mutex_init(&mddev->open_mutex);
 	mutex_init(&mddev->reconfig_mutex);
 	mutex_init(&mddev->suspend_mutex);
 	mutex_init(&mddev->bitmap_info.mutex);
@@ -672,7 +680,6 @@ int mddev_init(struct mddev *mddev)
 	INIT_LIST_HEAD(&mddev->deleting);
 	timer_setup(&mddev->safemode_timer, md_safemode_timeout, 0);
 	atomic_set(&mddev->active, 1);
-	atomic_set(&mddev->openers, 0);
 	atomic_set(&mddev->sync_seq, 0);
 	spin_lock_init(&mddev->lock);
 	init_waitqueue_head(&mddev->sb_wait);
@@ -4421,8 +4428,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
 	case read_auto:
 		if (!mddev->pers || !md_is_rdwr(mddev))
 			break;
-		/* write sysfs will not open mddev and opener should be 0 */
-		err = mddev_set_closing_and_sync_blockdev(mddev, 0);
+		err = mddev_set_closing_and_sync_blockdev(mddev);
 		if (err)
 			return err;
 		break;
@@ -7738,7 +7744,7 @@ static int md_ioctl(struct block_device *bdev, blk_mode_t mode,
 		/* Need to flush page cache, and ensure no-one else opens
 		 * and writes
 		 */
-		err = mddev_set_closing_and_sync_blockdev(mddev, 1);
+		err = mddev_set_closing_and_sync_blockdev(mddev);
 		if (err)
 			return err;
 	}
@@ -7938,7 +7944,6 @@ static int md_set_read_only(struct block_device *bdev, bool ro)
 static int md_open(struct gendisk *disk, blk_mode_t mode)
 {
 	struct mddev *mddev;
-	int err;
 
 	spin_lock(&all_mddevs_lock);
 	mddev = mddev_get(disk->private_data);
@@ -7946,25 +7951,8 @@ static int md_open(struct gendisk *disk, blk_mode_t mode)
 	if (!mddev)
 		return -ENODEV;
 
-	err = mutex_lock_interruptible(&mddev->open_mutex);
-	if (err)
-		goto out;
-
-	err = -ENODEV;
-	if (test_bit(MD_CLOSING, &mddev->flags))
-		goto out_unlock;
-
-	atomic_inc(&mddev->openers);
-	mutex_unlock(&mddev->open_mutex);
-
 	disk_check_media_change(disk);
 	return 0;
-
-out_unlock:
-	mutex_unlock(&mddev->open_mutex);
-out:
-	mddev_put(mddev);
-	return err;
 }
 
 static void md_release(struct gendisk *disk)
@@ -7972,7 +7960,6 @@ static void md_release(struct gendisk *disk)
 	struct mddev *mddev = disk->private_data;
 
 	BUG_ON(!mddev);
-	atomic_dec(&mddev->openers);
 	mddev_put(mddev);
 }
 
diff --git a/drivers/md/md.h b/drivers/md/md.h
index a9dccb3d84ed..60dc0943e05b 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -496,19 +496,8 @@ struct mddev {
 	int				recovery_disabled;
 
 	int				in_sync;	/* know to not need resync */
-	/* 'open_mutex' avoids races between 'md_open' and 'do_md_stop', so
-	 * that we are never stopping an array while it is open.
-	 * 'reconfig_mutex' protects all other reconfiguration.
-	 * These locks are separate due to conflicting interactions
-	 * with disk->open_mutex.
-	 * Lock ordering is:
-	 *  reconfig_mutex -> disk->open_mutex
-	 *  disk->open_mutex -> open_mutex:  e.g. __blkdev_get -> md_open
-	 */
-	struct mutex			open_mutex;
 	struct mutex			reconfig_mutex;
 	atomic_t			active;		/* general refcount */
-	atomic_t			openers;	/* number of active opens */
 
 	int				changed;	/* True if we might need to
 							 * reread partition info */
-- 
2.32.0 (Apple Git-132)





[Index of Archives]     [Linux RAID Wiki]     [ATA RAID]     [Linux SCSI Target Infrastructure]     [Linux Block]     [Linux IDE]     [Linux SCSI]     [Linux Hams]     [Device Mapper]     [Device Mapper Cryptographics]     [Kernel]     [Linux Admin]     [Linux Net]     [GFS]     [RPM]     [git]     [Yosemite Forum]


  Powered by Linux