[PATCH v1 4/8] iomap: add writepages support for IOMAP_IN_MEM iomaps

This allows IOMAP_IN_MEM iomaps to use iomap_writepages() for handling
writeback, which lets them take advantage of internal iomap features
such as granular dirty tracking for large folios.

This introduces a new iomap_writeback_ops callback, writeback_folio(),
which hands the folio writeback logic off to the caller so that dirty
pages can be written back without relying on block mapping.
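
A minimal sketch of the wiring (the example_* names are hypothetical
and not part of this patch): an IOMAP_IN_MEM user sets
->writeback_folio and leaves ->map_blocks unset, since the two
callbacks are mutually exclusive:

	static const struct iomap_writeback_ops example_writeback_ops = {
		/* ->map_blocks is deliberately left unset */
		.writeback_folio	= example_writeback_folio,
	};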

This exposes two APIs, iomap_start_folio_write() and
iomap_finish_folio_write(), which callers may find useful when
implementing their writeback_folio() callback.
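
For illustration only, a writeback_folio() implementation might pair
the two helpers as follows, where example_send_range() stands in for
whatever fs-specific mechanism actually writes out the data:

	static int example_writeback_folio(struct iomap_writepage_ctx *wpc,
					   struct folio *folio,
					   struct inode *inode,
					   loff_t offset, unsigned len)
	{
		int error;

		/* Account @len bytes of this folio as under writeback. */
		iomap_start_folio_write(inode, folio, len);

		/* @offset is relative to the start of the folio. */
		error = example_send_range(wpc, folio, offset, len);

		/*
		 * Drop the pending-write count; this ends writeback on the
		 * folio once all pending ranges have completed.
		 */
		iomap_finish_folio_write(inode, folio, len);
		return error;
	}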

Signed-off-by: Joanne Koong <joannelkoong@xxxxxxxxx>
---
 fs/iomap/buffered-io-bio.c | 12 --------
 fs/iomap/buffered-io.c     | 60 ++++++++++++++++++++++++++++++++------
 include/linux/iomap.h      | 18 ++++++++++--
 3 files changed, 67 insertions(+), 23 deletions(-)

diff --git a/fs/iomap/buffered-io-bio.c b/fs/iomap/buffered-io-bio.c
index 03841bed72e7..b7bc838cf477 100644
--- a/fs/iomap/buffered-io-bio.c
+++ b/fs/iomap/buffered-io-bio.c
@@ -96,18 +96,6 @@ int iomap_bio_read_folio_sync(loff_t block_start, struct folio *folio, size_t po
 	return submit_bio_wait(&bio);
 }
 
-static void iomap_finish_folio_write(struct inode *inode, struct folio *folio,
-		size_t len)
-{
-	struct iomap_folio_state *ifs = folio->private;
-
-	WARN_ON_ONCE(i_blocks_per_folio(inode, folio) > 1 && !ifs);
-	WARN_ON_ONCE(ifs && atomic_read(&ifs->write_bytes_pending) <= 0);
-
-	if (!ifs || atomic_sub_and_test(len, &ifs->write_bytes_pending))
-		folio_end_writeback(folio);
-}
-
 /*
  * We're now finished for good with this ioend structure.  Update the page
  * state, release holds on bios, and finally free up memory.  Do not use the
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index fd2ea1306d88..92f08b316d47 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -1441,15 +1441,15 @@ EXPORT_SYMBOL_GPL(iomap_page_mkwrite);
 
 /*
  * Submit an ioend.
- *
- * If @error is non-zero, it means that we have a situation where some part of
- * the submission process has failed after we've marked pages for writeback.
- * We cannot cancel ioend directly in that case, so call the bio end I/O handler
- * with the error status here to run the normal I/O completion handler to clear
- * the writeback bit and let the file system proess the errors.
  */
 int iomap_submit_ioend(struct iomap_writepage_ctx *wpc, int error)
 {
+	if (wpc->iomap.type == IOMAP_IN_MEM) {
+		if (wpc->ops->submit_ioend)
+			error = wpc->ops->submit_ioend(wpc, error);
+		return error;
+	}
+
 	if (!wpc->ioend)
 		return error;
 
@@ -1468,6 +1468,13 @@ int iomap_submit_ioend(struct iomap_writepage_ctx *wpc, int error)
 			iomap_submit_bio(&wpc->ioend->io_bio);
 	}
 
+	/*
+	 * If @error is non-zero, some part of the submission process has
+	 * failed after we've marked pages for writeback. We cannot cancel the
+	 * ioend directly in that case, so call the bio end I/O handler with
+	 * the error status here to run the normal I/O completion handler to
+	 * clear the writeback bit and let the file system process the errors.
+	 */
 	if (error)
 		iomap_bio_ioend_error(wpc, error);
 
@@ -1635,8 +1642,17 @@ static int iomap_writepage_map(struct iomap_writepage_ctx *wpc,
 	 */
 	end_aligned = round_up(end_pos, i_blocksize(inode));
 	while ((rlen = iomap_find_dirty_range(folio, &pos, end_aligned))) {
-		error = iomap_writepage_map_blocks(wpc, wbc, folio, inode,
-				pos, end_pos, rlen, &count);
+		if (wpc->ops->writeback_folio) {
+			WARN_ON_ONCE(wpc->ops->map_blocks);
+			error = wpc->ops->writeback_folio(wpc, folio, inode,
+							  offset_in_folio(folio, pos),
+							  rlen);
+		} else {
+			WARN_ON_ONCE(wpc->iomap.type == IOMAP_IN_MEM);
+			error = iomap_writepage_map_blocks(wpc, wbc, folio,
+							   inode, pos, end_pos,
+							   rlen, &count);
+		}
 		if (error)
 			break;
 		pos += rlen;
@@ -1664,7 +1680,11 @@ static int iomap_writepage_map(struct iomap_writepage_ctx *wpc,
 		if (atomic_dec_and_test(&ifs->write_bytes_pending))
 			folio_end_writeback(folio);
 	} else {
-		if (!count)
+		/*
+		 * If wpc->ops->writeback_folio is set, then it is responsible
+		 * for ending the writeback itself.
+		 */
+		if (!count && !wpc->ops->writeback_folio)
 			folio_end_writeback(folio);
 	}
 	mapping_set_error(inode->i_mapping, error);
@@ -1693,3 +1713,25 @@ iomap_writepages(struct address_space *mapping, struct writeback_control *wbc,
 	return iomap_submit_ioend(wpc, error);
 }
 EXPORT_SYMBOL_GPL(iomap_writepages);
+
+void iomap_start_folio_write(struct inode *inode, struct folio *folio, size_t len)
+{
+	struct iomap_folio_state *ifs = folio->private;
+
+	WARN_ON_ONCE(i_blocks_per_folio(inode, folio) > 1 && !ifs);
+	if (ifs)
+		atomic_add(len, &ifs->write_bytes_pending);
+}
+EXPORT_SYMBOL_GPL(iomap_start_folio_write);
+
+void iomap_finish_folio_write(struct inode *inode, struct folio *folio, size_t len)
+{
+	struct iomap_folio_state *ifs = folio->private;
+
+	WARN_ON_ONCE(i_blocks_per_folio(inode, folio) > 1 && !ifs);
+	WARN_ON_ONCE(ifs && atomic_read(&ifs->write_bytes_pending) <= 0);
+
+	if (!ifs || atomic_sub_and_test(len, &ifs->write_bytes_pending))
+		folio_end_writeback(folio);
+}
+EXPORT_SYMBOL_GPL(iomap_finish_folio_write);
diff --git a/include/linux/iomap.h b/include/linux/iomap.h
index e748aeebe1a5..4b5e083fa802 100644
--- a/include/linux/iomap.h
+++ b/include/linux/iomap.h
@@ -424,8 +424,8 @@ static inline struct iomap_ioend *iomap_ioend_from_bio(struct bio *bio)
 
 struct iomap_writeback_ops {
 	/*
-	 * Required, maps the blocks so that writeback can be performed on
-	 * the range starting at offset.
+	 * Required if ->writeback_folio is not set. Maps the blocks so that
+	 * writeback can be performed on the range starting at offset.
 	 *
 	 * Can return arbitrarily large regions, but we need to call into it at
 	 * least once per folio to allow the file systems to synchronize with
@@ -436,6 +436,16 @@ struct iomap_writeback_ops {
 	 */
 	int (*map_blocks)(struct iomap_writepage_ctx *wpc, struct inode *inode,
 			  loff_t offset, unsigned len);
+	/*
+	 * Forwards the folio writeback logic to the caller.
+	 *
+	 * Required for IOMAP_IN_MEM iomaps or if ->map_blocks is not set.
+	 *
+	 * The caller is responsible for ending writeback on the folio once
+	 * it has finished processing it.
+	 */
+	int (*writeback_folio)(struct iomap_writepage_ctx *wpc, struct folio *folio,
+			       struct inode *inode, loff_t offset, unsigned len);
 
 	/*
 	 * Optional, allows the file systems to hook into bio submission,
@@ -459,6 +469,7 @@ struct iomap_writepage_ctx {
 	struct iomap_ioend	*ioend;
 	const struct iomap_writeback_ops *ops;
 	u32			nr_folios;	/* folios added to the ioend */
+	void			*private;	/* fs-private data */
 };
 
 struct iomap_ioend *iomap_init_ioend(struct inode *inode, struct bio *bio,
@@ -538,4 +549,7 @@ int iomap_swapfile_activate(struct swap_info_struct *sis,
 
 extern struct bio_set iomap_ioend_bioset;
 
+void iomap_start_folio_write(struct inode *inode, struct folio *folio, size_t len);
+void iomap_finish_folio_write(struct inode *inode, struct folio *folio, size_t len);
+
 #endif /* LINUX_IOMAP_H */
-- 
2.47.1