[PATCH v3 6/7] block: Rework splitting of encrypted bios

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



Modify splitting of encrypted bios as follows:
- Stop calling blk_crypto_bio_prep() for bio-based block drivers that do not
  call bio_split_to_limits().
- For request-based block drivers and for bio-based block drivers that call
  bio_split_to_limits(), call blk_crypto_bio_prep() after bio splitting
  instead of before it.
- In bio_split_rw(), restrict the bio size to the smaller of the sizes
  supported by the block driver and by the crypto fallback code.

The advantages of these changes are as follows:
- This patch fixes write errors on zoned storage caused by out-of-order
  submission of bios. This out-of-order submission happens if both the
  crypto fallback code and bio_split_to_limits() split a bio.
- Less code duplication. The crypto fallback code now calls
  bio_split_to_limits() instead of open-coding it.

Signed-off-by: Bart Van Assche <bvanassche@xxxxxxx>
---
 block/blk-core.c            |  4 ----
 block/blk-crypto-fallback.c | 41 +++++++++----------------------------
 block/blk-crypto-internal.h |  7 +++++++
 block/blk-merge.c           |  9 ++++++--
 4 files changed, 24 insertions(+), 37 deletions(-)

diff --git a/block/blk-core.c b/block/blk-core.c
index 2c3c8576aa9b..5af5f8c3cd06 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -626,10 +626,6 @@ static void __submit_bio(struct bio *bio)
 	/* If plug is not used, add new plug here to cache nsecs time. */
 	struct blk_plug plug;
 
-	bio = blk_crypto_bio_prep(bio);
-	if (unlikely(!bio))
-		return;
-
 	blk_start_plug(&plug);
 
 	if (!bdev_test_flag(bio->bi_bdev, BD_HAS_SUBMIT_BIO)) {
diff --git a/block/blk-crypto-fallback.c b/block/blk-crypto-fallback.c
index ba5f1c887574..b54ad41e4192 100644
--- a/block/blk-crypto-fallback.c
+++ b/block/blk-crypto-fallback.c
@@ -214,7 +214,7 @@ blk_crypto_fallback_alloc_cipher_req(struct blk_crypto_keyslot *slot,
  * the bio size supported by the encryption fallback code. This function
  * calculates the upper limit for the bio size.
  */
-static unsigned int blk_crypto_max_io_size(struct bio *bio)
+unsigned int blk_crypto_max_io_size(struct bio *bio)
 {
 	unsigned int i = 0;
 	unsigned int num_bytes = 0;
@@ -229,28 +229,6 @@ static unsigned int blk_crypto_max_io_size(struct bio *bio)
 	return num_bytes >> SECTOR_SHIFT;
 }
 
-static bool blk_crypto_fallback_split_bio_if_needed(struct bio **bio_ptr)
-{
-	struct bio *bio = *bio_ptr;
-	unsigned int num_sectors = blk_crypto_max_io_size(bio);
-
-	if (num_sectors < bio_sectors(bio)) {
-		struct bio *split_bio;
-
-		split_bio = bio_split(bio, num_sectors, GFP_NOIO,
-				      &crypto_bio_split);
-		if (IS_ERR(split_bio)) {
-			bio->bi_status = BLK_STS_RESOURCE;
-			return false;
-		}
-		bio_chain(split_bio, bio);
-		submit_bio_noacct(bio);
-		*bio_ptr = split_bio;
-	}
-
-	return true;
-}
-
 union blk_crypto_iv {
 	__le64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
 	u8 bytes[BLK_CRYPTO_MAX_IV_SIZE];
@@ -268,8 +246,8 @@ static void blk_crypto_dun_to_iv(const u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE],
 /*
  * The crypto API fallback's encryption routine.
  * Allocate a bounce bio for encryption, encrypt the input bio using crypto API,
- * and return the bounce bio. May split input bio if it's too large. Returns the
- * bounce bio on success. Returns %NULL and sets bio->bi_status on error.
+ * and return the bounce bio. Returns the bounce bio on success. Returns %NULL
+ * and sets bio->bi_status on error.
  */
 static struct bio *blk_crypto_fallback_encrypt_bio(struct bio *src_bio)
 {
@@ -285,9 +263,12 @@ static struct bio *blk_crypto_fallback_encrypt_bio(struct bio *src_bio)
 	unsigned int i, j;
 	blk_status_t blk_st;
 
-	/* Split the bio if it's too big for single page bvec */
-	if (!blk_crypto_fallback_split_bio_if_needed(&src_bio))
+	/* Verify that bio splitting has occurred. */
+	if (WARN_ON_ONCE(bio_sectors(src_bio) >
+			 blk_crypto_max_io_size(src_bio))) {
+		src_bio->bi_status = BLK_STS_IOERR;
 		return NULL;
+	}
 
 	bc = src_bio->bi_crypt_context;
 	data_unit_size = bc->bc_key->crypto_cfg.data_unit_size;
@@ -481,10 +462,8 @@ static void blk_crypto_fallback_decrypt_endio(struct bio *bio)
  *
  * @bio: bio to prepare
  *
- * If bio is doing a WRITE operation, this splits the bio into two parts if it's
- * too big (see blk_crypto_fallback_split_bio_if_needed()). It then allocates a
- * bounce bio for the first part, encrypts it, and updates bio_ptr to point to
- * the bounce bio.
+ * For WRITE operations, a bounce bio is allocated, encrypted, and *bio_ptr is
+ * updated to point to the bounce bio.
  *
  * For a READ operation, we mark the bio for decryption by using bi_private and
  * bi_end_io.
diff --git a/block/blk-crypto-internal.h b/block/blk-crypto-internal.h
index 212e5bbfc95f..920cfc14c244 100644
--- a/block/blk-crypto-internal.h
+++ b/block/blk-crypto-internal.h
@@ -223,6 +223,8 @@ struct bio *blk_crypto_fallback_bio_prep(struct bio *bio);
 
 int blk_crypto_fallback_evict_key(const struct blk_crypto_key *key);
 
+unsigned int blk_crypto_max_io_size(struct bio *bio);
+
 #else /* CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK */
 
 static inline int
@@ -245,6 +247,11 @@ blk_crypto_fallback_evict_key(const struct blk_crypto_key *key)
 	return 0;
 }
 
+static inline unsigned int blk_crypto_max_io_size(struct bio *bio)
+{
+	return UINT_MAX;
+}
+
 #endif /* CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK */
 
 #endif /* __LINUX_BLK_CRYPTO_INTERNAL_H */
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 70d704615be5..f4e210279cd3 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -9,6 +9,7 @@
 #include <linux/blk-integrity.h>
 #include <linux/part_stat.h>
 #include <linux/blk-cgroup.h>
+#include <linux/blk-crypto.h>
 
 #include <trace/events/block.h>
 
@@ -124,10 +125,12 @@ static struct bio *bio_submit_split(struct bio *bio, int split_sectors)
 		trace_block_split(split, bio->bi_iter.bi_sector);
 		WARN_ON_ONCE(bio_zone_write_plugging(bio));
 		submit_bio_noacct(bio);
-		return split;
+
+		bio = split;
 	}
 
-	return bio;
+	return blk_crypto_bio_prep(bio);
+
 error:
 	bio->bi_status = errno_to_blk_status(split_sectors);
 	bio_endio(bio);
@@ -211,6 +214,8 @@ static inline unsigned get_max_io_size(struct bio *bio,
 	else
 		max_sectors = lim->max_sectors;
 
+	max_sectors = min(max_sectors, blk_crypto_max_io_size(bio));
+
 	if (boundary_sectors) {
 		max_sectors = min(max_sectors,
 			blk_boundary_sectors_left(bio->bi_iter.bi_sector,




[Index of Archives]     [Linux RAID]     [Linux SCSI]     [Linux ATA RAID]     [IDE]     [Linux Wireless]     [Linux Kernel]     [ATH6KL]     [Linux Bluetooth]     [Linux Netdev]     [Kernel Newbies]     [Security]     [Git]     [Netfilter]     [Bugtraq]     [Yosemite News]     [MIPS Linux]     [ARM Linux]     [Linux Security]     [Device Mapper]

  Powered by Linux