Modify splitting of encrypted bios as follows:
- Stop calling blk_crypto_bio_prep() for bio-based block drivers that do
  not call bio_split_to_limits().
- For request-based block drivers and for bio-based block drivers that
  call bio_split_to_limits(), call blk_crypto_bio_prep() after bio
  splitting instead of before it.
- In bio_split_rw(), restrict the bio size to the smaller of the sizes
  supported by the block driver and by the crypto fallback code.

The advantages of these changes are as follows:
- This patch fixes write errors on zoned storage caused by out-of-order
  submission of bios. This out-of-order submission happens if both the
  crypto fallback code and bio_split_to_limits() split a bio.
- Less code duplication: the crypto fallback code now relies on
  bio_split_to_limits() instead of open-coding bio splitting.

Signed-off-by: Bart Van Assche <bvanassche@xxxxxxx>
---
 block/blk-core.c            |  3 ---
 block/blk-crypto-fallback.c | 38 ++++++++------------------------------
 block/blk-crypto-internal.h |  7 +++++++
 block/blk-merge.c           | 12 ++++++++++--
 4 files changed, 25 insertions(+), 35 deletions(-)

diff --git a/block/blk-core.c b/block/blk-core.c
index fdac48aec5ef..5af5f8c3cd06 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -626,9 +626,6 @@ static void __submit_bio(struct bio *bio)
 	/* If plug is not used, add new plug here to cache nsecs time. */
 	struct blk_plug plug;
 
-	if (unlikely(!blk_crypto_bio_prep(&bio)))
-		return;
-
 	blk_start_plug(&plug);
 
 	if (!bdev_test_flag(bio->bi_bdev, BD_HAS_SUBMIT_BIO)) {
diff --git a/block/blk-crypto-fallback.c b/block/blk-crypto-fallback.c
index 0f127230215b..481123910b5f 100644
--- a/block/blk-crypto-fallback.c
+++ b/block/blk-crypto-fallback.c
@@ -214,7 +214,7 @@ blk_crypto_fallback_alloc_cipher_req(struct blk_crypto_keyslot *slot,
  * the bio size supported by the encryption fallback code. This function
  * calculates the upper limit for the bio size.
  */
-static unsigned int blk_crypto_max_io_size(struct bio *bio)
+unsigned int blk_crypto_max_io_size(struct bio *bio)
 {
 	unsigned int i = 0;
 	unsigned int num_sectors = 0;
@@ -230,29 +230,6 @@ static unsigned int blk_crypto_max_io_size(struct bio *bio)
 	return num_sectors;
 }
 
-static bool blk_crypto_fallback_split_bio_if_needed(struct bio **bio_ptr)
-{
-	struct bio *bio = *bio_ptr;
-	unsigned int num_sectors;
-
-	num_sectors = blk_crypto_max_io_size(bio);
-	if (num_sectors < bio_sectors(bio)) {
-		struct bio *split_bio;
-
-		split_bio = bio_split(bio, num_sectors, GFP_NOIO,
-				      &crypto_bio_split);
-		if (IS_ERR(split_bio)) {
-			bio->bi_status = BLK_STS_RESOURCE;
-			return false;
-		}
-		bio_chain(split_bio, bio);
-		submit_bio_noacct(bio);
-		*bio_ptr = split_bio;
-	}
-
-	return true;
-}
-
 union blk_crypto_iv {
 	__le64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
 	u8 bytes[BLK_CRYPTO_MAX_IV_SIZE];
@@ -289,9 +266,12 @@ static bool blk_crypto_fallback_encrypt_bio(struct bio **bio_ptr)
 	bool ret = false;
 	blk_status_t blk_st;
 
-	/* Split the bio if it's too big for single page bvec */
-	if (!blk_crypto_fallback_split_bio_if_needed(bio_ptr))
+	/* Verify that bio splitting has occurred. */
+	if (WARN_ON_ONCE(bio_sectors(*bio_ptr) >
+			 blk_crypto_max_io_size(*bio_ptr))) {
+		(*bio_ptr)->bi_status = BLK_STS_IOERR;
 		return false;
+	}
 
 	src_bio = *bio_ptr;
 	bc = src_bio->bi_crypt_context;
@@ -488,10 +468,8 @@ static void blk_crypto_fallback_decrypt_endio(struct bio *bio)
  *
  * @bio_ptr: pointer to the bio to prepare
  *
- * If bio is doing a WRITE operation, this splits the bio into two parts if it's
- * too big (see blk_crypto_fallback_split_bio_if_needed()). It then allocates a
- * bounce bio for the first part, encrypts it, and updates bio_ptr to point to
- * the bounce bio.
+ * For WRITE operations, a bounce bio is allocated, encrypted, and *bio_ptr is
+ * updated to point to the bounce bio.
  *
  * For a READ operation, we mark the bio for decryption by using bi_private and
  * bi_end_io.
diff --git a/block/blk-crypto-internal.h b/block/blk-crypto-internal.h
index ccf6dff6ff6b..443ba1fd82e6 100644
--- a/block/blk-crypto-internal.h
+++ b/block/blk-crypto-internal.h
@@ -223,6 +223,8 @@ bool blk_crypto_fallback_bio_prep(struct bio **bio_ptr);
 
 int blk_crypto_fallback_evict_key(const struct blk_crypto_key *key);
 
+unsigned int blk_crypto_max_io_size(struct bio *bio);
+
 #else /* CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK */
 
 static inline int
@@ -245,6 +247,11 @@ blk_crypto_fallback_evict_key(const struct blk_crypto_key *key)
 	return 0;
 }
 
+static inline unsigned int blk_crypto_max_io_size(struct bio *bio)
+{
+	return UINT_MAX;
+}
+
 #endif /* CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK */
 
 #endif /* __LINUX_BLK_CRYPTO_INTERNAL_H */
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 70d704615be5..a85d1cc95577 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -9,6 +9,7 @@
 #include <linux/blk-integrity.h>
 #include <linux/part_stat.h>
 #include <linux/blk-cgroup.h>
+#include <linux/blk-crypto.h>
 
 #include <trace/events/block.h>
 
@@ -124,9 +125,13 @@ static struct bio *bio_submit_split(struct bio *bio, int split_sectors)
 		trace_block_split(split, bio->bi_iter.bi_sector);
 		WARN_ON_ONCE(bio_zone_write_plugging(bio));
 		submit_bio_noacct(bio);
-		return split;
+
+		bio = split;
 	}
 
+	if (unlikely(!blk_crypto_bio_prep(&bio)))
+		return NULL;
+
 	return bio;
 error:
 	bio->bi_status = errno_to_blk_status(split_sectors);
@@ -355,9 +360,12 @@ EXPORT_SYMBOL_GPL(bio_split_rw_at);
 struct bio *bio_split_rw(struct bio *bio, const struct queue_limits *lim,
 		unsigned *nr_segs)
 {
+	u32 max_sectors =
+		min(get_max_io_size(bio, lim), blk_crypto_max_io_size(bio));
+
 	return bio_submit_split(bio,
 		bio_split_rw_at(bio, lim, nr_segs,
-			get_max_io_size(bio, lim) << SECTOR_SHIFT));
+			(u64)max_sectors << SECTOR_SHIFT));
 }
 
 /*
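
As background for reviewers, the sketch below illustrates the limit
computation that bio_split_rw() performs after this patch. It is a
standalone user-space illustration, not kernel code: EXAMPLE_PAGE_SIZE
and the 4 MiB driver limit are made-up assumptions, while BIO_MAX_VECS
(256) and SECTOR_SHIFT (9) mirror the kernel definitions. Because the
fallback bounce bio holds at most BIO_MAX_VECS single-page bvecs, the
fallback limit with 4 KiB pages is at most 256 * 8 = 2048 sectors
(1 MiB); splitting at the minimum of the driver limit and the fallback
limit means blk_crypto_fallback_encrypt_bio() never sees a bio that it
would have to split (and hence resubmit out of order) itself.

/*
 * Illustration only (user-space C, not kernel code): how the split
 * size is derived from the driver limit and the fallback limit.
 */
#include <stdio.h>

#define BIO_MAX_VECS		256	/* as in include/linux/bio.h */
#define SECTOR_SHIFT		9
#define EXAMPLE_PAGE_SIZE	4096	/* assumption: 4 KiB pages */

int main(void)
{
	/* Hypothetical driver limit: 4 MiB, expressed in sectors. */
	unsigned int driver_limit = (4u << 20) >> SECTOR_SHIFT;
	/* Fallback limit: BIO_MAX_VECS single-page bvecs, in sectors. */
	unsigned int fallback_limit =
		BIO_MAX_VECS * (EXAMPLE_PAGE_SIZE >> SECTOR_SHIFT);
	unsigned int max_sectors =
		driver_limit < fallback_limit ? driver_limit : fallback_limit;

	printf("driver limit:   %u sectors\n", driver_limit);   /* 8192 */
	printf("fallback limit: %u sectors\n", fallback_limit); /* 2048 */
	printf("split size:     %u sectors\n", max_sectors);    /* 2048 */
	return 0;
}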