FROMLIST: Update Inline Encryption from v6 to upstream version of patch series

The block layer patches for inline encryption have now landed upstream, so
update Android to the upstream version of inline encryption. The
fscrypt/f2fs/ext4 patches are also updated to the latest versions sent
upstream (since they can't be updated separately from the block layer
patches).

Changes v6 => v7:
 - Keyslot management is now done on a per-request basis rather than a
   per-bio basis.
 - Storage drivers can now specify the maximum number of data unit
   number (DUN) bytes they can accept for each crypto algorithm, and
   upper layers can specify the minimum number of DUN bytes they want
   with the blk_crypto_key they send with the bio. A driver is only
   considered to support a blk_crypto_key if it supports at least as
   many DUN bytes as the upper layer wants. This is necessary because
   storage drivers may not support as many bytes as the algorithm
   specification dictates (e.g. UFS only supports 8-byte DUNs for
   AES-256-XTS, even though the algorithm specification says DUNs are
   16 bytes long). A sketch of how a driver advertises this limit
   follows this list.
 - Introduce SB_INLINECRYPT to keep track of whether inline encryption
   is enabled for a filesystem (instead of using an fscrypt_operations
   hook).
 - Expose keyslot manager declaration and embed it within ufs_hba to
   clean up code.
 - Make blk-crypto preclude blk-integrity.
 - Some bug fixes
 - Introduce UFSHCD_QUIRK_BROKEN_CRYPTO for UFS drivers that don't
   support inline encryption (yet)
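
A minimal sketch of the driver side of this negotiation, modeled on
blk_crypto_fallback_init() further down in this diff; the slot count,
data unit sizes, and the "example_hba" ops are placeholders, not code
from the series:

    /* Hypothetical low-level ops; a real driver would program/evict HW keyslots. */
    static int example_hba_keyslot_program(struct blk_keyslot_manager *ksm,
                                           const struct blk_crypto_key *key,
                                           unsigned int slot)
    {
            return 0;
    }

    static int example_hba_keyslot_evict(struct blk_keyslot_manager *ksm,
                                         const struct blk_crypto_key *key,
                                         unsigned int slot)
    {
            return 0;
    }

    static const struct blk_ksm_ll_ops example_hba_ksm_ops = {
            .keyslot_program = example_hba_keyslot_program,
            .keyslot_evict   = example_hba_keyslot_evict,
    };

    static int example_hba_init_crypto(struct blk_keyslot_manager *ksm)
    {
            int err = blk_ksm_init(ksm, 32);        /* 32 keyslots: placeholder */

            if (err)
                    return err;

            ksm->ksm_ll_ops = example_hba_ksm_ops;
            /* HW only accepts 8 DUN bytes, though AES-256-XTS IVs are 16 bytes. */
            ksm->max_dun_bytes_supported = 8;
            ksm->features = BLK_CRYPTO_FEATURE_STANDARD_KEYS;
            /* Bitmask of supported data unit sizes, per crypto mode. */
            ksm->crypto_modes_supported[BLK_ENCRYPTION_MODE_AES_256_XTS] =
                    512 | 4096;
            return 0;
    }

A blk_crypto_key whose DUN length exceeds max_dun_bytes_supported is then
treated as unsupported by this device, and I/O using it goes through
blk-crypto-fallback instead (if enabled).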

Changes v7 => v8:
 - Pass a struct blk_ksm_keyslot * around instead of raw slot numbers,
   which simplifies some functions and gives arguments better types
   (see the sketch after this list)
 - Avoid calling into blk-crypto for bios with no encryption context by
   checking for the presence of bi_crypt_context first
 - Make blk-integrity preclude inline encryption support at probe time
 - Many many cleanups
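
A minimal sketch of the new calling convention, using the same helpers the
fallback uses in the diff below (blk_ksm_get_slot_for_key(),
blk_ksm_get_slot_idx() and blk_ksm_put_slot()); the function itself and the
pr_debug() are just illustrative stand-ins:

    static blk_status_t example_use_keyslot(struct blk_keyslot_manager *ksm,
                                            const struct bio_crypt_ctx *bc)
    {
            struct blk_ksm_keyslot *slot;
            blk_status_t blk_st;

            /* Acquire a keyslot for this key: we get a slot pointer, not a number. */
            blk_st = blk_ksm_get_slot_for_key(ksm, bc->bc_key, &slot);
            if (blk_st != BLK_STS_OK)
                    return blk_st;

            /* Convert to a raw index only where the hardware actually needs one. */
            pr_debug("using keyslot %d\n", blk_ksm_get_slot_idx(slot));

            blk_ksm_put_slot(slot);
            return BLK_STS_OK;
    }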

Changes v8 => v9:
 - Don't open-code bio_has_crypt_ctx in callers of blk-crypto functions
   (see the sketch after this list).
 - Lots of cleanups
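
The resulting pattern is a cheap inline check in front of the real
blk-crypto entry points, roughly like the following (the wrapper and
double-underscore names are assumed for illustration):

    static inline bool blk_crypto_bio_prep(struct bio **bio_ptr)
    {
            if (bio_has_crypt_ctx(*bio_ptr))
                    return __blk_crypto_bio_prep(bio_ptr);
            return true;
    }

Callers invoke the wrapper unconditionally; bios without an encryption
context never enter blk-crypto proper.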

Changes v9 => v10:
 - Incorporate Eric's fix so that en/decryption still happens as usual via
   fscrypt when the hardware doesn't support the desired crypto
   configuration and blk-crypto-fallback is disabled. (Introduce
   struct blk_crypto_config and blk_crypto_config_supported for fscrypt
   to call, to check that either blk-crypto-fallback is enabled or the
   device supports the crypto configuration; see the sketch after this
   list.)
 - Update docs
 - Lots of cleanups
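
A sketch of the fscrypt-side check; the field values are examples, "sb" is
assumed to be the filesystem's super_block, and the queue-based signature of
blk_crypto_config_supported() is assumed here:

    struct blk_crypto_config cfg = {
            .crypto_mode     = BLK_ENCRYPTION_MODE_AES_256_XTS,
            .data_unit_size  = 4096,
            .dun_bytes       = 8,
    };

    if (!blk_crypto_config_supported(bdev_get_queue(sb->s_bdev), &cfg)) {
            /*
             * Neither the inline encryption hardware nor blk-crypto-fallback
             * can handle this configuration, so fscrypt falls back to its own
             * file-contents encryption instead of using blk-crypto.
             */
    }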

Changes v10 => v11:
 - We now allocate a new bio_crypt_ctx for each request instead of
   pulling and reusing the one in the bio inserted into the request. The
   bio_crypt_ctx of a bio is freed after the bio is ended.
 - Make each blk_ksm_keyslot store a pointer to the blk_crypto_key
   instead of a copy of it, so that each blk_crypto_key has its own
   keyslot. This also removes the need to compute a siphash for each
   blk_crypto_key (see the sketch after this list).
 - Minor cleanups
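
Conceptually, a keyslot now matches a key by pointer identity; the real
keyslot-manager code keeps more state and finds slots via a hash table keyed
on the pointer, so the struct and helper below are purely illustrative:

    struct example_keyslot {
            const struct blk_crypto_key *key;       /* pointer, not a copy */
    };

    static bool example_keyslot_matches(const struct example_keyslot *slot,
                                        const struct blk_crypto_key *key)
    {
            return slot->key == key;        /* no siphash of the raw key needed */
    }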

Changes v11 => v12:
 - Inlined some fscrypt functions
 - Minor cleanups and improved comments

Changes v12 => v13:
 - Updated docs
 - Minor cleanups
 - Rebased onto linux-block/for-next

Changes v13 => fscrypt/f2fs/ext4 upstream patch series:
 - Rename struct fscrypt_info::ci_key to ci_enc_key
 - Set DUN bytes more precisely in fscrypt
 - Cleanups

Bug: 137270441
Test: Tested that cuttlefish boots both with and without the inlinecrypt
      mount option specified in fstab, using both F2FS and EXT4 for
      userdata.img. Also verified ciphertext via
      "atest -v vts_kernel_encryption_test".
      Also tested by running gce-xfstests on the auto and encrypt test
      groups on EXT4 and F2FS, both with and without the inlinecrypt
      mount option. The UFS changes were tested on a Pixel 4 device.
Link: https://lore.kernel.org/linux-block/20200514003727.69001-1-satyat@google.com/
Link: https://lore.kernel.org/linux-fscrypt/20200617075732.213198-1-satyat@google.com/
Link: https://lore.kernel.org/linux-scsi/20200617081841.218985-1-satyat@google.com/
Change-Id: I57c10d370bf006c9dfcf173f21a720413017761e
Signed-off-by: Satya Tangirala <satyat@google.com>
Signed-off-by: Eric Biggers <ebiggers@google.com>
diff --git a/block/blk-crypto-fallback.c b/block/blk-crypto-fallback.c
index d6c5788..d1f6d31 100644
--- a/block/blk-crypto-fallback.c
+++ b/block/blk-crypto-fallback.c
@@ -12,6 +12,7 @@
 #include <crypto/skcipher.h>
 #include <linux/blk-cgroup.h>
 #include <linux/blk-crypto.h>
+#include <linux/blkdev.h>
 #include <linux/crypto.h>
 #include <linux/keyslot-manager.h>
 #include <linux/mempool.h>
@@ -44,10 +45,18 @@ struct bio_fallback_crypt_ctx {
 	 * resubmitted
 	 */
 	struct bvec_iter crypt_iter;
-	u64 fallback_dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
+	union {
+		struct {
+			struct work_struct work;
+			struct bio *bio;
+		};
+		struct {
+			void *bi_private_orig;
+			bio_end_io_t *bi_end_io_orig;
+		};
+	};
 };
 
-/* The following few vars are only used during the crypto API fallback */
 static struct kmem_cache *bio_fallback_crypt_ctx_cache;
 static mempool_t *bio_fallback_crypt_ctx_pool;
 
@@ -63,27 +72,14 @@ static mempool_t *bio_fallback_crypt_ctx_pool;
 static DEFINE_MUTEX(tfms_init_lock);
 static bool tfms_inited[BLK_ENCRYPTION_MODE_MAX];
 
-struct blk_crypto_decrypt_work {
-	struct work_struct work;
-	struct bio *bio;
-};
-
 static struct blk_crypto_keyslot {
-	struct crypto_skcipher *tfm;
 	enum blk_crypto_mode_num crypto_mode;
 	struct crypto_skcipher *tfms[BLK_ENCRYPTION_MODE_MAX];
 } *blk_crypto_keyslots;
 
-/* The following few vars are only used during the crypto API fallback */
-static struct keyslot_manager *blk_crypto_ksm;
+static struct blk_keyslot_manager blk_crypto_ksm;
 static struct workqueue_struct *blk_crypto_wq;
 static mempool_t *blk_crypto_bounce_page_pool;
-static struct kmem_cache *blk_crypto_decrypt_work_cache;
-
-bool bio_crypt_fallback_crypted(const struct bio_crypt_ctx *bc)
-{
-	return bc && bc->bc_ksm == blk_crypto_ksm;
-}
 
 /*
  * This is the key we set when evicting a keyslot. This *should* be the all 0's
@@ -106,21 +102,19 @@ static void blk_crypto_evict_keyslot(unsigned int slot)
 	slotp->crypto_mode = BLK_ENCRYPTION_MODE_INVALID;
 }
 
-static int blk_crypto_keyslot_program(struct keyslot_manager *ksm,
+static int blk_crypto_keyslot_program(struct blk_keyslot_manager *ksm,
 				      const struct blk_crypto_key *key,
 				      unsigned int slot)
 {
 	struct blk_crypto_keyslot *slotp = &blk_crypto_keyslots[slot];
-	const enum blk_crypto_mode_num crypto_mode = key->crypto_mode;
+	const enum blk_crypto_mode_num crypto_mode =
+						key->crypto_cfg.crypto_mode;
 	int err;
 
 	if (crypto_mode != slotp->crypto_mode &&
-	    slotp->crypto_mode != BLK_ENCRYPTION_MODE_INVALID) {
+	    slotp->crypto_mode != BLK_ENCRYPTION_MODE_INVALID)
 		blk_crypto_evict_keyslot(slot);
-	}
 
-	if (!slotp->tfms[crypto_mode])
-		return -ENOMEM;
 	slotp->crypto_mode = crypto_mode;
 	err = crypto_skcipher_setkey(slotp->tfms[crypto_mode], key->raw,
 				     key->size);
@@ -131,7 +125,7 @@ static int blk_crypto_keyslot_program(struct keyslot_manager *ksm,
 	return 0;
 }
 
-static int blk_crypto_keyslot_evict(struct keyslot_manager *ksm,
+static int blk_crypto_keyslot_evict(struct blk_keyslot_manager *ksm,
 				    const struct blk_crypto_key *key,
 				    unsigned int slot)
 {
@@ -141,16 +135,15 @@ static int blk_crypto_keyslot_evict(struct keyslot_manager *ksm,
 
 /*
  * The crypto API fallback KSM ops - only used for a bio when it specifies a
- * blk_crypto_mode for which we failed to get a keyslot in the device's inline
- * encryption hardware (which probably means the device doesn't have inline
- * encryption hardware that supports that crypto mode).
+ * blk_crypto_key that was not supported by the device's inline encryption
+ * hardware.
  */
-static const struct keyslot_mgmt_ll_ops blk_crypto_ksm_ll_ops = {
+static const struct blk_ksm_ll_ops blk_crypto_ksm_ll_ops = {
 	.keyslot_program	= blk_crypto_keyslot_program,
 	.keyslot_evict		= blk_crypto_keyslot_evict,
 };
 
-static void blk_crypto_encrypt_endio(struct bio *enc_bio)
+static void blk_crypto_fallback_encrypt_endio(struct bio *enc_bio)
 {
 	struct bio *src_bio = enc_bio->bi_private;
 	int i;
@@ -184,12 +177,6 @@ static struct bio *blk_crypto_clone_bio(struct bio *bio_src)
 	bio_for_each_segment(bv, bio_src, iter)
 		bio->bi_io_vec[bio->bi_vcnt++] = bv;
 
-	if (bio_integrity(bio_src) &&
-	    bio_integrity_clone(bio, bio_src, GFP_NOIO) < 0) {
-		bio_put(bio);
-		return NULL;
-	}
-
 	bio_clone_blkg_association(bio, bio_src);
 	blkcg_bio_issue_init(bio);
 
@@ -198,30 +185,30 @@ static struct bio *blk_crypto_clone_bio(struct bio *bio_src)
 	return bio;
 }
 
-static int blk_crypto_alloc_cipher_req(struct bio *src_bio,
-				       struct skcipher_request **ciph_req_ret,
-				       struct crypto_wait *wait)
+static bool blk_crypto_alloc_cipher_req(struct blk_ksm_keyslot *slot,
+					struct skcipher_request **ciph_req_ret,
+					struct crypto_wait *wait)
 {
 	struct skcipher_request *ciph_req;
 	const struct blk_crypto_keyslot *slotp;
+	int keyslot_idx = blk_ksm_get_slot_idx(slot);
 
-	slotp = &blk_crypto_keyslots[src_bio->bi_crypt_context->bc_keyslot];
+	slotp = &blk_crypto_keyslots[keyslot_idx];
 	ciph_req = skcipher_request_alloc(slotp->tfms[slotp->crypto_mode],
 					  GFP_NOIO);
-	if (!ciph_req) {
-		src_bio->bi_status = BLK_STS_RESOURCE;
-		return -ENOMEM;
-	}
+	if (!ciph_req)
+		return false;
 
 	skcipher_request_set_callback(ciph_req,
 				      CRYPTO_TFM_REQ_MAY_BACKLOG |
 				      CRYPTO_TFM_REQ_MAY_SLEEP,
 				      crypto_req_done, wait);
 	*ciph_req_ret = ciph_req;
-	return 0;
+
+	return true;
 }
 
-static int blk_crypto_split_bio_if_needed(struct bio **bio_ptr)
+static bool blk_crypto_split_bio_if_needed(struct bio **bio_ptr)
 {
 	struct bio *bio = *bio_ptr;
 	unsigned int i = 0;
@@ -240,13 +227,14 @@ static int blk_crypto_split_bio_if_needed(struct bio **bio_ptr)
 		split_bio = bio_split(bio, num_sectors, GFP_NOIO, NULL);
 		if (!split_bio) {
 			bio->bi_status = BLK_STS_RESOURCE;
-			return -ENOMEM;
+			return false;
 		}
 		bio_chain(split_bio, bio);
 		generic_make_request(bio);
 		*bio_ptr = split_bio;
 	}
-	return 0;
+
+	return true;
 }
 
 union blk_crypto_iv {
@@ -267,52 +255,54 @@ static void blk_crypto_dun_to_iv(const u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE],
  * The crypto API fallback's encryption routine.
  * Allocate a bounce bio for encryption, encrypt the input bio using crypto API,
  * and replace *bio_ptr with the bounce bio. May split input bio if it's too
- * large.
+ * large. Returns true on success. Returns false and sets bio->bi_status on
+ * error.
  */
-static int blk_crypto_encrypt_bio(struct bio **bio_ptr)
+static bool blk_crypto_fallback_encrypt_bio(struct bio **bio_ptr)
 {
-	struct bio *src_bio;
+	struct bio *src_bio, *enc_bio;
+	struct bio_crypt_ctx *bc;
+	struct blk_ksm_keyslot *slot;
+	int data_unit_size;
 	struct skcipher_request *ciph_req = NULL;
 	DECLARE_CRYPTO_WAIT(wait);
 	u64 curr_dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
-	union blk_crypto_iv iv;
 	struct scatterlist src, dst;
-	struct bio *enc_bio;
+	union blk_crypto_iv iv;
 	unsigned int i, j;
-	int data_unit_size;
-	struct bio_crypt_ctx *bc;
-	int err = 0;
+	bool ret = false;
+	blk_status_t blk_st;
 
 	/* Split the bio if it's too big for single page bvec */
-	err = blk_crypto_split_bio_if_needed(bio_ptr);
-	if (err)
-		return err;
+	if (!blk_crypto_split_bio_if_needed(bio_ptr))
+		return false;
 
 	src_bio = *bio_ptr;
 	bc = src_bio->bi_crypt_context;
-	data_unit_size = bc->bc_key->data_unit_size;
+	data_unit_size = bc->bc_key->crypto_cfg.data_unit_size;
 
 	/* Allocate bounce bio for encryption */
 	enc_bio = blk_crypto_clone_bio(src_bio);
 	if (!enc_bio) {
 		src_bio->bi_status = BLK_STS_RESOURCE;
-		return -ENOMEM;
+		return false;
 	}
 
 	/*
 	 * Use the crypto API fallback keyslot manager to get a crypto_skcipher
 	 * for the algorithm and key specified for this bio.
 	 */
-	err = bio_crypt_ctx_acquire_keyslot(bc, blk_crypto_ksm);
-	if (err) {
-		src_bio->bi_status = BLK_STS_IOERR;
+	blk_st = blk_ksm_get_slot_for_key(&blk_crypto_ksm, bc->bc_key, &slot);
+	if (blk_st != BLK_STS_OK) {
+		src_bio->bi_status = blk_st;
 		goto out_put_enc_bio;
 	}
 
 	/* and then allocate an skcipher_request for it */
-	err = blk_crypto_alloc_cipher_req(src_bio, &ciph_req, &wait);
-	if (err)
+	if (!blk_crypto_alloc_cipher_req(slot, &ciph_req, &wait)) {
+		src_bio->bi_status = BLK_STS_RESOURCE;
 		goto out_release_keyslot;
+	}
 
 	memcpy(curr_dun, bc->bc_dun, sizeof(curr_dun));
 	sg_init_table(&src, 1);
@@ -332,7 +322,6 @@ static int blk_crypto_encrypt_bio(struct bio **bio_ptr)
 
 		if (!ciphertext_page) {
 			src_bio->bi_status = BLK_STS_RESOURCE;
-			err = -ENOMEM;
 			goto out_free_bounce_pages;
 		}
 
@@ -344,11 +333,10 @@ static int blk_crypto_encrypt_bio(struct bio **bio_ptr)
 		/* Encrypt each data unit in this page */
 		for (j = 0; j < enc_bvec->bv_len; j += data_unit_size) {
 			blk_crypto_dun_to_iv(curr_dun, &iv);
-			err = crypto_wait_req(crypto_skcipher_encrypt(ciph_req),
-					      &wait);
-			if (err) {
+			if (crypto_wait_req(crypto_skcipher_encrypt(ciph_req),
+					    &wait)) {
 				i++;
-				src_bio->bi_status = BLK_STS_RESOURCE;
+				src_bio->bi_status = BLK_STS_IOERR;
 				goto out_free_bounce_pages;
 			}
 			bio_crypt_dun_increment(curr_dun, 1);
@@ -358,11 +346,11 @@ static int blk_crypto_encrypt_bio(struct bio **bio_ptr)
 	}
 
 	enc_bio->bi_private = src_bio;
-	enc_bio->bi_end_io = blk_crypto_encrypt_endio;
+	enc_bio->bi_end_io = blk_crypto_fallback_encrypt_endio;
 	*bio_ptr = enc_bio;
+	ret = true;
 
 	enc_bio = NULL;
-	err = 0;
 	goto out_free_ciph_req;
 
 out_free_bounce_pages:
@@ -372,61 +360,53 @@ static int blk_crypto_encrypt_bio(struct bio **bio_ptr)
 out_free_ciph_req:
 	skcipher_request_free(ciph_req);
 out_release_keyslot:
-	bio_crypt_ctx_release_keyslot(bc);
+	blk_ksm_put_slot(slot);
 out_put_enc_bio:
 	if (enc_bio)
 		bio_put(enc_bio);
 
-	return err;
-}
-
-static void blk_crypto_free_fallback_crypt_ctx(struct bio *bio)
-{
-	mempool_free(container_of(bio->bi_crypt_context,
-				  struct bio_fallback_crypt_ctx,
-				  crypt_ctx),
-		     bio_fallback_crypt_ctx_pool);
-	bio->bi_crypt_context = NULL;
+	return ret;
 }
 
 /*
  * The crypto API fallback's main decryption routine.
- * Decrypts input bio in place.
+ * Decrypts input bio in place, and calls bio_endio on the bio.
  */
-static void blk_crypto_decrypt_bio(struct work_struct *work)
+static void blk_crypto_fallback_decrypt_bio(struct work_struct *work)
 {
-	struct blk_crypto_decrypt_work *decrypt_work =
-		container_of(work, struct blk_crypto_decrypt_work, work);
-	struct bio *bio = decrypt_work->bio;
+	struct bio_fallback_crypt_ctx *f_ctx =
+		container_of(work, struct bio_fallback_crypt_ctx, work);
+	struct bio *bio = f_ctx->bio;
+	struct bio_crypt_ctx *bc = &f_ctx->crypt_ctx;
+	struct blk_ksm_keyslot *slot;
 	struct skcipher_request *ciph_req = NULL;
 	DECLARE_CRYPTO_WAIT(wait);
-	struct bio_vec bv;
-	struct bvec_iter iter;
 	u64 curr_dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
 	union blk_crypto_iv iv;
 	struct scatterlist sg;
-	struct bio_crypt_ctx *bc = bio->bi_crypt_context;
-	struct bio_fallback_crypt_ctx *f_ctx =
-		container_of(bc, struct bio_fallback_crypt_ctx, crypt_ctx);
-	const int data_unit_size = bc->bc_key->data_unit_size;
+	struct bio_vec bv;
+	struct bvec_iter iter;
+	const int data_unit_size = bc->bc_key->crypto_cfg.data_unit_size;
 	unsigned int i;
-	int err;
+	blk_status_t blk_st;
 
 	/*
 	 * Use the crypto API fallback keyslot manager to get a crypto_skcipher
 	 * for the algorithm and key specified for this bio.
 	 */
-	if (bio_crypt_ctx_acquire_keyslot(bc, blk_crypto_ksm)) {
-		bio->bi_status = BLK_STS_RESOURCE;
+	blk_st = blk_ksm_get_slot_for_key(&blk_crypto_ksm, bc->bc_key, &slot);
+	if (blk_st != BLK_STS_OK) {
+		bio->bi_status = blk_st;
 		goto out_no_keyslot;
 	}
 
 	/* and then allocate an skcipher_request for it */
-	err = blk_crypto_alloc_cipher_req(bio, &ciph_req, &wait);
-	if (err)
+	if (!blk_crypto_alloc_cipher_req(slot, &ciph_req, &wait)) {
+		bio->bi_status = BLK_STS_RESOURCE;
 		goto out;
+	}
 
-	memcpy(curr_dun, f_ctx->fallback_dun, sizeof(curr_dun));
+	memcpy(curr_dun, bc->bc_dun, sizeof(curr_dun));
 	sg_init_table(&sg, 1);
 	skcipher_request_set_crypt(ciph_req, &sg, &sg, data_unit_size,
 				   iv.bytes);
@@ -452,40 +432,174 @@ static void blk_crypto_decrypt_bio(struct work_struct *work)
 
 out:
 	skcipher_request_free(ciph_req);
-	bio_crypt_ctx_release_keyslot(bc);
+	blk_ksm_put_slot(slot);
 out_no_keyslot:
-	kmem_cache_free(blk_crypto_decrypt_work_cache, decrypt_work);
-	blk_crypto_free_fallback_crypt_ctx(bio);
+	mempool_free(f_ctx, bio_fallback_crypt_ctx_pool);
 	bio_endio(bio);
 }
 
-/*
- * Queue bio for decryption.
- * Returns true iff bio was queued for decryption.
+/**
+ * blk_crypto_fallback_decrypt_endio - queue bio for fallback decryption
+ *
+ * @bio: the bio to queue
+ *
+ * Restore bi_private and bi_end_io, and queue the bio for decryption into a
+ * workqueue, since this function will be called from an atomic context.
  */
-bool blk_crypto_queue_decrypt_bio(struct bio *bio)
+static void blk_crypto_fallback_decrypt_endio(struct bio *bio)
 {
-	struct blk_crypto_decrypt_work *decrypt_work;
+	struct bio_fallback_crypt_ctx *f_ctx = bio->bi_private;
+
+	bio->bi_private = f_ctx->bi_private_orig;
+	bio->bi_end_io = f_ctx->bi_end_io_orig;
 
 	/* If there was an IO error, don't queue for decrypt. */
-	if (bio->bi_status)
-		goto out;
-
-	decrypt_work = kmem_cache_zalloc(blk_crypto_decrypt_work_cache,
-					 GFP_ATOMIC);
-	if (!decrypt_work) {
-		bio->bi_status = BLK_STS_RESOURCE;
-		goto out;
+	if (bio->bi_status) {
+		mempool_free(f_ctx, bio_fallback_crypt_ctx_pool);
+		bio_endio(bio);
+		return;
 	}
 
-	INIT_WORK(&decrypt_work->work, blk_crypto_decrypt_bio);
-	decrypt_work->bio = bio;
-	queue_work(blk_crypto_wq, &decrypt_work->work);
+	INIT_WORK(&f_ctx->work, blk_crypto_fallback_decrypt_bio);
+	f_ctx->bio = bio;
+	queue_work(blk_crypto_wq, &f_ctx->work);
+}
+
+/**
+ * blk_crypto_fallback_bio_prep - Prepare a bio to use fallback en/decryption
+ *
+ * @bio_ptr: pointer to the bio to prepare
+ *
+ * If bio is doing a WRITE operation, this splits the bio into two parts if it's
+ * too big (see blk_crypto_split_bio_if_needed). It then allocates a bounce bio
+ * for the first part, encrypts it, and update bio_ptr to point to the bounce
+ * bio.
+ *
+ * For a READ operation, we mark the bio for decryption by using bi_private and
+ * bi_end_io.
+ *
+ * In either case, this function will make the bio look like a regular bio (i.e.
+ * as if no encryption context was ever specified) for the purposes of the rest
+ * of the stack except for blk-integrity (blk-integrity and blk-crypto are not
+ * currently supported together).
+ *
+ * Return: true on success. Sets bio->bi_status and returns false on error.
+ */
+bool blk_crypto_fallback_bio_prep(struct bio **bio_ptr)
+{
+	struct bio *bio = *bio_ptr;
+	struct bio_crypt_ctx *bc = bio->bi_crypt_context;
+	struct bio_fallback_crypt_ctx *f_ctx;
+
+	if (bc->bc_key->crypto_cfg.is_hw_wrapped) {
+		pr_warn_once("HW wrapped key cannot be used with fallback.\n");
+		bio->bi_status = BLK_STS_NOTSUPP;
+		return false;
+	}
+
+	if (WARN_ON_ONCE(!tfms_inited[bc->bc_key->crypto_cfg.crypto_mode])) {
+		/* User didn't call blk_crypto_start_using_key() first */
+		bio->bi_status = BLK_STS_IOERR;
+		return false;
+	}
+
+	if (!blk_ksm_crypto_cfg_supported(&blk_crypto_ksm,
+					  &bc->bc_key->crypto_cfg)) {
+		bio->bi_status = BLK_STS_NOTSUPP;
+		return false;
+	}
+
+	if (bio_data_dir(bio) == WRITE)
+		return blk_crypto_fallback_encrypt_bio(bio_ptr);
+
+	/*
+	 * bio READ case: Set up a f_ctx in the bio's bi_private and set the
+	 * bi_end_io appropriately to trigger decryption when the bio is ended.
+	 */
+	f_ctx = mempool_alloc(bio_fallback_crypt_ctx_pool, GFP_NOIO);
+	f_ctx->crypt_ctx = *bc;
+	f_ctx->crypt_iter = bio->bi_iter;
+	f_ctx->bi_private_orig = bio->bi_private;
+	f_ctx->bi_end_io_orig = bio->bi_end_io;
+	bio->bi_private = (void *)f_ctx;
+	bio->bi_end_io = blk_crypto_fallback_decrypt_endio;
+	bio_crypt_free_ctx(bio);
 
 	return true;
+}
+
+int blk_crypto_fallback_evict_key(const struct blk_crypto_key *key)
+{
+	return blk_ksm_evict_key(&blk_crypto_ksm, key);
+}
+
+static bool blk_crypto_fallback_inited;
+static int blk_crypto_fallback_init(void)
+{
+	int i;
+	int err = -ENOMEM;
+
+	if (blk_crypto_fallback_inited)
+		return 0;
+
+	prandom_bytes(blank_key, BLK_CRYPTO_MAX_KEY_SIZE);
+
+	err = blk_ksm_init(&blk_crypto_ksm, blk_crypto_num_keyslots);
+	if (err)
+		goto out;
+	err = -ENOMEM;
+
+	blk_crypto_ksm.ksm_ll_ops = blk_crypto_ksm_ll_ops;
+	blk_crypto_ksm.max_dun_bytes_supported = BLK_CRYPTO_MAX_IV_SIZE;
+	blk_crypto_ksm.features = BLK_CRYPTO_FEATURE_STANDARD_KEYS;
+
+	/* All blk-crypto modes have a crypto API fallback. */
+	for (i = 0; i < BLK_ENCRYPTION_MODE_MAX; i++)
+		blk_crypto_ksm.crypto_modes_supported[i] = 0xFFFFFFFF;
+	blk_crypto_ksm.crypto_modes_supported[BLK_ENCRYPTION_MODE_INVALID] = 0;
+
+	blk_crypto_wq = alloc_workqueue("blk_crypto_wq",
+					WQ_UNBOUND | WQ_HIGHPRI |
+					WQ_MEM_RECLAIM, num_online_cpus());
+	if (!blk_crypto_wq)
+		goto fail_free_ksm;
+
+	blk_crypto_keyslots = kcalloc(blk_crypto_num_keyslots,
+				      sizeof(blk_crypto_keyslots[0]),
+				      GFP_KERNEL);
+	if (!blk_crypto_keyslots)
+		goto fail_free_wq;
+
+	blk_crypto_bounce_page_pool =
+		mempool_create_page_pool(num_prealloc_bounce_pg, 0);
+	if (!blk_crypto_bounce_page_pool)
+		goto fail_free_keyslots;
+
+	bio_fallback_crypt_ctx_cache = KMEM_CACHE(bio_fallback_crypt_ctx, 0);
+	if (!bio_fallback_crypt_ctx_cache)
+		goto fail_free_bounce_page_pool;
+
+	bio_fallback_crypt_ctx_pool =
+		mempool_create_slab_pool(num_prealloc_fallback_crypt_ctxs,
+					 bio_fallback_crypt_ctx_cache);
+	if (!bio_fallback_crypt_ctx_pool)
+		goto fail_free_crypt_ctx_cache;
+
+	blk_crypto_fallback_inited = true;
+
+	return 0;
+fail_free_crypt_ctx_cache:
+	kmem_cache_destroy(bio_fallback_crypt_ctx_cache);
+fail_free_bounce_page_pool:
+	mempool_destroy(blk_crypto_bounce_page_pool);
+fail_free_keyslots:
+	kfree(blk_crypto_keyslots);
+fail_free_wq:
+	destroy_workqueue(blk_crypto_wq);
+fail_free_ksm:
+	blk_ksm_destroy(&blk_crypto_ksm);
 out:
-	blk_crypto_free_fallback_crypt_ctx(bio);
-	return false;
+	return err;
 }
 
 /*
@@ -508,7 +622,11 @@ int blk_crypto_fallback_start_using_mode(enum blk_crypto_mode_num mode_num)
 		return 0;
 
 	mutex_lock(&tfms_init_lock);
-	if (likely(tfms_inited[mode_num]))
+	if (tfms_inited[mode_num])
+		goto out;
+
+	err = blk_crypto_fallback_init();
+	if (err)
 		goto out;
 
 	for (i = 0; i < blk_crypto_num_keyslots; i++) {
@@ -546,100 +664,3 @@ int blk_crypto_fallback_start_using_mode(enum blk_crypto_mode_num mode_num)
 	mutex_unlock(&tfms_init_lock);
 	return err;
 }
-
-int blk_crypto_fallback_evict_key(const struct blk_crypto_key *key)
-{
-	return keyslot_manager_evict_key(blk_crypto_ksm, key);
-}
-
-int blk_crypto_fallback_submit_bio(struct bio **bio_ptr)
-{
-	struct bio *bio = *bio_ptr;
-	struct bio_crypt_ctx *bc = bio->bi_crypt_context;
-	struct bio_fallback_crypt_ctx *f_ctx;
-
-	if (bc->bc_key->is_hw_wrapped) {
-		pr_warn_once("HW wrapped key cannot be used with fallback.\n");
-		bio->bi_status = BLK_STS_NOTSUPP;
-		return -EOPNOTSUPP;
-	}
-
-	if (!tfms_inited[bc->bc_key->crypto_mode]) {
-		bio->bi_status = BLK_STS_IOERR;
-		return -EIO;
-	}
-
-	if (bio_data_dir(bio) == WRITE)
-		return blk_crypto_encrypt_bio(bio_ptr);
-
-	/*
-	 * Mark bio as fallback crypted and replace the bio_crypt_ctx with
-	 * another one contained in a bio_fallback_crypt_ctx, so that the
-	 * fallback has space to store the info it needs for decryption.
-	 */
-	bc->bc_ksm = blk_crypto_ksm;
-	f_ctx = mempool_alloc(bio_fallback_crypt_ctx_pool, GFP_NOIO);
-	f_ctx->crypt_ctx = *bc;
-	memcpy(f_ctx->fallback_dun, bc->bc_dun, sizeof(f_ctx->fallback_dun));
-	f_ctx->crypt_iter = bio->bi_iter;
-
-	bio_crypt_free_ctx(bio);
-	bio->bi_crypt_context = &f_ctx->crypt_ctx;
-
-	return 0;
-}
-
-int __init blk_crypto_fallback_init(void)
-{
-	int i;
-	unsigned int crypto_mode_supported[BLK_ENCRYPTION_MODE_MAX];
-
-	prandom_bytes(blank_key, BLK_CRYPTO_MAX_KEY_SIZE);
-
-	/* All blk-crypto modes have a crypto API fallback. */
-	for (i = 0; i < BLK_ENCRYPTION_MODE_MAX; i++)
-		crypto_mode_supported[i] = 0xFFFFFFFF;
-	crypto_mode_supported[BLK_ENCRYPTION_MODE_INVALID] = 0;
-
-	blk_crypto_ksm = keyslot_manager_create(
-				NULL, blk_crypto_num_keyslots,
-				&blk_crypto_ksm_ll_ops,
-				BLK_CRYPTO_FEATURE_STANDARD_KEYS,
-				crypto_mode_supported, NULL);
-	if (!blk_crypto_ksm)
-		return -ENOMEM;
-
-	blk_crypto_wq = alloc_workqueue("blk_crypto_wq",
-					WQ_UNBOUND | WQ_HIGHPRI |
-					WQ_MEM_RECLAIM, num_online_cpus());
-	if (!blk_crypto_wq)
-		return -ENOMEM;
-
-	blk_crypto_keyslots = kcalloc(blk_crypto_num_keyslots,
-				      sizeof(blk_crypto_keyslots[0]),
-				      GFP_KERNEL);
-	if (!blk_crypto_keyslots)
-		return -ENOMEM;
-
-	blk_crypto_bounce_page_pool =
-		mempool_create_page_pool(num_prealloc_bounce_pg, 0);
-	if (!blk_crypto_bounce_page_pool)
-		return -ENOMEM;
-
-	blk_crypto_decrypt_work_cache = KMEM_CACHE(blk_crypto_decrypt_work,
-						   SLAB_RECLAIM_ACCOUNT);
-	if (!blk_crypto_decrypt_work_cache)
-		return -ENOMEM;
-
-	bio_fallback_crypt_ctx_cache = KMEM_CACHE(bio_fallback_crypt_ctx, 0);
-	if (!bio_fallback_crypt_ctx_cache)
-		return -ENOMEM;
-
-	bio_fallback_crypt_ctx_pool =
-		mempool_create_slab_pool(num_prealloc_fallback_crypt_ctxs,
-					 bio_fallback_crypt_ctx_cache);
-	if (!bio_fallback_crypt_ctx_pool)
-		return -ENOMEM;
-
-	return 0;
-}