// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2019 Google LLC
 */

/*
 * Refer to Documentation/block/inline-encryption.rst for detailed explanation.
 */

#define pr_fmt(fmt) "blk-crypto-fallback: " fmt

#include <crypto/skcipher.h>
#include <linux/blk-cgroup.h>
#include <linux/blk-crypto.h>
#include <linux/blkdev.h>
#include <linux/crypto.h>
#include <linux/keyslot-manager.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/scatterlist.h>

#include "blk-crypto-internal.h"

static unsigned int num_prealloc_bounce_pg = 32;
module_param(num_prealloc_bounce_pg, uint, 0);
MODULE_PARM_DESC(num_prealloc_bounce_pg,
		 "Number of preallocated bounce pages for the blk-crypto crypto API fallback");

static unsigned int blk_crypto_num_keyslots = 100;
module_param_named(num_keyslots, blk_crypto_num_keyslots, uint, 0);
MODULE_PARM_DESC(num_keyslots,
		 "Number of keyslots for the blk-crypto crypto API fallback");

static unsigned int num_prealloc_fallback_crypt_ctxs = 128;
module_param(num_prealloc_fallback_crypt_ctxs, uint, 0);
MODULE_PARM_DESC(num_prealloc_fallback_crypt_ctxs,
		 "Number of preallocated bio fallback crypto contexts for blk-crypto to use during crypto API fallback");

struct bio_fallback_crypt_ctx {
	struct bio_crypt_ctx crypt_ctx;
	/*
	 * Copy of the bvec_iter when this bio was submitted.
	 * We only want to en/decrypt the part of the bio described by the
	 * bvec_iter at submission time, because the bio might be split before
	 * being resubmitted.
	 */
	struct bvec_iter crypt_iter;
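	/*
	 * The union members below cover the two phases of a READ bio's life
	 * (descriptive note based on blk_crypto_fallback_bio_prep() and
	 * blk_crypto_fallback_decrypt_endio() below): while the bio is in
	 * flight, bi_private_orig/bi_end_io_orig hold the bio's original
	 * fields, which the fallback temporarily replaces with its own; once
	 * the bio completes and those fields have been restored, the same
	 * space is reused as work/bio to queue the decryption work.
	 */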
	union {
		struct {
			struct work_struct work;
			struct bio *bio;
		};
		struct {
			void *bi_private_orig;
			bio_end_io_t *bi_end_io_orig;
		};
	};
};

static struct kmem_cache *bio_fallback_crypt_ctx_cache;
static mempool_t *bio_fallback_crypt_ctx_pool;

/*
 * Allocating a crypto tfm during I/O can deadlock, so we have to preallocate
 * all of a mode's tfms when that mode starts being used. Since each mode may
 * need all the keyslots at some point, each mode needs its own tfm for each
 * keyslot; thus, a keyslot may contain tfms for multiple modes. However, to
 * match the behavior of real inline encryption hardware (which only supports a
 * single encryption context per keyslot), we only allow one tfm per keyslot to
 * be used at a time - the rest of the unused tfms have their keys cleared.
 */
static DEFINE_MUTEX(tfms_init_lock);
static bool tfms_inited[BLK_ENCRYPTION_MODE_MAX];

static struct blk_crypto_keyslot {
	enum blk_crypto_mode_num crypto_mode;
	struct crypto_skcipher *tfms[BLK_ENCRYPTION_MODE_MAX];
} *blk_crypto_keyslots;

static struct blk_keyslot_manager blk_crypto_ksm;
static struct workqueue_struct *blk_crypto_wq;
static mempool_t *blk_crypto_bounce_page_pool;
static struct bio_set crypto_bio_split;

/*
 * This is the key we set when evicting a keyslot. This *should* be the all 0's
 * key, but AES-XTS rejects that key, so we use some random bytes instead.
 */
static u8 blank_key[BLK_CRYPTO_MAX_KEY_SIZE];

static void blk_crypto_evict_keyslot(unsigned int slot)
{
	struct blk_crypto_keyslot *slotp = &blk_crypto_keyslots[slot];
	enum blk_crypto_mode_num crypto_mode = slotp->crypto_mode;
	int err;

	WARN_ON(slotp->crypto_mode == BLK_ENCRYPTION_MODE_INVALID);

	/* Clear the key in the skcipher */
	err = crypto_skcipher_setkey(slotp->tfms[crypto_mode], blank_key,
				     blk_crypto_modes[crypto_mode].keysize);
	WARN_ON(err);
	slotp->crypto_mode = BLK_ENCRYPTION_MODE_INVALID;
}

static int blk_crypto_keyslot_program(struct blk_keyslot_manager *ksm,
				      const struct blk_crypto_key *key,
				      unsigned int slot)
{
	struct blk_crypto_keyslot *slotp = &blk_crypto_keyslots[slot];
	const enum blk_crypto_mode_num crypto_mode =
						key->crypto_cfg.crypto_mode;
	int err;

	if (crypto_mode != slotp->crypto_mode &&
	    slotp->crypto_mode != BLK_ENCRYPTION_MODE_INVALID)
		blk_crypto_evict_keyslot(slot);

	slotp->crypto_mode = crypto_mode;
	err = crypto_skcipher_setkey(slotp->tfms[crypto_mode], key->raw,
				     key->size);
	if (err) {
		blk_crypto_evict_keyslot(slot);
		return err;
	}
	return 0;
}

static int blk_crypto_keyslot_evict(struct blk_keyslot_manager *ksm,
				    const struct blk_crypto_key *key,
				    unsigned int slot)
{
	blk_crypto_evict_keyslot(slot);
	return 0;
}

/*
 * The crypto API fallback KSM ops - only used for a bio when it specifies a
 * blk_crypto_key that was not supported by the device's inline encryption
 * hardware.
 */
static const struct blk_ksm_ll_ops blk_crypto_ksm_ll_ops = {
	.keyslot_program = blk_crypto_keyslot_program,
	.keyslot_evict = blk_crypto_keyslot_evict,
};
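
/*
 * Rough sketch of how these ops get invoked (illustrative; the exact call
 * chain lives in the keyslot manager code): blk_ksm_get_slot_for_key(), used
 * below, finds or assigns a keyslot for the key and calls ->keyslot_program()
 * when a slot has to be (re)programmed, while blk_ksm_evict_key(), used by
 * blk_crypto_fallback_evict_key(), calls ->keyslot_evict().
 */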

static void blk_crypto_fallback_encrypt_endio(struct bio *enc_bio)
{
	struct bio *src_bio = enc_bio->bi_private;
	int i;

	for (i = 0; i < enc_bio->bi_vcnt; i++)
		mempool_free(enc_bio->bi_io_vec[i].bv_page,
			     blk_crypto_bounce_page_pool);

	src_bio->bi_status = enc_bio->bi_status;

	bio_put(enc_bio);
	bio_endio(src_bio);
}

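/*
 * Note (descriptive, based on the code below and in
 * blk_crypto_fallback_encrypt_bio()): the clone initially points at the same
 * pages as @bio_src; for writes, blk_crypto_fallback_encrypt_bio() then swaps
 * each page for a bounce page holding the ciphertext.
 */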
static struct bio *blk_crypto_clone_bio(struct bio *bio_src)
{
	struct bvec_iter iter;
	struct bio_vec bv;
	struct bio *bio;

	bio = bio_kmalloc(GFP_NOIO, bio_segments(bio_src));
	if (!bio)
		return NULL;
	bio->bi_bdev = bio_src->bi_bdev;
	if (bio_flagged(bio_src, BIO_REMAPPED))
		bio_set_flag(bio, BIO_REMAPPED);
	bio->bi_opf = bio_src->bi_opf;
	bio->bi_ioprio = bio_src->bi_ioprio;
	bio->bi_write_hint = bio_src->bi_write_hint;
	bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector;
	bio->bi_iter.bi_size = bio_src->bi_iter.bi_size;

	bio_for_each_segment(bv, bio_src, iter)
		bio->bi_io_vec[bio->bi_vcnt++] = bv;

	bio_clone_blkg_association(bio, bio_src);
	blkcg_bio_issue_init(bio);

	return bio;
}

static bool blk_crypto_alloc_cipher_req(struct blk_ksm_keyslot *slot,
					struct skcipher_request **ciph_req_ret,
					struct crypto_wait *wait)
{
	struct skcipher_request *ciph_req;
	const struct blk_crypto_keyslot *slotp;
	int keyslot_idx = blk_ksm_get_slot_idx(slot);

	slotp = &blk_crypto_keyslots[keyslot_idx];
	ciph_req = skcipher_request_alloc(slotp->tfms[slotp->crypto_mode],
					  GFP_NOIO);
	if (!ciph_req)
		return false;

	skcipher_request_set_callback(ciph_req,
				      CRYPTO_TFM_REQ_MAY_BACKLOG |
				      CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, wait);
	*ciph_req_ret = ciph_req;

	return true;
}

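/*
 * Descriptive note (rationale inferred from the code): the bounce bio created
 * by blk_crypto_clone_bio() needs one bounce page per segment, so this helper
 * caps the bio at BIO_MAX_VECS segments, splitting off the remainder and
 * resubmitting it so that the first part's bounce pages fit in a single bio.
 */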
static bool blk_crypto_split_bio_if_needed(struct bio **bio_ptr)
{
	struct bio *bio = *bio_ptr;
	unsigned int i = 0;
	unsigned int num_sectors = 0;
	struct bio_vec bv;
	struct bvec_iter iter;

	bio_for_each_segment(bv, bio, iter) {
		num_sectors += bv.bv_len >> SECTOR_SHIFT;
		if (++i == BIO_MAX_VECS)
			break;
	}
	if (num_sectors < bio_sectors(bio)) {
		struct bio *split_bio;

		split_bio = bio_split(bio, num_sectors, GFP_NOIO,
				      &crypto_bio_split);
		if (!split_bio) {
			bio->bi_status = BLK_STS_RESOURCE;
			return false;
		}
		bio_chain(split_bio, bio);
		submit_bio_noacct(bio);
		*bio_ptr = split_bio;
	}

	return true;
}

union blk_crypto_iv {
	__le64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
	u8 bytes[BLK_CRYPTO_MAX_IV_SIZE];
};

static void blk_crypto_dun_to_iv(const u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE],
				 union blk_crypto_iv *iv)
{
	int i;

	for (i = 0; i < BLK_CRYPTO_DUN_ARRAY_SIZE; i++)
		iv->dun[i] = cpu_to_le64(dun[i]);
}
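
/*
 * Illustrative example of the conversion above: with
 * dun = { 0x0102030405060708, 0, ... }, the resulting iv.bytes are
 * 08 07 06 05 04 03 02 01 followed by zeroes, i.e. each 64-bit DUN word is
 * laid out least-significant byte first before being handed to the cipher as
 * the IV.
 */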

/*
 * The crypto API fallback's encryption routine.
 * Allocate a bounce bio for encryption, encrypt the input bio using the crypto
 * API, and replace *bio_ptr with the bounce bio. May split the input bio if
 * it's too large. Returns true on success. Returns false and sets
 * bio->bi_status on error.
 */
static bool blk_crypto_fallback_encrypt_bio(struct bio **bio_ptr)
{
	struct bio *src_bio, *enc_bio;
	struct bio_crypt_ctx *bc;
	struct blk_ksm_keyslot *slot;
	int data_unit_size;
	struct skcipher_request *ciph_req = NULL;
	DECLARE_CRYPTO_WAIT(wait);
	u64 curr_dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
	struct scatterlist src, dst;
	union blk_crypto_iv iv;
	unsigned int i, j;
	bool ret = false;
	blk_status_t blk_st;

	/* Split the bio if it's too big for a single-page bvec */
	if (!blk_crypto_split_bio_if_needed(bio_ptr))
		return false;

	src_bio = *bio_ptr;
	bc = src_bio->bi_crypt_context;
	data_unit_size = bc->bc_key->crypto_cfg.data_unit_size;

	/* Allocate bounce bio for encryption */
	enc_bio = blk_crypto_clone_bio(src_bio);
	if (!enc_bio) {
		src_bio->bi_status = BLK_STS_RESOURCE;
		return false;
	}

	/*
	 * Use the crypto API fallback keyslot manager to get a crypto_skcipher
	 * for the algorithm and key specified for this bio.
	 */
	blk_st = blk_ksm_get_slot_for_key(&blk_crypto_ksm, bc->bc_key, &slot);
	if (blk_st != BLK_STS_OK) {
		src_bio->bi_status = blk_st;
		goto out_put_enc_bio;
	}

	/* and then allocate an skcipher_request for it */
	if (!blk_crypto_alloc_cipher_req(slot, &ciph_req, &wait)) {
		src_bio->bi_status = BLK_STS_RESOURCE;
		goto out_release_keyslot;
	}

	memcpy(curr_dun, bc->bc_dun, sizeof(curr_dun));
	sg_init_table(&src, 1);
	sg_init_table(&dst, 1);

	skcipher_request_set_crypt(ciph_req, &src, &dst, data_unit_size,
				   iv.bytes);

	/* Encrypt each page in the bounce bio */
	for (i = 0; i < enc_bio->bi_vcnt; i++) {
		struct bio_vec *enc_bvec = &enc_bio->bi_io_vec[i];
		struct page *plaintext_page = enc_bvec->bv_page;
		struct page *ciphertext_page =
			mempool_alloc(blk_crypto_bounce_page_pool, GFP_NOIO);

		enc_bvec->bv_page = ciphertext_page;

		if (!ciphertext_page) {
			src_bio->bi_status = BLK_STS_RESOURCE;
			goto out_free_bounce_pages;
		}

		sg_set_page(&src, plaintext_page, data_unit_size,
			    enc_bvec->bv_offset);
		sg_set_page(&dst, ciphertext_page, data_unit_size,
			    enc_bvec->bv_offset);

		/* Encrypt each data unit in this page */
		for (j = 0; j < enc_bvec->bv_len; j += data_unit_size) {
			blk_crypto_dun_to_iv(curr_dun, &iv);
			if (crypto_wait_req(crypto_skcipher_encrypt(ciph_req),
					    &wait)) {
				i++;
				src_bio->bi_status = BLK_STS_IOERR;
				goto out_free_bounce_pages;
			}
			bio_crypt_dun_increment(curr_dun, 1);
			src.offset += data_unit_size;
			dst.offset += data_unit_size;
		}
	}

	enc_bio->bi_private = src_bio;
	enc_bio->bi_end_io = blk_crypto_fallback_encrypt_endio;
	*bio_ptr = enc_bio;
	ret = true;

	enc_bio = NULL;
	goto out_free_ciph_req;

out_free_bounce_pages:
	while (i > 0)
		mempool_free(enc_bio->bi_io_vec[--i].bv_page,
			     blk_crypto_bounce_page_pool);
out_free_ciph_req:
	skcipher_request_free(ciph_req);
out_release_keyslot:
	blk_ksm_put_slot(slot);
out_put_enc_bio:
	if (enc_bio)
		bio_put(enc_bio);

	return ret;
}

/*
 * The crypto API fallback's main decryption routine.
 * Decrypts the input bio in place, and calls bio_endio on the bio.
 */
static void blk_crypto_fallback_decrypt_bio(struct work_struct *work)
{
	struct bio_fallback_crypt_ctx *f_ctx =
		container_of(work, struct bio_fallback_crypt_ctx, work);
	struct bio *bio = f_ctx->bio;
	struct bio_crypt_ctx *bc = &f_ctx->crypt_ctx;
	struct blk_ksm_keyslot *slot;
	struct skcipher_request *ciph_req = NULL;
	DECLARE_CRYPTO_WAIT(wait);
	u64 curr_dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
	union blk_crypto_iv iv;
	struct scatterlist sg;
	struct bio_vec bv;
	struct bvec_iter iter;
	const int data_unit_size = bc->bc_key->crypto_cfg.data_unit_size;
	unsigned int i;
	blk_status_t blk_st;

	/*
	 * Use the crypto API fallback keyslot manager to get a crypto_skcipher
	 * for the algorithm and key specified for this bio.
	 */
	blk_st = blk_ksm_get_slot_for_key(&blk_crypto_ksm, bc->bc_key, &slot);
	if (blk_st != BLK_STS_OK) {
		bio->bi_status = blk_st;
		goto out_no_keyslot;
	}

	/* and then allocate an skcipher_request for it */
	if (!blk_crypto_alloc_cipher_req(slot, &ciph_req, &wait)) {
		bio->bi_status = BLK_STS_RESOURCE;
		goto out;
	}

	memcpy(curr_dun, bc->bc_dun, sizeof(curr_dun));
	sg_init_table(&sg, 1);
	skcipher_request_set_crypt(ciph_req, &sg, &sg, data_unit_size,
				   iv.bytes);

	/* Decrypt each segment in the bio */
	__bio_for_each_segment(bv, bio, iter, f_ctx->crypt_iter) {
		struct page *page = bv.bv_page;

		sg_set_page(&sg, page, data_unit_size, bv.bv_offset);

		/* Decrypt each data unit in the segment */
		for (i = 0; i < bv.bv_len; i += data_unit_size) {
			blk_crypto_dun_to_iv(curr_dun, &iv);
			if (crypto_wait_req(crypto_skcipher_decrypt(ciph_req),
					    &wait)) {
				bio->bi_status = BLK_STS_IOERR;
				goto out;
			}
			bio_crypt_dun_increment(curr_dun, 1);
			sg.offset += data_unit_size;
		}
	}

out:
	skcipher_request_free(ciph_req);
	blk_ksm_put_slot(slot);
out_no_keyslot:
	mempool_free(f_ctx, bio_fallback_crypt_ctx_pool);
	bio_endio(bio);
}

/**
 * blk_crypto_fallback_decrypt_endio - queue bio for fallback decryption
 *
 * @bio: the bio to queue
 *
 * Restore bi_private and bi_end_io, and queue the bio for decryption on a
 * workqueue, since this function may be called from an atomic context.
 */
static void blk_crypto_fallback_decrypt_endio(struct bio *bio)
{
	struct bio_fallback_crypt_ctx *f_ctx = bio->bi_private;

	bio->bi_private = f_ctx->bi_private_orig;
	bio->bi_end_io = f_ctx->bi_end_io_orig;

	/* If there was an IO error, don't queue for decrypt. */
	if (bio->bi_status) {
		mempool_free(f_ctx, bio_fallback_crypt_ctx_pool);
		bio_endio(bio);
		return;
	}

	INIT_WORK(&f_ctx->work, blk_crypto_fallback_decrypt_bio);
	f_ctx->bio = bio;
	queue_work(blk_crypto_wq, &f_ctx->work);
}

/**
 * blk_crypto_fallback_bio_prep - Prepare a bio to use fallback en/decryption
 *
 * @bio_ptr: pointer to the bio to prepare
 *
 * If the bio is doing a WRITE operation, this splits the bio into two parts if
 * it's too big (see blk_crypto_split_bio_if_needed()). It then allocates a
 * bounce bio for the first part, encrypts it, and updates *bio_ptr to point to
 * the bounce bio.
 *
 * For a READ operation, we mark the bio for decryption by using bi_private and
 * bi_end_io.
 *
 * In either case, this function makes the bio look like a regular bio (i.e. as
 * if no encryption context was ever specified) for the purposes of the rest of
 * the stack except for blk-integrity (blk-integrity and blk-crypto are not
 * currently supported together).
 *
 * Return: true on success. Sets bio->bi_status and returns false on error.
 */
bool blk_crypto_fallback_bio_prep(struct bio **bio_ptr)
{
	struct bio *bio = *bio_ptr;
	struct bio_crypt_ctx *bc = bio->bi_crypt_context;
	struct bio_fallback_crypt_ctx *f_ctx;

	if (WARN_ON_ONCE(!tfms_inited[bc->bc_key->crypto_cfg.crypto_mode])) {
		/* User didn't call blk_crypto_start_using_key() first */
		bio->bi_status = BLK_STS_IOERR;
		return false;
	}

	if (!blk_ksm_crypto_cfg_supported(&blk_crypto_ksm,
					  &bc->bc_key->crypto_cfg)) {
		bio->bi_status = BLK_STS_NOTSUPP;
		return false;
	}

	if (bio_data_dir(bio) == WRITE)
		return blk_crypto_fallback_encrypt_bio(bio_ptr);

	/*
	 * bio READ case: Set up an f_ctx in the bio's bi_private and set the
	 * bi_end_io appropriately to trigger decryption when the bio is ended.
	 */
	f_ctx = mempool_alloc(bio_fallback_crypt_ctx_pool, GFP_NOIO);
	f_ctx->crypt_ctx = *bc;
	f_ctx->crypt_iter = bio->bi_iter;
	f_ctx->bi_private_orig = bio->bi_private;
	f_ctx->bi_end_io_orig = bio->bi_end_io;
	bio->bi_private = (void *)f_ctx;
	bio->bi_end_io = blk_crypto_fallback_decrypt_endio;
	bio_crypt_free_ctx(bio);

	return true;
}
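
/*
 * Rough sketch of how the function above is reached (illustrative only; the
 * real call site lives in blk-crypto.c and uses its own variable names): the
 * generic bio prep path first checks whether the device's keyslot manager can
 * handle the bio's crypto config, and only falls back here when it can't:
 *
 *	if (blk_ksm_crypto_cfg_supported(bdev_ksm, &bc->bc_key->crypto_cfg))
 *		return true;	/- inline encryption hardware handles it -/
 *	return blk_crypto_fallback_bio_prep(bio_ptr);
 *
 * "bdev_ksm" above is a stand-in for the request queue's keyslot manager.
 */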

int blk_crypto_fallback_evict_key(const struct blk_crypto_key *key)
{
	return blk_ksm_evict_key(&blk_crypto_ksm, key);
}

static bool blk_crypto_fallback_inited;
static int blk_crypto_fallback_init(void)
{
	int i;
	int err;

	if (blk_crypto_fallback_inited)
		return 0;

	prandom_bytes(blank_key, BLK_CRYPTO_MAX_KEY_SIZE);

	err = bioset_init(&crypto_bio_split, 64, 0, 0);
	if (err)
		goto out;

	err = blk_ksm_init(&blk_crypto_ksm, blk_crypto_num_keyslots);
	if (err)
		goto fail_free_bioset;
	err = -ENOMEM;

	blk_crypto_ksm.ksm_ll_ops = blk_crypto_ksm_ll_ops;
	blk_crypto_ksm.max_dun_bytes_supported = BLK_CRYPTO_MAX_IV_SIZE;

	/* All blk-crypto modes have a crypto API fallback. */
	for (i = 0; i < BLK_ENCRYPTION_MODE_MAX; i++)
		blk_crypto_ksm.crypto_modes_supported[i] = 0xFFFFFFFF;
	blk_crypto_ksm.crypto_modes_supported[BLK_ENCRYPTION_MODE_INVALID] = 0;

	blk_crypto_wq = alloc_workqueue("blk_crypto_wq",
					WQ_UNBOUND | WQ_HIGHPRI |
					WQ_MEM_RECLAIM, num_online_cpus());
	if (!blk_crypto_wq)
		goto fail_free_ksm;

	blk_crypto_keyslots = kcalloc(blk_crypto_num_keyslots,
				      sizeof(blk_crypto_keyslots[0]),
				      GFP_KERNEL);
	if (!blk_crypto_keyslots)
		goto fail_free_wq;

	blk_crypto_bounce_page_pool =
		mempool_create_page_pool(num_prealloc_bounce_pg, 0);
	if (!blk_crypto_bounce_page_pool)
		goto fail_free_keyslots;

	bio_fallback_crypt_ctx_cache = KMEM_CACHE(bio_fallback_crypt_ctx, 0);
	if (!bio_fallback_crypt_ctx_cache)
		goto fail_free_bounce_page_pool;

	bio_fallback_crypt_ctx_pool =
		mempool_create_slab_pool(num_prealloc_fallback_crypt_ctxs,
					 bio_fallback_crypt_ctx_cache);
	if (!bio_fallback_crypt_ctx_pool)
		goto fail_free_crypt_ctx_cache;

	blk_crypto_fallback_inited = true;

	return 0;
fail_free_crypt_ctx_cache:
	kmem_cache_destroy(bio_fallback_crypt_ctx_cache);
fail_free_bounce_page_pool:
	mempool_destroy(blk_crypto_bounce_page_pool);
fail_free_keyslots:
	kfree(blk_crypto_keyslots);
fail_free_wq:
	destroy_workqueue(blk_crypto_wq);
fail_free_ksm:
	blk_ksm_destroy(&blk_crypto_ksm);
fail_free_bioset:
	bioset_exit(&crypto_bio_split);
out:
	return err;
}

/*
 * Prepare blk-crypto-fallback for the specified crypto mode.
 * Returns -ENOPKG if the needed crypto API support is missing.
 */
int blk_crypto_fallback_start_using_mode(enum blk_crypto_mode_num mode_num)
{
	const char *cipher_str = blk_crypto_modes[mode_num].cipher_str;
	struct blk_crypto_keyslot *slotp;
	unsigned int i;
	int err = 0;

	/*
	 * Fast path
	 * Ensure that updates to blk_crypto_keyslots[i].tfms[mode_num]
	 * for each i are visible before we try to access them.
	 */
	if (likely(smp_load_acquire(&tfms_inited[mode_num])))
		return 0;

	mutex_lock(&tfms_init_lock);
	if (tfms_inited[mode_num])
		goto out;

	err = blk_crypto_fallback_init();
	if (err)
		goto out;

	for (i = 0; i < blk_crypto_num_keyslots; i++) {
		slotp = &blk_crypto_keyslots[i];
		slotp->tfms[mode_num] = crypto_alloc_skcipher(cipher_str, 0, 0);
		if (IS_ERR(slotp->tfms[mode_num])) {
			err = PTR_ERR(slotp->tfms[mode_num]);
			if (err == -ENOENT) {
				pr_warn_once("Missing crypto API support for \"%s\"\n",
					     cipher_str);
				err = -ENOPKG;
			}
			slotp->tfms[mode_num] = NULL;
			goto out_free_tfms;
		}

		crypto_skcipher_set_flags(slotp->tfms[mode_num],
					  CRYPTO_TFM_REQ_FORBID_WEAK_KEYS);
	}

	/*
	 * Ensure that updates to blk_crypto_keyslots[i].tfms[mode_num]
	 * for each i are visible before we set tfms_inited[mode_num].
	 */
	smp_store_release(&tfms_inited[mode_num], true);
	goto out;

out_free_tfms:
	for (i = 0; i < blk_crypto_num_keyslots; i++) {
		slotp = &blk_crypto_keyslots[i];
		crypto_free_skcipher(slotp->tfms[mode_num]);
		slotp->tfms[mode_num] = NULL;
	}
out:
	mutex_unlock(&tfms_init_lock);
	return err;
}
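
/*
 * Illustrative note (a sketch, not verbatim kernel code): an upper layer such
 * as fscrypt calls blk_crypto_start_using_key() for each key before issuing
 * I/O with it; when the request queue's inline encryption hardware can't
 * handle the key's configuration, that call is expected to fall through to
 * blk_crypto_fallback_start_using_mode() above, so the needed skcipher tfms
 * are allocated up front rather than during I/O.
 */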