// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2019 Google LLC
 */

/*
 * Refer to Documentation/block/inline-encryption.rst for detailed explanation.
 */

#define pr_fmt(fmt) "blk-crypto-fallback: " fmt

#include <crypto/skcipher.h>
#include <linux/blk-cgroup.h>
#include <linux/blk-crypto.h>
#include <linux/crypto.h>
#include <linux/keyslot-manager.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/random.h>

#include "blk-crypto-internal.h"

static unsigned int num_prealloc_bounce_pg = 32;
module_param(num_prealloc_bounce_pg, uint, 0);
MODULE_PARM_DESC(num_prealloc_bounce_pg,
		 "Number of preallocated bounce pages for the blk-crypto crypto API fallback");

static unsigned int blk_crypto_num_keyslots = 100;
module_param_named(num_keyslots, blk_crypto_num_keyslots, uint, 0);
MODULE_PARM_DESC(num_keyslots,
		 "Number of keyslots for the blk-crypto crypto API fallback");

static unsigned int num_prealloc_fallback_crypt_ctxs = 128;
module_param(num_prealloc_fallback_crypt_ctxs, uint, 0);
MODULE_PARM_DESC(num_prealloc_fallback_crypt_ctxs,
		 "Number of preallocated bio fallback crypto contexts for blk-crypto to use during crypto API fallback");

struct bio_fallback_crypt_ctx {
	struct bio_crypt_ctx crypt_ctx;
	/*
	 * Copy of the bvec_iter when this bio was submitted.
	 * We only want to en/decrypt the part of the bio as described by the
	 * bvec_iter upon submission because bio might be split before being
	 * resubmitted
	 */
	struct bvec_iter crypt_iter;
	u64 fallback_dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
};

/* The following few vars are only used during the crypto API fallback */
static struct kmem_cache *bio_fallback_crypt_ctx_cache;
static mempool_t *bio_fallback_crypt_ctx_pool;

/*
 * Allocating a crypto tfm during I/O can deadlock, so we have to preallocate
 * all of a mode's tfms when that mode starts being used. Since each mode may
 * need all the keyslots at some point, each mode needs its own tfm for each
 * keyslot; thus, a keyslot may contain tfms for multiple modes. However, to
 * match the behavior of real inline encryption hardware (which only supports a
 * single encryption context per keyslot), we only allow one tfm per keyslot to
 * be used at a time - the rest of the unused tfms have their keys cleared.
 */
static DEFINE_MUTEX(tfms_init_lock);
static bool tfms_inited[BLK_ENCRYPTION_MODE_MAX];

struct blk_crypto_decrypt_work {
	struct work_struct work;
	struct bio *bio;
};

static struct blk_crypto_keyslot {
	struct crypto_skcipher *tfm;
	enum blk_crypto_mode_num crypto_mode;
	struct crypto_skcipher *tfms[BLK_ENCRYPTION_MODE_MAX];
} *blk_crypto_keyslots;

/* The following few vars are only used during the crypto API fallback */
static struct keyslot_manager *blk_crypto_ksm;
static struct workqueue_struct *blk_crypto_wq;
static mempool_t *blk_crypto_bounce_page_pool;
static struct kmem_cache *blk_crypto_decrypt_work_cache;

bool bio_crypt_fallback_crypted(const struct bio_crypt_ctx *bc)
{
	return bc && bc->bc_ksm == blk_crypto_ksm;
}

/*
 * This is the key we set when evicting a keyslot. This *should* be the all 0's
 * key, but AES-XTS rejects that key, so we use some random bytes instead.
 */
static u8 blank_key[BLK_CRYPTO_MAX_KEY_SIZE];

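/*
 * Evict a keyslot of the fallback keyslot manager: wipe the key from the
 * slot's in-use tfm by programming the random "blank" key over it, then mark
 * the slot as not holding any crypto mode.
 */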
static void blk_crypto_evict_keyslot(unsigned int slot)
{
	struct blk_crypto_keyslot *slotp = &blk_crypto_keyslots[slot];
	enum blk_crypto_mode_num crypto_mode = slotp->crypto_mode;
	int err;

	WARN_ON(slotp->crypto_mode == BLK_ENCRYPTION_MODE_INVALID);

	/* Clear the key in the skcipher */
	err = crypto_skcipher_setkey(slotp->tfms[crypto_mode], blank_key,
				     blk_crypto_modes[crypto_mode].keysize);
	WARN_ON(err);
	slotp->crypto_mode = BLK_ENCRYPTION_MODE_INVALID;
}

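/*
 * Program @key into @slot by setting it on the slot's preallocated tfm for the
 * key's crypto mode.  If the slot currently holds a key of a different mode,
 * that key is evicted first.
 */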
static int blk_crypto_keyslot_program(struct keyslot_manager *ksm,
				      const struct blk_crypto_key *key,
				      unsigned int slot)
{
	struct blk_crypto_keyslot *slotp = &blk_crypto_keyslots[slot];
	const enum blk_crypto_mode_num crypto_mode = key->crypto_mode;
	int err;

	if (crypto_mode != slotp->crypto_mode &&
	    slotp->crypto_mode != BLK_ENCRYPTION_MODE_INVALID) {
		blk_crypto_evict_keyslot(slot);
	}

	if (!slotp->tfms[crypto_mode])
		return -ENOMEM;
	slotp->crypto_mode = crypto_mode;
	err = crypto_skcipher_setkey(slotp->tfms[crypto_mode], key->raw,
				     key->size);
	if (err) {
		blk_crypto_evict_keyslot(slot);
		return err;
	}
	return 0;
}

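/* KSM callback that evicts whatever key is currently programmed into @slot. */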
static int blk_crypto_keyslot_evict(struct keyslot_manager *ksm,
				    const struct blk_crypto_key *key,
				    unsigned int slot)
{
	blk_crypto_evict_keyslot(slot);
	return 0;
}

/*
 * The crypto API fallback KSM ops - only used for a bio when it specifies a
 * blk_crypto_mode for which we failed to get a keyslot in the device's inline
 * encryption hardware (which probably means the device doesn't have inline
 * encryption hardware that supports that crypto mode).
 */
static const struct keyslot_mgmt_ll_ops blk_crypto_ksm_ll_ops = {
	.keyslot_program	= blk_crypto_keyslot_program,
	.keyslot_evict		= blk_crypto_keyslot_evict,
};

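/*
 * Endio for the bounce bio used for encryption: free the bounce pages back to
 * their mempool, propagate the I/O status to the original bio, and complete it.
 */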
static void blk_crypto_encrypt_endio(struct bio *enc_bio)
{
	struct bio *src_bio = enc_bio->bi_private;
	int i;

	for (i = 0; i < enc_bio->bi_vcnt; i++)
		mempool_free(enc_bio->bi_io_vec[i].bv_page,
			     blk_crypto_bounce_page_pool);

	src_bio->bi_status = enc_bio->bi_status;

	bio_put(enc_bio);
	bio_endio(src_bio);
}

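/*
 * Clone @bio_src into a new bio that shares its pages, copying over the fields
 * the fallback needs (disk, op flags, iter, cgroup association, integrity
 * data).  Returns NULL on allocation failure.
 */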
static struct bio *blk_crypto_clone_bio(struct bio *bio_src)
{
	struct bvec_iter iter;
	struct bio_vec bv;
	struct bio *bio;

	bio = bio_alloc_bioset(GFP_NOIO, bio_segments(bio_src), NULL);
	if (!bio)
		return NULL;
	bio->bi_disk		= bio_src->bi_disk;
	bio->bi_opf		= bio_src->bi_opf;
	bio->bi_ioprio		= bio_src->bi_ioprio;
	bio->bi_write_hint	= bio_src->bi_write_hint;
	bio->bi_iter.bi_sector	= bio_src->bi_iter.bi_sector;
	bio->bi_iter.bi_size	= bio_src->bi_iter.bi_size;

	bio_for_each_segment(bv, bio_src, iter)
		bio->bi_io_vec[bio->bi_vcnt++] = bv;

	if (bio_integrity(bio_src) &&
	    bio_integrity_clone(bio, bio_src, GFP_NOIO) < 0) {
		bio_put(bio);
		return NULL;
	}

	bio_clone_blkg_association(bio, bio_src);
	blkcg_bio_issue_init(bio);

	bio_clone_skip_dm_default_key(bio, bio_src);

	return bio;
}

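/*
 * Allocate an skcipher_request on the tfm of the keyslot held by @src_bio and
 * set it up to complete @wait.  On failure, sets the bio's status to
 * BLK_STS_RESOURCE and returns -ENOMEM.
 */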
static int blk_crypto_alloc_cipher_req(struct bio *src_bio,
				       struct skcipher_request **ciph_req_ret,
				       struct crypto_wait *wait)
{
	struct skcipher_request *ciph_req;
	const struct blk_crypto_keyslot *slotp;

	slotp = &blk_crypto_keyslots[src_bio->bi_crypt_context->bc_keyslot];
	ciph_req = skcipher_request_alloc(slotp->tfms[slotp->crypto_mode],
					  GFP_NOIO);
	if (!ciph_req) {
		src_bio->bi_status = BLK_STS_RESOURCE;
		return -ENOMEM;
	}

	skcipher_request_set_callback(ciph_req,
				      CRYPTO_TFM_REQ_MAY_BACKLOG |
				      CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, wait);
	*ciph_req_ret = ciph_req;
	return 0;
}

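/*
 * The encryption fallback allocates one bounce page per segment, so a bio can
 * cover at most BIO_MAX_PAGES segments.  If *bio_ptr is larger than that,
 * split off the front portion, resubmit the remainder, and make *bio_ptr point
 * at the split-off bio.
 */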
static int blk_crypto_split_bio_if_needed(struct bio **bio_ptr)
{
	struct bio *bio = *bio_ptr;
	unsigned int i = 0;
	unsigned int num_sectors = 0;
	struct bio_vec bv;
	struct bvec_iter iter;

	bio_for_each_segment(bv, bio, iter) {
		num_sectors += bv.bv_len >> SECTOR_SHIFT;
		if (++i == BIO_MAX_PAGES)
			break;
	}
	if (num_sectors < bio_sectors(bio)) {
		struct bio *split_bio;

		split_bio = bio_split(bio, num_sectors, GFP_NOIO, NULL);
		if (!split_bio) {
			bio->bi_status = BLK_STS_RESOURCE;
			return -ENOMEM;
		}
		bio_chain(split_bio, bio);
		generic_make_request(bio);
		*bio_ptr = split_bio;
	}
	return 0;
}

union blk_crypto_iv {
	__le64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
	u8 bytes[BLK_CRYPTO_MAX_IV_SIZE];
};

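/* Convert a data unit number (DUN) into a little-endian IV. */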
static void blk_crypto_dun_to_iv(const u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE],
				 union blk_crypto_iv *iv)
{
	int i;

	for (i = 0; i < BLK_CRYPTO_DUN_ARRAY_SIZE; i++)
		iv->dun[i] = cpu_to_le64(dun[i]);
}

/*
 * The crypto API fallback's encryption routine.
 * Allocate a bounce bio for encryption, encrypt the input bio using crypto API,
 * and replace *bio_ptr with the bounce bio. May split input bio if it's too
 * large.
 */
static int blk_crypto_encrypt_bio(struct bio **bio_ptr)
{
	struct bio *src_bio;
	struct skcipher_request *ciph_req = NULL;
	DECLARE_CRYPTO_WAIT(wait);
	u64 curr_dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
	union blk_crypto_iv iv;
	struct scatterlist src, dst;
	struct bio *enc_bio;
	unsigned int i, j;
	int data_unit_size;
	struct bio_crypt_ctx *bc;
	int err = 0;

	/* Split the bio if it's too big for single page bvec */
	err = blk_crypto_split_bio_if_needed(bio_ptr);
	if (err)
		return err;

	src_bio = *bio_ptr;
	bc = src_bio->bi_crypt_context;
	data_unit_size = bc->bc_key->data_unit_size;

	/* Allocate bounce bio for encryption */
	enc_bio = blk_crypto_clone_bio(src_bio);
	if (!enc_bio) {
		src_bio->bi_status = BLK_STS_RESOURCE;
		return -ENOMEM;
	}

	/*
	 * Use the crypto API fallback keyslot manager to get a crypto_skcipher
	 * for the algorithm and key specified for this bio.
	 */
	err = bio_crypt_ctx_acquire_keyslot(bc, blk_crypto_ksm);
	if (err) {
		src_bio->bi_status = BLK_STS_IOERR;
		goto out_put_enc_bio;
	}

	/* and then allocate an skcipher_request for it */
	err = blk_crypto_alloc_cipher_req(src_bio, &ciph_req, &wait);
	if (err)
		goto out_release_keyslot;

	memcpy(curr_dun, bc->bc_dun, sizeof(curr_dun));
	sg_init_table(&src, 1);
	sg_init_table(&dst, 1);

	skcipher_request_set_crypt(ciph_req, &src, &dst, data_unit_size,
				   iv.bytes);

	/* Encrypt each page in the bounce bio */
	for (i = 0; i < enc_bio->bi_vcnt; i++) {
		struct bio_vec *enc_bvec = &enc_bio->bi_io_vec[i];
		struct page *plaintext_page = enc_bvec->bv_page;
		struct page *ciphertext_page =
			mempool_alloc(blk_crypto_bounce_page_pool, GFP_NOIO);

		enc_bvec->bv_page = ciphertext_page;

		if (!ciphertext_page) {
			src_bio->bi_status = BLK_STS_RESOURCE;
			err = -ENOMEM;
			goto out_free_bounce_pages;
		}

		sg_set_page(&src, plaintext_page, data_unit_size,
			    enc_bvec->bv_offset);
		sg_set_page(&dst, ciphertext_page, data_unit_size,
			    enc_bvec->bv_offset);

		/* Encrypt each data unit in this page */
		for (j = 0; j < enc_bvec->bv_len; j += data_unit_size) {
			blk_crypto_dun_to_iv(curr_dun, &iv);
			err = crypto_wait_req(crypto_skcipher_encrypt(ciph_req),
					      &wait);
			if (err) {
				i++;
				src_bio->bi_status = BLK_STS_RESOURCE;
				goto out_free_bounce_pages;
			}
			bio_crypt_dun_increment(curr_dun, 1);
			src.offset += data_unit_size;
			dst.offset += data_unit_size;
		}
	}

	enc_bio->bi_private = src_bio;
	enc_bio->bi_end_io = blk_crypto_encrypt_endio;
	*bio_ptr = enc_bio;

	enc_bio = NULL;
	err = 0;
	goto out_free_ciph_req;

out_free_bounce_pages:
	while (i > 0)
		mempool_free(enc_bio->bi_io_vec[--i].bv_page,
			     blk_crypto_bounce_page_pool);
out_free_ciph_req:
	skcipher_request_free(ciph_req);
out_release_keyslot:
	bio_crypt_ctx_release_keyslot(bc);
out_put_enc_bio:
	if (enc_bio)
		bio_put(enc_bio);

	return err;
}

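/*
 * Return the bio_fallback_crypt_ctx containing the bio's crypt context to its
 * mempool and clear the bio's reference to it.
 */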
static void blk_crypto_free_fallback_crypt_ctx(struct bio *bio)
{
	mempool_free(container_of(bio->bi_crypt_context,
				  struct bio_fallback_crypt_ctx,
				  crypt_ctx),
		     bio_fallback_crypt_ctx_pool);
	bio->bi_crypt_context = NULL;
}

/*
 * The crypto API fallback's main decryption routine.
 * Decrypts input bio in place.
 */
static void blk_crypto_decrypt_bio(struct work_struct *work)
{
	struct blk_crypto_decrypt_work *decrypt_work =
		container_of(work, struct blk_crypto_decrypt_work, work);
	struct bio *bio = decrypt_work->bio;
	struct skcipher_request *ciph_req = NULL;
	DECLARE_CRYPTO_WAIT(wait);
	struct bio_vec bv;
	struct bvec_iter iter;
	u64 curr_dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
	union blk_crypto_iv iv;
	struct scatterlist sg;
	struct bio_crypt_ctx *bc = bio->bi_crypt_context;
	struct bio_fallback_crypt_ctx *f_ctx =
		container_of(bc, struct bio_fallback_crypt_ctx, crypt_ctx);
	const int data_unit_size = bc->bc_key->data_unit_size;
	unsigned int i;
	int err;

	/*
	 * Use the crypto API fallback keyslot manager to get a crypto_skcipher
	 * for the algorithm and key specified for this bio.
	 */
	if (bio_crypt_ctx_acquire_keyslot(bc, blk_crypto_ksm)) {
		bio->bi_status = BLK_STS_RESOURCE;
		goto out_no_keyslot;
	}

	/* and then allocate an skcipher_request for it */
	err = blk_crypto_alloc_cipher_req(bio, &ciph_req, &wait);
	if (err)
		goto out;

	memcpy(curr_dun, f_ctx->fallback_dun, sizeof(curr_dun));
	sg_init_table(&sg, 1);
	skcipher_request_set_crypt(ciph_req, &sg, &sg, data_unit_size,
				   iv.bytes);

	/* Decrypt each segment in the bio */
	__bio_for_each_segment(bv, bio, iter, f_ctx->crypt_iter) {
		struct page *page = bv.bv_page;

		sg_set_page(&sg, page, data_unit_size, bv.bv_offset);

		/* Decrypt each data unit in the segment */
		for (i = 0; i < bv.bv_len; i += data_unit_size) {
			blk_crypto_dun_to_iv(curr_dun, &iv);
			if (crypto_wait_req(crypto_skcipher_decrypt(ciph_req),
					    &wait)) {
				bio->bi_status = BLK_STS_IOERR;
				goto out;
			}
			bio_crypt_dun_increment(curr_dun, 1);
			sg.offset += data_unit_size;
		}
	}

out:
	skcipher_request_free(ciph_req);
	bio_crypt_ctx_release_keyslot(bc);
out_no_keyslot:
	kmem_cache_free(blk_crypto_decrypt_work_cache, decrypt_work);
	blk_crypto_free_fallback_crypt_ctx(bio);
	bio_endio(bio);
}

/*
 * Queue bio for decryption.
 * Returns true iff bio was queued for decryption.
 */
bool blk_crypto_queue_decrypt_bio(struct bio *bio)
{
	struct blk_crypto_decrypt_work *decrypt_work;

	/* If there was an IO error, don't queue for decrypt. */
	if (bio->bi_status)
		goto out;

	decrypt_work = kmem_cache_zalloc(blk_crypto_decrypt_work_cache,
					 GFP_ATOMIC);
	if (!decrypt_work) {
		bio->bi_status = BLK_STS_RESOURCE;
		goto out;
	}

	INIT_WORK(&decrypt_work->work, blk_crypto_decrypt_bio);
	decrypt_work->bio = bio;
	queue_work(blk_crypto_wq, &decrypt_work->work);

	return true;
out:
	blk_crypto_free_fallback_crypt_ctx(bio);
	return false;
}

/*
 * Prepare blk-crypto-fallback for the specified crypto mode.
 * Returns -ENOPKG if the needed crypto API support is missing.
 */
int blk_crypto_fallback_start_using_mode(enum blk_crypto_mode_num mode_num)
{
	const char *cipher_str = blk_crypto_modes[mode_num].cipher_str;
	struct blk_crypto_keyslot *slotp;
	unsigned int i;
	int err = 0;

	/*
	 * Fast path
	 * Ensure that updates to blk_crypto_keyslots[i].tfms[mode_num]
	 * for each i are visible before we try to access them.
	 */
	if (likely(smp_load_acquire(&tfms_inited[mode_num])))
		return 0;

	mutex_lock(&tfms_init_lock);
	if (likely(tfms_inited[mode_num]))
		goto out;

	for (i = 0; i < blk_crypto_num_keyslots; i++) {
		slotp = &blk_crypto_keyslots[i];
		slotp->tfms[mode_num] = crypto_alloc_skcipher(cipher_str, 0, 0);
		if (IS_ERR(slotp->tfms[mode_num])) {
			err = PTR_ERR(slotp->tfms[mode_num]);
			if (err == -ENOENT) {
				pr_warn_once("Missing crypto API support for \"%s\"\n",
					     cipher_str);
				err = -ENOPKG;
			}
			slotp->tfms[mode_num] = NULL;
			goto out_free_tfms;
		}

		crypto_skcipher_set_flags(slotp->tfms[mode_num],
					  CRYPTO_TFM_REQ_FORBID_WEAK_KEYS);
	}

	/*
	 * Ensure that updates to blk_crypto_keyslots[i].tfms[mode_num]
	 * for each i are visible before we set tfms_inited[mode_num].
	 */
	smp_store_release(&tfms_inited[mode_num], true);
	goto out;

out_free_tfms:
	for (i = 0; i < blk_crypto_num_keyslots; i++) {
		slotp = &blk_crypto_keyslots[i];
		crypto_free_skcipher(slotp->tfms[mode_num]);
		slotp->tfms[mode_num] = NULL;
	}
out:
	mutex_unlock(&tfms_init_lock);
	return err;
}

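/* Evict @key from the fallback's keyslot manager. */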
int blk_crypto_fallback_evict_key(const struct blk_crypto_key *key)
{
	return keyslot_manager_evict_key(blk_crypto_ksm, key);
}

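/*
 * Entry point for bios that need the crypto API fallback.  Writes are
 * encrypted into a bounce bio before submission; reads have their crypt
 * context swapped for one embedded in a bio_fallback_crypt_ctx so they can be
 * decrypted after completion.  Hardware-wrapped keys are not supported here.
 */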
int blk_crypto_fallback_submit_bio(struct bio **bio_ptr)
{
	struct bio *bio = *bio_ptr;
	struct bio_crypt_ctx *bc = bio->bi_crypt_context;
	struct bio_fallback_crypt_ctx *f_ctx;

	if (bc->bc_key->is_hw_wrapped) {
		pr_warn_once("HW wrapped key cannot be used with fallback.\n");
		bio->bi_status = BLK_STS_NOTSUPP;
		return -EOPNOTSUPP;
	}

	if (!tfms_inited[bc->bc_key->crypto_mode]) {
		bio->bi_status = BLK_STS_IOERR;
		return -EIO;
	}

	if (bio_data_dir(bio) == WRITE)
		return blk_crypto_encrypt_bio(bio_ptr);

	/*
	 * Mark bio as fallback crypted and replace the bio_crypt_ctx with
	 * another one contained in a bio_fallback_crypt_ctx, so that the
	 * fallback has space to store the info it needs for decryption.
	 */
	bc->bc_ksm = blk_crypto_ksm;
	f_ctx = mempool_alloc(bio_fallback_crypt_ctx_pool, GFP_NOIO);
	f_ctx->crypt_ctx = *bc;
	memcpy(f_ctx->fallback_dun, bc->bc_dun, sizeof(f_ctx->fallback_dun));
	f_ctx->crypt_iter = bio->bi_iter;

	bio_crypt_free_ctx(bio);
	bio->bi_crypt_context = &f_ctx->crypt_ctx;

	return 0;
}

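/*
 * Boot-time setup for the fallback: create the keyslot manager, the decryption
 * workqueue, the keyslot array, and the mempools for bounce pages and fallback
 * crypt contexts.  The per-mode tfms are allocated lazily, when a mode is
 * first used.
 */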
int __init blk_crypto_fallback_init(void)
{
	int i;
	unsigned int crypto_mode_supported[BLK_ENCRYPTION_MODE_MAX];

	prandom_bytes(blank_key, BLK_CRYPTO_MAX_KEY_SIZE);

	/* All blk-crypto modes have a crypto API fallback. */
	for (i = 0; i < BLK_ENCRYPTION_MODE_MAX; i++)
		crypto_mode_supported[i] = 0xFFFFFFFF;
	crypto_mode_supported[BLK_ENCRYPTION_MODE_INVALID] = 0;

	blk_crypto_ksm = keyslot_manager_create(NULL, blk_crypto_num_keyslots,
						&blk_crypto_ksm_ll_ops,
						crypto_mode_supported, NULL);
	if (!blk_crypto_ksm)
		return -ENOMEM;

	blk_crypto_wq = alloc_workqueue("blk_crypto_wq",
					WQ_UNBOUND | WQ_HIGHPRI |
					WQ_MEM_RECLAIM, num_online_cpus());
	if (!blk_crypto_wq)
		return -ENOMEM;

	blk_crypto_keyslots = kcalloc(blk_crypto_num_keyslots,
				      sizeof(blk_crypto_keyslots[0]),
				      GFP_KERNEL);
	if (!blk_crypto_keyslots)
		return -ENOMEM;

	blk_crypto_bounce_page_pool =
		mempool_create_page_pool(num_prealloc_bounce_pg, 0);
	if (!blk_crypto_bounce_page_pool)
		return -ENOMEM;

	blk_crypto_decrypt_work_cache = KMEM_CACHE(blk_crypto_decrypt_work,
						   SLAB_RECLAIM_ACCOUNT);
	if (!blk_crypto_decrypt_work_cache)
		return -ENOMEM;

	bio_fallback_crypt_ctx_cache = KMEM_CACHE(bio_fallback_crypt_ctx, 0);
	if (!bio_fallback_crypt_ctx_cache)
		return -ENOMEM;

	bio_fallback_crypt_ctx_pool =
		mempool_create_slab_pool(num_prealloc_fallback_crypt_ctxs,
					 bio_fallback_crypt_ctx_cache);
	if (!bio_fallback_crypt_ctx_pool)
		return -ENOMEM;

	return 0;
}