// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2019 Google LLC
 */

/*
 * Refer to Documentation/block/inline-encryption.rst for detailed explanation.
 */

#define pr_fmt(fmt) "blk-crypto: " fmt

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/keyslot-manager.h>
#include <linux/module.h>
#include <linux/slab.h>

#include "blk-crypto-internal.h"

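/*
 * Table of the crypto modes supported by blk-crypto, indexed by
 * enum blk_crypto_mode_num.  For each mode this gives the crypto API
 * algorithm name and the key and IV sizes in bytes.  (AES-256-XTS uses a
 * 64-byte key because XTS takes two 32-byte AES keys.)
 */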
const struct blk_crypto_mode blk_crypto_modes[] = {
	[BLK_ENCRYPTION_MODE_AES_256_XTS] = {
		.cipher_str = "xts(aes)",
		.keysize = 64,
		.ivsize = 16,
	},
	[BLK_ENCRYPTION_MODE_AES_128_CBC_ESSIV] = {
		.cipher_str = "essiv(cbc(aes),sha256)",
		.keysize = 16,
		.ivsize = 16,
	},
	[BLK_ENCRYPTION_MODE_ADIANTUM] = {
		.cipher_str = "adiantum(xchacha12,aes)",
		.keysize = 32,
		.ivsize = 32,
	},
};

/*
 * This number needs to be at least (the number of threads doing IO
 * concurrently) * (maximum recursive depth of a bio), so that we don't
 * deadlock on crypt_ctx allocations. The default is chosen to be the same
 * as the default number of post-read contexts in both EXT4 and F2FS.
 */
static int num_prealloc_crypt_ctxs = 128;

module_param(num_prealloc_crypt_ctxs, int, 0444);
MODULE_PARM_DESC(num_prealloc_crypt_ctxs,
		 "Number of bio crypto contexts to preallocate");

static struct kmem_cache *bio_crypt_ctx_cache;
static mempool_t *bio_crypt_ctx_pool;

static int __init bio_crypt_ctx_init(void)
{
	size_t i;

	bio_crypt_ctx_cache = KMEM_CACHE(bio_crypt_ctx, 0);
	if (!bio_crypt_ctx_cache)
		goto out_no_mem;

	bio_crypt_ctx_pool = mempool_create_slab_pool(num_prealloc_crypt_ctxs,
						      bio_crypt_ctx_cache);
	if (!bio_crypt_ctx_pool)
		goto out_no_mem;

	/* This is assumed in various places. */
	BUILD_BUG_ON(BLK_ENCRYPTION_MODE_INVALID != 0);

	/* Sanity check that no algorithm exceeds the defined limits. */
	for (i = 0; i < BLK_ENCRYPTION_MODE_MAX; i++) {
		BUG_ON(blk_crypto_modes[i].keysize > BLK_CRYPTO_MAX_KEY_SIZE);
		BUG_ON(blk_crypto_modes[i].ivsize > BLK_CRYPTO_MAX_IV_SIZE);
	}

	return 0;
out_no_mem:
	panic("Failed to allocate mem for bio crypt ctxs\n");
}
subsys_initcall(bio_crypt_ctx_init);

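/**
 * bio_crypt_set_ctx - attach an inline encryption context to a bio
 * @bio: The bio to attach the context to
 * @key: The key to encrypt/decrypt the bio's data with
 * @dun: The data unit number to use for the bio's first data unit
 * @gfp_mask: Allocation flags; must include __GFP_DIRECT_RECLAIM so that the
 *	      mempool allocation of the context cannot fail
 */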
void bio_crypt_set_ctx(struct bio *bio, const struct blk_crypto_key *key,
		       const u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE], gfp_t gfp_mask)
{
	struct bio_crypt_ctx *bc;

	/*
	 * The caller must use a gfp_mask that contains __GFP_DIRECT_RECLAIM so
	 * that the mempool_alloc() can't fail.
	 */
	WARN_ON_ONCE(!(gfp_mask & __GFP_DIRECT_RECLAIM));

	bc = mempool_alloc(bio_crypt_ctx_pool, gfp_mask);

	bc->bc_key = key;
	memcpy(bc->bc_dun, dun, sizeof(bc->bc_dun));

	bio->bi_crypt_context = bc;
}
EXPORT_SYMBOL_GPL(bio_crypt_set_ctx);

void __bio_crypt_free_ctx(struct bio *bio)
{
	mempool_free(bio->bi_crypt_context, bio_crypt_ctx_pool);
	bio->bi_crypt_context = NULL;
}

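/*
 * Give the cloned bio a copy of the source bio's crypt context, so that both
 * bios refer to the same key and starting DUN.
 */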
int __bio_crypt_clone(struct bio *dst, struct bio *src, gfp_t gfp_mask)
{
	dst->bi_crypt_context = mempool_alloc(bio_crypt_ctx_pool, gfp_mask);
	if (!dst->bi_crypt_context)
		return -ENOMEM;
	*dst->bi_crypt_context = *src->bi_crypt_context;
	return 0;
}
EXPORT_SYMBOL_GPL(__bio_crypt_clone);

/* Increments @dun by @inc, treating @dun as a multi-limb integer. */
void bio_crypt_dun_increment(u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE],
			     unsigned int inc)
{
	int i;

	for (i = 0; inc && i < BLK_CRYPTO_DUN_ARRAY_SIZE; i++) {
		dun[i] += inc;
		/*
		 * If the addition in this limb overflowed, then we need to
		 * carry 1 into the next limb. Else the carry is 0.
		 */
		if (dun[i] < inc)
			inc = 1;
		else
			inc = 0;
	}
}

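/*
 * Advance the bio's DUN by the number of data units covered by @bytes, so that
 * when part of the bio is completed or split off, the remainder keeps the
 * correct DUN for its new starting offset.
 */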
void __bio_crypt_advance(struct bio *bio, unsigned int bytes)
{
	struct bio_crypt_ctx *bc = bio->bi_crypt_context;

	bio_crypt_dun_increment(bc->bc_dun,
				bytes >> bc->bc_key->data_unit_size_bits);
}

/*
 * Returns true if @bc->bc_dun plus @bytes converted to data units is equal to
 * @next_dun, treating the DUNs as multi-limb integers.
 */
bool bio_crypt_dun_is_contiguous(const struct bio_crypt_ctx *bc,
				 unsigned int bytes,
				 const u64 next_dun[BLK_CRYPTO_DUN_ARRAY_SIZE])
{
	int i;
	unsigned int carry = bytes >> bc->bc_key->data_unit_size_bits;

	for (i = 0; i < BLK_CRYPTO_DUN_ARRAY_SIZE; i++) {
		if (bc->bc_dun[i] + carry != next_dun[i])
			return false;
		/*
		 * If the addition in this limb overflowed, then we need to
		 * carry 1 into the next limb. Else the carry is 0.
		 */
		if ((bc->bc_dun[i] + carry) < carry)
			carry = 1;
		else
			carry = 0;
	}

	/* If the DUN wrapped through 0, don't treat it as contiguous. */
	return carry == 0;
}

/*
 * Checks that two bio crypt contexts are compatible - i.e. that
 * they are mergeable except for data_unit_num continuity.
 */
static bool bio_crypt_ctx_compatible(struct bio_crypt_ctx *bc1,
				     struct bio_crypt_ctx *bc2)
{
	if (!bc1)
		return !bc2;

	return bc2 && bc1->bc_key == bc2->bc_key;
}

bool bio_crypt_rq_ctx_compatible(struct request *rq, struct bio *bio)
{
	return bio_crypt_ctx_compatible(rq->crypt_ctx, bio->bi_crypt_context);
}

/*
 * Checks that two bio crypt contexts are compatible, and also
 * that their data_unit_nums are continuous (and can hence be merged)
 * in the order @bc1 followed by @bc2.
 */
bool bio_crypt_ctx_mergeable(struct bio_crypt_ctx *bc1, unsigned int bc1_bytes,
			     struct bio_crypt_ctx *bc2)
{
	if (!bio_crypt_ctx_compatible(bc1, bc2))
		return false;

	return !bc1 || bio_crypt_dun_is_contiguous(bc1, bc1_bytes, bc2->bc_dun);
}

/* Check that all I/O segments are data unit aligned. */
static bool bio_crypt_check_alignment(struct bio *bio)
{
	const unsigned int data_unit_size =
		bio->bi_crypt_context->bc_key->crypto_cfg.data_unit_size;
	struct bvec_iter iter;
	struct bio_vec bv;

	bio_for_each_segment(bv, bio, iter) {
		if (!IS_ALIGNED(bv.bv_len | bv.bv_offset, data_unit_size))
			return false;
	}

	return true;
}

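/*
 * Program the request's key into a keyslot of the request queue's keyslot
 * manager (i.e. the inline encryption hardware) and save a reference to that
 * keyslot in rq->crypt_keyslot.
 */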
blk_status_t __blk_crypto_init_request(struct request *rq)
{
	return blk_ksm_get_slot_for_key(rq->q->ksm, rq->crypt_ctx->bc_key,
					&rq->crypt_keyslot);
}

/**
 * __blk_crypto_free_request - Uninitialize the crypto fields of a request.
 *
 * @rq: The request whose crypto fields to uninitialize.
 *
 * Completely uninitializes the crypto fields of a request. If a keyslot has
 * been programmed into some inline encryption hardware, that keyslot is
 * released. The rq->crypt_ctx is also freed.
 */
void __blk_crypto_free_request(struct request *rq)
{
	blk_ksm_put_slot(rq->crypt_keyslot);
	mempool_free(rq->crypt_ctx, bio_crypt_ctx_pool);
	blk_crypto_rq_set_defaults(rq);
}

/**
 * __blk_crypto_bio_prep - Prepare bio for inline encryption
 *
 * @bio_ptr: pointer to original bio pointer
 *
 * If the bio crypt context provided for the bio is supported by the underlying
 * device's inline encryption hardware, do nothing.
 *
 * Otherwise, try to perform en/decryption for this bio by falling back to the
 * kernel crypto API. When the crypto API fallback is used for encryption,
 * blk-crypto may choose to split the bio into 2 - the first one that will
 * continue to be processed and the second one that will be resubmitted via
 * submit_bio_noacct. A bounce bio will be allocated to encrypt the contents
 * of the aforementioned "first one", and *bio_ptr will be updated to this
 * bounce bio.
 *
 * Caller must ensure bio has bio_crypt_ctx.
 *
 * Return: true on success; false on error (and bio->bi_status will be set
 *	   appropriately, and bio_endio() will have been called so bio
 *	   submission should abort).
 */
bool __blk_crypto_bio_prep(struct bio **bio_ptr)
{
	struct bio *bio = *bio_ptr;
	const struct blk_crypto_key *bc_key = bio->bi_crypt_context->bc_key;

	/* Error if bio has no data. */
	if (WARN_ON_ONCE(!bio_has_data(bio))) {
		bio->bi_status = BLK_STS_IOERR;
		goto fail;
	}

	if (!bio_crypt_check_alignment(bio)) {
		bio->bi_status = BLK_STS_IOERR;
		goto fail;
	}

	/*
	 * Success if device supports the encryption context, or if we succeeded
	 * in falling back to the crypto API.
	 */
	if (blk_ksm_crypto_cfg_supported(bio->bi_disk->queue->ksm,
					 &bc_key->crypto_cfg))
		return true;

	if (blk_crypto_fallback_bio_prep(bio_ptr))
		return true;
fail:
	bio_endio(*bio_ptr);
	return false;
}

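/*
 * Copy the bio's crypt context into the request, allocating the request's
 * crypt_ctx from the mempool first if the request doesn't already have one.
 */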
int __blk_crypto_rq_bio_prep(struct request *rq, struct bio *bio,
			     gfp_t gfp_mask)
{
	if (!rq->crypt_ctx) {
		rq->crypt_ctx = mempool_alloc(bio_crypt_ctx_pool, gfp_mask);
		if (!rq->crypt_ctx)
			return -ENOMEM;
	}
	*rq->crypt_ctx = *bio->bi_crypt_context;
	return 0;
}

/**
 * blk_crypto_init_key() - Prepare a key for use with blk-crypto
 * @blk_key: Pointer to the blk_crypto_key to initialize.
 * @raw_key: Pointer to the raw key.
 * @raw_key_size: Size of raw key. Must be at least the required size for the
 *		  chosen @crypto_mode; see blk_crypto_modes[]. (It's allowed
 *		  to be longer than the mode's actual key size, in order to
 *		  support inline encryption hardware that accepts wrapped keys;
 *		  @is_hw_wrapped has to be set for such keys.)
 * @is_hw_wrapped: Denotes @raw_key is wrapped.
 * @crypto_mode: identifier for the encryption algorithm to use
 * @dun_bytes: number of bytes that will be used to specify the DUN when this
 *	       key is used
 * @data_unit_size: the data unit size to use for en/decryption
 *
 * Return: 0 on success, -errno on failure. The caller is responsible for
 *	   zeroizing both blk_key and raw_key when done with them.
 */
int blk_crypto_init_key(struct blk_crypto_key *blk_key,
			const u8 *raw_key, unsigned int raw_key_size,
			bool is_hw_wrapped,
			enum blk_crypto_mode_num crypto_mode,
			unsigned int dun_bytes,
			unsigned int data_unit_size)
{
	const struct blk_crypto_mode *mode;

	memset(blk_key, 0, sizeof(*blk_key));

	if (crypto_mode >= ARRAY_SIZE(blk_crypto_modes))
		return -EINVAL;

	BUILD_BUG_ON(BLK_CRYPTO_MAX_WRAPPED_KEY_SIZE < BLK_CRYPTO_MAX_KEY_SIZE);

	mode = &blk_crypto_modes[crypto_mode];
	if (is_hw_wrapped) {
		if (raw_key_size < mode->keysize ||
		    raw_key_size > BLK_CRYPTO_MAX_WRAPPED_KEY_SIZE)
			return -EINVAL;
	} else {
		if (raw_key_size != mode->keysize)
			return -EINVAL;
	}

	if (dun_bytes == 0 || dun_bytes > mode->ivsize)
		return -EINVAL;

	if (!is_power_of_2(data_unit_size))
		return -EINVAL;

	blk_key->crypto_cfg.crypto_mode = crypto_mode;
	blk_key->crypto_cfg.dun_bytes = dun_bytes;
	blk_key->crypto_cfg.data_unit_size = data_unit_size;
	blk_key->crypto_cfg.is_hw_wrapped = is_hw_wrapped;
	blk_key->data_unit_size_bits = ilog2(data_unit_size);
	blk_key->size = raw_key_size;
	memcpy(blk_key->raw, raw_key, raw_key_size);

	return 0;
}
EXPORT_SYMBOL_GPL(blk_crypto_init_key);
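
/*
 * Example usage (illustrative sketch only): initializing a standard
 * (non-hardware-wrapped) AES-256-XTS key with 4096-byte data units and
 * 8-byte DUNs, roughly as an upper layer such as a filesystem might do:
 *
 *	struct blk_crypto_key blk_key;
 *	u8 raw_key[64];		// filled in with the raw AES-256-XTS key
 *	int err;
 *
 *	err = blk_crypto_init_key(&blk_key, raw_key, sizeof(raw_key),
 *				  false, BLK_ENCRYPTION_MODE_AES_256_XTS,
 *				  8, 4096);
 *	if (err)
 *		return err;
 */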

/*
 * Check if bios with @cfg can be en/decrypted by blk-crypto (i.e. either the
 * request queue it's submitted to supports inline crypto, or the
 * blk-crypto-fallback is enabled and supports the cfg).
 */
bool blk_crypto_config_supported(struct request_queue *q,
				 const struct blk_crypto_config *cfg)
{
	if (IS_ENABLED(CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK) &&
	    !cfg->is_hw_wrapped)
		return true;
	return blk_ksm_crypto_cfg_supported(q->ksm, cfg);
}

/**
 * blk_crypto_start_using_key() - Start using a blk_crypto_key on a device
 * @key: A key to use on the device
 * @q: the request queue for the device
 *
 * Upper layers must call this function to ensure that either the hardware
 * supports the key's crypto settings, or the crypto API fallback has transforms
 * for the needed mode allocated and ready to go. This function may allocate
 * an skcipher, and *should not* be called from the data path, since that might
 * cause a deadlock.
 *
 * Return: 0 on success; -ENOPKG if the hardware doesn't support the key and
 *	   blk-crypto-fallback is either disabled or the needed algorithm
 *	   is disabled in the crypto API; or another -errno code.
 */
int blk_crypto_start_using_key(const struct blk_crypto_key *key,
			       struct request_queue *q)
{
	if (blk_ksm_crypto_cfg_supported(q->ksm, &key->crypto_cfg))
		return 0;
	if (key->crypto_cfg.is_hw_wrapped) {
		pr_warn_once("hardware doesn't support wrapped keys\n");
		return -EOPNOTSUPP;
	}
	return blk_crypto_fallback_start_using_mode(key->crypto_cfg.crypto_mode);
}
EXPORT_SYMBOL_GPL(blk_crypto_start_using_key);

/**
 * blk_crypto_evict_key() - Evict a key from any inline encryption hardware
 *			    it may have been programmed into
 * @q: The request queue whose associated inline encryption hardware this key
 *     might have been programmed into
 * @key: The key to evict
 *
 * Upper layers (filesystems) must call this function to ensure that a key is
 * evicted from any hardware that it might have been programmed into. The key
 * must not be in use by any in-flight IO when this function is called.
 *
 * Return: 0 on success or if key is not present in the q's ksm, -errno on
 *	   error.
 */
int blk_crypto_evict_key(struct request_queue *q,
			 const struct blk_crypto_key *key)
{
	if (blk_ksm_crypto_cfg_supported(q->ksm, &key->crypto_cfg))
		return blk_ksm_evict_key(q->ksm, key);

	/*
	 * If the request queue's associated inline encryption hardware didn't
	 * have support for the key, then the key might have been programmed
	 * into the fallback keyslot manager, so try to evict from there.
	 */
	return blk_crypto_fallback_evict_key(key);
}
EXPORT_SYMBOL_GPL(blk_crypto_evict_key);