// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2021 Google LLC
 * Author: Ard Biesheuvel <ardb@google.com>
 *
 * This file is the core of fips140.ko, which carries a number of crypto
 * algorithms and chaining mode templates that are also built into vmlinux.
 * This module performs a load-time integrity check, as mandated by FIPS 140,
 * and replaces registered crypto algorithms that appear on the FIPS 140 list
 * with ones provided by this module. This meets the FIPS 140 requirements for
 * a cryptographic software module.
 */

#define pr_fmt(fmt) "fips140: " fmt

#include <linux/ctype.h>
#include <linux/module.h>
#include <crypto/aead.h>
#include <crypto/aes.h>
#include <crypto/hash.h>
#include <crypto/sha.h>
#include <crypto/skcipher.h>
#include <crypto/rng.h>
#include <trace/hooks/fips140.h>

#include "internal.h"

/*
 * FIPS 140-2 prefers the use of HMAC with a public key over a plain hash.
 */
u8 __initdata fips140_integ_hmac_key[] = "The quick brown fox jumps over the lazy dog";

/* this is populated by the build tool */
u8 __initdata fips140_integ_hmac_digest[SHA256_DIGEST_SIZE];

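/*
 * These markers delimit this module's private initcall array so that
 * fips140_init() can invoke each initcall by hand. Placing them at the start
 * and end of the .initcalls section is assumed to be arranged by the module's
 * linker script, which is not part of this file.
 */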
const u32 __initcall_start_marker __section(".initcalls._start");
const u32 __initcall_end_marker __section(".initcalls._end");

const u8 __fips140_text_start __section(".text.._start");
const u8 __fips140_text_end __section(".text.._end");

const u8 __fips140_rodata_start __section(".rodata.._start");
const u8 __fips140_rodata_end __section(".rodata.._end");

/*
 * We need this little detour to prevent Clang from detecting out-of-bounds
 * accesses to __fips140_text_start and __fips140_rodata_start, which exist
 * only to delineate the sections, and so their sizes are not relevant to us.
 */
const u32 *__initcall_start = &__initcall_start_marker;

const u8 *__text_start = &__fips140_text_start;
const u8 *__rodata_start = &__fips140_rodata_start;

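/*
 * Names of the algorithms covered by this module. Only synchronous software
 * implementations of these are taken over; see is_fips140_algo() below.
 */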
static const char fips140_algorithms[][22] __initconst = {
	"aes",

	"gcm(aes)",

	"ecb(aes)",
	"cbc(aes)",
	"ctr(aes)",
	"xts(aes)",

	"hmac(sha1)",
	"hmac(sha224)",
	"hmac(sha256)",
	"hmac(sha384)",
	"hmac(sha512)",
	"sha1",
	"sha224",
	"sha256",
	"sha384",
	"sha512",

	"drbg_nopr_ctr_aes256",
	"drbg_nopr_ctr_aes192",
	"drbg_nopr_ctr_aes128",
	"drbg_nopr_hmac_sha512",
	"drbg_nopr_hmac_sha384",
	"drbg_nopr_hmac_sha256",
	"drbg_nopr_hmac_sha1",
	"drbg_nopr_sha512",
	"drbg_nopr_sha384",
	"drbg_nopr_sha256",
	"drbg_nopr_sha1",
	"drbg_pr_ctr_aes256",
	"drbg_pr_ctr_aes192",
	"drbg_pr_ctr_aes128",
	"drbg_pr_hmac_sha512",
	"drbg_pr_hmac_sha384",
	"drbg_pr_hmac_sha256",
	"drbg_pr_hmac_sha1",
	"drbg_pr_sha512",
	"drbg_pr_sha384",
	"drbg_pr_sha256",
	"drbg_pr_sha1",
};

static bool __init is_fips140_algo(struct crypto_alg *alg)
{
	int i;

	/*
	 * All software algorithms are synchronous; hardware algorithms must
	 * be covered by their own FIPS 140 certification.
	 */
	if (alg->cra_flags & CRYPTO_ALG_ASYNC)
		return false;

	for (i = 0; i < ARRAY_SIZE(fips140_algorithms); i++)
		if (!strcmp(alg->cra_name, fips140_algorithms[i]))
			return true;
	return false;
}

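/*
 * Algorithms that were already in active use when this module loaded, and
 * therefore could not simply be unregistered; update_live_fips140_algos()
 * patches these in place later.
 */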
static LIST_HEAD(unchecked_fips140_algos);

static void __init unregister_existing_fips140_algos(void)
{
	struct crypto_alg *alg, *tmp;
	LIST_HEAD(remove_list);
	LIST_HEAD(spawns);

	down_write(&crypto_alg_sem);

	/*
	 * Find all registered algorithms that we care about, and move them to
	 * a private list so that they are no longer exposed via the algo
	 * lookup API. Subsequently, we will unregister them if they are not in
	 * active use. If they are, we cannot simply remove them, but we can
	 * adapt them later to use our integrity-checked backing code.
	 */
	list_for_each_entry_safe(alg, tmp, &crypto_alg_list, cra_list) {
		if (is_fips140_algo(alg)) {
			if (refcount_read(&alg->cra_refcnt) == 1) {
				/*
				 * This algorithm is not currently in use, but
				 * there may be template instances holding
				 * references to it via spawns. So let's tear
				 * it down like crypto_unregister_alg() would,
				 * but without releasing the lock, to prevent
				 * races with concurrent TFM allocations.
				 */
				alg->cra_flags |= CRYPTO_ALG_DEAD;
				list_move(&alg->cra_list, &remove_list);
				crypto_remove_spawns(alg, &spawns, NULL);
			} else {
				/*
				 * This algorithm is live, i.e., there are TFMs
				 * allocated that rely on it for its crypto
				 * transformations. We will swap these out
				 * later with integrity-checked versions.
				 */
				pr_info("found already-live algorithm '%s' ('%s')\n",
					alg->cra_name, alg->cra_driver_name);
				list_move(&alg->cra_list,
					  &unchecked_fips140_algos);
			}
		}
	}

	/*
	 * We haven't taken a reference to the algorithms on the remove_list,
	 * so technically, we may be competing with a concurrent invocation of
	 * crypto_unregister_alg() here. Fortunately, crypto_unregister_alg()
	 * just gives up with a warning if the algo that is being unregistered
	 * has already disappeared, so this happens to be safe. That does mean
	 * we need to hold on to the lock, to ensure that the algo is either on
	 * the list or it is not, and not in some limbo state.
	 */
	crypto_remove_final(&remove_list);
	crypto_remove_final(&spawns);

	up_write(&crypto_alg_sem);
}

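/*
 * When the module loader applied relocations to .text, it patched immediate
 * fields of instructions in place. To reproduce the bytes that the build tool
 * hashed, mask those bits back to zero in our private copy; the masks below
 * cover the immediate fields of the respective AArch64 relocation types.
 */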
static void __init unapply_text_relocations(void *section, int section_size,
					    const Elf64_Rela *rela, int numrels)
{
	while (numrels--) {
		u32 *place = (u32 *)(section + rela->r_offset);

		BUG_ON(rela->r_offset >= section_size);

		switch (ELF64_R_TYPE(rela->r_info)) {
#ifdef CONFIG_ARM64
		case R_AARCH64_JUMP26:
		case R_AARCH64_CALL26:
			*place &= ~GENMASK(25, 0);
			break;

		case R_AARCH64_ADR_PREL_LO21:
		case R_AARCH64_ADR_PREL_PG_HI21:
		case R_AARCH64_ADR_PREL_PG_HI21_NC:
			*place &= ~(GENMASK(30, 29) | GENMASK(23, 5));
			break;

		case R_AARCH64_ADD_ABS_LO12_NC:
		case R_AARCH64_LDST8_ABS_LO12_NC:
		case R_AARCH64_LDST16_ABS_LO12_NC:
		case R_AARCH64_LDST32_ABS_LO12_NC:
		case R_AARCH64_LDST64_ABS_LO12_NC:
		case R_AARCH64_LDST128_ABS_LO12_NC:
			*place &= ~GENMASK(21, 10);
			break;
		default:
			pr_err("unhandled relocation type %llu\n",
			       ELF64_R_TYPE(rela->r_info));
			BUG();
#else
#error
#endif
		}
		rela++;
	}
}

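/*
 * .rodata carries data rather than instructions, so the only relocations the
 * module loader applies to it are 64-bit absolute pointers; reset those to
 * the zero value they had at build time.
 */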
static void __init unapply_rodata_relocations(void *section, int section_size,
					      const Elf64_Rela *rela, int numrels)
{
	while (numrels--) {
		void *place = section + rela->r_offset;

		BUG_ON(rela->r_offset >= section_size);

		switch (ELF64_R_TYPE(rela->r_info)) {
#ifdef CONFIG_ARM64
		case R_AARCH64_ABS64:
			*(u64 *)place = 0;
			break;
		default:
			pr_err("unhandled relocation type %llu\n",
			       ELF64_R_TYPE(rela->r_info));
			BUG();
#else
#error
#endif
		}
		rela++;
	}
}

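/*
 * Recompute the HMAC-SHA256 over this module's .text and .rodata, using
 * copies with all load-time relocations reversed, and compare the result with
 * the digest that the build tool stored in fips140_integ_hmac_digest.
 */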
static bool __init check_fips140_module_hmac(void)
{
	SHASH_DESC_ON_STACK(desc, dontcare);
	u8 digest[SHA256_DIGEST_SIZE];
	void *textcopy, *rodatacopy;
	int textsize, rodatasize;
	int err;

	textsize = &__fips140_text_end - &__fips140_text_start;
	rodatasize = &__fips140_rodata_end - &__fips140_rodata_start;

	pr_info("text size : 0x%x\n", textsize);
	pr_info("rodata size: 0x%x\n", rodatasize);

	textcopy = kmalloc(textsize + rodatasize, GFP_KERNEL);
	if (!textcopy) {
		pr_err("Failed to allocate memory for copy of .text\n");
		return false;
	}

	rodatacopy = textcopy + textsize;

	memcpy(textcopy, __text_start, textsize);
	memcpy(rodatacopy, __rodata_start, rodatasize);

	// apply the relocations in reverse on the copies of .text and .rodata
	unapply_text_relocations(textcopy, textsize,
				 __this_module.arch.text_relocations,
				 __this_module.arch.num_text_relocations);

	unapply_rodata_relocations(rodatacopy, rodatasize,
				   __this_module.arch.rodata_relocations,
				   __this_module.arch.num_rodata_relocations);

	kfree(__this_module.arch.text_relocations);
	kfree(__this_module.arch.rodata_relocations);

	desc->tfm = crypto_alloc_shash("hmac(sha256)", 0, 0);
	if (IS_ERR(desc->tfm)) {
		pr_err("failed to allocate hmac tfm (%ld)\n", PTR_ERR(desc->tfm));
		kfree(textcopy);
		return false;
	}

	pr_info("using '%s' for integrity check\n",
		crypto_shash_driver_name(desc->tfm));

	err = crypto_shash_setkey(desc->tfm, fips140_integ_hmac_key,
				  strlen(fips140_integ_hmac_key)) ?:
	      crypto_shash_init(desc) ?:
	      crypto_shash_update(desc, textcopy, textsize) ?:
	      crypto_shash_finup(desc, rodatacopy, rodatasize, digest);

	crypto_free_shash(desc->tfm);
	kfree(textcopy);

	if (err) {
		pr_err("failed to calculate hmac shash (%d)\n", err);
		return false;
	}

	if (memcmp(digest, fips140_integ_hmac_digest, sizeof(digest))) {
		pr_err("provided_digest : %*phN\n", (int)sizeof(digest),
		       fips140_integ_hmac_digest);

		pr_err("calculated digest: %*phN\n", (int)sizeof(digest),
		       digest);

		return false;
	}

	return true;
}

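/*
 * For each algorithm that was still live at load time, look up the
 * integrity-checked replacement that this module registered under the same
 * driver name, and redirect the old algorithm's function pointers to it, so
 * that existing TFMs transparently switch to the checked code.
 */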
static bool __init update_live_fips140_algos(void)
{
	struct crypto_alg *alg, *new_alg, *tmp;

	/*
	 * Find all algorithms that we could not unregister the last time
	 * around, due to the fact that they were already in use.
	 */
	down_write(&crypto_alg_sem);
	list_for_each_entry_safe(alg, tmp, &unchecked_fips140_algos, cra_list) {

		/*
		 * Take this algo off the list before releasing the lock. This
		 * ensures that a concurrent invocation of
		 * crypto_unregister_alg() observes a consistent state, i.e.,
		 * the algo is still on the list, and crypto_unregister_alg()
		 * will release it, or it is not, and crypto_unregister_alg()
		 * will issue a warning but ignore this condition otherwise.
		 */
		list_del_init(&alg->cra_list);
		up_write(&crypto_alg_sem);

		/*
		 * Grab the algo that will replace the live one.
		 * Note that this will instantiate template-based instances as
		 * well, as long as their driver name uses the conventional
		 * pattern of "template(algo)". In this case, we are relying on
		 * the fact that the templates carried by this module will
		 * supersede the builtin ones, due to the fact that they were
		 * registered later, and therefore appear first in the linked
		 * list. For example, "hmac(sha1-ce)" constructed using the
		 * builtin hmac template and the builtin SHA1 driver will be
		 * superseded by the integrity-checked versions of HMAC and
		 * SHA1-ce carried in this module.
		 *
		 * Note that this takes a reference to the new algorithm which
		 * will never get released. This is intentional: once we copy
		 * the function pointers from the new algo into the old one, we
		 * cannot drop the new algo unless we are sure that the old one
		 * has been released, and this is something we don't keep track
		 * of at the moment.
		 */
		new_alg = crypto_alg_mod_lookup(alg->cra_driver_name,
						alg->cra_flags & CRYPTO_ALG_TYPE_MASK,
						CRYPTO_ALG_TYPE_MASK | CRYPTO_NOLOAD);

		if (IS_ERR(new_alg)) {
			pr_crit("Failed to allocate '%s' for updating live algo (%ld)\n",
				alg->cra_driver_name, PTR_ERR(new_alg));
			return false;
		}

		/*
		 * The FIPS module's algorithms are expected to be built from
		 * the same source code as the in-kernel ones so that they are
		 * fully compatible. In general, there's no way to verify full
		 * compatibility at runtime, but we can at least verify that
		 * the algorithm properties match.
		 */
		if (alg->cra_ctxsize != new_alg->cra_ctxsize ||
		    alg->cra_alignmask != new_alg->cra_alignmask) {
			pr_crit("Failed to update live algo '%s' due to mismatch:\n"
				"cra_ctxsize : %u vs %u\n"
				"cra_alignmask : 0x%x vs 0x%x\n",
				alg->cra_driver_name,
				alg->cra_ctxsize, new_alg->cra_ctxsize,
				alg->cra_alignmask, new_alg->cra_alignmask);
			return false;
		}

		/*
		 * Update the name and priority so the algorithm stands out as
		 * one that was updated in order to comply with FIPS 140, and
		 * so that it is not the preferred version for further use.
		 */
		strlcat(alg->cra_name, "+orig", CRYPTO_MAX_ALG_NAME);
		alg->cra_priority = 0;

		switch (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
		struct aead_alg *old_aead, *new_aead;
		struct skcipher_alg *old_skcipher, *new_skcipher;
		struct shash_alg *old_shash, *new_shash;
		struct rng_alg *old_rng, *new_rng;

		case CRYPTO_ALG_TYPE_CIPHER:
			alg->cra_u.cipher = new_alg->cra_u.cipher;
			break;

		case CRYPTO_ALG_TYPE_AEAD:
			old_aead = container_of(alg, struct aead_alg, base);
			new_aead = container_of(new_alg, struct aead_alg, base);

			old_aead->setkey = new_aead->setkey;
			old_aead->setauthsize = new_aead->setauthsize;
			old_aead->encrypt = new_aead->encrypt;
			old_aead->decrypt = new_aead->decrypt;
			old_aead->init = new_aead->init;
			old_aead->exit = new_aead->exit;
			break;

		case CRYPTO_ALG_TYPE_SKCIPHER:
			old_skcipher = container_of(alg, struct skcipher_alg, base);
			new_skcipher = container_of(new_alg, struct skcipher_alg, base);

			old_skcipher->setkey = new_skcipher->setkey;
			old_skcipher->encrypt = new_skcipher->encrypt;
			old_skcipher->decrypt = new_skcipher->decrypt;
			old_skcipher->init = new_skcipher->init;
			old_skcipher->exit = new_skcipher->exit;
			break;

		case CRYPTO_ALG_TYPE_SHASH:
			old_shash = container_of(alg, struct shash_alg, base);
			new_shash = container_of(new_alg, struct shash_alg, base);

			old_shash->init = new_shash->init;
			old_shash->update = new_shash->update;
			old_shash->final = new_shash->final;
			old_shash->finup = new_shash->finup;
			old_shash->digest = new_shash->digest;
			old_shash->export = new_shash->export;
			old_shash->import = new_shash->import;
			old_shash->setkey = new_shash->setkey;
			old_shash->init_tfm = new_shash->init_tfm;
			old_shash->exit_tfm = new_shash->exit_tfm;
			break;

		case CRYPTO_ALG_TYPE_RNG:
			old_rng = container_of(alg, struct rng_alg, base);
			new_rng = container_of(new_alg, struct rng_alg, base);

			old_rng->generate = new_rng->generate;
			old_rng->seed = new_rng->seed;
			old_rng->set_ent = new_rng->set_ent;
			break;
		default:
			/*
			 * This should never happen: every item on the
			 * fips140_algorithms list should match one of the
			 * cases above, so if we end up here, something is
			 * definitely wrong.
			 */
			pr_crit("Unexpected type %u for algo %s, giving up ...\n",
				alg->cra_flags & CRYPTO_ALG_TYPE_MASK,
				alg->cra_driver_name);
			return false;
		}

		/*
		 * Move the algorithm back to the algorithm list, so it is
		 * visible in /proc/crypto et al.
		 */
		down_write(&crypto_alg_sem);
		list_add_tail(&alg->cra_list, &crypto_alg_list);
	}
	up_write(&crypto_alg_sem);

	return true;
}

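/*
 * Vendor hook implementations for the crypto library routines sha256(),
 * aes_expandkey(), aes_encrypt() and aes_decrypt(). Once registered, the
 * vmlinux library functions hand their calls off to these integrity-checked
 * copies; setting *hook_inuse signals to the caller that the hook has
 * produced the result.
 */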
static void fips140_sha256(void *p, const u8 *data, unsigned int len, u8 *out,
			   int *hook_inuse)
{
	sha256(data, len, out);
	*hook_inuse = 1;
}

static void fips140_aes_expandkey(void *p, struct crypto_aes_ctx *ctx,
				  const u8 *in_key, unsigned int key_len,
				  int *err)
{
	*err = aes_expandkey(ctx, in_key, key_len);
}

static void fips140_aes_encrypt(void *priv, const struct crypto_aes_ctx *ctx,
				u8 *out, const u8 *in, int *hook_inuse)
{
	aes_encrypt(ctx, out, in);
	*hook_inuse = 1;
}

static void fips140_aes_decrypt(void *priv, const struct crypto_aes_ctx *ctx,
				u8 *out, const u8 *in, int *hook_inuse)
{
	aes_decrypt(ctx, out, in);
	*hook_inuse = 1;
}

static bool update_fips140_library_routines(void)
{
	int ret;

	ret = register_trace_android_vh_sha256(fips140_sha256, NULL) ?:
	      register_trace_android_vh_aes_expandkey(fips140_aes_expandkey, NULL) ?:
	      register_trace_android_vh_aes_encrypt(fips140_aes_encrypt, NULL) ?:
	      register_trace_android_vh_aes_decrypt(fips140_aes_decrypt, NULL);

	return ret == 0;
}

/*
 * Initialize the FIPS 140 module.
 *
 * Note: this routine iterates over the contents of the initcall section, which
 * consists of an array of function pointers that was emitted by the linker
 * rather than the compiler. This means that these function pointers lack the
 * usual CFI stubs that the compiler emits when CFI codegen is enabled. So
 * let's disable CFI locally when handling the initcall array, to avoid
 * surprises.
 */
static int __init __attribute__((__no_sanitize__("cfi")))
fips140_init(void)
{
	const u32 *initcall;

	pr_info("loading module\n");

	unregister_existing_fips140_algos();

	/* iterate over all init routines present in this module and call them */
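	/*
	 * Each entry is a 32-bit offset relative to itself (hence the
	 * offset_to_ptr() call); the +1 skips over the start marker, which
	 * lives in the same section but is not an initcall.
	 */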
	for (initcall = __initcall_start + 1;
	     initcall < &__initcall_end_marker;
	     initcall++) {
		int (*init)(void) = offset_to_ptr(initcall);

		init();
	}

	if (!update_live_fips140_algos())
		goto panic;

	if (!update_fips140_library_routines())
		goto panic;

	/*
	 * Wait until all tasks have at least been scheduled once and preempted
	 * voluntarily. This ensures that none of the superseded algorithms that
	 * were already in use will still be live.
	 */
	synchronize_rcu_tasks();

	/* insert self tests here */

	/*
	 * It may seem backward to perform the integrity check last, but this
	 * is intentional: the check itself uses hmac(sha256), which is one of
	 * the algorithms that are replaced with versions from this module, and
	 * the integrity check must use the replacement version.
	 */

	if (!check_fips140_module_hmac()) {
		pr_crit("integrity check failed -- giving up!\n");
		goto panic;
	}
	pr_info("integrity check passed\n");

	pr_info("module successfully loaded\n");
	return 0;

panic:
	panic("FIPS 140 module load failure");
}

module_init(fips140_init);

MODULE_IMPORT_NS(CRYPTO_INTERNAL);
MODULE_LICENSE("GPL v2");

/*
 * Crypto-related helper functions, reproduced here so that they will be
 * covered by the FIPS 140 integrity check.
 *
 * Non-cryptographic helper functions such as memcpy() can be excluded from the
 * FIPS module, but there is ambiguity about other helper functions like
 * __crypto_xor() and crypto_inc() which aren't cryptographic by themselves,
 * but are more closely associated with cryptography than e.g. memcpy(). To
 * err on the side of caution, we include copies of these in the FIPS module.
 */
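/*
 * XOR two source buffers into dst, taking 8-, 4-, and 2-byte strides while
 * enough input remains, then finishing byte by byte. This copy relies on the
 * CPU handling unaligned word accesses efficiently, which holds on arm64,
 * the only architecture this module supports (see the #ifdefs above).
 */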
void __crypto_xor(u8 *dst, const u8 *src1, const u8 *src2, unsigned int len)
{
	while (len >= 8) {
		*(u64 *)dst = *(u64 *)src1 ^ *(u64 *)src2;
		dst += 8;
		src1 += 8;
		src2 += 8;
		len -= 8;
	}

	while (len >= 4) {
		*(u32 *)dst = *(u32 *)src1 ^ *(u32 *)src2;
		dst += 4;
		src1 += 4;
		src2 += 4;
		len -= 4;
	}

	while (len >= 2) {
		*(u16 *)dst = *(u16 *)src1 ^ *(u16 *)src2;
		dst += 2;
		src1 += 2;
		src2 += 2;
		len -= 2;
	}

	while (len--)
		*dst++ = *src1++ ^ *src2++;
}

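/*
 * Increment a counter stored as a big-endian integer of 'size' bytes,
 * propagating the carry upward from the least significant (final) byte.
 */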
void crypto_inc(u8 *a, unsigned int size)
{
	a += size;

	while (size--)
		if (++*--a)
			break;
}