// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2021 Google LLC
 * Author: Ard Biesheuvel <ardb@google.com>
 *
 * This file is the core of fips140.ko, which carries a number of crypto
 * algorithms and chaining mode templates that are also built into vmlinux.
 * This module performs a load-time integrity check, as mandated by FIPS 140,
 * and replaces registered crypto algorithms that appear on the FIPS 140 list
 * with ones provided by this module. This meets the FIPS 140 requirements for
 * a cryptographic software module.
 */

#include <linux/ctype.h>
#include <linux/module.h>
#include <crypto/aead.h>
#include <crypto/aes.h>
#include <crypto/hash.h>
#include <crypto/sha.h>
#include <crypto/skcipher.h>
#include <crypto/rng.h>
#include <trace/hooks/fips140.h>

#include "fips140-module.h"
#include "internal.h"

/*
 * This option allows deliberately failing the self-tests for a particular
 * algorithm. This is for FIPS lab testing only.
 */
#ifdef CONFIG_CRYPTO_FIPS140_MOD_ERROR_INJECTION
char *fips140_broken_alg;
module_param_named(broken_alg, fips140_broken_alg, charp, 0);
#endif

/*
 * FIPS 140-2 prefers the use of HMAC with a public key over a plain hash.
 */
u8 __initdata fips140_integ_hmac_key[] = "The quick brown fox jumps over the lazy dog";

/* this is populated by the build tool */
u8 __initdata fips140_integ_hmac_digest[SHA256_DIGEST_SIZE];

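/*
 * Markers that delineate this module's initcall array and the boundaries
 * of its .text and .rodata sections at runtime, for use by the initcall
 * iteration and the integrity check below.
 */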
const u32 __initcall_start_marker __section(".initcalls._start");
const u32 __initcall_end_marker __section(".initcalls._end");

const u8 __fips140_text_start __section(".text.._start");
const u8 __fips140_text_end __section(".text.._end");

const u8 __fips140_rodata_start __section(".rodata.._start");
const u8 __fips140_rodata_end __section(".rodata.._end");

/*
 * We need this little detour to prevent Clang from detecting out of bounds
 * accesses to __fips140_text_start and __fips140_rodata_start, which only exist
 * to delineate the section, and so their sizes are not relevant to us.
 */
const u32 *__initcall_start = &__initcall_start_marker;

const u8 *__text_start = &__fips140_text_start;
const u8 *__rodata_start = &__fips140_rodata_start;

/*
 * The list of the crypto API algorithms (by cra_name) that will be unregistered
 * by this module, in preparation for the module registering its own
 * implementation(s) of them. When adding a new algorithm here, make sure to
 * consider whether it needs a self-test added to fips140_selftests[] as well.
 */
static const char * const fips140_algorithms[] __initconst = {
	"aes",

	"gcm(aes)",

	"ecb(aes)",
	"cbc(aes)",
	"ctr(aes)",
	"xts(aes)",

	"hmac(sha1)",
	"hmac(sha224)",
	"hmac(sha256)",
	"hmac(sha384)",
	"hmac(sha512)",
	"sha1",
	"sha224",
	"sha256",
	"sha384",
	"sha512",

	"stdrng",
};

static bool __init is_fips140_algo(struct crypto_alg *alg)
{
	int i;

	/*
	 * All software algorithms are synchronous; hardware algorithms must
	 * be covered by their own FIPS 140 certification.
	 */
	if (alg->cra_flags & CRYPTO_ALG_ASYNC)
		return false;

	for (i = 0; i < ARRAY_SIZE(fips140_algorithms); i++)
		if (!strcmp(alg->cra_name, fips140_algorithms[i]))
			return true;
	return false;
}

static LIST_HEAD(unchecked_fips140_algos);

/*
 * Release a list of algorithms which have been removed from crypto_alg_list.
 *
 * Note that even though the list is a private list, we have to hold
 * crypto_alg_sem while iterating through it because crypto_unregister_alg() may
 * run concurrently (as we haven't taken a reference to the algorithms on the
 * list), and crypto_unregister_alg() will remove the algorithm from whichever
 * list it happens to be on, while holding crypto_alg_sem. That's okay, since
 * in that case crypto_unregister_alg() will handle the crypto_alg_put().
 */
static void fips140_remove_final(struct list_head *list)
{
	struct crypto_alg *alg;
	struct crypto_alg *n;

	/*
	 * We need to take crypto_alg_sem to safely traverse the list (see
	 * comment above), but we have to drop it when doing each
	 * crypto_alg_put() as that may take crypto_alg_sem again.
	 */
	down_write(&crypto_alg_sem);
	list_for_each_entry_safe(alg, n, list, cra_list) {
		list_del_init(&alg->cra_list);
		up_write(&crypto_alg_sem);

		crypto_alg_put(alg);

		down_write(&crypto_alg_sem);
	}
	up_write(&crypto_alg_sem);
}

static void __init unregister_existing_fips140_algos(void)
{
	struct crypto_alg *alg, *tmp;
	LIST_HEAD(remove_list);
	LIST_HEAD(spawns);

	down_write(&crypto_alg_sem);

	/*
	 * Find all registered algorithms that we care about, and move them to
	 * a private list so that they are no longer exposed via the algo
	 * lookup API. Subsequently, we will unregister them if they are not in
	 * active use. If they are, we cannot simply remove them, but we can
	 * adapt them later to use our integrity-checked backing code.
	 */
	list_for_each_entry_safe(alg, tmp, &crypto_alg_list, cra_list) {
		if (is_fips140_algo(alg)) {
			if (refcount_read(&alg->cra_refcnt) == 1) {
				/*
				 * This algorithm is not currently in use, but
				 * there may be template instances holding
				 * references to it via spawns. So let's tear
				 * it down like crypto_unregister_alg() would,
				 * but without releasing the lock, to prevent
				 * races with concurrent TFM allocations.
				 */
				alg->cra_flags |= CRYPTO_ALG_DEAD;
				list_move(&alg->cra_list, &remove_list);
				crypto_remove_spawns(alg, &spawns, NULL);
			} else {
				/*
				 * This algorithm is live, i.e., there are TFMs
				 * allocated that rely on it for its crypto
				 * transformations. We will swap these out
				 * later with integrity-checked versions.
				 */
				pr_info("found already-live algorithm '%s' ('%s')\n",
					alg->cra_name, alg->cra_driver_name);
				list_move(&alg->cra_list,
					  &unchecked_fips140_algos);
			}
		}
	}
	up_write(&crypto_alg_sem);

	fips140_remove_final(&remove_list);
	fips140_remove_final(&spawns);
}

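/*
 * The module loader applies the module's ELF relocations to .text and
 * .rodata at load time, which perturbs the section contents relative to
 * what the build-time HMAC digest was computed over. The two helpers below
 * therefore mask the relocation target fields off again, on copies of the
 * sections, so that the digest can be recomputed over bytes matching the
 * build tool's view.
 */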
static void __init unapply_text_relocations(void *section, int section_size,
					    const Elf64_Rela *rela, int numrels)
{
	while (numrels--) {
		u32 *place = (u32 *)(section + rela->r_offset);

		BUG_ON(rela->r_offset >= section_size);

		switch (ELF64_R_TYPE(rela->r_info)) {
#ifdef CONFIG_ARM64
		case R_AARCH64_JUMP26:
		case R_AARCH64_CALL26:
			*place &= ~GENMASK(25, 0);
			break;

		case R_AARCH64_ADR_PREL_LO21:
		case R_AARCH64_ADR_PREL_PG_HI21:
		case R_AARCH64_ADR_PREL_PG_HI21_NC:
			*place &= ~(GENMASK(30, 29) | GENMASK(23, 5));
			break;

		case R_AARCH64_ADD_ABS_LO12_NC:
		case R_AARCH64_LDST8_ABS_LO12_NC:
		case R_AARCH64_LDST16_ABS_LO12_NC:
		case R_AARCH64_LDST32_ABS_LO12_NC:
		case R_AARCH64_LDST64_ABS_LO12_NC:
		case R_AARCH64_LDST128_ABS_LO12_NC:
			*place &= ~GENMASK(21, 10);
			break;
		default:
			pr_err("unhandled relocation type %llu\n",
			       ELF64_R_TYPE(rela->r_info));
			BUG();
#else
#error
#endif
		}
		rela++;
	}
}

static void __init unapply_rodata_relocations(void *section, int section_size,
					      const Elf64_Rela *rela, int numrels)
{
	while (numrels--) {
		void *place = section + rela->r_offset;

		BUG_ON(rela->r_offset >= section_size);

		switch (ELF64_R_TYPE(rela->r_info)) {
#ifdef CONFIG_ARM64
		case R_AARCH64_ABS64:
			*(u64 *)place = 0;
			break;
		default:
			pr_err("unhandled relocation type %llu\n",
			       ELF64_R_TYPE(rela->r_info));
			BUG();
#else
#error
#endif
		}
		rela++;
	}
}

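/*
 * Recompute the HMAC-SHA256 of this module's .text and .rodata sections
 * (using relocation-scrubbed copies) and compare the result against the
 * digest the build tool stored in fips140_integ_hmac_digest.
 */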
static bool __init check_fips140_module_hmac(void)
{
	SHASH_DESC_ON_STACK(desc, dontcare);
	u8 digest[SHA256_DIGEST_SIZE];
	void *textcopy, *rodatacopy;
	int textsize, rodatasize;
	int err;

	textsize = &__fips140_text_end - &__fips140_text_start;
	rodatasize = &__fips140_rodata_end - &__fips140_rodata_start;

	pr_info("text size  : 0x%x\n", textsize);
	pr_info("rodata size: 0x%x\n", rodatasize);

	textcopy = kmalloc(textsize + rodatasize, GFP_KERNEL);
	if (!textcopy) {
		pr_err("Failed to allocate memory for copy of .text\n");
		return false;
	}

	rodatacopy = textcopy + textsize;

	memcpy(textcopy, __text_start, textsize);
	memcpy(rodatacopy, __rodata_start, rodatasize);

	// apply the relocations in reverse on the copies of .text and .rodata
	unapply_text_relocations(textcopy, textsize,
				 __this_module.arch.text_relocations,
				 __this_module.arch.num_text_relocations);

	unapply_rodata_relocations(rodatacopy, rodatasize,
				   __this_module.arch.rodata_relocations,
				   __this_module.arch.num_rodata_relocations);

	kfree(__this_module.arch.text_relocations);
	kfree(__this_module.arch.rodata_relocations);

	desc->tfm = crypto_alloc_shash("hmac(sha256)", 0, 0);
	if (IS_ERR(desc->tfm)) {
		pr_err("failed to allocate hmac tfm (%ld)\n", PTR_ERR(desc->tfm));
		kfree(textcopy);
		return false;
	}

	pr_info("using '%s' for integrity check\n",
		crypto_shash_driver_name(desc->tfm));

	err = crypto_shash_setkey(desc->tfm, fips140_integ_hmac_key,
				  strlen(fips140_integ_hmac_key)) ?:
	      crypto_shash_init(desc) ?:
	      crypto_shash_update(desc, textcopy, textsize) ?:
	      crypto_shash_finup(desc, rodatacopy, rodatasize, digest);

	crypto_free_shash(desc->tfm);
	kfree(textcopy);

	if (err) {
		pr_err("failed to calculate hmac shash (%d)\n", err);
		return false;
	}

	if (memcmp(digest, fips140_integ_hmac_digest, sizeof(digest))) {
		pr_err("provided_digest : %*phN\n", (int)sizeof(digest),
		       fips140_integ_hmac_digest);

		pr_err("calculated digest: %*phN\n", (int)sizeof(digest),
		       digest);

		return false;
	}

	return true;
}

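/*
 * Patch the algorithms that were already in use when this module loaded:
 * for each one, look up the replacement this module registered under the
 * same cra_driver_name, and copy its function pointers over those of the
 * live algorithm.
 */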
static bool __init update_live_fips140_algos(void)
{
	struct crypto_alg *alg, *new_alg, *tmp;

	/*
	 * Find all algorithms that we could not unregister the last time
	 * around, due to the fact that they were already in use.
	 */
	down_write(&crypto_alg_sem);
	list_for_each_entry_safe(alg, tmp, &unchecked_fips140_algos, cra_list) {

		/*
		 * Take this algo off the list before releasing the lock. This
		 * ensures that a concurrent invocation of
		 * crypto_unregister_alg() observes a consistent state, i.e.,
		 * the algo is still on the list, and crypto_unregister_alg()
		 * will release it, or it is not, and crypto_unregister_alg()
		 * will issue a warning but ignore this condition otherwise.
		 */
		list_del_init(&alg->cra_list);
		up_write(&crypto_alg_sem);

		/*
		 * Grab the algo that will replace the live one.
		 * Note that this will instantiate template-based instances as
		 * well, as long as their driver name uses the conventional
		 * pattern of "template(algo)". In this case, we are relying on
		 * the fact that the templates carried by this module will
		 * supersede the builtin ones, due to the fact that they were
		 * registered later, and therefore appear first in the linked
		 * list. For example, "hmac(sha1-ce)" constructed using the
		 * builtin hmac template and the builtin SHA1 driver will be
		 * superseded by the integrity-checked versions of HMAC and
		 * SHA1-ce carried in this module.
		 *
		 * Note that this takes a reference to the new algorithm which
		 * will never get released. This is intentional: once we copy
		 * the function pointers from the new algo into the old one, we
		 * cannot drop the new algo unless we are sure that the old one
		 * has been released, and this is something we don't keep track
		 * of at the moment.
		 */
		new_alg = crypto_alg_mod_lookup(alg->cra_driver_name,
						alg->cra_flags & CRYPTO_ALG_TYPE_MASK,
						CRYPTO_ALG_TYPE_MASK | CRYPTO_NOLOAD);

		if (IS_ERR(new_alg)) {
			pr_crit("Failed to allocate '%s' for updating live algo (%ld)\n",
				alg->cra_driver_name, PTR_ERR(new_alg));
			return false;
		}

		/*
		 * The FIPS module's algorithms are expected to be built from
		 * the same source code as the in-kernel ones so that they are
		 * fully compatible. In general, there's no way to verify full
		 * compatibility at runtime, but we can at least verify that
		 * the algorithm properties match.
		 */
		if (alg->cra_ctxsize != new_alg->cra_ctxsize ||
		    alg->cra_alignmask != new_alg->cra_alignmask) {
			pr_crit("Failed to update live algo '%s' due to mismatch:\n"
				"cra_ctxsize   : %u vs %u\n"
				"cra_alignmask : 0x%x vs 0x%x\n",
				alg->cra_driver_name,
				alg->cra_ctxsize, new_alg->cra_ctxsize,
				alg->cra_alignmask, new_alg->cra_alignmask);
			return false;
		}

		/*
		 * Update the name and priority so the algorithm stands out as
		 * one that was updated in order to comply with FIPS 140, and
		 * that it is not the preferred version for further use.
		 */
		strlcat(alg->cra_name, "+orig", CRYPTO_MAX_ALG_NAME);
		alg->cra_priority = 0;

		switch (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
		struct aead_alg *old_aead, *new_aead;
		struct skcipher_alg *old_skcipher, *new_skcipher;
		struct shash_alg *old_shash, *new_shash;
		struct rng_alg *old_rng, *new_rng;

		case CRYPTO_ALG_TYPE_CIPHER:
			alg->cra_u.cipher = new_alg->cra_u.cipher;
			break;

		case CRYPTO_ALG_TYPE_AEAD:
			old_aead = container_of(alg, struct aead_alg, base);
			new_aead = container_of(new_alg, struct aead_alg, base);

			old_aead->setkey = new_aead->setkey;
			old_aead->setauthsize = new_aead->setauthsize;
			old_aead->encrypt = new_aead->encrypt;
			old_aead->decrypt = new_aead->decrypt;
			old_aead->init = new_aead->init;
			old_aead->exit = new_aead->exit;
			break;

		case CRYPTO_ALG_TYPE_SKCIPHER:
			old_skcipher = container_of(alg, struct skcipher_alg, base);
			new_skcipher = container_of(new_alg, struct skcipher_alg, base);

			old_skcipher->setkey = new_skcipher->setkey;
			old_skcipher->encrypt = new_skcipher->encrypt;
			old_skcipher->decrypt = new_skcipher->decrypt;
			old_skcipher->init = new_skcipher->init;
			old_skcipher->exit = new_skcipher->exit;
			break;

		case CRYPTO_ALG_TYPE_SHASH:
			old_shash = container_of(alg, struct shash_alg, base);
			new_shash = container_of(new_alg, struct shash_alg, base);

			old_shash->init = new_shash->init;
			old_shash->update = new_shash->update;
			old_shash->final = new_shash->final;
			old_shash->finup = new_shash->finup;
			old_shash->digest = new_shash->digest;
			old_shash->export = new_shash->export;
			old_shash->import = new_shash->import;
			old_shash->setkey = new_shash->setkey;
			old_shash->init_tfm = new_shash->init_tfm;
			old_shash->exit_tfm = new_shash->exit_tfm;
			break;

		case CRYPTO_ALG_TYPE_RNG:
			old_rng = container_of(alg, struct rng_alg, base);
			new_rng = container_of(new_alg, struct rng_alg, base);

			old_rng->generate = new_rng->generate;
			old_rng->seed = new_rng->seed;
			old_rng->set_ent = new_rng->set_ent;
			break;
		default:
			/*
			 * This should never happen: every item on the
			 * fips140_algorithms list should match one of the
			 * cases above, so if we end up here, something is
			 * definitely wrong.
			 */
			pr_crit("Unexpected type %u for algo %s, giving up ...\n",
				alg->cra_flags & CRYPTO_ALG_TYPE_MASK,
				alg->cra_driver_name);
			return false;
		}

		/*
		 * Move the algorithm back to the algorithm list, so it is
		 * visible in /proc/crypto et al.
		 */
		down_write(&crypto_alg_sem);
		list_add_tail(&alg->cra_list, &crypto_alg_list);
	}
	up_write(&crypto_alg_sem);

	return true;
}

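/*
 * Handlers for the crypto library vendor hooks declared in
 * <trace/hooks/fips140.h>. These service calls to the vmlinux crypto
 * library functions (sha256() and the AES library) using this module's
 * integrity-checked copies of that code, and report back through the
 * output parameter (*hook_inuse or *err) that the call was handled.
 */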
static void fips140_sha256(void *p, const u8 *data, unsigned int len, u8 *out,
			   int *hook_inuse)
{
	sha256(data, len, out);
	*hook_inuse = 1;
}

static void fips140_aes_expandkey(void *p, struct crypto_aes_ctx *ctx,
				  const u8 *in_key, unsigned int key_len,
				  int *err)
{
	*err = aes_expandkey(ctx, in_key, key_len);
}

static void fips140_aes_encrypt(void *priv, const struct crypto_aes_ctx *ctx,
				u8 *out, const u8 *in, int *hook_inuse)
{
	aes_encrypt(ctx, out, in);
	*hook_inuse = 1;
}

static void fips140_aes_decrypt(void *priv, const struct crypto_aes_ctx *ctx,
				u8 *out, const u8 *in, int *hook_inuse)
{
	aes_decrypt(ctx, out, in);
	*hook_inuse = 1;
}

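/*
 * Register the vendor hooks that reroute the vmlinux crypto library
 * entry points to the handlers above.
 */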
static bool update_fips140_library_routines(void)
{
	int ret;

	ret = register_trace_android_vh_sha256(fips140_sha256, NULL) ?:
	      register_trace_android_vh_aes_expandkey(fips140_aes_expandkey, NULL) ?:
	      register_trace_android_vh_aes_encrypt(fips140_aes_encrypt, NULL) ?:
	      register_trace_android_vh_aes_decrypt(fips140_aes_decrypt, NULL);

	return ret == 0;
}

/*
 * Initialize the FIPS 140 module.
 *
 * Note: this routine iterates over the contents of the initcall section, which
 * consists of an array of function pointers that was emitted by the linker
 * rather than the compiler. This means that these function pointers lack the
 * usual CFI stubs that the compiler emits when CFI codegen is enabled. So
 * let's disable CFI locally when handling the initcall array, to avoid
 * surprises.
 */
static int __init __attribute__((__no_sanitize__("cfi")))
fips140_init(void)
{
	const u32 *initcall;

	pr_info("loading module\n");

	unregister_existing_fips140_algos();

	/* iterate over all init routines present in this module and call them */
	for (initcall = __initcall_start + 1;
	     initcall < &__initcall_end_marker;
	     initcall++) {
		int (*init)(void) = offset_to_ptr(initcall);
		int err = init();

		/*
		 * ENODEV is expected from initcalls that only register
		 * algorithms that depend on non-present CPU features. Besides
		 * that, errors aren't expected here.
		 */
		if (err && err != -ENODEV) {
			pr_err("initcall %ps() failed: %d\n", init, err);
			goto panic;
		}
	}

	if (!update_live_fips140_algos())
		goto panic;

	if (!update_fips140_library_routines())
		goto panic;

	/*
	 * Wait until all tasks have at least been scheduled once and preempted
	 * voluntarily. This ensures that none of the superseded algorithms that
	 * were already in use will still be live.
	 */
	synchronize_rcu_tasks();

	if (!fips140_run_selftests())
		goto panic;

	/*
	 * It may seem backward to perform the integrity check last, but this
	 * is intentional: the check itself uses hmac(sha256), which is one of
	 * the algorithms that are replaced with versions from this module, and
	 * the integrity check must use the replacement version. Also, to be
	 * ready for FIPS 140-3, the integrity check algorithm must have already
	 * been self-tested.
	 */

	if (!check_fips140_module_hmac()) {
		pr_crit("integrity check failed -- giving up!\n");
		goto panic;
	}
	pr_info("integrity check passed\n");

	pr_info("module successfully loaded\n");
	return 0;

panic:
	panic("FIPS 140 module load failure");
}

module_init(fips140_init);

MODULE_IMPORT_NS(CRYPTO_INTERNAL);
MODULE_LICENSE("GPL v2");

/*
 * Crypto-related helper functions, reproduced here so that they will be
 * covered by the FIPS 140 integrity check.
 *
 * Non-cryptographic helper functions such as memcpy() can be excluded from the
 * FIPS module, but there is ambiguity about other helper functions like
 * __crypto_xor() and crypto_inc() which aren't cryptographic by themselves,
 * but are more closely associated with cryptography than e.g. memcpy(). To
 * err on the side of caution, we include copies of these in the FIPS module.
 */
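/*
 * XOR @len bytes of @src1 and @src2 into @dst, eight, four or two bytes at
 * a time where possible. Note that this copy performs potentially
 * unaligned word-sized accesses unconditionally, which assumes an
 * architecture that handles those efficiently, such as arm64 (the only
 * architecture this module currently builds for, per the CONFIG_ARM64
 * guards above).
 */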
void __crypto_xor(u8 *dst, const u8 *src1, const u8 *src2, unsigned int len)
{
	while (len >= 8) {
		*(u64 *)dst = *(u64 *)src1 ^ *(u64 *)src2;
		dst += 8;
		src1 += 8;
		src2 += 8;
		len -= 8;
	}

	while (len >= 4) {
		*(u32 *)dst = *(u32 *)src1 ^ *(u32 *)src2;
		dst += 4;
		src1 += 4;
		src2 += 4;
		len -= 4;
	}

	while (len >= 2) {
		*(u16 *)dst = *(u16 *)src1 ^ *(u16 *)src2;
		dst += 2;
		src1 += 2;
		src2 += 2;
		len -= 2;
	}

	while (len--)
		*dst++ = *src1++ ^ *src2++;
}

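/*
 * Increment the @size-byte big-endian integer at @a by one, propagating
 * the carry upward from the last (least significant) byte.
 */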
void crypto_inc(u8 *a, unsigned int size)
{
	a += size;

	while (size--)
		if (++*--a)
			break;
}