/* Large capacity key type
 *
 * Copyright (C) 2017 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
 * Copyright (C) 2013 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */

#define pr_fmt(fmt) "big_key: "fmt
#include <linux/init.h>
#include <linux/seq_file.h>
#include <linux/file.h>
#include <linux/shmem_fs.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/random.h>
#include <linux/vmalloc.h>
#include <keys/user-type.h>
#include <keys/big_key-type.h>
#include <crypto/aead.h>

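/*
 * Scratch buffer used to shuttle key data through the AEAD: the same pages
 * are reachable both through ->virt (a contiguous vmap used for memcpy and
 * kernel_read/kernel_write) and through ->sg (a scatterlist handed to the
 * crypto layer).
 */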
struct big_key_buf {
	unsigned int nr_pages;
	void *virt;
	struct scatterlist *sg;
	struct page *pages[];
};

/*
 * Layout of key payload words.
 */
enum {
	big_key_data,
	big_key_path,
	big_key_path_2nd_part,
	big_key_len,
};

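/*
 * Note: a struct path is two pointers (vfsmount and dentry), so it occupies
 * both the big_key_path and big_key_path_2nd_part words when the data is
 * backed by a shmem file.  big_key_data holds either the inline copy of the
 * data or the random encryption key; big_key_len holds the original length.
 */
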
/*
 * Crypto operation with big_key data
 */
enum big_key_op {
	BIG_KEY_ENC,
	BIG_KEY_DEC,
};

/*
 * If the data is under this limit, there's no point creating a shm file to
 * hold it as the permanently resident metadata for the shmem fs will be at
 * least as large as the data.
 */
#define BIG_KEY_FILE_THRESHOLD (sizeof(struct inode) + sizeof(struct dentry))

/*
 * Key size for big_key data encryption
 */
#define ENC_KEY_SIZE 32

/*
 * Authentication tag length
 */
#define ENC_AUTHTAG_SIZE 16

/*
 * big_key defined keys take an arbitrary string as the description and an
 * arbitrary blob of data as the payload
 */
struct key_type key_type_big_key = {
	.name = "big_key",
	.preparse = big_key_preparse,
	.free_preparse = big_key_free_preparse,
	.instantiate = generic_key_instantiate,
	.revoke = big_key_revoke,
	.destroy = big_key_destroy,
	.describe = big_key_describe,
	.read = big_key_read,
	/* no ->update(); don't add it without changing big_key_crypt() nonce */
};

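/*
 * Illustrative userspace usage (not part of this file; payload, len, outbuf
 * and outlen are placeholders): a big_key is created and read back through
 * the ordinary keyrings API, e.g. with the keyutils library:
 *
 *	key_serial_t id = add_key("big_key", "blob:example", payload, len,
 *				  KEY_SPEC_SESSION_KEYRING);
 *	keyctl_read(id, outbuf, outlen);
 */
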
/*
 * Crypto names for big_key data authenticated encryption
 */
static const char big_key_alg_name[] = "gcm(aes)";

/*
 * Crypto algorithms for big_key data authenticated encryption
 */
static struct crypto_aead *big_key_aead;

/*
 * Since changing the key affects the entire object, we need a mutex.
 */
static DEFINE_MUTEX(big_key_aead_lock);

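/*
 * Note: the single big_key_aead transform above is shared by every big_key;
 * crypto_aead_setkey() loads it with a per-key secret, so big_key_crypt()
 * holds big_key_aead_lock across the setkey and the encrypt/decrypt call.
 */
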
/*
 * Encrypt/decrypt big_key data
 */
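/*
 * For BIG_KEY_ENC, datalen is the plaintext length and the GCM tag is
 * appended in place, so buf must have been sized for at least
 * datalen + ENC_AUTHTAG_SIZE bytes (see big_key_preparse()).  For
 * BIG_KEY_DEC, datalen is the ciphertext length including the tag
 * (see big_key_read()).
 */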
static int big_key_crypt(enum big_key_op op, struct big_key_buf *buf, size_t datalen, u8 *key)
{
	int ret;
	struct aead_request *aead_req;
	/* We always use a zero nonce. The reason we can get away with this is
	 * because we're using a different randomly generated key for every
	 * different encryption. Notably, too, key_type_big_key doesn't define
	 * an .update function, so there's no chance we'll wind up reusing the
	 * key to encrypt updated data. Simply put: one key, one encryption.
	 */
	u8 zero_nonce[crypto_aead_ivsize(big_key_aead)];

	aead_req = aead_request_alloc(big_key_aead, GFP_KERNEL);
	if (!aead_req)
		return -ENOMEM;

	memset(zero_nonce, 0, sizeof(zero_nonce));
	aead_request_set_crypt(aead_req, buf->sg, buf->sg, datalen, zero_nonce);
	aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
	aead_request_set_ad(aead_req, 0);

	mutex_lock(&big_key_aead_lock);
	if (crypto_aead_setkey(big_key_aead, key, ENC_KEY_SIZE)) {
		ret = -EAGAIN;
		goto error;
	}
	if (op == BIG_KEY_ENC)
		ret = crypto_aead_encrypt(aead_req);
	else
		ret = crypto_aead_decrypt(aead_req);
error:
	mutex_unlock(&big_key_aead_lock);
	aead_request_free(aead_req);
	return ret;
}

/*
 * Free up the buffer.
 */
static void big_key_free_buffer(struct big_key_buf *buf)
{
	unsigned int i;

	if (buf->virt) {
		memset(buf->virt, 0, buf->nr_pages * PAGE_SIZE);
		vunmap(buf->virt);
	}

	for (i = 0; i < buf->nr_pages; i++)
		if (buf->pages[i])
			__free_page(buf->pages[i]);

	kfree(buf);
}

/*
 * Allocate a buffer consisting of a set of pages with a virtual mapping
 * applied over them.
 */
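/*
 * The header, the page pointer array and the scatterlist come from a single
 * kzalloc(): buf->sg is placed immediately after buf->pages[npg].  Each page
 * is allocated individually so the scatterlist can feed the AEAD while
 * buf->virt provides a linear view of the same memory.
 */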
static void *big_key_alloc_buffer(size_t len)
{
	struct big_key_buf *buf;
	unsigned int npg = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned int i, l;

	buf = kzalloc(sizeof(struct big_key_buf) +
		      sizeof(struct page) * npg +
		      sizeof(struct scatterlist) * npg,
		      GFP_KERNEL);
	if (!buf)
		return NULL;

	buf->nr_pages = npg;
	buf->sg = (void *)(buf->pages + npg);
	sg_init_table(buf->sg, npg);

	for (i = 0; i < buf->nr_pages; i++) {
		buf->pages[i] = alloc_page(GFP_KERNEL);
		if (!buf->pages[i])
			goto nomem;

		l = min_t(size_t, len, PAGE_SIZE);
		sg_set_page(&buf->sg[i], buf->pages[i], l, 0);
		len -= l;
	}

	buf->virt = vmap(buf->pages, buf->nr_pages, VM_MAP, PAGE_KERNEL);
	if (!buf->virt)
		goto nomem;

	return buf;

nomem:
	big_key_free_buffer(buf);
	return NULL;
}

/*
 * Preparse a big key
 */
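/*
 * Data larger than BIG_KEY_FILE_THRESHOLD is encrypted under a freshly
 * generated key and written to a shmem file; only that ENC_KEY_SIZE-byte key
 * is kept in the payload.  Smaller data is simply copied into a kmalloc'd
 * buffer at payload.data[big_key_data].
 */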
int big_key_preparse(struct key_preparsed_payload *prep)
{
	struct big_key_buf *buf;
	struct path *path = (struct path *)&prep->payload.data[big_key_path];
	struct file *file;
	u8 *enckey;
	ssize_t written;
	size_t datalen = prep->datalen, enclen = datalen + ENC_AUTHTAG_SIZE;
	int ret;

	if (datalen <= 0 || datalen > 1024 * 1024 || !prep->data)
		return -EINVAL;

	/* Set an arbitrary quota */
	prep->quotalen = 16;

	prep->payload.data[big_key_len] = (void *)(unsigned long)datalen;

	if (datalen > BIG_KEY_FILE_THRESHOLD) {
		/* Create a shmem file to store the data in. This will permit the data
		 * to be swapped out if needed.
		 *
		 * File content is stored encrypted with a randomly generated key.
		 */
		loff_t pos = 0;

		buf = big_key_alloc_buffer(enclen);
		if (!buf)
			return -ENOMEM;
		memcpy(buf->virt, prep->data, datalen);

		/* generate random key */
		enckey = kmalloc(ENC_KEY_SIZE, GFP_KERNEL);
		if (!enckey) {
			ret = -ENOMEM;
			goto error;
		}
		ret = get_random_bytes_wait(enckey, ENC_KEY_SIZE);
		if (unlikely(ret))
			goto err_enckey;

		/* encrypt aligned data */
		ret = big_key_crypt(BIG_KEY_ENC, buf, datalen, enckey);
		if (ret)
			goto err_enckey;

		/* save aligned data to file */
		file = shmem_kernel_file_setup("", enclen, 0);
		if (IS_ERR(file)) {
			ret = PTR_ERR(file);
			goto err_enckey;
		}

		written = kernel_write(file, buf->virt, enclen, &pos);
		if (written != enclen) {
			ret = written;
			if (written >= 0)
				ret = -ENOMEM;
			goto err_fput;
		}

		/* Pin the mount and dentry to the key so that we can open it again
		 * later
		 */
		prep->payload.data[big_key_data] = enckey;
		*path = file->f_path;
		path_get(path);
		fput(file);
		big_key_free_buffer(buf);
	} else {
		/* Just store the data in a buffer */
		void *data = kmalloc(datalen, GFP_KERNEL);

		if (!data)
			return -ENOMEM;

		prep->payload.data[big_key_data] = data;
		memcpy(data, prep->data, prep->datalen);
	}
	return 0;

err_fput:
	fput(file);
err_enckey:
	kzfree(enckey);
error:
	big_key_free_buffer(buf);
	return ret;
}

/*
 * Clear preparsement.
 */
void big_key_free_preparse(struct key_preparsed_payload *prep)
{
	if (prep->datalen > BIG_KEY_FILE_THRESHOLD) {
		struct path *path = (struct path *)&prep->payload.data[big_key_path];

		path_put(path);
	}
	kzfree(prep->payload.data[big_key_data]);
}

/*
 * Clear the quota and truncate the backing file of a revoked big_key
 * - called with the key sem write-locked
 */
void big_key_revoke(struct key *key)
{
	struct path *path = (struct path *)&key->payload.data[big_key_path];

	/* clear the quota */
	key_payload_reserve(key, 0);
	if (key_is_positive(key) &&
	    (size_t)key->payload.data[big_key_len] > BIG_KEY_FILE_THRESHOLD)
		vfs_truncate(path, 0);
}

/*
 * dispose of the data dangling from the corpse of a big_key key
 */
void big_key_destroy(struct key *key)
{
	size_t datalen = (size_t)key->payload.data[big_key_len];

	if (datalen > BIG_KEY_FILE_THRESHOLD) {
		struct path *path = (struct path *)&key->payload.data[big_key_path];

		path_put(path);
		path->mnt = NULL;
		path->dentry = NULL;
	}
	kzfree(key->payload.data[big_key_data]);
	key->payload.data[big_key_data] = NULL;
}

/*
 * describe the big_key key
 */
void big_key_describe(const struct key *key, struct seq_file *m)
{
	size_t datalen = (size_t)key->payload.data[big_key_len];

	seq_puts(m, key->description);

	if (key_is_positive(key))
		seq_printf(m, ": %zu [%s]",
			   datalen,
			   datalen > BIG_KEY_FILE_THRESHOLD ? "file" : "buff");
}

/*
 * read the key data
 * - the key's semaphore is read-locked
 */
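/*
 * A NULL buffer, or a buflen shorter than the stored data, makes this return
 * the required length without copying anything out.
 */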
long big_key_read(const struct key *key, char __user *buffer, size_t buflen)
{
	size_t datalen = (size_t)key->payload.data[big_key_len];
	long ret;

	if (!buffer || buflen < datalen)
		return datalen;

	if (datalen > BIG_KEY_FILE_THRESHOLD) {
		struct big_key_buf *buf;
		struct path *path = (struct path *)&key->payload.data[big_key_path];
		struct file *file;
		u8 *enckey = (u8 *)key->payload.data[big_key_data];
		size_t enclen = datalen + ENC_AUTHTAG_SIZE;
		loff_t pos = 0;

		buf = big_key_alloc_buffer(enclen);
		if (!buf)
			return -ENOMEM;

		file = dentry_open(path, O_RDONLY, current_cred());
		if (IS_ERR(file)) {
			ret = PTR_ERR(file);
			goto error;
		}

		/* read file to kernel and decrypt */
		ret = kernel_read(file, buf->virt, enclen, &pos);
		if (ret >= 0 && ret != enclen) {
			ret = -EIO;
			goto err_fput;
		}

		ret = big_key_crypt(BIG_KEY_DEC, buf, enclen, enckey);
		if (ret)
			goto err_fput;

		ret = datalen;

		/* copy decrypted data to user */
		if (copy_to_user(buffer, buf->virt, datalen) != 0)
			ret = -EFAULT;

err_fput:
		fput(file);
error:
		big_key_free_buffer(buf);
	} else {
		ret = datalen;
		if (copy_to_user(buffer, key->payload.data[big_key_data],
				 datalen) != 0)
			ret = -EFAULT;
	}

	return ret;
}

/*
 * Register key type
 */
static int __init big_key_init(void)
{
	int ret;

	/* init AEAD cipher */
	big_key_aead = crypto_alloc_aead(big_key_alg_name, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(big_key_aead)) {
		ret = PTR_ERR(big_key_aead);
		pr_err("Can't alloc crypto: %d\n", ret);
		return ret;
	}
	ret = crypto_aead_setauthsize(big_key_aead, ENC_AUTHTAG_SIZE);
	if (ret < 0) {
		pr_err("Can't set crypto auth tag len: %d\n", ret);
		goto free_aead;
	}

	ret = register_key_type(&key_type_big_key);
	if (ret < 0) {
		pr_err("Can't register type: %d\n", ret);
		goto free_aead;
	}

	return 0;

free_aead:
	crypto_free_aead(big_key_aead);
	return ret;
}

late_initcall(big_key_init);