// SPDX-License-Identifier: GPL-2.0-or-later
/* Large capacity key type
 *
 * Copyright (C) 2017 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
 * Copyright (C) 2013 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define pr_fmt(fmt) "big_key: "fmt
#include <linux/init.h>
#include <linux/seq_file.h>
#include <linux/file.h>
#include <linux/shmem_fs.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/random.h>
#include <linux/vmalloc.h>
#include <keys/user-type.h>
#include <keys/big_key-type.h>
#include <crypto/aead.h>
#include <crypto/gcm.h>

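/*
 * Scratch buffer for plaintext/ciphertext.  Everything lives in a single
 * kzalloc()'d block: the header below, then nr_pages page pointers in
 * pages[], then the scatterlist array that sg points at.  It is built by
 * big_key_alloc_buffer() and torn down by big_key_free_buffer().
 */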
struct big_key_buf {
	unsigned int nr_pages;
	void *virt;
	struct scatterlist *sg;
	struct page *pages[];
};

/*
 * Layout of key payload words.
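 *
 * big_key_data holds either the kmalloc()'d payload itself (small keys) or
 * the randomly generated encryption key (file-backed keys).  big_key_path
 * and big_key_path_2nd_part are overlaid by a struct path (mount + dentry)
 * when the payload lives in a shmem file.  big_key_len is the payload
 * length, cast to and from a pointer value.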
 */
enum {
	big_key_data,
	big_key_path,
	big_key_path_2nd_part,
	big_key_len,
};

/*
 * Crypto operation with big_key data
 */
enum big_key_op {
	BIG_KEY_ENC,
	BIG_KEY_DEC,
};

/*
 * If the data is under this limit, there's no point creating a shm file to
 * hold it as the permanently resident metadata for the shmem fs will be at
 * least as large as the data.
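 *
 * (With typical 64-bit structure sizes this works out to somewhere between a
 * few hundred bytes and roughly a kilobyte, depending on the kernel
 * configuration.)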
 */
#define BIG_KEY_FILE_THRESHOLD (sizeof(struct inode) + sizeof(struct dentry))

/*
 * Key size for big_key data encryption
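 * (a 32-byte key selects AES-256 for the "gcm(aes)" algorithm below)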
 */
#define ENC_KEY_SIZE 32

/*
 * Authentication tag length
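 * (16 bytes is the full-length GCM authentication tag)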
 */
#define ENC_AUTHTAG_SIZE 16

/*
 * big_key defined keys take an arbitrary string as the description and an
 * arbitrary blob of data as the payload
 */
struct key_type key_type_big_key = {
	.name = "big_key",
	.preparse = big_key_preparse,
	.free_preparse = big_key_free_preparse,
	.instantiate = generic_key_instantiate,
	.revoke = big_key_revoke,
	.destroy = big_key_destroy,
	.describe = big_key_describe,
	.read = big_key_read,
	/* no ->update(); don't add it without changing big_key_crypt() nonce */
};
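
/*
 * Example usage from userspace (a sketch, assuming a kernel built with
 * CONFIG_BIG_KEYS and the keyutils tools installed):
 *
 *	keyctl padd big_key mysecret @s </path/to/blob
 *	keyctl pipe <key id>
 *
 * Payloads larger than BIG_KEY_FILE_THRESHOLD are encrypted and written out
 * to a shmem file; smaller ones are simply kept in kernel memory.
 */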

/*
 * Crypto names for big_key data authenticated encryption
 */
static const char big_key_alg_name[] = "gcm(aes)";
#define BIG_KEY_IV_SIZE	GCM_AES_IV_SIZE

/*
 * Crypto algorithms for big_key data authenticated encryption
 */
static struct crypto_aead *big_key_aead;

/*
 * Since changing the key affects the entire object, we need a mutex.
 */
static DEFINE_MUTEX(big_key_aead_lock);

/*
 * Encrypt/decrypt big_key data
 */
static int big_key_crypt(enum big_key_op op, struct big_key_buf *buf, size_t datalen, u8 *key)
{
	int ret;
	struct aead_request *aead_req;
	/* We always use a zero nonce. The reason we can get away with this is
	 * because we're using a different randomly generated key for every
	 * different encryption. Notably, too, key_type_big_key doesn't define
	 * an .update function, so there's no chance we'll wind up reusing the
	 * key to encrypt updated data. Simply put: one key, one encryption.
	 */
	u8 zero_nonce[BIG_KEY_IV_SIZE];

	aead_req = aead_request_alloc(big_key_aead, GFP_KERNEL);
	if (!aead_req)
		return -ENOMEM;

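	/*
	 * The operation is done in place (src and dst share the same
	 * scatterlist); on encryption the 16-byte GCM tag is written directly
	 * after the data, which is why callers size the buffer as
	 * datalen + ENC_AUTHTAG_SIZE.
	 */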
	memset(zero_nonce, 0, sizeof(zero_nonce));
	aead_request_set_crypt(aead_req, buf->sg, buf->sg, datalen, zero_nonce);
	aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
	aead_request_set_ad(aead_req, 0);

	mutex_lock(&big_key_aead_lock);
	if (crypto_aead_setkey(big_key_aead, key, ENC_KEY_SIZE)) {
		ret = -EAGAIN;
		goto error;
	}
	if (op == BIG_KEY_ENC)
		ret = crypto_aead_encrypt(aead_req);
	else
		ret = crypto_aead_decrypt(aead_req);
error:
	mutex_unlock(&big_key_aead_lock);
	aead_request_free(aead_req);
	return ret;
}

/*
 * Free up the buffer.
 */
static void big_key_free_buffer(struct big_key_buf *buf)
{
	unsigned int i;

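	/* Wipe any plaintext before the pages go back to the page allocator. */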
	if (buf->virt) {
		memset(buf->virt, 0, buf->nr_pages * PAGE_SIZE);
		vunmap(buf->virt);
	}

	for (i = 0; i < buf->nr_pages; i++)
		if (buf->pages[i])
			__free_page(buf->pages[i]);

	kfree(buf);
}

/*
 * Allocate a buffer consisting of a set of pages with a virtual mapping
 * applied over them.
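 *
 * The pages are allocated individually so that the data can be described to
 * the crypto layer as a scatterlist of discrete pages, while vmap() provides
 * a single contiguous kernel-virtual view for memcpy(), kernel_read() and
 * kernel_write().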
 */
static void *big_key_alloc_buffer(size_t len)
{
	struct big_key_buf *buf;
	unsigned int npg = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned int i, l;

	buf = kzalloc(sizeof(struct big_key_buf) +
		      sizeof(struct page) * npg +
		      sizeof(struct scatterlist) * npg,
		      GFP_KERNEL);
	if (!buf)
		return NULL;

	buf->nr_pages = npg;
	buf->sg = (void *)(buf->pages + npg);
	sg_init_table(buf->sg, npg);

	for (i = 0; i < buf->nr_pages; i++) {
		buf->pages[i] = alloc_page(GFP_KERNEL);
		if (!buf->pages[i])
			goto nomem;

		l = min_t(size_t, len, PAGE_SIZE);
		sg_set_page(&buf->sg[i], buf->pages[i], l, 0);
		len -= l;
	}

	buf->virt = vmap(buf->pages, buf->nr_pages, VM_MAP, PAGE_KERNEL);
	if (!buf->virt)
		goto nomem;

	return buf;

nomem:
	big_key_free_buffer(buf);
	return NULL;
}

/*
 * Preparse a big key
 */
int big_key_preparse(struct key_preparsed_payload *prep)
{
	struct big_key_buf *buf;
	struct path *path = (struct path *)&prep->payload.data[big_key_path];
	struct file *file;
	u8 *enckey;
	ssize_t written;
	size_t datalen = prep->datalen, enclen = datalen + ENC_AUTHTAG_SIZE;
	int ret;

	if (datalen <= 0 || datalen > 1024 * 1024 || !prep->data)
		return -EINVAL;

	/* Set an arbitrary quota */
	prep->quotalen = 16;

	prep->payload.data[big_key_len] = (void *)(unsigned long)datalen;

	if (datalen > BIG_KEY_FILE_THRESHOLD) {
		/* Create a shmem file to store the data in. This will permit
		 * the data to be swapped out if needed.
		 *
		 * The file content is stored encrypted with a randomly
		 * generated key.
		 */
		loff_t pos = 0;

		buf = big_key_alloc_buffer(enclen);
		if (!buf)
			return -ENOMEM;
		memcpy(buf->virt, prep->data, datalen);

		/* generate random key */
		enckey = kmalloc(ENC_KEY_SIZE, GFP_KERNEL);
		if (!enckey) {
			ret = -ENOMEM;
			goto error;
		}
		ret = get_random_bytes_wait(enckey, ENC_KEY_SIZE);
		if (unlikely(ret))
			goto err_enckey;

		/* encrypt aligned data */
		ret = big_key_crypt(BIG_KEY_ENC, buf, datalen, enckey);
		if (ret)
			goto err_enckey;

		/* save aligned data to file */
		file = shmem_kernel_file_setup("", enclen, 0);
		if (IS_ERR(file)) {
			ret = PTR_ERR(file);
			goto err_enckey;
		}

		written = kernel_write(file, buf->virt, enclen, &pos);
		if (written != enclen) {
			ret = written;
			if (written >= 0)
				ret = -ENOMEM;
			goto err_fput;
		}

		/* Pin the mount and dentry to the key so that we can open it
		 * again later
		 */
		prep->payload.data[big_key_data] = enckey;
		*path = file->f_path;
		path_get(path);
		fput(file);
		big_key_free_buffer(buf);
	} else {
		/* Just store the data in a buffer */
		void *data = kmalloc(datalen, GFP_KERNEL);

		if (!data)
			return -ENOMEM;

		prep->payload.data[big_key_data] = data;
		memcpy(data, prep->data, prep->datalen);
	}
	return 0;

err_fput:
	fput(file);
err_enckey:
	kzfree(enckey);
error:
	big_key_free_buffer(buf);
	return ret;
}

/*
 * Clear the preparsed payload.
 */
void big_key_free_preparse(struct key_preparsed_payload *prep)
{
	if (prep->datalen > BIG_KEY_FILE_THRESHOLD) {
		struct path *path = (struct path *)&prep->payload.data[big_key_path];

		path_put(path);
	}
	kzfree(prep->payload.data[big_key_data]);
}

/*
 * Dispose of the data attached to a key when it is revoked: clear the quota
 * and truncate away the backing file, if any.
 * - called with the key sem write-locked
 */
void big_key_revoke(struct key *key)
{
	struct path *path = (struct path *)&key->payload.data[big_key_path];

	/* clear the quota */
	key_payload_reserve(key, 0);
	if (key_is_positive(key) &&
	    (size_t)key->payload.data[big_key_len] > BIG_KEY_FILE_THRESHOLD)
		vfs_truncate(path, 0);
}

/*
 * dispose of the data dangling from the corpse of a big_key key
 */
void big_key_destroy(struct key *key)
{
	size_t datalen = (size_t)key->payload.data[big_key_len];

	if (datalen > BIG_KEY_FILE_THRESHOLD) {
		struct path *path = (struct path *)&key->payload.data[big_key_path];

		path_put(path);
		path->mnt = NULL;
		path->dentry = NULL;
	}
	kzfree(key->payload.data[big_key_data]);
	key->payload.data[big_key_data] = NULL;
}

/*
 * describe the big_key key
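 *
 * The payload size and backing store are appended to the description, so a
 * key shows up in /proc/keys as, e.g., "mysecret: 4096 [file]".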
 */
void big_key_describe(const struct key *key, struct seq_file *m)
{
	size_t datalen = (size_t)key->payload.data[big_key_len];

	seq_puts(m, key->description);

	if (key_is_positive(key))
		seq_printf(m, ": %zu [%s]",
			   datalen,
			   datalen > BIG_KEY_FILE_THRESHOLD ? "file" : "buff");
}

/*
 * read the key data
 * - the key's semaphore is read-locked
 */
long big_key_read(const struct key *key, char __user *buffer, size_t buflen)
{
	size_t datalen = (size_t)key->payload.data[big_key_len];
	long ret;

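	/* A missing or undersized buffer just reports the size required. */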
	if (!buffer || buflen < datalen)
		return datalen;

	if (datalen > BIG_KEY_FILE_THRESHOLD) {
		struct big_key_buf *buf;
		struct path *path = (struct path *)&key->payload.data[big_key_path];
		struct file *file;
		u8 *enckey = (u8 *)key->payload.data[big_key_data];
		size_t enclen = datalen + ENC_AUTHTAG_SIZE;
		loff_t pos = 0;

		buf = big_key_alloc_buffer(enclen);
		if (!buf)
			return -ENOMEM;

		file = dentry_open(path, O_RDONLY, current_cred());
		if (IS_ERR(file)) {
			ret = PTR_ERR(file);
			goto error;
		}

		/* read file to kernel and decrypt */
		ret = kernel_read(file, buf->virt, enclen, &pos);
		if (ret >= 0 && ret != enclen) {
			ret = -EIO;
			goto err_fput;
		}

		ret = big_key_crypt(BIG_KEY_DEC, buf, enclen, enckey);
		if (ret)
			goto err_fput;

		ret = datalen;

		/* copy decrypted data to user */
		if (copy_to_user(buffer, buf->virt, datalen) != 0)
			ret = -EFAULT;

err_fput:
		fput(file);
error:
		big_key_free_buffer(buf);
	} else {
		ret = datalen;
		if (copy_to_user(buffer, key->payload.data[big_key_data],
				 datalen) != 0)
			ret = -EFAULT;
	}

	return ret;
}

/*
 * Initialise the crypto transform and register the key type.
 */
static int __init big_key_init(void)
{
	int ret;

	/* initialise the AEAD transform */
	big_key_aead = crypto_alloc_aead(big_key_alg_name, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(big_key_aead)) {
		ret = PTR_ERR(big_key_aead);
		pr_err("Can't alloc crypto: %d\n", ret);
		return ret;
	}

	if (unlikely(crypto_aead_ivsize(big_key_aead) != BIG_KEY_IV_SIZE)) {
		WARN(1, "big key algorithm changed?");
		ret = -EINVAL;
		goto free_aead;
	}

	ret = crypto_aead_setauthsize(big_key_aead, ENC_AUTHTAG_SIZE);
	if (ret < 0) {
		pr_err("Can't set crypto auth tag len: %d\n", ret);
		goto free_aead;
	}

	ret = register_key_type(&key_type_big_key);
	if (ret < 0) {
		pr_err("Can't register type: %d\n", ret);
		goto free_aead;
	}

	return 0;

free_aead:
	crypto_free_aead(big_key_aead);
	return ret;
}

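/*
 * Initialise late, presumably so that the crypto subsystem and a "gcm(aes)"
 * implementation are already available by the time the key type is
 * registered.
 */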
late_initcall(big_key_init);