/*
 * This contains encryption functions for per-file encryption.
 *
 * Copyright (C) 2015, Google, Inc.
 * Copyright (C) 2015, Motorola Mobility
 *
 * Written by Michael Halcrow, 2014.
 *
 * Filename encryption additions
 *	Uday Savagaonkar, 2014
 * Encryption policy handling additions
 *	Ildar Muslukhov, 2014
 * Add fscrypt_pullback_bio_page()
 *	Jaegeuk Kim, 2015.
 *
 * This has not yet undergone a rigorous security audit.
 *
 * The usage of AES-XTS should conform to recommendations in NIST
 * Special Publication 800-38E and IEEE P1619/D16.
 */

#include <linux/pagemap.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/ratelimit.h>
#include <linux/dcache.h>
#include <linux/namei.h>
#include <crypto/aes.h>
#include <crypto/skcipher.h>
#include "fscrypt_private.h"

static unsigned int num_prealloc_crypto_pages = 32;
static unsigned int num_prealloc_crypto_ctxs = 128;

module_param(num_prealloc_crypto_pages, uint, 0444);
MODULE_PARM_DESC(num_prealloc_crypto_pages,
		"Number of crypto pages to preallocate");
module_param(num_prealloc_crypto_ctxs, uint, 0444);
MODULE_PARM_DESC(num_prealloc_crypto_ctxs,
		"Number of crypto contexts to preallocate");

static mempool_t *fscrypt_bounce_page_pool = NULL;

static LIST_HEAD(fscrypt_free_ctxs);
static DEFINE_SPINLOCK(fscrypt_ctx_lock);

struct workqueue_struct *fscrypt_read_workqueue;
static DEFINE_MUTEX(fscrypt_init_mutex);

static struct kmem_cache *fscrypt_ctx_cachep;
struct kmem_cache *fscrypt_info_cachep;

/**
 * fscrypt_release_ctx() - Releases an encryption context
 * @ctx: The encryption context to release.
 *
 * If the encryption context was allocated from the pre-allocated pool, returns
 * it to that pool. Else, frees it.
 *
 * If there's a bounce page in the context, this frees that.
 */
void fscrypt_release_ctx(struct fscrypt_ctx *ctx)
{
	unsigned long flags;

	if (ctx->flags & FS_CTX_HAS_BOUNCE_BUFFER_FL && ctx->w.bounce_page) {
		mempool_free(ctx->w.bounce_page, fscrypt_bounce_page_pool);
		ctx->w.bounce_page = NULL;
	}
	ctx->w.control_page = NULL;
	if (ctx->flags & FS_CTX_REQUIRES_FREE_ENCRYPT_FL) {
		kmem_cache_free(fscrypt_ctx_cachep, ctx);
	} else {
		spin_lock_irqsave(&fscrypt_ctx_lock, flags);
		list_add(&ctx->free_list, &fscrypt_free_ctxs);
		spin_unlock_irqrestore(&fscrypt_ctx_lock, flags);
	}
}
EXPORT_SYMBOL(fscrypt_release_ctx);

/**
 * fscrypt_get_ctx() - Gets an encryption context
 * @inode:       The inode for which we are doing the crypto
 * @gfp_flags:   The gfp flag for memory allocation
 *
 * Allocates and initializes an encryption context.
 *
 * Return: An allocated and initialized encryption context on success; error
 * value or NULL otherwise.
 */
struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *inode, gfp_t gfp_flags)
{
	struct fscrypt_ctx *ctx = NULL;
	struct fscrypt_info *ci = inode->i_crypt_info;
	unsigned long flags;

	if (ci == NULL)
		return ERR_PTR(-ENOKEY);

	/*
	 * We first try getting the ctx from a free list because in
	 * the common case the ctx will have an allocated and
	 * initialized crypto tfm, so it's probably a worthwhile
	 * optimization. For the bounce page, we first try getting it
	 * from the kernel allocator because that's just about as fast
	 * as getting it from a list and because a cache of free pages
	 * should generally be a "last resort" option for a filesystem
	 * to be able to do its job.
	 */
	spin_lock_irqsave(&fscrypt_ctx_lock, flags);
	ctx = list_first_entry_or_null(&fscrypt_free_ctxs,
					struct fscrypt_ctx, free_list);
	if (ctx)
		list_del(&ctx->free_list);
	spin_unlock_irqrestore(&fscrypt_ctx_lock, flags);
	if (!ctx) {
		ctx = kmem_cache_zalloc(fscrypt_ctx_cachep, gfp_flags);
		if (!ctx)
			return ERR_PTR(-ENOMEM);
		ctx->flags |= FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
	} else {
		ctx->flags &= ~FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
	}
	ctx->flags &= ~FS_CTX_HAS_BOUNCE_BUFFER_FL;
	return ctx;
}
EXPORT_SYMBOL(fscrypt_get_ctx);
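
/*
 * Usage sketch (illustrative only, not lifted from an in-tree filesystem):
 * a caller typically pairs fscrypt_get_ctx() with fscrypt_release_ctx()
 * around the crypto work, for example when setting up a read whose pages
 * must be decrypted after the I/O completes:
 *
 *	struct fscrypt_ctx *ctx = fscrypt_get_ctx(inode, GFP_NOFS);
 *
 *	if (IS_ERR(ctx))
 *		return PTR_ERR(ctx);
 *	... attach ctx to the bio, perform the I/O and the crypto work ...
 *	fscrypt_release_ctx(ctx);
 */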

int fscrypt_do_page_crypto(const struct inode *inode, fscrypt_direction_t rw,
			   u64 lblk_num, struct page *src_page,
			   struct page *dest_page, unsigned int len,
			   unsigned int offs, gfp_t gfp_flags)
{
	struct {
		__le64 index;
		u8 padding[FS_IV_SIZE - sizeof(__le64)];
	} iv;
	struct skcipher_request *req = NULL;
	DECLARE_CRYPTO_WAIT(wait);
	struct scatterlist dst, src;
	struct fscrypt_info *ci = inode->i_crypt_info;
	struct crypto_skcipher *tfm = ci->ci_ctfm;
	int res = 0;

	BUG_ON(len == 0);

	BUILD_BUG_ON(sizeof(iv) != FS_IV_SIZE);
	BUILD_BUG_ON(AES_BLOCK_SIZE != FS_IV_SIZE);
	iv.index = cpu_to_le64(lblk_num);
	memset(iv.padding, 0, sizeof(iv.padding));

	if (ci->ci_essiv_tfm != NULL) {
		crypto_cipher_encrypt_one(ci->ci_essiv_tfm, (u8 *)&iv,
					  (u8 *)&iv);
	}

	req = skcipher_request_alloc(tfm, gfp_flags);
	if (!req)
		return -ENOMEM;

	skcipher_request_set_callback(
		req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
		crypto_req_done, &wait);

	sg_init_table(&dst, 1);
	sg_set_page(&dst, dest_page, len, offs);
	sg_init_table(&src, 1);
	sg_set_page(&src, src_page, len, offs);
	skcipher_request_set_crypt(req, &src, &dst, len, &iv);
	if (rw == FS_DECRYPT)
		res = crypto_wait_req(crypto_skcipher_decrypt(req), &wait);
	else
		res = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
	skcipher_request_free(req);
	if (res) {
		printk_ratelimited(KERN_ERR
			"%s: crypto_skcipher_encrypt() returned %d\n",
			__func__, res);
		return res;
	}
	return 0;
}

struct page *fscrypt_alloc_bounce_page(struct fscrypt_ctx *ctx,
				       gfp_t gfp_flags)
{
	ctx->w.bounce_page = mempool_alloc(fscrypt_bounce_page_pool, gfp_flags);
	if (ctx->w.bounce_page == NULL)
		return ERR_PTR(-ENOMEM);
	ctx->flags |= FS_CTX_HAS_BOUNCE_BUFFER_FL;
	return ctx->w.bounce_page;
}

/**
 * fscrypt_encrypt_page() - Encrypts a page
 * @inode:        The inode for which the encryption should take place
 * @page:         The page to encrypt. Must be locked for bounce-page
 *                encryption.
 * @len:          Length of data to encrypt in @page and encrypted
 *                data in returned page.
 * @offs:         Offset of data within @page and returned
 *                page holding encrypted data.
 * @lblk_num:     Logical block number. This must be unique for multiple
 *                calls with same inode, except when overwriting
 *                previously written data.
 * @gfp_flags:    The gfp flag for memory allocation
 *
 * Encrypts @page using the ctx encryption context. Performs encryption
 * either in-place or into a newly allocated bounce page.
 * Called on the page write path.
 *
 * Bounce page allocation is the default.
 * In this case, the contents of @page are encrypted and stored in an
 * allocated bounce page. @page has to be locked and the caller must call
 * fscrypt_restore_control_page() on the returned ciphertext page to
 * release the bounce buffer and the encryption context.
 *
 * In-place encryption is used by setting the FS_CFLG_OWN_PAGES flag in
 * fscrypt_operations. Here, the input-page is returned with its content
 * encrypted.
 *
 * Return: A page with the encrypted content on success. Else, an
 * error value or NULL.
 */
struct page *fscrypt_encrypt_page(const struct inode *inode,
				struct page *page,
				unsigned int len,
				unsigned int offs,
				u64 lblk_num, gfp_t gfp_flags)
{
	struct fscrypt_ctx *ctx;
	struct page *ciphertext_page = page;
	int err;

	BUG_ON(len % FS_CRYPTO_BLOCK_SIZE != 0);

	if (inode->i_sb->s_cop->flags & FS_CFLG_OWN_PAGES) {
		/* with inplace-encryption we just encrypt the page */
		err = fscrypt_do_page_crypto(inode, FS_ENCRYPT, lblk_num, page,
					     ciphertext_page, len, offs,
					     gfp_flags);
		if (err)
			return ERR_PTR(err);

		return ciphertext_page;
	}

	BUG_ON(!PageLocked(page));

	ctx = fscrypt_get_ctx(inode, gfp_flags);
	if (IS_ERR(ctx))
		return (struct page *)ctx;

	/* The encryption operation will require a bounce page. */
	ciphertext_page = fscrypt_alloc_bounce_page(ctx, gfp_flags);
	if (IS_ERR(ciphertext_page))
		goto errout;

	ctx->w.control_page = page;
	err = fscrypt_do_page_crypto(inode, FS_ENCRYPT, lblk_num,
				     page, ciphertext_page, len, offs,
				     gfp_flags);
	if (err) {
		ciphertext_page = ERR_PTR(err);
		goto errout;
	}
	SetPagePrivate(ciphertext_page);
	set_page_private(ciphertext_page, (unsigned long)ctx);
	lock_page(ciphertext_page);
	return ciphertext_page;

errout:
	fscrypt_release_ctx(ctx);
	return ciphertext_page;
}
EXPORT_SYMBOL(fscrypt_encrypt_page);
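
/*
 * Usage sketch (illustrative only): in the default bounce-page mode, a
 * filesystem's writeback path submits the returned ciphertext page to the
 * block layer and calls fscrypt_restore_control_page() once the write has
 * completed.  This assumes the filesystem's block size equals PAGE_SIZE so
 * page->index can serve as the logical block number, and
 * example_submit_write() is a hypothetical helper, not a real API:
 *
 *	ciphertext_page = fscrypt_encrypt_page(inode, page, PAGE_SIZE, 0,
 *					       page->index, GFP_NOFS);
 *	if (IS_ERR(ciphertext_page))
 *		return PTR_ERR(ciphertext_page);
 *	example_submit_write(ciphertext_page);
 *	...
 *	fscrypt_restore_control_page(ciphertext_page);
 */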

/**
 * fscrypt_decrypt_page() - Decrypts a page in-place
 * @inode:     The corresponding inode for the page to decrypt.
 * @page:      The page to decrypt. Must be locked in case
 *             it is a writeback page (FS_CFLG_OWN_PAGES unset).
 * @len:       Number of bytes in @page to be decrypted.
 * @offs:      Start of data in @page.
 * @lblk_num:  Logical block number.
 *
 * Decrypts page in-place using the ctx encryption context.
 *
 * Called from the read completion callback.
 *
 * Return: Zero on success, non-zero otherwise.
 */
int fscrypt_decrypt_page(const struct inode *inode, struct page *page,
			unsigned int len, unsigned int offs, u64 lblk_num)
{
	if (!(inode->i_sb->s_cop->flags & FS_CFLG_OWN_PAGES))
		BUG_ON(!PageLocked(page));

	return fscrypt_do_page_crypto(inode, FS_DECRYPT, lblk_num, page, page,
				      len, offs, GFP_NOFS);
}
EXPORT_SYMBOL(fscrypt_decrypt_page);
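
/*
 * Usage sketch (illustrative only): a read-completion handler for a
 * filesystem whose block size equals PAGE_SIZE might decrypt each page in
 * place before marking it up to date; bio/bvec iteration is elided here
 * and depends on the calling filesystem:
 *
 *	if (fscrypt_decrypt_page(inode, page, PAGE_SIZE, 0, page->index))
 *		SetPageError(page);
 *	else
 *		SetPageUptodate(page);
 *	unlock_page(page);
 */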

/*
 * Validate dentries for encrypted directories to make sure we aren't
 * potentially caching stale data after a key has been added or
 * removed.
 */
static int fscrypt_d_revalidate(struct dentry *dentry, unsigned int flags)
{
	struct dentry *dir;
	int dir_has_key, cached_with_key;

	if (flags & LOOKUP_RCU)
		return -ECHILD;

	dir = dget_parent(dentry);
	if (!IS_ENCRYPTED(d_inode(dir))) {
		dput(dir);
		return 0;
	}

	/* this should eventually be a flag in d_flags */
	spin_lock(&dentry->d_lock);
	cached_with_key = dentry->d_flags & DCACHE_ENCRYPTED_WITH_KEY;
	spin_unlock(&dentry->d_lock);
	dir_has_key = (d_inode(dir)->i_crypt_info != NULL);
	dput(dir);

	/*
	 * If the dentry was cached without the key, and it is a
	 * negative dentry, it might be a valid name. We can't check
	 * if the key has since been made available due to locking
	 * reasons, so we fail the validation so ext4_lookup() can do
	 * this check.
	 *
	 * We also fail the validation if the dentry was created with
	 * the key present, but we no longer have the key, or vice versa.
	 */
	if ((!cached_with_key && d_is_negative(dentry)) ||
			(!cached_with_key && dir_has_key) ||
			(cached_with_key && !dir_has_key))
		return 0;
	return 1;
}

const struct dentry_operations fscrypt_d_ops = {
	.d_revalidate = fscrypt_d_revalidate,
};

void fscrypt_restore_control_page(struct page *page)
{
	struct fscrypt_ctx *ctx;

	ctx = (struct fscrypt_ctx *)page_private(page);
	set_page_private(page, (unsigned long)NULL);
	ClearPagePrivate(page);
	unlock_page(page);
	fscrypt_release_ctx(ctx);
}
EXPORT_SYMBOL(fscrypt_restore_control_page);

static void fscrypt_destroy(void)
{
	struct fscrypt_ctx *pos, *n;

	list_for_each_entry_safe(pos, n, &fscrypt_free_ctxs, free_list)
		kmem_cache_free(fscrypt_ctx_cachep, pos);
	INIT_LIST_HEAD(&fscrypt_free_ctxs);
	mempool_destroy(fscrypt_bounce_page_pool);
	fscrypt_bounce_page_pool = NULL;
}

/**
 * fscrypt_initialize() - allocate major buffers for fs encryption.
 * @cop_flags:  fscrypt operations flags
 *
 * We only call this when we start accessing encrypted files, since it
 * results in memory getting allocated that wouldn't otherwise be used.
 *
 * Return: Zero on success, non-zero otherwise.
 */
int fscrypt_initialize(unsigned int cop_flags)
{
	int i, res = -ENOMEM;

	/* No need to allocate a bounce page pool if this FS won't use it. */
	if (cop_flags & FS_CFLG_OWN_PAGES)
		return 0;

	mutex_lock(&fscrypt_init_mutex);
	if (fscrypt_bounce_page_pool)
		goto already_initialized;

	for (i = 0; i < num_prealloc_crypto_ctxs; i++) {
		struct fscrypt_ctx *ctx;

		ctx = kmem_cache_zalloc(fscrypt_ctx_cachep, GFP_NOFS);
		if (!ctx)
			goto fail;
		list_add(&ctx->free_list, &fscrypt_free_ctxs);
	}

	fscrypt_bounce_page_pool =
		mempool_create_page_pool(num_prealloc_crypto_pages, 0);
	if (!fscrypt_bounce_page_pool)
		goto fail;

already_initialized:
	mutex_unlock(&fscrypt_init_mutex);
	return 0;
fail:
	fscrypt_destroy();
	mutex_unlock(&fscrypt_init_mutex);
	return res;
}

/**
 * fscrypt_init() - Set up for fs encryption.
 */
static int __init fscrypt_init(void)
{
	/*
	 * Use an unbound workqueue to allow bios to be decrypted in parallel
	 * even when they happen to complete on the same CPU.  This sacrifices
	 * locality, but it's worthwhile since decryption is CPU-intensive.
	 *
	 * Also use a high-priority workqueue to prioritize decryption work,
	 * which blocks reads from completing, over regular application tasks.
	 */
	fscrypt_read_workqueue = alloc_workqueue("fscrypt_read_queue",
						 WQ_UNBOUND | WQ_HIGHPRI,
						 num_online_cpus());
	if (!fscrypt_read_workqueue)
		goto fail;

	fscrypt_ctx_cachep = KMEM_CACHE(fscrypt_ctx, SLAB_RECLAIM_ACCOUNT);
	if (!fscrypt_ctx_cachep)
		goto fail_free_queue;

	fscrypt_info_cachep = KMEM_CACHE(fscrypt_info, SLAB_RECLAIM_ACCOUNT);
	if (!fscrypt_info_cachep)
		goto fail_free_ctx;

	return 0;

fail_free_ctx:
	kmem_cache_destroy(fscrypt_ctx_cachep);
fail_free_queue:
	destroy_workqueue(fscrypt_read_workqueue);
fail:
	return -ENOMEM;
}
module_init(fscrypt_init)

/**
 * fscrypt_exit() - Shutdown the fs encryption system
 */
static void __exit fscrypt_exit(void)
{
	fscrypt_destroy();

	if (fscrypt_read_workqueue)
		destroy_workqueue(fscrypt_read_workqueue);
	kmem_cache_destroy(fscrypt_ctx_cachep);
	kmem_cache_destroy(fscrypt_info_cachep);

	fscrypt_essiv_cleanup();
}
module_exit(fscrypt_exit);

MODULE_LICENSE("GPL");