/*
 * Copyright (C) 2003 Jana Saout <jana@saout.de>
 * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org>
 * Copyright (C) 2006-2020 Red Hat, Inc. All rights reserved.
 * Copyright (C) 2013-2020 Milan Broz <gmazyland@gmail.com>
 *
 * This file is released under the GPL.
 */

#include <linux/completion.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/key.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/backing-dev.h>
#include <linux/atomic.h>
#include <linux/scatterlist.h>
#include <linux/rbtree.h>
#include <linux/ctype.h>
#include <asm/page.h>
#include <asm/unaligned.h>
#include <crypto/hash.h>
#include <crypto/md5.h>
#include <crypto/algapi.h>
#include <crypto/skcipher.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/rtnetlink.h> /* for struct rtattr and RTA macros only */
#include <linux/key-type.h>
#include <keys/user-type.h>
#include <keys/encrypted-type.h>

#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "crypt"

/*
 * context holding the current state of a multi-part conversion
 */
struct convert_context {
	struct completion restart;
	struct bio *bio_in;
	struct bio *bio_out;
	struct bvec_iter iter_in;
	struct bvec_iter iter_out;
	u64 cc_sector;
	atomic_t cc_pending;
	union {
		struct skcipher_request *req;
		struct aead_request *req_aead;
	} r;

};

/*
 * per bio private data
 */
struct dm_crypt_io {
	struct crypt_config *cc;
	struct bio *base_bio;
	u8 *integrity_metadata;
	bool integrity_metadata_from_pool;
	struct work_struct work;
	struct tasklet_struct tasklet;

	struct convert_context ctx;

	atomic_t io_pending;
	blk_status_t error;
	sector_t sector;

	struct rb_node rb_node;
} CRYPTO_MINALIGN_ATTR;

struct dm_crypt_request {
	struct convert_context *ctx;
	struct scatterlist sg_in[4];
	struct scatterlist sg_out[4];
	u64 iv_sector;
};

struct crypt_config;

struct crypt_iv_operations {
	int (*ctr)(struct crypt_config *cc, struct dm_target *ti,
		   const char *opts);
	void (*dtr)(struct crypt_config *cc);
	int (*init)(struct crypt_config *cc);
	int (*wipe)(struct crypt_config *cc);
	int (*generator)(struct crypt_config *cc, u8 *iv,
			 struct dm_crypt_request *dmreq);
	int (*post)(struct crypt_config *cc, u8 *iv,
		    struct dm_crypt_request *dmreq);
};

struct iv_benbi_private {
	int shift;
};

#define LMK_SEED_SIZE 64 /* hash + 0 */
struct iv_lmk_private {
	struct crypto_shash *hash_tfm;
	u8 *seed;
};

#define TCW_WHITENING_SIZE 16
struct iv_tcw_private {
	struct crypto_shash *crc32_tfm;
	u8 *iv_seed;
	u8 *whitening;
};

#define ELEPHANT_MAX_KEY_SIZE 32
struct iv_elephant_private {
	struct crypto_skcipher *tfm;
};

/*
 * Crypt: maps a linear range of a block device
 * and encrypts / decrypts at the same time.
 */
enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID,
	     DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD,
	     DM_CRYPT_NO_READ_WORKQUEUE, DM_CRYPT_NO_WRITE_WORKQUEUE,
	     DM_CRYPT_WRITE_INLINE };

enum cipher_flags {
	CRYPT_MODE_INTEGRITY_AEAD,	/* Use authenticated mode for cipher */
	CRYPT_IV_LARGE_SECTORS,		/* Calculate IV from sector_size, not 512B sectors */
	CRYPT_ENCRYPT_PREPROCESS,	/* Must preprocess data for encryption (elephant) */
};

/*
 * The fields in here must be read only after initialization.
 */
struct crypt_config {
	struct dm_dev *dev;
	sector_t start;

	struct percpu_counter n_allocated_pages;

	struct workqueue_struct *io_queue;
	struct workqueue_struct *crypt_queue;

	spinlock_t write_thread_lock;
	struct task_struct *write_thread;
	struct rb_root write_tree;

	char *cipher_string;
	char *cipher_auth;
	char *key_string;

	const struct crypt_iv_operations *iv_gen_ops;
	union {
		struct iv_benbi_private benbi;
		struct iv_lmk_private lmk;
		struct iv_tcw_private tcw;
		struct iv_elephant_private elephant;
	} iv_gen_private;
	u64 iv_offset;
	unsigned int iv_size;
	unsigned short int sector_size;
	unsigned char sector_shift;

	union {
		struct crypto_skcipher **tfms;
		struct crypto_aead **tfms_aead;
	} cipher_tfm;
	unsigned tfms_count;
	unsigned long cipher_flags;

	/*
	 * Layout of each crypto request:
	 *
	 *   struct skcipher_request
	 *      context
	 *      padding
	 *   struct dm_crypt_request
	 *      padding
	 *   IV
	 *
	 * The padding is added so that dm_crypt_request and the IV are
	 * correctly aligned.
	 */
	unsigned int dmreq_start;

	unsigned int per_bio_data_size;

	unsigned long flags;
	unsigned int key_size;
	unsigned int key_parts;      /* independent parts in key buffer */
	unsigned int key_extra_size; /* additional keys length */
	unsigned int key_mac_size;   /* MAC key size for authenc(...) */

	unsigned int integrity_tag_size;
	unsigned int integrity_iv_size;
	unsigned int on_disk_tag_size;

	/*
	 * pool for per bio private data, crypto requests,
	 * encryption requests/buffer pages and integrity tags
	 */
	unsigned tag_pool_max_sectors;
	mempool_t tag_pool;
	mempool_t req_pool;
	mempool_t page_pool;

	struct bio_set bs;
	struct mutex bio_alloc_lock;

	u8 *authenc_key; /* space for keys in authenc() format (if used) */
	u8 key[];
};

#define MIN_IOS		64
#define MAX_TAG_SIZE	480
#define POOL_ENTRY_SIZE	512

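/*
 * Bookkeeping for data pages allocated by all dm-crypt instances.  The
 * per-client budget in dm_crypt_pages_per_client is presumably derived
 * from DM_CRYPT_MEMORY_PERCENT of system memory divided among the
 * dm_crypt_clients_n active clients, with DM_CRYPT_MIN_PAGES_PER_CLIENT
 * as a floor; the helper that recomputes it is outside this excerpt.
 */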
static DEFINE_SPINLOCK(dm_crypt_clients_lock);
static unsigned dm_crypt_clients_n = 0;
static volatile unsigned long dm_crypt_pages_per_client;
#define DM_CRYPT_MEMORY_PERCENT			2
#define DM_CRYPT_MIN_PAGES_PER_CLIENT		(BIO_MAX_PAGES * 16)

static void clone_init(struct dm_crypt_io *, struct bio *);
static void kcryptd_queue_crypt(struct dm_crypt_io *io);
static struct scatterlist *crypt_get_sg_data(struct crypt_config *cc,
					     struct scatterlist *sg);

static bool crypt_integrity_aead(struct crypt_config *cc);

/*
 * Use this to access cipher attributes that are independent of the key.
 */
static struct crypto_skcipher *any_tfm(struct crypt_config *cc)
{
	return cc->cipher_tfm.tfms[0];
}

static struct crypto_aead *any_tfm_aead(struct crypt_config *cc)
{
	return cc->cipher_tfm.tfms_aead[0];
}

/*
 * Different IV generation algorithms:
 *
 * plain: the initial vector is the 32-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * plain64: the initial vector is the 64-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * plain64be: the initial vector is the 64-bit big-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * essiv: "encrypted sector|salt initial vector", the sector number is
 *        encrypted with the bulk cipher using a salt as key. The salt
 *        should be derived from the bulk cipher's key via hashing.
 *
 * benbi: the 64-bit "big-endian 'narrow block'-count", starting at 1
 *        (needed for LRW-32-AES and possibly other narrow block modes)
 *
 * null: the initial vector is always zero.  Provides compatibility with
 *       obsolete loop_fish2 devices.  Do not use for new devices.
 *
 * lmk:  Compatible implementation of the block chaining mode used
 *       by the Loop-AES block device encryption system
 *       designed by Jari Ruusu. See http://loop-aes.sourceforge.net/
 *       It operates on full 512 byte sectors and uses CBC
 *       with an IV derived from the sector number, the data and
 *       optionally extra IV seed.
 *       This means that after decryption the first block
 *       of sector must be tweaked according to decrypted data.
 *       Loop-AES can use three encryption schemes:
 *         version 1: is plain aes-cbc mode
 *         version 2: uses 64 multikey scheme with lmk IV generator
 *         version 3: the same as version 2 with additional IV seed
 *                    (it uses 65 keys, last key is used as IV seed)
 *
 * tcw:  Compatible implementation of the block chaining mode used
 *       by the TrueCrypt device encryption system (prior to version 4.1).
 *       For more info see: https://gitlab.com/cryptsetup/cryptsetup/wikis/TrueCryptOnDiskFormat
 *       It operates on full 512 byte sectors and uses CBC
 *       with an IV derived from initial key and the sector number.
 *       In addition, whitening value is applied on every sector, whitening
 *       is calculated from initial key, sector number and mixed using CRC32.
 *       Note that this encryption scheme is vulnerable to watermarking attacks
 *       and should be used for old compatible containers access only.
 *
 * eboiv: Encrypted byte-offset IV (used in Bitlocker in CBC mode)
 *        The IV is encrypted little-endian byte-offset (with the same key
 *        and cipher as the volume).
 *
 * elephant: The extended version of eboiv with additional Elephant diffuser
 *           used with Bitlocker CBC mode.
 *           This mode was used in older Windows systems
 *           https://download.microsoft.com/download/0/2/3/0238acaf-d3bf-4a6d-b3d6-0a0be4bbb36e/bitlockercipher200608.pdf
 */

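/*
 * Worked example for the plain* generators below: with a 16-byte IV and
 * sector 5, plain yields 05 00 00 00 followed by twelve zero bytes,
 * plain64 yields 05 00 00 00 00 00 00 00 followed by eight zero bytes,
 * and plain64be places 00 ... 00 05 in the last eight bytes.
 */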
static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv,
			      struct dm_crypt_request *dmreq)
{
	memset(iv, 0, cc->iv_size);
	*(__le32 *)iv = cpu_to_le32(dmreq->iv_sector & 0xffffffff);

	return 0;
}

static int crypt_iv_plain64_gen(struct crypt_config *cc, u8 *iv,
				struct dm_crypt_request *dmreq)
{
	memset(iv, 0, cc->iv_size);
	*(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);

	return 0;
}

static int crypt_iv_plain64be_gen(struct crypt_config *cc, u8 *iv,
				  struct dm_crypt_request *dmreq)
{
	memset(iv, 0, cc->iv_size);
	/* iv_size is at least of size u64; usually it is 16 bytes */
	*(__be64 *)&iv[cc->iv_size - sizeof(u64)] = cpu_to_be64(dmreq->iv_sector);

	return 0;
}

static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv,
			      struct dm_crypt_request *dmreq)
{
	/*
	 * ESSIV encryption of the IV is now handled by the crypto API,
	 * so just pass the plain sector number here.
	 */
	memset(iv, 0, cc->iv_size);
	*(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);

	return 0;
}

static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	unsigned bs;
	int log;

	if (crypt_integrity_aead(cc))
		bs = crypto_aead_blocksize(any_tfm_aead(cc));
	else
		bs = crypto_skcipher_blocksize(any_tfm(cc));
	log = ilog2(bs);

	/* we need to calculate how far we must shift the sector count
	 * to get the cipher block count; we use this shift in _gen */

	if (1 << log != bs) {
		ti->error = "cypher blocksize is not a power of 2";
		return -EINVAL;
	}

	if (log > 9) {
		ti->error = "cypher blocksize is > 512";
		return -EINVAL;
	}

	cc->iv_gen_private.benbi.shift = 9 - log;

	return 0;
}

static void crypt_iv_benbi_dtr(struct crypt_config *cc)
{
}

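/*
 * Example of the shift computed above: AES has 16-byte blocks, so
 * log = 4 and shift = 9 - 4 = 5; a 512-byte sector then covers 32
 * cipher blocks and sector N maps to the big-endian block count
 * (N << 5) + 1 generated below.
 */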
static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv,
			      struct dm_crypt_request *dmreq)
{
	__be64 val;

	memset(iv, 0, cc->iv_size - sizeof(u64)); /* rest is cleared below */

	val = cpu_to_be64(((u64)dmreq->iv_sector << cc->iv_gen_private.benbi.shift) + 1);
	put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64)));

	return 0;
}

static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv,
			     struct dm_crypt_request *dmreq)
{
	memset(iv, 0, cc->iv_size);

	return 0;
}

static void crypt_iv_lmk_dtr(struct crypt_config *cc)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;

	if (lmk->hash_tfm && !IS_ERR(lmk->hash_tfm))
		crypto_free_shash(lmk->hash_tfm);
	lmk->hash_tfm = NULL;

	kfree_sensitive(lmk->seed);
	lmk->seed = NULL;
}

static int crypt_iv_lmk_ctr(struct crypt_config *cc, struct dm_target *ti,
			    const char *opts)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;

	if (cc->sector_size != (1 << SECTOR_SHIFT)) {
		ti->error = "Unsupported sector size for LMK";
		return -EINVAL;
	}

	lmk->hash_tfm = crypto_alloc_shash("md5", 0,
					   CRYPTO_ALG_ALLOCATES_MEMORY);
	if (IS_ERR(lmk->hash_tfm)) {
		ti->error = "Error initializing LMK hash";
		return PTR_ERR(lmk->hash_tfm);
	}

	/* No seed in LMK version 2 */
	if (cc->key_parts == cc->tfms_count) {
		lmk->seed = NULL;
		return 0;
	}

	lmk->seed = kzalloc(LMK_SEED_SIZE, GFP_KERNEL);
	if (!lmk->seed) {
		crypt_iv_lmk_dtr(cc);
		ti->error = "Error kmallocing seed storage in LMK";
		return -ENOMEM;
	}

	return 0;
}

static int crypt_iv_lmk_init(struct crypt_config *cc)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
	int subkey_size = cc->key_size / cc->key_parts;

	/* LMK seed is on the position of LMK_KEYS + 1 key */
	if (lmk->seed)
		memcpy(lmk->seed, cc->key + (cc->tfms_count * subkey_size),
		       crypto_shash_digestsize(lmk->hash_tfm));

	return 0;
}

static int crypt_iv_lmk_wipe(struct crypt_config *cc)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;

	if (lmk->seed)
		memset(lmk->seed, 0, LMK_SEED_SIZE);

	return 0;
}

static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv,
			    struct dm_crypt_request *dmreq,
			    u8 *data)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
	SHASH_DESC_ON_STACK(desc, lmk->hash_tfm);
	struct md5_state md5state;
	__le32 buf[4];
	int i, r;

	desc->tfm = lmk->hash_tfm;

	r = crypto_shash_init(desc);
	if (r)
		return r;

	if (lmk->seed) {
		r = crypto_shash_update(desc, lmk->seed, LMK_SEED_SIZE);
		if (r)
			return r;
	}

	/* Sector is always 512B, block size 16, add data of blocks 1-31 */
	r = crypto_shash_update(desc, data + 16, 16 * 31);
	if (r)
		return r;

	/* Sector is cropped to 56 bits here */
	buf[0] = cpu_to_le32(dmreq->iv_sector & 0xFFFFFFFF);
	buf[1] = cpu_to_le32((((u64)dmreq->iv_sector >> 32) & 0x00FFFFFF) | 0x80000000);
	buf[2] = cpu_to_le32(4024);
	buf[3] = 0;
	r = crypto_shash_update(desc, (u8 *)buf, sizeof(buf));
	if (r)
		return r;

	/* No MD5 padding here */
	r = crypto_shash_export(desc, &md5state);
	if (r)
		return r;

	for (i = 0; i < MD5_HASH_WORDS; i++)
		__cpu_to_le32s(&md5state.hash[i]);
	memcpy(iv, &md5state.hash, cc->iv_size);

	return 0;
}

static int crypt_iv_lmk_gen(struct crypt_config *cc, u8 *iv,
			    struct dm_crypt_request *dmreq)
{
	struct scatterlist *sg;
	u8 *src;
	int r = 0;

	if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
		sg = crypt_get_sg_data(cc, dmreq->sg_in);
		src = kmap_atomic(sg_page(sg));
		r = crypt_iv_lmk_one(cc, iv, dmreq, src + sg->offset);
		kunmap_atomic(src);
	} else
		memset(iv, 0, cc->iv_size);

	return r;
}

static int crypt_iv_lmk_post(struct crypt_config *cc, u8 *iv,
			     struct dm_crypt_request *dmreq)
{
	struct scatterlist *sg;
	u8 *dst;
	int r;

	if (bio_data_dir(dmreq->ctx->bio_in) == WRITE)
		return 0;

	sg = crypt_get_sg_data(cc, dmreq->sg_out);
	dst = kmap_atomic(sg_page(sg));
	r = crypt_iv_lmk_one(cc, iv, dmreq, dst + sg->offset);

	/* Tweak the first block of plaintext sector */
	if (!r)
		crypto_xor(dst + sg->offset, iv, cc->iv_size);

	kunmap_atomic(dst);
	return r;
}

static void crypt_iv_tcw_dtr(struct crypt_config *cc)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;

	kfree_sensitive(tcw->iv_seed);
	tcw->iv_seed = NULL;
	kfree_sensitive(tcw->whitening);
	tcw->whitening = NULL;

	if (tcw->crc32_tfm && !IS_ERR(tcw->crc32_tfm))
		crypto_free_shash(tcw->crc32_tfm);
	tcw->crc32_tfm = NULL;
}

static int crypt_iv_tcw_ctr(struct crypt_config *cc, struct dm_target *ti,
			    const char *opts)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;

	if (cc->sector_size != (1 << SECTOR_SHIFT)) {
		ti->error = "Unsupported sector size for TCW";
		return -EINVAL;
	}

	if (cc->key_size <= (cc->iv_size + TCW_WHITENING_SIZE)) {
		ti->error = "Wrong key size for TCW";
		return -EINVAL;
	}

	tcw->crc32_tfm = crypto_alloc_shash("crc32", 0,
					    CRYPTO_ALG_ALLOCATES_MEMORY);
	if (IS_ERR(tcw->crc32_tfm)) {
		ti->error = "Error initializing CRC32 in TCW";
		return PTR_ERR(tcw->crc32_tfm);
	}

	tcw->iv_seed = kzalloc(cc->iv_size, GFP_KERNEL);
	tcw->whitening = kzalloc(TCW_WHITENING_SIZE, GFP_KERNEL);
	if (!tcw->iv_seed || !tcw->whitening) {
		crypt_iv_tcw_dtr(cc);
		ti->error = "Error allocating seed storage in TCW";
		return -ENOMEM;
	}

	return 0;
}

static int crypt_iv_tcw_init(struct crypt_config *cc)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
	int key_offset = cc->key_size - cc->iv_size - TCW_WHITENING_SIZE;

	memcpy(tcw->iv_seed, &cc->key[key_offset], cc->iv_size);
	memcpy(tcw->whitening, &cc->key[key_offset + cc->iv_size],
	       TCW_WHITENING_SIZE);

	return 0;
}

static int crypt_iv_tcw_wipe(struct crypt_config *cc)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;

	memset(tcw->iv_seed, 0, cc->iv_size);
	memset(tcw->whitening, 0, TCW_WHITENING_SIZE);

	return 0;
}

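/*
 * TCW whitening: XOR the 16-byte whitening seed with the sector number,
 * CRC32 each of the four 32-bit words in place, fold the 16 bytes down
 * to 8 by XOR, and XOR those 8 bytes repeatedly across the whole
 * 512-byte sector.
 */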
static int crypt_iv_tcw_whitening(struct crypt_config *cc,
				  struct dm_crypt_request *dmreq,
				  u8 *data)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
	__le64 sector = cpu_to_le64(dmreq->iv_sector);
	u8 buf[TCW_WHITENING_SIZE];
	SHASH_DESC_ON_STACK(desc, tcw->crc32_tfm);
	int i, r;

	/* xor whitening with sector number */
	crypto_xor_cpy(buf, tcw->whitening, (u8 *)&sector, 8);
	crypto_xor_cpy(&buf[8], tcw->whitening + 8, (u8 *)&sector, 8);

	/* calculate crc32 for every 32bit part and xor it */
	desc->tfm = tcw->crc32_tfm;
	for (i = 0; i < 4; i++) {
		r = crypto_shash_init(desc);
		if (r)
			goto out;
		r = crypto_shash_update(desc, &buf[i * 4], 4);
		if (r)
			goto out;
		r = crypto_shash_final(desc, &buf[i * 4]);
		if (r)
			goto out;
	}
	crypto_xor(&buf[0], &buf[12], 4);
	crypto_xor(&buf[4], &buf[8], 4);

	/* apply whitening (8 bytes) to whole sector */
	for (i = 0; i < ((1 << SECTOR_SHIFT) / 8); i++)
		crypto_xor(data + i * 8, buf, 8);
out:
	memzero_explicit(buf, sizeof(buf));
	return r;
}

static int crypt_iv_tcw_gen(struct crypt_config *cc, u8 *iv,
			    struct dm_crypt_request *dmreq)
{
	struct scatterlist *sg;
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
	__le64 sector = cpu_to_le64(dmreq->iv_sector);
	u8 *src;
	int r = 0;

	/* Remove whitening from ciphertext */
	if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) {
		sg = crypt_get_sg_data(cc, dmreq->sg_in);
		src = kmap_atomic(sg_page(sg));
		r = crypt_iv_tcw_whitening(cc, dmreq, src + sg->offset);
		kunmap_atomic(src);
	}

	/* Calculate IV */
	crypto_xor_cpy(iv, tcw->iv_seed, (u8 *)&sector, 8);
	if (cc->iv_size > 8)
		crypto_xor_cpy(&iv[8], tcw->iv_seed + 8, (u8 *)&sector,
			       cc->iv_size - 8);

	return r;
}

static int crypt_iv_tcw_post(struct crypt_config *cc, u8 *iv,
			     struct dm_crypt_request *dmreq)
{
	struct scatterlist *sg;
	u8 *dst;
	int r;

	if (bio_data_dir(dmreq->ctx->bio_in) != WRITE)
		return 0;

	/* Apply whitening on ciphertext */
	sg = crypt_get_sg_data(cc, dmreq->sg_out);
	dst = kmap_atomic(sg_page(sg));
	r = crypt_iv_tcw_whitening(cc, dmreq, dst + sg->offset);
	kunmap_atomic(dst);

	return r;
}

static int crypt_iv_random_gen(struct crypt_config *cc, u8 *iv,
			       struct dm_crypt_request *dmreq)
{
	/* Used only for writes, there must be an additional space to store IV */
	get_random_bytes(iv, cc->iv_size);
	return 0;
}

static int crypt_iv_eboiv_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	if (crypt_integrity_aead(cc)) {
		ti->error = "AEAD transforms not supported for EBOIV";
		return -EINVAL;
	}

	if (crypto_skcipher_blocksize(any_tfm(cc)) != cc->iv_size) {
		ti->error = "Block size of EBOIV cipher does "
			    "not match IV size of block cipher";
		return -EINVAL;
	}

	return 0;
}

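/*
 * A sketch of why encrypting a zero block works below: with the volume
 * cipher in CBC mode (eboiv's intended use), the first ciphertext block
 * is E(K, IV XOR P1); with P1 = 0 and IV = the little-endian byte
 * offset of the sector, the result is exactly the encrypted byte-offset
 * IV described above.
 */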
static int crypt_iv_eboiv_gen(struct crypt_config *cc, u8 *iv,
			      struct dm_crypt_request *dmreq)
{
	u8 buf[MAX_CIPHER_BLOCKSIZE] __aligned(__alignof__(__le64));
	struct skcipher_request *req;
	struct scatterlist src, dst;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	req = skcipher_request_alloc(any_tfm(cc), GFP_NOIO);
	if (!req)
		return -ENOMEM;

	memset(buf, 0, cc->iv_size);
	*(__le64 *)buf = cpu_to_le64(dmreq->iv_sector * cc->sector_size);

	sg_init_one(&src, page_address(ZERO_PAGE(0)), cc->iv_size);
	sg_init_one(&dst, iv, cc->iv_size);
	skcipher_request_set_crypt(req, &src, &dst, cc->iv_size, buf);
	skcipher_request_set_callback(req, 0, crypto_req_done, &wait);
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
	skcipher_request_free(req);

	return err;
}

static void crypt_iv_elephant_dtr(struct crypt_config *cc)
{
	struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant;

	crypto_free_skcipher(elephant->tfm);
	elephant->tfm = NULL;
}

static int crypt_iv_elephant_ctr(struct crypt_config *cc, struct dm_target *ti,
				 const char *opts)
{
	struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant;
	int r;

	elephant->tfm = crypto_alloc_skcipher("ecb(aes)", 0,
					      CRYPTO_ALG_ALLOCATES_MEMORY);
	if (IS_ERR(elephant->tfm)) {
		r = PTR_ERR(elephant->tfm);
		elephant->tfm = NULL;
		return r;
	}

	r = crypt_iv_eboiv_ctr(cc, ti, NULL);
	if (r)
		crypt_iv_elephant_dtr(cc);
	return r;
}

static void diffuser_disk_to_cpu(u32 *d, size_t n)
{
#ifndef __LITTLE_ENDIAN
	int i;

	for (i = 0; i < n; i++)
		d[i] = le32_to_cpu((__le32)d[i]);
#endif
}

static void diffuser_cpu_to_disk(__le32 *d, size_t n)
{
#ifndef __LITTLE_ENDIAN
	int i;

	for (i = 0; i < n; i++)
		d[i] = cpu_to_le32((u32)d[i]);
#endif
}

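/*
 * Elephant diffusers A and B from the BitLocker specification.  The
 * (x << r | x >> (32 - r)) pairs below are plain 32-bit rotations; each
 * diffuser makes several passes that mix every 32-bit word with two
 * other words of the sector, and the *_decrypt/*_encrypt variants walk
 * the words in opposite directions so that one inverts the other.
 */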
static void diffuser_a_decrypt(u32 *d, size_t n)
{
	int i, i1, i2, i3;

	for (i = 0; i < 5; i++) {
		i1 = 0;
		i2 = n - 2;
		i3 = n - 5;

		while (i1 < (n - 1)) {
			d[i1] += d[i2] ^ (d[i3] << 9 | d[i3] >> 23);
			i1++; i2++; i3++;

			if (i3 >= n)
				i3 -= n;

			d[i1] += d[i2] ^ d[i3];
			i1++; i2++; i3++;

			if (i2 >= n)
				i2 -= n;

			d[i1] += d[i2] ^ (d[i3] << 13 | d[i3] >> 19);
			i1++; i2++; i3++;

			d[i1] += d[i2] ^ d[i3];
			i1++; i2++; i3++;
		}
	}
}

static void diffuser_a_encrypt(u32 *d, size_t n)
{
	int i, i1, i2, i3;

	for (i = 0; i < 5; i++) {
		i1 = n - 1;
		i2 = n - 2 - 1;
		i3 = n - 5 - 1;

		while (i1 > 0) {
			d[i1] -= d[i2] ^ d[i3];
			i1--; i2--; i3--;

			d[i1] -= d[i2] ^ (d[i3] << 13 | d[i3] >> 19);
			i1--; i2--; i3--;

			if (i2 < 0)
				i2 += n;

			d[i1] -= d[i2] ^ d[i3];
			i1--; i2--; i3--;

			if (i3 < 0)
				i3 += n;

			d[i1] -= d[i2] ^ (d[i3] << 9 | d[i3] >> 23);
			i1--; i2--; i3--;
		}
	}
}

static void diffuser_b_decrypt(u32 *d, size_t n)
{
	int i, i1, i2, i3;

	for (i = 0; i < 3; i++) {
		i1 = 0;
		i2 = 2;
		i3 = 5;

		while (i1 < (n - 1)) {
			d[i1] += d[i2] ^ d[i3];
			i1++; i2++; i3++;

			d[i1] += d[i2] ^ (d[i3] << 10 | d[i3] >> 22);
			i1++; i2++; i3++;

			if (i2 >= n)
				i2 -= n;

			d[i1] += d[i2] ^ d[i3];
			i1++; i2++; i3++;

			if (i3 >= n)
				i3 -= n;

			d[i1] += d[i2] ^ (d[i3] << 25 | d[i3] >> 7);
			i1++; i2++; i3++;
		}
	}
}

static void diffuser_b_encrypt(u32 *d, size_t n)
{
	int i, i1, i2, i3;

	for (i = 0; i < 3; i++) {
		i1 = n - 1;
		i2 = 2 - 1;
		i3 = 5 - 1;

		while (i1 > 0) {
			d[i1] -= d[i2] ^ (d[i3] << 25 | d[i3] >> 7);
			i1--; i2--; i3--;

			if (i3 < 0)
				i3 += n;

			d[i1] -= d[i2] ^ d[i3];
			i1--; i2--; i3--;

			if (i2 < 0)
				i2 += n;

			d[i1] -= d[i2] ^ (d[i3] << 10 | d[i3] >> 22);
			i1--; i2--; i3--;

			d[i1] -= d[i2] ^ d[i3];
			i1--; i2--; i3--;
		}
	}
}

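/*
 * Full Elephant transform for one sector: derive the 32-byte sector key
 * Ks by AES-ECB-encrypting the sector byte offset e(s) and its variant
 * e'(s) (e(s) with byte 15 set to 0x80).  On writes the data is first
 * copied to sg_out, XORed with Ks and then diffused (A, then B); on
 * reads the diffusers are undone (B, then A) before the XOR.
 */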
static int crypt_iv_elephant(struct crypt_config *cc, struct dm_crypt_request *dmreq)
{
	struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant;
	u8 *es, *ks, *data, *data2, *data_offset;
	struct skcipher_request *req;
	struct scatterlist *sg, *sg2, src, dst;
	DECLARE_CRYPTO_WAIT(wait);
	int i, r;

	req = skcipher_request_alloc(elephant->tfm, GFP_NOIO);
	es = kzalloc(16, GFP_NOIO); /* Key for AES */
	ks = kzalloc(32, GFP_NOIO); /* Elephant sector key */

	if (!req || !es || !ks) {
		r = -ENOMEM;
		goto out;
	}

	*(__le64 *)es = cpu_to_le64(dmreq->iv_sector * cc->sector_size);

	/* E(Ks, e(s)) */
	sg_init_one(&src, es, 16);
	sg_init_one(&dst, ks, 16);
	skcipher_request_set_crypt(req, &src, &dst, 16, NULL);
	skcipher_request_set_callback(req, 0, crypto_req_done, &wait);
	r = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
	if (r)
		goto out;

	/* E(Ks, e'(s)) */
	es[15] = 0x80;
	sg_init_one(&dst, &ks[16], 16);
	r = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
	if (r)
		goto out;

	sg = crypt_get_sg_data(cc, dmreq->sg_out);
	data = kmap_atomic(sg_page(sg));
	data_offset = data + sg->offset;

	/* Cannot modify original bio, copy to sg_out and apply Elephant to it */
	if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
		sg2 = crypt_get_sg_data(cc, dmreq->sg_in);
		data2 = kmap_atomic(sg_page(sg2));
		memcpy(data_offset, data2 + sg2->offset, cc->sector_size);
		kunmap_atomic(data2);
	}

	if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) {
		diffuser_disk_to_cpu((u32 *)data_offset, cc->sector_size / sizeof(u32));
		diffuser_b_decrypt((u32 *)data_offset, cc->sector_size / sizeof(u32));
		diffuser_a_decrypt((u32 *)data_offset, cc->sector_size / sizeof(u32));
		diffuser_cpu_to_disk((__le32 *)data_offset, cc->sector_size / sizeof(u32));
	}

	for (i = 0; i < (cc->sector_size / 32); i++)
		crypto_xor(data_offset + i * 32, ks, 32);

	if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
		diffuser_disk_to_cpu((u32 *)data_offset, cc->sector_size / sizeof(u32));
		diffuser_a_encrypt((u32 *)data_offset, cc->sector_size / sizeof(u32));
		diffuser_b_encrypt((u32 *)data_offset, cc->sector_size / sizeof(u32));
		diffuser_cpu_to_disk((__le32 *)data_offset, cc->sector_size / sizeof(u32));
	}

	kunmap_atomic(data);
out:
	kfree_sensitive(ks);
	kfree_sensitive(es);
	skcipher_request_free(req);
	return r;
}

static int crypt_iv_elephant_gen(struct crypt_config *cc, u8 *iv,
				 struct dm_crypt_request *dmreq)
{
	int r;

	if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
		r = crypt_iv_elephant(cc, dmreq);
		if (r)
			return r;
	}

	return crypt_iv_eboiv_gen(cc, iv, dmreq);
}

static int crypt_iv_elephant_post(struct crypt_config *cc, u8 *iv,
				  struct dm_crypt_request *dmreq)
{
	if (bio_data_dir(dmreq->ctx->bio_in) != WRITE)
		return crypt_iv_elephant(cc, dmreq);

	return 0;
}

static int crypt_iv_elephant_init(struct crypt_config *cc)
{
	struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant;
	int key_offset = cc->key_size - cc->key_extra_size;

	return crypto_skcipher_setkey(elephant->tfm, &cc->key[key_offset], cc->key_extra_size);
}

static int crypt_iv_elephant_wipe(struct crypt_config *cc)
{
	struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant;
	u8 key[ELEPHANT_MAX_KEY_SIZE];

	memset(key, 0, cc->key_extra_size);
	return crypto_skcipher_setkey(elephant->tfm, key, cc->key_extra_size);
}

static const struct crypt_iv_operations crypt_iv_plain_ops = {
	.generator = crypt_iv_plain_gen
};

static const struct crypt_iv_operations crypt_iv_plain64_ops = {
	.generator = crypt_iv_plain64_gen
};

static const struct crypt_iv_operations crypt_iv_plain64be_ops = {
	.generator = crypt_iv_plain64be_gen
};

static const struct crypt_iv_operations crypt_iv_essiv_ops = {
	.generator = crypt_iv_essiv_gen
};

static const struct crypt_iv_operations crypt_iv_benbi_ops = {
	.ctr	   = crypt_iv_benbi_ctr,
	.dtr	   = crypt_iv_benbi_dtr,
	.generator = crypt_iv_benbi_gen
};

static const struct crypt_iv_operations crypt_iv_null_ops = {
	.generator = crypt_iv_null_gen
};

static const struct crypt_iv_operations crypt_iv_lmk_ops = {
	.ctr	   = crypt_iv_lmk_ctr,
	.dtr	   = crypt_iv_lmk_dtr,
	.init	   = crypt_iv_lmk_init,
	.wipe	   = crypt_iv_lmk_wipe,
	.generator = crypt_iv_lmk_gen,
	.post	   = crypt_iv_lmk_post
};

static const struct crypt_iv_operations crypt_iv_tcw_ops = {
	.ctr	   = crypt_iv_tcw_ctr,
	.dtr	   = crypt_iv_tcw_dtr,
	.init	   = crypt_iv_tcw_init,
	.wipe	   = crypt_iv_tcw_wipe,
	.generator = crypt_iv_tcw_gen,
	.post	   = crypt_iv_tcw_post
};

static struct crypt_iv_operations crypt_iv_random_ops = {
	.generator = crypt_iv_random_gen
};

static struct crypt_iv_operations crypt_iv_eboiv_ops = {
	.ctr	   = crypt_iv_eboiv_ctr,
	.generator = crypt_iv_eboiv_gen
};

static struct crypt_iv_operations crypt_iv_elephant_ops = {
	.ctr	   = crypt_iv_elephant_ctr,
	.dtr	   = crypt_iv_elephant_dtr,
	.init	   = crypt_iv_elephant_init,
	.wipe	   = crypt_iv_elephant_wipe,
	.generator = crypt_iv_elephant_gen,
	.post	   = crypt_iv_elephant_post
};

/*
 * Integrity extensions
 */
static bool crypt_integrity_aead(struct crypt_config *cc)
{
	return test_bit(CRYPT_MODE_INTEGRITY_AEAD, &cc->cipher_flags);
}

static bool crypt_integrity_hmac(struct crypt_config *cc)
{
	return crypt_integrity_aead(cc) && cc->key_mac_size;
}

/* Get sg containing data */
static struct scatterlist *crypt_get_sg_data(struct crypt_config *cc,
					     struct scatterlist *sg)
{
	if (unlikely(crypt_integrity_aead(cc)))
		return &sg[2];

	return sg;
}

static int dm_crypt_integrity_io_alloc(struct dm_crypt_io *io, struct bio *bio)
{
	struct bio_integrity_payload *bip;
	unsigned int tag_len;
	int ret;

	if (!bio_sectors(bio) || !io->cc->on_disk_tag_size)
		return 0;

	bip = bio_integrity_alloc(bio, GFP_NOIO, 1);
	if (IS_ERR(bip))
		return PTR_ERR(bip);

	tag_len = io->cc->on_disk_tag_size * (bio_sectors(bio) >> io->cc->sector_shift);

	bip->bip_iter.bi_size = tag_len;
	bip->bip_iter.bi_sector = io->cc->start + io->sector;

	ret = bio_integrity_add_page(bio, virt_to_page(io->integrity_metadata),
				     tag_len, offset_in_page(io->integrity_metadata));
	if (unlikely(ret != tag_len))
		return -ENOMEM;

	return 0;
}

static int crypt_integrity_ctr(struct crypt_config *cc, struct dm_target *ti)
{
#ifdef CONFIG_BLK_DEV_INTEGRITY
	struct blk_integrity *bi = blk_get_integrity(cc->dev->bdev->bd_disk);
	struct mapped_device *md = dm_table_get_md(ti->table);

	/* From now we require underlying device with our integrity profile */
	if (!bi || strcasecmp(bi->profile->name, "DM-DIF-EXT-TAG")) {
		ti->error = "Integrity profile not supported.";
		return -EINVAL;
	}

	if (bi->tag_size != cc->on_disk_tag_size ||
	    bi->tuple_size != cc->on_disk_tag_size) {
		ti->error = "Integrity profile tag size mismatch.";
		return -EINVAL;
	}
	if (1 << bi->interval_exp != cc->sector_size) {
		ti->error = "Integrity profile sector size mismatch.";
		return -EINVAL;
	}

	if (crypt_integrity_aead(cc)) {
		cc->integrity_tag_size = cc->on_disk_tag_size - cc->integrity_iv_size;
		DMDEBUG("%s: Integrity AEAD, tag size %u, IV size %u.", dm_device_name(md),
		       cc->integrity_tag_size, cc->integrity_iv_size);

		if (crypto_aead_setauthsize(any_tfm_aead(cc), cc->integrity_tag_size)) {
			ti->error = "Integrity AEAD auth tag size is not supported.";
			return -EINVAL;
		}
	} else if (cc->integrity_iv_size)
		DMDEBUG("%s: Additional per-sector space %u bytes for IV.", dm_device_name(md),
		       cc->integrity_iv_size);

	if ((cc->integrity_tag_size + cc->integrity_iv_size) != bi->tag_size) {
		ti->error = "Not enough space for integrity tag in the profile.";
		return -EINVAL;
	}

	return 0;
#else
	ti->error = "Integrity profile not supported.";
	return -EINVAL;
#endif
}

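/*
 * Initialize the conversion context for one request.  Note that
 * cc_sector is biased by iv_offset, so IVs are generated in a sector
 * space that can be shifted relative to the device sectors; the bias is
 * subtracted again where the original sector number is needed.
 */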
static void crypt_convert_init(struct crypt_config *cc,
			       struct convert_context *ctx,
			       struct bio *bio_out, struct bio *bio_in,
			       sector_t sector)
{
	ctx->bio_in = bio_in;
	ctx->bio_out = bio_out;
	if (bio_in)
		ctx->iter_in = bio_in->bi_iter;
	if (bio_out)
		ctx->iter_out = bio_out->bi_iter;
	ctx->cc_sector = sector + cc->iv_offset;
	init_completion(&ctx->restart);
}

static struct dm_crypt_request *dmreq_of_req(struct crypt_config *cc,
					     void *req)
{
	return (struct dm_crypt_request *)((char *)req + cc->dmreq_start);
}

static void *req_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq)
{
	return (void *)((char *)dmreq - cc->dmreq_start);
}

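/*
 * The area following each dm_crypt_request is laid out (after alignment
 * padding) as: working IV, original IV, original sector as __le64, and
 * the tag offset as unsigned int.  The helpers below compute pointers
 * into that tail.
 */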
Milan Broz2dc53272011-01-13 19:59:54 +00001233static u8 *iv_of_dmreq(struct crypt_config *cc,
1234 struct dm_crypt_request *dmreq)
1235{
Milan Broz33d2f092017-03-16 15:39:40 +01001236 if (crypt_integrity_aead(cc))
Milan Brozef43aa32017-01-04 20:23:54 +01001237 return (u8 *)ALIGN((unsigned long)(dmreq + 1),
1238 crypto_aead_alignmask(any_tfm_aead(cc)) + 1);
1239 else
1240 return (u8 *)ALIGN((unsigned long)(dmreq + 1),
1241 crypto_skcipher_alignmask(any_tfm(cc)) + 1);
Milan Broz2dc53272011-01-13 19:59:54 +00001242}
1243
Milan Brozef43aa32017-01-04 20:23:54 +01001244static u8 *org_iv_of_dmreq(struct crypt_config *cc,
1245 struct dm_crypt_request *dmreq)
1246{
1247 return iv_of_dmreq(cc, dmreq) + cc->iv_size;
1248}
1249
Christoph Hellwigc13b5482019-04-04 18:33:34 +02001250static __le64 *org_sector_of_dmreq(struct crypt_config *cc,
Milan Brozef43aa32017-01-04 20:23:54 +01001251 struct dm_crypt_request *dmreq)
1252{
1253 u8 *ptr = iv_of_dmreq(cc, dmreq) + cc->iv_size + cc->iv_size;
Christoph Hellwigc13b5482019-04-04 18:33:34 +02001254 return (__le64 *) ptr;
Milan Brozef43aa32017-01-04 20:23:54 +01001255}
1256
1257static unsigned int *org_tag_of_dmreq(struct crypt_config *cc,
1258 struct dm_crypt_request *dmreq)
1259{
1260 u8 *ptr = iv_of_dmreq(cc, dmreq) + cc->iv_size +
1261 cc->iv_size + sizeof(uint64_t);
1262 return (unsigned int*)ptr;
1263}
1264
1265static void *tag_from_dmreq(struct crypt_config *cc,
1266 struct dm_crypt_request *dmreq)
1267{
1268 struct convert_context *ctx = dmreq->ctx;
1269 struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
1270
1271 return &io->integrity_metadata[*org_tag_of_dmreq(cc, dmreq) *
1272 cc->on_disk_tag_size];
1273}
1274
1275static void *iv_tag_from_dmreq(struct crypt_config *cc,
1276 struct dm_crypt_request *dmreq)
1277{
1278 return tag_from_dmreq(cc, dmreq) + cc->integrity_tag_size;
1279}
1280
1281static int crypt_convert_block_aead(struct crypt_config *cc,
1282 struct convert_context *ctx,
1283 struct aead_request *req,
1284 unsigned int tag_offset)
Milan Broz01482b72008-02-08 02:11:04 +00001285{
Kent Overstreet003b5c52013-10-11 15:45:43 -07001286 struct bio_vec bv_in = bio_iter_iovec(ctx->bio_in, ctx->iter_in);
1287 struct bio_vec bv_out = bio_iter_iovec(ctx->bio_out, ctx->iter_out);
Milan Broz3a7f6c92008-02-08 02:11:14 +00001288 struct dm_crypt_request *dmreq;
Milan Brozef43aa32017-01-04 20:23:54 +01001289 u8 *iv, *org_iv, *tag_iv, *tag;
Christoph Hellwigc13b5482019-04-04 18:33:34 +02001290 __le64 *sector;
Milan Brozef43aa32017-01-04 20:23:54 +01001291 int r = 0;
1292
1293 BUG_ON(cc->integrity_iv_size && cc->integrity_iv_size != cc->iv_size);
Milan Broz01482b72008-02-08 02:11:04 +00001294
Milan Broz8f0009a2017-03-16 15:39:44 +01001295 /* Reject unexpected unaligned bio. */
Mikulas Patocka0440d5c2017-11-07 10:35:57 -05001296 if (unlikely(bv_in.bv_len & (cc->sector_size - 1)))
Milan Broz8f0009a2017-03-16 15:39:44 +01001297 return -EIO;
Milan Broz01482b72008-02-08 02:11:04 +00001298
Huang Yingb2174ee2009-03-16 17:44:33 +00001299 dmreq = dmreq_of_req(cc, req);
Mikulas Patockac66029f2012-07-27 15:08:05 +01001300 dmreq->iv_sector = ctx->cc_sector;
Milan Broz8f0009a2017-03-16 15:39:44 +01001301 if (test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags))
Mikulas Patockaff3af922017-03-23 10:23:14 -04001302 dmreq->iv_sector >>= cc->sector_shift;
Huang Yingb2174ee2009-03-16 17:44:33 +00001303 dmreq->ctx = ctx;
Milan Broz01482b72008-02-08 02:11:04 +00001304
Milan Brozef43aa32017-01-04 20:23:54 +01001305 *org_tag_of_dmreq(cc, dmreq) = tag_offset;
Milan Broz01482b72008-02-08 02:11:04 +00001306
Milan Brozef43aa32017-01-04 20:23:54 +01001307 sector = org_sector_of_dmreq(cc, dmreq);
1308 *sector = cpu_to_le64(ctx->cc_sector - cc->iv_offset);
1309
1310 iv = iv_of_dmreq(cc, dmreq);
1311 org_iv = org_iv_of_dmreq(cc, dmreq);
1312 tag = tag_from_dmreq(cc, dmreq);
1313 tag_iv = iv_tag_from_dmreq(cc, dmreq);
1314
1315 /* AEAD request:
1316 * |----- AAD -------|------ DATA -------|-- AUTH TAG --|
1317 * | (authenticated) | (auth+encryption) | |
1318 * | sector_LE | IV | sector in/out | tag in/out |
1319 */
1320 sg_init_table(dmreq->sg_in, 4);
1321 sg_set_buf(&dmreq->sg_in[0], sector, sizeof(uint64_t));
1322 sg_set_buf(&dmreq->sg_in[1], org_iv, cc->iv_size);
Milan Broz8f0009a2017-03-16 15:39:44 +01001323 sg_set_page(&dmreq->sg_in[2], bv_in.bv_page, cc->sector_size, bv_in.bv_offset);
Milan Brozef43aa32017-01-04 20:23:54 +01001324 sg_set_buf(&dmreq->sg_in[3], tag, cc->integrity_tag_size);
1325
1326 sg_init_table(dmreq->sg_out, 4);
1327 sg_set_buf(&dmreq->sg_out[0], sector, sizeof(uint64_t));
1328 sg_set_buf(&dmreq->sg_out[1], org_iv, cc->iv_size);
Milan Broz8f0009a2017-03-16 15:39:44 +01001329 sg_set_page(&dmreq->sg_out[2], bv_out.bv_page, cc->sector_size, bv_out.bv_offset);
Milan Brozef43aa32017-01-04 20:23:54 +01001330 sg_set_buf(&dmreq->sg_out[3], tag, cc->integrity_tag_size);
Milan Broz01482b72008-02-08 02:11:04 +00001331
Milan Broz3a7f6c92008-02-08 02:11:14 +00001332 if (cc->iv_gen_ops) {
Milan Brozef43aa32017-01-04 20:23:54 +01001333 /* For READs use IV stored in integrity metadata */
1334 if (cc->integrity_iv_size && bio_data_dir(ctx->bio_in) != WRITE) {
1335 memcpy(org_iv, tag_iv, cc->iv_size);
1336 } else {
1337 r = cc->iv_gen_ops->generator(cc, org_iv, dmreq);
1338 if (r < 0)
1339 return r;
1340 /* Store generated IV in integrity metadata */
1341 if (cc->integrity_iv_size)
1342 memcpy(tag_iv, org_iv, cc->iv_size);
1343 }
1344 /* Working copy of IV, to be modified in crypto API */
1345 memcpy(iv, org_iv, cc->iv_size);
Milan Broz3a7f6c92008-02-08 02:11:14 +00001346 }
1347
Milan Brozef43aa32017-01-04 20:23:54 +01001348 aead_request_set_ad(req, sizeof(uint64_t) + cc->iv_size);
1349 if (bio_data_dir(ctx->bio_in) == WRITE) {
1350 aead_request_set_crypt(req, dmreq->sg_in, dmreq->sg_out,
Milan Broz8f0009a2017-03-16 15:39:44 +01001351 cc->sector_size, iv);
Milan Brozef43aa32017-01-04 20:23:54 +01001352 r = crypto_aead_encrypt(req);
1353 if (cc->integrity_tag_size + cc->integrity_iv_size != cc->on_disk_tag_size)
1354 memset(tag + cc->integrity_tag_size + cc->integrity_iv_size, 0,
1355 cc->on_disk_tag_size - (cc->integrity_tag_size + cc->integrity_iv_size));
1356 } else {
1357 aead_request_set_crypt(req, dmreq->sg_in, dmreq->sg_out,
Milan Broz8f0009a2017-03-16 15:39:44 +01001358 cc->sector_size + cc->integrity_tag_size, iv);
Milan Brozef43aa32017-01-04 20:23:54 +01001359 r = crypto_aead_decrypt(req);
1360 }
1361
Milan Brozf7101262019-05-15 16:22:30 +02001362 if (r == -EBADMSG) {
1363 char b[BDEVNAME_SIZE];
1364 DMERR_LIMIT("%s: INTEGRITY AEAD ERROR, sector %llu", bio_devname(ctx->bio_in, b),
Milan Brozef43aa32017-01-04 20:23:54 +01001365 (unsigned long long)le64_to_cpu(*sector));
Milan Brozf7101262019-05-15 16:22:30 +02001366 }
Milan Brozef43aa32017-01-04 20:23:54 +01001367
1368 if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
1369 r = cc->iv_gen_ops->post(cc, org_iv, dmreq);
1370
Milan Broz8f0009a2017-03-16 15:39:44 +01001371 bio_advance_iter(ctx->bio_in, &ctx->iter_in, cc->sector_size);
1372 bio_advance_iter(ctx->bio_out, &ctx->iter_out, cc->sector_size);
Milan Brozef43aa32017-01-04 20:23:54 +01001373
1374 return r;
1375}

static int crypt_convert_block_skcipher(struct crypt_config *cc,
					struct convert_context *ctx,
					struct skcipher_request *req,
					unsigned int tag_offset)
{
	struct bio_vec bv_in = bio_iter_iovec(ctx->bio_in, ctx->iter_in);
	struct bio_vec bv_out = bio_iter_iovec(ctx->bio_out, ctx->iter_out);
	struct scatterlist *sg_in, *sg_out;
	struct dm_crypt_request *dmreq;
	u8 *iv, *org_iv, *tag_iv;
	__le64 *sector;
	int r = 0;

	/* Reject unexpected unaligned bio. */
	if (unlikely(bv_in.bv_len & (cc->sector_size - 1)))
		return -EIO;

	dmreq = dmreq_of_req(cc, req);
	dmreq->iv_sector = ctx->cc_sector;
	if (test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags))
		dmreq->iv_sector >>= cc->sector_shift;
	dmreq->ctx = ctx;

	*org_tag_of_dmreq(cc, dmreq) = tag_offset;

	iv = iv_of_dmreq(cc, dmreq);
	org_iv = org_iv_of_dmreq(cc, dmreq);
	tag_iv = iv_tag_from_dmreq(cc, dmreq);

	sector = org_sector_of_dmreq(cc, dmreq);
	*sector = cpu_to_le64(ctx->cc_sector - cc->iv_offset);

	/* For skcipher we use only the first sg item */
	sg_in  = &dmreq->sg_in[0];
	sg_out = &dmreq->sg_out[0];

	sg_init_table(sg_in, 1);
	sg_set_page(sg_in, bv_in.bv_page, cc->sector_size, bv_in.bv_offset);

	sg_init_table(sg_out, 1);
	sg_set_page(sg_out, bv_out.bv_page, cc->sector_size, bv_out.bv_offset);

	if (cc->iv_gen_ops) {
		/* For READs use IV stored in integrity metadata */
		if (cc->integrity_iv_size && bio_data_dir(ctx->bio_in) != WRITE) {
			memcpy(org_iv, tag_iv, cc->integrity_iv_size);
		} else {
			r = cc->iv_gen_ops->generator(cc, org_iv, dmreq);
			if (r < 0)
				return r;
			/* Data may already have been preprocessed by the generator */
			if (test_bit(CRYPT_ENCRYPT_PREPROCESS, &cc->cipher_flags))
				sg_in = sg_out;
			/* Store generated IV in integrity metadata */
			if (cc->integrity_iv_size)
				memcpy(tag_iv, org_iv, cc->integrity_iv_size);
		}
		/* Working copy of IV, to be modified in crypto API */
		memcpy(iv, org_iv, cc->iv_size);
	}

	skcipher_request_set_crypt(req, sg_in, sg_out, cc->sector_size, iv);

	if (bio_data_dir(ctx->bio_in) == WRITE)
		r = crypto_skcipher_encrypt(req);
	else
		r = crypto_skcipher_decrypt(req);

	if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
		r = cc->iv_gen_ops->post(cc, org_iv, dmreq);

	bio_advance_iter(ctx->bio_in, &ctx->iter_in, cc->sector_size);
	bio_advance_iter(ctx->bio_out, &ctx->iter_out, cc->sector_size);

	return r;
}
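
/*
 * Worked example (assumed configuration, for illustration only): with the
 * optional sector_size:4096 table parameter, cc->sector_shift is 3, and if
 * the iv_large_sectors flag sets CRYPT_IV_LARGE_SECTORS, a bio starting at
 * 512-byte sector 80 encrypts one 4096-byte block whose dmreq->iv_sector
 * is 80 >> 3 == 10, i.e. the IV is derived per 4096-byte encryption block
 * rather than per 512-byte device sector.
 */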

static void kcryptd_async_done(struct crypto_async_request *async_req,
			       int error);

static void crypt_alloc_req_skcipher(struct crypt_config *cc,
				     struct convert_context *ctx)
{
	unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1);

	if (!ctx->r.req)
		ctx->r.req = mempool_alloc(&cc->req_pool, GFP_NOIO);

	skcipher_request_set_tfm(ctx->r.req, cc->cipher_tfm.tfms[key_index]);

	/*
	 * Use REQ_MAY_BACKLOG so a cipher driver internally backlogs
	 * requests if driver request queue is full.
	 */
	skcipher_request_set_callback(ctx->r.req,
				      CRYPTO_TFM_REQ_MAY_BACKLOG,
				      kcryptd_async_done, dmreq_of_req(cc, ctx->r.req));
}

static void crypt_alloc_req_aead(struct crypt_config *cc,
				 struct convert_context *ctx)
{
	if (!ctx->r.req_aead)
		ctx->r.req_aead = mempool_alloc(&cc->req_pool, GFP_NOIO);

	aead_request_set_tfm(ctx->r.req_aead, cc->cipher_tfm.tfms_aead[0]);

	/*
	 * Use REQ_MAY_BACKLOG so a cipher driver internally backlogs
	 * requests if driver request queue is full.
	 */
	aead_request_set_callback(ctx->r.req_aead,
				  CRYPTO_TFM_REQ_MAY_BACKLOG,
				  kcryptd_async_done, dmreq_of_req(cc, ctx->r.req_aead));
}

static void crypt_alloc_req(struct crypt_config *cc,
			    struct convert_context *ctx)
{
	if (crypt_integrity_aead(cc))
		crypt_alloc_req_aead(cc, ctx);
	else
		crypt_alloc_req_skcipher(cc, ctx);
}

static void crypt_free_req_skcipher(struct crypt_config *cc,
				    struct skcipher_request *req, struct bio *base_bio)
{
	struct dm_crypt_io *io = dm_per_bio_data(base_bio, cc->per_bio_data_size);

	if ((struct skcipher_request *)(io + 1) != req)
		mempool_free(req, &cc->req_pool);
}

static void crypt_free_req_aead(struct crypt_config *cc,
				struct aead_request *req, struct bio *base_bio)
{
	struct dm_crypt_io *io = dm_per_bio_data(base_bio, cc->per_bio_data_size);

	if ((struct aead_request *)(io + 1) != req)
		mempool_free(req, &cc->req_pool);
}

static void crypt_free_req(struct crypt_config *cc, void *req, struct bio *base_bio)
{
	if (crypt_integrity_aead(cc))
		crypt_free_req_aead(cc, req, base_bio);
	else
		crypt_free_req_skcipher(cc, req, base_bio);
}

/*
 * Encrypt / decrypt data from one bio to another one (can be the same one)
 */
static blk_status_t crypt_convert(struct crypt_config *cc,
				  struct convert_context *ctx, bool atomic)
{
	unsigned int tag_offset = 0;
	unsigned int sector_step = cc->sector_size >> SECTOR_SHIFT;
	int r;

	atomic_set(&ctx->cc_pending, 1);

	while (ctx->iter_in.bi_size && ctx->iter_out.bi_size) {

		crypt_alloc_req(cc, ctx);
		atomic_inc(&ctx->cc_pending);

		if (crypt_integrity_aead(cc))
			r = crypt_convert_block_aead(cc, ctx, ctx->r.req_aead, tag_offset);
		else
			r = crypt_convert_block_skcipher(cc, ctx, ctx->r.req, tag_offset);

		switch (r) {
		/*
		 * The request was queued by a crypto driver
		 * but the driver request queue is full, let's wait.
		 */
		case -EBUSY:
			wait_for_completion(&ctx->restart);
			reinit_completion(&ctx->restart);
			fallthrough;
		/*
		 * The request is queued and processed asynchronously,
		 * completion function kcryptd_async_done() will be called.
		 */
		case -EINPROGRESS:
			ctx->r.req = NULL;
			ctx->cc_sector += sector_step;
			tag_offset++;
			continue;
		/*
		 * The request was already processed (synchronously).
		 */
		case 0:
			atomic_dec(&ctx->cc_pending);
			ctx->cc_sector += sector_step;
			tag_offset++;
			if (!atomic)
				cond_resched();
			continue;
		/*
		 * There was a data integrity error.
		 */
		case -EBADMSG:
			atomic_dec(&ctx->cc_pending);
			return BLK_STS_PROTECTION;
		/*
		 * There was an error while processing the request.
		 */
		default:
			atomic_dec(&ctx->cc_pending);
			return BLK_STS_IOERR;
		}
	}

	return 0;
}
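
/*
 * Reference-counting sketch (added commentary, derived from the code above):
 * ctx->cc_pending starts at 1 so that the conversion loop itself holds a
 * reference. For a bio covering, say, three sectors processed synchronously,
 * the counter goes 1 -> 2 -> 1 -> 2 -> 1 -> 2 -> 1; the caller's final
 * atomic_dec_and_test() on the remaining 1 detects overall completion.
 * Blocks returning -EINPROGRESS keep their increment until
 * kcryptd_async_done() drops it.
 */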

static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone);

/*
 * Generate a new unfragmented bio with the given size
 * This should never violate the device limitations (but only because
 * max_segment_size is being constrained to PAGE_SIZE).
 *
 * This function may be called concurrently. If we allocate from the mempool
 * concurrently, there is a possibility of deadlock. For example, if we have a
 * mempool of 256 pages and two processes, each wanting 256 pages, allocate
 * from the mempool concurrently, they may deadlock in a situation where both
 * processes have allocated 128 pages and the mempool is exhausted.
 *
 * In order to avoid this scenario we allocate the pages under a mutex.
 *
 * In order to not degrade performance with excessive locking, we try
 * non-blocking allocations without a mutex first; on failure we fall back
 * to blocking allocations with a mutex.
 */
static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
{
	struct crypt_config *cc = io->cc;
	struct bio *clone;
	unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	gfp_t gfp_mask = GFP_NOWAIT | __GFP_HIGHMEM;
	unsigned i, len, remaining_size;
	struct page *page;

retry:
	if (unlikely(gfp_mask & __GFP_DIRECT_RECLAIM))
		mutex_lock(&cc->bio_alloc_lock);

	clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, &cc->bs);
	if (!clone)
		goto out;

	clone_init(io, clone);

	remaining_size = size;

	for (i = 0; i < nr_iovecs; i++) {
		page = mempool_alloc(&cc->page_pool, gfp_mask);
		if (!page) {
			crypt_free_buffer_pages(cc, clone);
			bio_put(clone);
			gfp_mask |= __GFP_DIRECT_RECLAIM;
			goto retry;
		}

		len = (remaining_size > PAGE_SIZE) ? PAGE_SIZE : remaining_size;

		bio_add_page(clone, page, len, 0);

		remaining_size -= len;
	}

	/* Allocate space for integrity tags */
	if (dm_crypt_integrity_io_alloc(io, clone)) {
		crypt_free_buffer_pages(cc, clone);
		bio_put(clone);
		clone = NULL;
	}
out:
	if (unlikely(gfp_mask & __GFP_DIRECT_RECLAIM))
		mutex_unlock(&cc->bio_alloc_lock);

	return clone;
}

static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
{
	struct bio_vec *bv;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bv, clone, iter_all) {
		BUG_ON(!bv->bv_page);
		mempool_free(bv->bv_page, &cc->page_pool);
	}
}

static void crypt_io_init(struct dm_crypt_io *io, struct crypt_config *cc,
			  struct bio *bio, sector_t sector)
{
	io->cc = cc;
	io->base_bio = bio;
	io->sector = sector;
	io->error = 0;
	io->ctx.r.req = NULL;
	io->integrity_metadata = NULL;
	io->integrity_metadata_from_pool = false;
	atomic_set(&io->io_pending, 0);
}

static void crypt_inc_pending(struct dm_crypt_io *io)
{
	atomic_inc(&io->io_pending);
}

/*
 * One of the bios was finished. Check for completion of
 * the whole request and correctly clean up the buffer.
 */
static void crypt_dec_pending(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;
	struct bio *base_bio = io->base_bio;
	blk_status_t error = io->error;

	if (!atomic_dec_and_test(&io->io_pending))
		return;

	if (io->ctx.r.req)
		crypt_free_req(cc, io->ctx.r.req, base_bio);

	if (unlikely(io->integrity_metadata_from_pool))
		mempool_free(io->integrity_metadata, &io->cc->tag_pool);
	else
		kfree(io->integrity_metadata);

	base_bio->bi_status = error;
	bio_endio(base_bio);
}

/*
 * kcryptd/kcryptd_io:
 *
 * Needed because it would be very unwise to do decryption in an
 * interrupt context.
 *
 * kcryptd performs the actual encryption or decryption.
 *
 * kcryptd_io performs the IO submission.
 *
 * They must be separated as otherwise the final stages could be
 * starved by new requests which can block in the first stages due
 * to memory allocation.
 *
 * The work is done per CPU global for all dm-crypt instances.
 * They should not depend on each other and do not block.
 */
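
/*
 * Rough pipeline sketch (added for orientation; hedged, not exhaustive):
 *
 *   READ:  map -> io_queue (submit read clone) -> crypt_endio ->
 *          crypt_queue (decrypt) -> bio_endio
 *   WRITE: map -> crypt_queue (encrypt) -> write_thread (sorted submit)
 *          or inline submit -> crypt_endio -> bio_endio
 *
 * The no_read_workqueue/no_write_workqueue flags, handled later in this
 * file, bypass the workqueues and run the conversion in the submitting
 * context instead.
 */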
static void crypt_endio(struct bio *clone)
{
	struct dm_crypt_io *io = clone->bi_private;
	struct crypt_config *cc = io->cc;
	unsigned rw = bio_data_dir(clone);
	blk_status_t error;

	/*
	 * free the processed pages
	 */
	if (rw == WRITE)
		crypt_free_buffer_pages(cc, clone);

	error = clone->bi_status;
	bio_put(clone);

	if (rw == READ && !error) {
		kcryptd_queue_crypt(io);
		return;
	}

	if (unlikely(error))
		io->error = error;

	crypt_dec_pending(io);
}

static void clone_init(struct dm_crypt_io *io, struct bio *clone)
{
	struct crypt_config *cc = io->cc;

	clone->bi_private = io;
	clone->bi_end_io  = crypt_endio;
	bio_set_dev(clone, cc->dev->bdev);
	clone->bi_opf = io->base_bio->bi_opf;
}

static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
{
	struct crypt_config *cc = io->cc;
	struct bio *clone;

	/*
	 * We need the original biovec array in order to decrypt
	 * the whole bio data *afterwards* -- thanks to immutable
	 * biovecs we don't need to worry about the block layer
	 * modifying the biovec array; so leverage bio_clone_fast().
	 */
	clone = bio_clone_fast(io->base_bio, gfp, &cc->bs);
	if (!clone)
		return 1;

	crypt_inc_pending(io);

	clone_init(io, clone);
	clone->bi_iter.bi_sector = cc->start + io->sector;

	if (dm_crypt_integrity_io_alloc(io, clone)) {
		crypt_dec_pending(io);
		bio_put(clone);
		return 1;
	}

	submit_bio_noacct(clone);
	return 0;
}

static void kcryptd_io_read_work(struct work_struct *work)
{
	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

	crypt_inc_pending(io);
	if (kcryptd_io_read(io, GFP_NOIO))
		io->error = BLK_STS_RESOURCE;
	crypt_dec_pending(io);
}

static void kcryptd_queue_read(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;

	INIT_WORK(&io->work, kcryptd_io_read_work);
	queue_work(cc->io_queue, &io->work);
}

static void kcryptd_io_write(struct dm_crypt_io *io)
{
	struct bio *clone = io->ctx.bio_out;

	submit_bio_noacct(clone);
}

#define crypt_io_from_node(node) rb_entry((node), struct dm_crypt_io, rb_node)

static int dmcrypt_write(void *data)
{
	struct crypt_config *cc = data;
	struct dm_crypt_io *io;

	while (1) {
		struct rb_root write_tree;
		struct blk_plug plug;

		spin_lock_irq(&cc->write_thread_lock);
continue_locked:

		if (!RB_EMPTY_ROOT(&cc->write_tree))
			goto pop_from_list;

		set_current_state(TASK_INTERRUPTIBLE);

		spin_unlock_irq(&cc->write_thread_lock);

		if (unlikely(kthread_should_stop())) {
			set_current_state(TASK_RUNNING);
			break;
		}

		schedule();

		set_current_state(TASK_RUNNING);
		spin_lock_irq(&cc->write_thread_lock);
		goto continue_locked;

pop_from_list:
		write_tree = cc->write_tree;
		cc->write_tree = RB_ROOT;
		spin_unlock_irq(&cc->write_thread_lock);

		BUG_ON(rb_parent(write_tree.rb_node));

		/*
		 * Note: we cannot walk the tree here with rb_next because
		 * the structures may be freed when kcryptd_io_write is called.
		 */
		blk_start_plug(&plug);
		do {
			io = crypt_io_from_node(rb_first(&write_tree));
			rb_erase(&io->rb_node, &write_tree);
			kcryptd_io_write(io);
		} while (!RB_EMPTY_ROOT(&write_tree));
		blk_finish_plug(&plug);
	}
	return 0;
}
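
/*
 * Example of the effect (added commentary, illustrative): if encrypted
 * writes for sectors 1024, 8 and 520 finish encryption in that order,
 * kcryptd_crypt_write_io_submit() below inserts them into cc->write_tree
 * keyed by io->sector, and the loop above pops rb_first() repeatedly, so
 * the underlying device sees 8, 520, 1024 -- approximately the order an
 * unencrypted device would have seen.
 */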

static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
{
	struct bio *clone = io->ctx.bio_out;
	struct crypt_config *cc = io->cc;
	unsigned long flags;
	sector_t sector;
	struct rb_node **rbp, *parent;

	if (unlikely(io->error)) {
		crypt_free_buffer_pages(cc, clone);
		bio_put(clone);
		crypt_dec_pending(io);
		return;
	}

	/* crypt_convert should have filled the clone bio */
	BUG_ON(io->ctx.iter_out.bi_size);

	clone->bi_iter.bi_sector = cc->start + io->sector;

	if ((likely(!async) && test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags)) ||
	    test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags)) {
		submit_bio_noacct(clone);
		return;
	}

	spin_lock_irqsave(&cc->write_thread_lock, flags);
	if (RB_EMPTY_ROOT(&cc->write_tree))
		wake_up_process(cc->write_thread);
	rbp = &cc->write_tree.rb_node;
	parent = NULL;
	sector = io->sector;
	while (*rbp) {
		parent = *rbp;
		if (sector < crypt_io_from_node(parent)->sector)
			rbp = &(*rbp)->rb_left;
		else
			rbp = &(*rbp)->rb_right;
	}
	rb_link_node(&io->rb_node, parent, rbp);
	rb_insert_color(&io->rb_node, &cc->write_tree);
	spin_unlock_irqrestore(&cc->write_thread_lock, flags);
}

static bool kcryptd_crypt_write_inline(struct crypt_config *cc,
				       struct convert_context *ctx)
{
	if (!test_bit(DM_CRYPT_WRITE_INLINE, &cc->flags))
		return false;

	/*
	 * Note: zone append writes (REQ_OP_ZONE_APPEND) do not have ordering
	 * constraints so they do not need to be issued inline by
	 * kcryptd_crypt_write_convert().
	 */
	switch (bio_op(ctx->bio_in)) {
	case REQ_OP_WRITE:
	case REQ_OP_WRITE_SAME:
	case REQ_OP_WRITE_ZEROES:
		return true;
	default:
		return false;
	}
}

static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;
	struct convert_context *ctx = &io->ctx;
	struct bio *clone;
	int crypt_finished;
	sector_t sector = io->sector;
	blk_status_t r;

	/*
	 * Prevent io from disappearing until this function completes.
	 */
	crypt_inc_pending(io);
	crypt_convert_init(cc, ctx, NULL, io->base_bio, sector);

	clone = crypt_alloc_buffer(io, io->base_bio->bi_iter.bi_size);
	if (unlikely(!clone)) {
		io->error = BLK_STS_IOERR;
		goto dec;
	}

	io->ctx.bio_out = clone;
	io->ctx.iter_out = clone->bi_iter;

	sector += bio_sectors(clone);

	crypt_inc_pending(io);
	r = crypt_convert(cc, ctx,
			  test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags));
	if (r)
		io->error = r;
	crypt_finished = atomic_dec_and_test(&ctx->cc_pending);
	if (!crypt_finished && kcryptd_crypt_write_inline(cc, ctx)) {
		/* Wait for completion signaled by kcryptd_async_done() */
		wait_for_completion(&ctx->restart);
		crypt_finished = 1;
	}

	/* Encryption was already finished, submit io now */
	if (crypt_finished) {
		kcryptd_crypt_write_io_submit(io, 0);
		io->sector = sector;
	}

dec:
	crypt_dec_pending(io);
}

static void kcryptd_crypt_read_done(struct dm_crypt_io *io)
{
	crypt_dec_pending(io);
}

static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;
	blk_status_t r;

	crypt_inc_pending(io);

	crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
			   io->sector);

	r = crypt_convert(cc, &io->ctx,
			  test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags));
	if (r)
		io->error = r;

	if (atomic_dec_and_test(&io->ctx.cc_pending))
		kcryptd_crypt_read_done(io);

	crypt_dec_pending(io);
}

static void kcryptd_async_done(struct crypto_async_request *async_req,
			       int error)
{
	struct dm_crypt_request *dmreq = async_req->data;
	struct convert_context *ctx = dmreq->ctx;
	struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
	struct crypt_config *cc = io->cc;

	/*
	 * A request from crypto driver backlog is going to be processed now,
	 * finish the completion and continue in crypt_convert().
	 * (Callback will be called for the second time for this request.)
	 */
	if (error == -EINPROGRESS) {
		complete(&ctx->restart);
		return;
	}

	if (!error && cc->iv_gen_ops && cc->iv_gen_ops->post)
		error = cc->iv_gen_ops->post(cc, org_iv_of_dmreq(cc, dmreq), dmreq);

	if (error == -EBADMSG) {
		char b[BDEVNAME_SIZE];
		DMERR_LIMIT("%s: INTEGRITY AEAD ERROR, sector %llu", bio_devname(ctx->bio_in, b),
			    (unsigned long long)le64_to_cpu(*org_sector_of_dmreq(cc, dmreq)));
		io->error = BLK_STS_PROTECTION;
	} else if (error < 0)
		io->error = BLK_STS_IOERR;

	crypt_free_req(cc, req_of_dmreq(cc, dmreq), io->base_bio);

	if (!atomic_dec_and_test(&ctx->cc_pending))
		return;

	/*
	 * The request is fully completed: for inline writes, let
	 * kcryptd_crypt_write_convert() do the IO submission.
	 */
	if (bio_data_dir(io->base_bio) == READ) {
		kcryptd_crypt_read_done(io);
		return;
	}

	if (kcryptd_crypt_write_inline(cc, ctx)) {
		complete(&ctx->restart);
		return;
	}

	kcryptd_crypt_write_io_submit(io, 1);
}

static void kcryptd_crypt(struct work_struct *work)
{
	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

	if (bio_data_dir(io->base_bio) == READ)
		kcryptd_crypt_read_convert(io);
	else
		kcryptd_crypt_write_convert(io);
}

static void kcryptd_crypt_tasklet(unsigned long work)
{
	kcryptd_crypt((struct work_struct *)work);
}

static void kcryptd_queue_crypt(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;

	if ((bio_data_dir(io->base_bio) == READ && test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags)) ||
	    (bio_data_dir(io->base_bio) == WRITE && test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags))) {
		if (in_irq()) {
			/* Crypto API's skcipher_walk_first() refuses to work in hard IRQ context */
			tasklet_init(&io->tasklet, kcryptd_crypt_tasklet, (unsigned long)&io->work);
			tasklet_schedule(&io->tasklet);
			return;
		}

		kcryptd_crypt(&io->work);
		return;
	}

	INIT_WORK(&io->work, kcryptd_crypt);
	queue_work(cc->crypt_queue, &io->work);
}
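
/*
 * Note (added commentary): the tasklet path above only matters when the
 * no_read_workqueue/no_write_workqueue options are enabled and the bio
 * completes in hard-IRQ context; in the common configuration every
 * request simply goes through cc->crypt_queue.
 */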

static void crypt_free_tfms_aead(struct crypt_config *cc)
{
	if (!cc->cipher_tfm.tfms_aead)
		return;

	if (cc->cipher_tfm.tfms_aead[0] && !IS_ERR(cc->cipher_tfm.tfms_aead[0])) {
		crypto_free_aead(cc->cipher_tfm.tfms_aead[0]);
		cc->cipher_tfm.tfms_aead[0] = NULL;
	}

	kfree(cc->cipher_tfm.tfms_aead);
	cc->cipher_tfm.tfms_aead = NULL;
}

static void crypt_free_tfms_skcipher(struct crypt_config *cc)
{
	unsigned i;

	if (!cc->cipher_tfm.tfms)
		return;

	for (i = 0; i < cc->tfms_count; i++)
		if (cc->cipher_tfm.tfms[i] && !IS_ERR(cc->cipher_tfm.tfms[i])) {
			crypto_free_skcipher(cc->cipher_tfm.tfms[i]);
			cc->cipher_tfm.tfms[i] = NULL;
		}

	kfree(cc->cipher_tfm.tfms);
	cc->cipher_tfm.tfms = NULL;
}

static void crypt_free_tfms(struct crypt_config *cc)
{
	if (crypt_integrity_aead(cc))
		crypt_free_tfms_aead(cc);
	else
		crypt_free_tfms_skcipher(cc);
}

static int crypt_alloc_tfms_skcipher(struct crypt_config *cc, char *ciphermode)
{
	unsigned i;
	int err;

	cc->cipher_tfm.tfms = kcalloc(cc->tfms_count,
				      sizeof(struct crypto_skcipher *),
				      GFP_KERNEL);
	if (!cc->cipher_tfm.tfms)
		return -ENOMEM;

	for (i = 0; i < cc->tfms_count; i++) {
		cc->cipher_tfm.tfms[i] = crypto_alloc_skcipher(ciphermode, 0,
						CRYPTO_ALG_ALLOCATES_MEMORY);
		if (IS_ERR(cc->cipher_tfm.tfms[i])) {
			err = PTR_ERR(cc->cipher_tfm.tfms[i]);
			crypt_free_tfms(cc);
			return err;
		}
	}

	/*
	 * dm-crypt performance can vary greatly depending on which crypto
	 * algorithm implementation is used. Help people debug performance
	 * problems by logging the ->cra_driver_name.
	 */
	DMDEBUG_LIMIT("%s using implementation \"%s\"", ciphermode,
		      crypto_skcipher_alg(any_tfm(cc))->base.cra_driver_name);
	return 0;
}

static int crypt_alloc_tfms_aead(struct crypt_config *cc, char *ciphermode)
{
	int err;

	cc->cipher_tfm.tfms = kmalloc(sizeof(struct crypto_aead *), GFP_KERNEL);
	if (!cc->cipher_tfm.tfms)
		return -ENOMEM;

	cc->cipher_tfm.tfms_aead[0] = crypto_alloc_aead(ciphermode, 0,
						CRYPTO_ALG_ALLOCATES_MEMORY);
	if (IS_ERR(cc->cipher_tfm.tfms_aead[0])) {
		err = PTR_ERR(cc->cipher_tfm.tfms_aead[0]);
		crypt_free_tfms(cc);
		return err;
	}

	DMDEBUG_LIMIT("%s using implementation \"%s\"", ciphermode,
		      crypto_aead_alg(any_tfm_aead(cc))->base.cra_driver_name);
	return 0;
}

static int crypt_alloc_tfms(struct crypt_config *cc, char *ciphermode)
{
	if (crypt_integrity_aead(cc))
		return crypt_alloc_tfms_aead(cc, ciphermode);
	else
		return crypt_alloc_tfms_skcipher(cc, ciphermode);
}

static unsigned crypt_subkey_size(struct crypt_config *cc)
{
	return (cc->key_size - cc->key_extra_size) >> ilog2(cc->tfms_count);
}
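
/*
 * Worked example (assumed numbers, added for illustration): with
 * cc->key_size == 128, cc->key_extra_size == 0 and cc->tfms_count == 2
 * (a ":2" keycount suffix in the cipher specification), each tfm receives
 * (128 - 0) >> ilog2(2) == 64 bytes of key material.
 */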

static unsigned crypt_authenckey_size(struct crypt_config *cc)
{
	return crypt_subkey_size(cc) + RTA_SPACE(sizeof(struct crypto_authenc_key_param));
}

/*
 * If the AEAD is composed like authenc(hmac(sha256),xts(aes)),
 * the key must be passed in the special format expected by the
 * authenc template. This function converts cc->key to that format.
 */
static void crypt_copy_authenckey(char *p, const void *key,
				  unsigned enckeylen, unsigned authkeylen)
{
	struct crypto_authenc_key_param *param;
	struct rtattr *rta;

	rta = (struct rtattr *)p;
	param = RTA_DATA(rta);
	param->enckeylen = cpu_to_be32(enckeylen);
	rta->rta_len = RTA_LENGTH(sizeof(*param));
	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
	p += RTA_SPACE(sizeof(*param));
	memcpy(p, key + enckeylen, authkeylen);
	p += authkeylen;
	memcpy(p, key, enckeylen);
}
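
/*
 * Resulting layout (sketch derived from the code above; sizes depend on
 * the configured algorithms):
 *
 *   p: [ rtattr header + crypto_authenc_key_param (enckeylen, BE32) ]
 *      [ authentication key (authkeylen bytes, taken from the key end) ]
 *      [ encryption key     (enckeylen bytes, taken from the key start) ]
 *
 * i.e. the caller supplies the key as <enc key><auth key> while the
 * authenc template expects <rtattr><auth key><enc key>.
 */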

static int crypt_setkey(struct crypt_config *cc)
{
	unsigned subkey_size;
	int err = 0, i, r;

	/* Ignore extra keys (which are used for IV etc) */
	subkey_size = crypt_subkey_size(cc);

	if (crypt_integrity_hmac(cc)) {
		if (subkey_size < cc->key_mac_size)
			return -EINVAL;

		crypt_copy_authenckey(cc->authenc_key, cc->key,
				      subkey_size - cc->key_mac_size,
				      cc->key_mac_size);
	}

	for (i = 0; i < cc->tfms_count; i++) {
		if (crypt_integrity_hmac(cc))
			r = crypto_aead_setkey(cc->cipher_tfm.tfms_aead[i],
				cc->authenc_key, crypt_authenckey_size(cc));
		else if (crypt_integrity_aead(cc))
			r = crypto_aead_setkey(cc->cipher_tfm.tfms_aead[i],
					       cc->key + (i * subkey_size),
					       subkey_size);
		else
			r = crypto_skcipher_setkey(cc->cipher_tfm.tfms[i],
						   cc->key + (i * subkey_size),
						   subkey_size);
		if (r)
			err = r;
	}

	if (crypt_integrity_hmac(cc))
		memzero_explicit(cc->authenc_key, crypt_authenckey_size(cc));

	return err;
}

#ifdef CONFIG_KEYS

static bool contains_whitespace(const char *str)
{
	while (*str)
		if (isspace(*str++))
			return true;
	return false;
}

static int set_key_user(struct crypt_config *cc, struct key *key)
{
	const struct user_key_payload *ukp;

	ukp = user_key_payload_locked(key);
	if (!ukp)
		return -EKEYREVOKED;

	if (cc->key_size != ukp->datalen)
		return -EINVAL;

	memcpy(cc->key, ukp->data, cc->key_size);

	return 0;
}

#if defined(CONFIG_ENCRYPTED_KEYS) || defined(CONFIG_ENCRYPTED_KEYS_MODULE)
static int set_key_encrypted(struct crypt_config *cc, struct key *key)
{
	const struct encrypted_key_payload *ekp;

	ekp = key->payload.data[0];
	if (!ekp)
		return -EKEYREVOKED;

	if (cc->key_size != ekp->decrypted_datalen)
		return -EINVAL;

	memcpy(cc->key, ekp->decrypted_data, cc->key_size);

	return 0;
}
#endif /* CONFIG_ENCRYPTED_KEYS */

static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string)
{
	char *new_key_string, *key_desc;
	int ret;
	struct key_type *type;
	struct key *key;
	int (*set_key)(struct crypt_config *cc, struct key *key);

	/*
	 * Reject key_string with whitespace. dm core currently lacks code for
	 * proper whitespace escaping in arguments on DM_TABLE_STATUS path.
	 */
	if (contains_whitespace(key_string)) {
		DMERR("whitespace chars not allowed in key string");
		return -EINVAL;
	}

	/* look for next ':' separating key_type from key_description */
	key_desc = strpbrk(key_string, ":");
	if (!key_desc || key_desc == key_string || !strlen(key_desc + 1))
		return -EINVAL;

	if (!strncmp(key_string, "logon:", key_desc - key_string + 1)) {
		type = &key_type_logon;
		set_key = set_key_user;
	} else if (!strncmp(key_string, "user:", key_desc - key_string + 1)) {
		type = &key_type_user;
		set_key = set_key_user;
#if defined(CONFIG_ENCRYPTED_KEYS) || defined(CONFIG_ENCRYPTED_KEYS_MODULE)
	} else if (!strncmp(key_string, "encrypted:", key_desc - key_string + 1)) {
		type = &key_type_encrypted;
		set_key = set_key_encrypted;
#endif
	} else {
		return -EINVAL;
	}

	new_key_string = kstrdup(key_string, GFP_KERNEL);
	if (!new_key_string)
		return -ENOMEM;

	key = request_key(type, key_desc + 1, NULL);
	if (IS_ERR(key)) {
		kfree_sensitive(new_key_string);
		return PTR_ERR(key);
	}

	down_read(&key->sem);

	ret = set_key(cc, key);
	if (ret < 0) {
		up_read(&key->sem);
		key_put(key);
		kfree_sensitive(new_key_string);
		return ret;
	}

	up_read(&key->sem);
	key_put(key);

	/* clear the flag since following operations may invalidate previously valid key */
	clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);

	ret = crypt_setkey(cc);

	if (!ret) {
		set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
		kfree_sensitive(cc->key_string);
		cc->key_string = new_key_string;
	} else
		kfree_sensitive(new_key_string);

	return ret;
}

static int get_key_size(char **key_string)
{
	char *colon, dummy;
	int ret;

	if (*key_string[0] != ':')
		return strlen(*key_string) >> 1;

	/* look for next ':' in key string */
	colon = strpbrk(*key_string + 1, ":");
	if (!colon)
		return -EINVAL;

	if (sscanf(*key_string + 1, "%u%c", &ret, &dummy) != 2 || dummy != ':')
		return -EINVAL;

	*key_string = colon;

	/* remaining key string should be :<logon|user|encrypted>:<key_desc> */

	return ret;
}
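
/*
 * Example key strings (added commentary, illustrative names): a plain hex
 * key such as "deadbeef..." yields strlen/2; a keyring reference such as
 * ":64:logon:my_prefix:my_key" yields 64 and leaves *key_string pointing
 * at ":logon:my_prefix:my_key" for crypt_set_key() below to hand over to
 * crypt_set_keyring_key().
 */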

#else

static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string)
{
	return -EINVAL;
}

static int get_key_size(char **key_string)
{
	return (*key_string[0] == ':') ? -EINVAL : strlen(*key_string) >> 1;
}

#endif /* CONFIG_KEYS */

static int crypt_set_key(struct crypt_config *cc, char *key)
{
	int r = -EINVAL;
	int key_string_len = strlen(key);

	/* Hyphen (which gives a key_size of zero) means there is no key. */
	if (!cc->key_size && strcmp(key, "-"))
		goto out;

	/* ':' means the key is in kernel keyring, short-circuit normal key processing */
	if (key[0] == ':') {
		r = crypt_set_keyring_key(cc, key + 1);
		goto out;
	}

	/* clear the flag since following operations may invalidate previously valid key */
	clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);

	/* wipe references to any kernel keyring key */
	kfree_sensitive(cc->key_string);
	cc->key_string = NULL;

	/* Decode key from its hex representation. */
	if (cc->key_size && hex2bin(cc->key, key, cc->key_size) < 0)
		goto out;

	r = crypt_setkey(cc);
	if (!r)
		set_bit(DM_CRYPT_KEY_VALID, &cc->flags);

out:
	/* Hex key string not needed after here, so wipe it. */
	memset(key, '0', key_string_len);

	return r;
}

static int crypt_wipe_key(struct crypt_config *cc)
{
	int r;

	clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
	get_random_bytes(&cc->key, cc->key_size);

	/* Wipe IV private keys */
	if (cc->iv_gen_ops && cc->iv_gen_ops->wipe) {
		r = cc->iv_gen_ops->wipe(cc);
		if (r)
			return r;
	}

	kfree_sensitive(cc->key_string);
	cc->key_string = NULL;
	r = crypt_setkey(cc);
	memset(&cc->key, 0, cc->key_size * sizeof(u8));

	return r;
}

static void crypt_calculate_pages_per_client(void)
{
	unsigned long pages = (totalram_pages() - totalhigh_pages()) * DM_CRYPT_MEMORY_PERCENT / 100;

	if (!dm_crypt_clients_n)
		return;

	pages /= dm_crypt_clients_n;
	if (pages < DM_CRYPT_MIN_PAGES_PER_CLIENT)
		pages = DM_CRYPT_MIN_PAGES_PER_CLIENT;
	dm_crypt_pages_per_client = pages;
}
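
/*
 * Back-of-envelope example (added commentary; assumes
 * DM_CRYPT_MEMORY_PERCENT == 2): with 4 GiB of usable low memory and two
 * active dm-crypt targets, each client may hold up to roughly
 * 1048576 pages * 2% / 2 ~= 10485 pages (~41 MiB) in its page pool,
 * subject to the DM_CRYPT_MIN_PAGES_PER_CLIENT floor.
 */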
2506
2507static void *crypt_page_alloc(gfp_t gfp_mask, void *pool_data)
2508{
2509 struct crypt_config *cc = pool_data;
2510 struct page *page;
2511
2512 if (unlikely(percpu_counter_compare(&cc->n_allocated_pages, dm_crypt_pages_per_client) >= 0) &&
2513 likely(gfp_mask & __GFP_NORETRY))
2514 return NULL;
2515
2516 page = alloc_page(gfp_mask);
2517 if (likely(page != NULL))
2518 percpu_counter_add(&cc->n_allocated_pages, 1);
2519
2520 return page;
2521}
2522
2523static void crypt_page_free(void *page, void *pool_data)
2524{
2525 struct crypt_config *cc = pool_data;
2526
2527 __free_page(page);
2528 percpu_counter_sub(&cc->n_allocated_pages, 1);
2529}
2530
Milan Broz28513fc2010-08-12 04:14:06 +01002531static void crypt_dtr(struct dm_target *ti)
2532{
2533 struct crypt_config *cc = ti->private;
2534
2535 ti->private = NULL;
2536
2537 if (!cc)
2538 return;
2539
Rabin Vincentf659b102016-09-21 16:22:29 +02002540 if (cc->write_thread)
Mikulas Patockadc267622015-02-13 08:25:59 -05002541 kthread_stop(cc->write_thread);
2542
Milan Broz28513fc2010-08-12 04:14:06 +01002543 if (cc->io_queue)
2544 destroy_workqueue(cc->io_queue);
2545 if (cc->crypt_queue)
2546 destroy_workqueue(cc->crypt_queue);
2547
Mikulas Patockafd2d2312012-07-27 15:08:05 +01002548 crypt_free_tfms(cc);
2549
Kent Overstreet6f1c8192018-05-20 18:25:53 -04002550 bioset_exit(&cc->bs);
Milan Broz28513fc2010-08-12 04:14:06 +01002551
Kent Overstreet6f1c8192018-05-20 18:25:53 -04002552 mempool_exit(&cc->page_pool);
2553 mempool_exit(&cc->req_pool);
2554 mempool_exit(&cc->tag_pool);
2555
Kent Overstreetd00a11d2018-06-02 13:45:04 -04002556 WARN_ON(percpu_counter_sum(&cc->n_allocated_pages) != 0);
2557 percpu_counter_destroy(&cc->n_allocated_pages);
2558
Milan Broz28513fc2010-08-12 04:14:06 +01002559 if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
2560 cc->iv_gen_ops->dtr(cc);
2561
Milan Broz28513fc2010-08-12 04:14:06 +01002562 if (cc->dev)
2563 dm_put_device(ti, cc->dev);
2564
Waiman Long453431a2020-08-06 23:18:13 -07002565 kfree_sensitive(cc->cipher_string);
2566 kfree_sensitive(cc->key_string);
2567 kfree_sensitive(cc->cipher_auth);
2568 kfree_sensitive(cc->authenc_key);
Milan Broz28513fc2010-08-12 04:14:06 +01002569
Mike Snitzerd5ffebd2018-01-05 21:17:20 -05002570 mutex_destroy(&cc->bio_alloc_lock);
2571
Milan Broz28513fc2010-08-12 04:14:06 +01002572 /* Must zero key material before freeing */
Waiman Long453431a2020-08-06 23:18:13 -07002573 kfree_sensitive(cc);
Mikulas Patocka50593532017-08-13 22:45:08 -04002574
2575 spin_lock(&dm_crypt_clients_lock);
2576 WARN_ON(!dm_crypt_clients_n);
2577 dm_crypt_clients_n--;
2578 crypt_calculate_pages_per_client();
2579 spin_unlock(&dm_crypt_clients_lock);
Milan Broz28513fc2010-08-12 04:14:06 +01002580}
2581
Milan Broze889f972017-03-16 15:39:39 +01002582static int crypt_ctr_ivmode(struct dm_target *ti, const char *ivmode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002583{
Milan Broz5ebaee62010-08-12 04:14:07 +01002584 struct crypt_config *cc = ti->private;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002585
Milan Broz33d2f092017-03-16 15:39:40 +01002586 if (crypt_integrity_aead(cc))
Milan Broze889f972017-03-16 15:39:39 +01002587 cc->iv_size = crypto_aead_ivsize(any_tfm_aead(cc));
2588 else
2589 cc->iv_size = crypto_skcipher_ivsize(any_tfm(cc));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002590
Milan Broz5ebaee62010-08-12 04:14:07 +01002591 if (cc->iv_size)
2592	/* at least a 64-bit sector number should fit in our buffer */
2593 cc->iv_size = max(cc->iv_size,
2594 (unsigned int)(sizeof(u64) / sizeof(u8)));
2595 else if (ivmode) {
2596 DMWARN("Selected cipher does not support IVs");
2597 ivmode = NULL;
2598 }
2599
2600 /* Choose ivmode, see comments at iv code. */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002601 if (ivmode == NULL)
2602 cc->iv_gen_ops = NULL;
2603 else if (strcmp(ivmode, "plain") == 0)
2604 cc->iv_gen_ops = &crypt_iv_plain_ops;
Milan Broz61afef62009-12-10 23:52:25 +00002605 else if (strcmp(ivmode, "plain64") == 0)
2606 cc->iv_gen_ops = &crypt_iv_plain64_ops;
Milan Broz7e3fd852017-06-06 09:07:01 +02002607 else if (strcmp(ivmode, "plain64be") == 0)
2608 cc->iv_gen_ops = &crypt_iv_plain64be_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002609 else if (strcmp(ivmode, "essiv") == 0)
2610 cc->iv_gen_ops = &crypt_iv_essiv_ops;
Rik Snel48527fa2006-09-03 08:56:39 +10002611 else if (strcmp(ivmode, "benbi") == 0)
2612 cc->iv_gen_ops = &crypt_iv_benbi_ops;
Ludwig Nussel46b47732007-05-09 02:32:55 -07002613 else if (strcmp(ivmode, "null") == 0)
2614 cc->iv_gen_ops = &crypt_iv_null_ops;
Milan Brozb9411d72019-07-09 15:22:14 +02002615 else if (strcmp(ivmode, "eboiv") == 0)
2616 cc->iv_gen_ops = &crypt_iv_eboiv_ops;
Milan Brozbbb16582020-01-03 09:20:22 +01002617 else if (strcmp(ivmode, "elephant") == 0) {
2618 cc->iv_gen_ops = &crypt_iv_elephant_ops;
2619 cc->key_parts = 2;
2620 cc->key_extra_size = cc->key_size / 2;
2621 if (cc->key_extra_size > ELEPHANT_MAX_KEY_SIZE)
2622 return -EINVAL;
2623 set_bit(CRYPT_ENCRYPT_PREPROCESS, &cc->cipher_flags);
2624 } else if (strcmp(ivmode, "lmk") == 0) {
Milan Broz34745782011-01-13 19:59:55 +00002625 cc->iv_gen_ops = &crypt_iv_lmk_ops;
Milan Brozed04d982013-10-28 23:21:04 +01002626 /*
2627		 * Versions 2 and 3 are recognised according
Milan Broz34745782011-01-13 19:59:55 +00002628		 * to the length of the provided multi-key string.
2629		 * If present (version 3), the last key is used as the IV seed.
Milan Brozed04d982013-10-28 23:21:04 +01002630		 * All keys (including the IV seed) are always the same size.
Milan Broz34745782011-01-13 19:59:55 +00002631 */
Milan Brozda31a072013-10-28 23:21:03 +01002632 if (cc->key_size % cc->key_parts) {
Milan Broz34745782011-01-13 19:59:55 +00002633 cc->key_parts++;
Milan Brozda31a072013-10-28 23:21:03 +01002634 cc->key_extra_size = cc->key_size / cc->key_parts;
2635 }
Milan Brozed04d982013-10-28 23:21:04 +01002636 } else if (strcmp(ivmode, "tcw") == 0) {
2637 cc->iv_gen_ops = &crypt_iv_tcw_ops;
2638 cc->key_parts += 2; /* IV + whitening */
2639 cc->key_extra_size = cc->iv_size + TCW_WHITENING_SIZE;
Milan Broze889f972017-03-16 15:39:39 +01002640 } else if (strcmp(ivmode, "random") == 0) {
2641 cc->iv_gen_ops = &crypt_iv_random_ops;
2642 /* Need storage space in integrity fields. */
2643 cc->integrity_iv_size = cc->iv_size;
Milan Broz34745782011-01-13 19:59:55 +00002644 } else {
Alasdair G Kergon72d94862006-06-26 00:27:35 -07002645 ti->error = "Invalid IV mode";
Milan Broze889f972017-03-16 15:39:39 +01002646 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002647 }
2648
Milan Broze889f972017-03-16 15:39:39 +01002649 return 0;
2650}
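
/*
 * Example (legacy syntax): "aes-cbc-essiv:sha256" ends up selecting
 * crypt_iv_essiv_ops here, while "aes-xts-plain64" selects
 * crypt_iv_plain64_ops. The lmk and tcw modes additionally reserve part
 * of the key material (key_extra_size) for their IV seed / whitening.
 */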
2651
Milan Broz33d2f092017-03-16 15:39:40 +01002652/*
Milan Broz33d2f092017-03-16 15:39:40 +01002653 * Workaround to parse HMAC algorithm from AEAD crypto API spec.
2654 * The HMAC is needed to calculate tag size (HMAC digest size).
2655 * This should probably be done by crypto API calls (once available...)
2656 */
2657static int crypt_ctr_auth_cipher(struct crypt_config *cc, char *cipher_api)
2658{
2659 char *start, *end, *mac_alg = NULL;
2660 struct crypto_ahash *mac;
2661
2662 if (!strstarts(cipher_api, "authenc("))
2663 return 0;
2664
2665 start = strchr(cipher_api, '(');
2666 end = strchr(cipher_api, ',');
2667 if (!start || !end || ++start > end)
2668 return -EINVAL;
2669
2670 mac_alg = kzalloc(end - start + 1, GFP_KERNEL);
2671 if (!mac_alg)
2672 return -ENOMEM;
2673 strncpy(mac_alg, start, end - start);
2674
Mikulas Patockacd746932020-07-09 23:20:42 -07002675 mac = crypto_alloc_ahash(mac_alg, 0, CRYPTO_ALG_ALLOCATES_MEMORY);
Milan Broz33d2f092017-03-16 15:39:40 +01002676 kfree(mac_alg);
2677
2678 if (IS_ERR(mac))
2679 return PTR_ERR(mac);
2680
2681 cc->key_mac_size = crypto_ahash_digestsize(mac);
2682 crypto_free_ahash(mac);
2683
2684 cc->authenc_key = kmalloc(crypt_authenckey_size(cc), GFP_KERNEL);
2685 if (!cc->authenc_key)
2686 return -ENOMEM;
2687
2688 return 0;
2689}
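
/*
 * Worked example: for cipher_api "authenc(hmac(sha256),cbc(aes))" the
 * parser above extracts mac_alg = "hmac(sha256)", so cc->key_mac_size
 * becomes the SHA-256 digest size (32 bytes) and cc->authenc_key is
 * sized to carry the HMAC key in front of the encryption key.
 */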
2690
2691static int crypt_ctr_cipher_new(struct dm_target *ti, char *cipher_in, char *key,
2692 char **ivmode, char **ivopts)
Milan Broz5ebaee62010-08-12 04:14:07 +01002693{
2694 struct crypt_config *cc = ti->private;
Ard Biesheuvela1a262b2019-08-19 17:17:37 +03002695 char *tmp, *cipher_api, buf[CRYPTO_MAX_ALG_NAME];
Milan Broz33d2f092017-03-16 15:39:40 +01002696 int ret = -EINVAL;
2697
2698 cc->tfms_count = 1;
2699
2700 /*
2701 * New format (capi: prefix)
2702 * capi:cipher_api_spec-iv:ivopts
2703 */
2704 tmp = &cipher_in[strlen("capi:")];
Milan Broz1856b9f2019-01-09 11:57:14 +01002705
2706	/* Separate IV options if present; they can contain another '-' in the hash name */
2707 *ivopts = strrchr(tmp, ':');
2708 if (*ivopts) {
2709 **ivopts = '\0';
2710 (*ivopts)++;
2711 }
2712 /* Parse IV mode */
2713 *ivmode = strrchr(tmp, '-');
2714 if (*ivmode) {
2715 **ivmode = '\0';
2716 (*ivmode)++;
2717 }
2718 /* The rest is crypto API spec */
2719 cipher_api = tmp;
Milan Broz33d2f092017-03-16 15:39:40 +01002720
Ard Biesheuvela1a262b2019-08-19 17:17:37 +03002721	/* Alloc AEAD, which can be used only in the new format. */
2722 if (crypt_integrity_aead(cc)) {
2723 ret = crypt_ctr_auth_cipher(cc, cipher_api);
2724 if (ret < 0) {
2725 ti->error = "Invalid AEAD cipher spec";
2726			return ret;
2727 }
2728 }
2729
Milan Broz33d2f092017-03-16 15:39:40 +01002730 if (*ivmode && !strcmp(*ivmode, "lmk"))
2731 cc->tfms_count = 64;
2732
Ard Biesheuvela1a262b2019-08-19 17:17:37 +03002733 if (*ivmode && !strcmp(*ivmode, "essiv")) {
2734 if (!*ivopts) {
2735 ti->error = "Digest algorithm missing for ESSIV mode";
2736 return -EINVAL;
2737 }
2738 ret = snprintf(buf, CRYPTO_MAX_ALG_NAME, "essiv(%s,%s)",
2739 cipher_api, *ivopts);
2740 if (ret < 0 || ret >= CRYPTO_MAX_ALG_NAME) {
2741 ti->error = "Cannot allocate cipher string";
2742 return -ENOMEM;
2743 }
2744 cipher_api = buf;
2745 }
2746
Milan Broz33d2f092017-03-16 15:39:40 +01002747 cc->key_parts = cc->tfms_count;
2748
2749 /* Allocate cipher */
2750 ret = crypt_alloc_tfms(cc, cipher_api);
2751 if (ret < 0) {
2752 ti->error = "Error allocating crypto tfm";
2753 return ret;
2754 }
2755
Ard Biesheuvela1a262b2019-08-19 17:17:37 +03002756 if (crypt_integrity_aead(cc))
Milan Broz33d2f092017-03-16 15:39:40 +01002757 cc->iv_size = crypto_aead_ivsize(any_tfm_aead(cc));
Ard Biesheuvela1a262b2019-08-19 17:17:37 +03002758 else
Milan Broz33d2f092017-03-16 15:39:40 +01002759 cc->iv_size = crypto_skcipher_ivsize(any_tfm(cc));
2760
Milan Broz33d2f092017-03-16 15:39:40 +01002761 return 0;
2762}
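
/*
 * Examples of the capi: syntax parsed above, with legacy equivalents in
 * parentheses where one exists:
 *
 *	capi:cbc(aes)-essiv:sha256	("aes-cbc-essiv:sha256")
 *	capi:xts(aes)-plain64		("aes-xts-plain64")
 *	capi:authenc(hmac(sha256),cbc(aes))-essiv:sha256
 *
 * strrchr() is used for both splits so that a '-' or ':' inside the
 * algorithm name itself does not confuse the parser.
 */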
2763
2764static int crypt_ctr_cipher_old(struct dm_target *ti, char *cipher_in, char *key,
2765 char **ivmode, char **ivopts)
2766{
2767 struct crypt_config *cc = ti->private;
2768 char *tmp, *cipher, *chainmode, *keycount;
Milan Broz5ebaee62010-08-12 04:14:07 +01002769 char *cipher_api = NULL;
2770 int ret = -EINVAL;
2771 char dummy;
2772
Milan Broz33d2f092017-03-16 15:39:40 +01002773 if (strchr(cipher_in, '(') || crypt_integrity_aead(cc)) {
Milan Broz5ebaee62010-08-12 04:14:07 +01002774 ti->error = "Bad cipher specification";
2775 return -EINVAL;
2776 }
2777
Linus Torvalds1da177e2005-04-16 15:20:36 -07002778 /*
Milan Broz5ebaee62010-08-12 04:14:07 +01002779 * Legacy dm-crypt cipher specification
2780 * cipher[:keycount]-mode-iv:ivopts
2781 */
2782 tmp = cipher_in;
2783 keycount = strsep(&tmp, "-");
2784 cipher = strsep(&keycount, ":");
2785
Milan Broz69a8cfc2011-01-13 19:59:49 +00002786 if (!keycount)
Milan Broz5ebaee62010-08-12 04:14:07 +01002787 cc->tfms_count = 1;
2788 else if (sscanf(keycount, "%u%c", &cc->tfms_count, &dummy) != 1 ||
2789 !is_power_of_2(cc->tfms_count)) {
2790 ti->error = "Bad cipher key count specification";
2791 return -EINVAL;
2792 }
Milan Broz28513fc2010-08-12 04:14:06 +01002793 cc->key_parts = cc->tfms_count;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002794
Milan Brozddd42ed2008-02-08 02:11:07 +00002795 chainmode = strsep(&tmp, "-");
Milan Broz1856b9f2019-01-09 11:57:14 +01002796 *ivmode = strsep(&tmp, ":");
2797 *ivopts = tmp;
Milan Brozddd42ed2008-02-08 02:11:07 +00002798
2799 /*
2800 * For compatibility with the original dm-crypt mapping format, if
2801 * only the cipher name is supplied, use cbc-plain.
Milan Broz28513fc2010-08-12 04:14:06 +01002802 */
Milan Broz33d2f092017-03-16 15:39:40 +01002803 if (!chainmode || (!strcmp(chainmode, "plain") && !*ivmode)) {
Milan Brozcabf08e2007-10-19 22:38:58 +01002804 chainmode = "cbc";
Milan Broz33d2f092017-03-16 15:39:40 +01002805 *ivmode = "plain";
Milan Brozcabf08e2007-10-19 22:38:58 +01002806 }
2807
Milan Broz33d2f092017-03-16 15:39:40 +01002808 if (strcmp(chainmode, "ecb") && !*ivmode) {
Andi Kleenc0297722011-01-13 19:59:53 +00002809 ti->error = "IV mechanism required";
2810 return -EINVAL;
2811 }
2812
Milan Brozcabf08e2007-10-19 22:38:58 +01002813 cipher_api = kmalloc(CRYPTO_MAX_ALG_NAME, GFP_KERNEL);
Milan Broz9934a8b2007-10-19 22:38:57 +01002814 if (!cipher_api)
Milan Broz28513fc2010-08-12 04:14:06 +01002815 goto bad_mem;
Milan Broz9934a8b2007-10-19 22:38:57 +01002816
Ard Biesheuvela1a262b2019-08-19 17:17:37 +03002817 if (*ivmode && !strcmp(*ivmode, "essiv")) {
2818 if (!*ivopts) {
2819 ti->error = "Digest algorithm missing for ESSIV mode";
2820 kfree(cipher_api);
2821 return -EINVAL;
2822 }
2823 ret = snprintf(cipher_api, CRYPTO_MAX_ALG_NAME,
2824 "essiv(%s(%s),%s)", chainmode, cipher, *ivopts);
2825 } else {
2826 ret = snprintf(cipher_api, CRYPTO_MAX_ALG_NAME,
2827 "%s(%s)", chainmode, cipher);
2828 }
2829 if (ret < 0 || ret >= CRYPTO_MAX_ALG_NAME) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002830 kfree(cipher_api);
Milan Broz28513fc2010-08-12 04:14:06 +01002831 goto bad_mem;
2832 }
2833
Linus Torvalds1da177e2005-04-16 15:20:36 -07002834 /* Allocate cipher */
2835 ret = crypt_alloc_tfms(cc, cipher_api);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002836 if (ret < 0) {
2837 ti->error = "Error allocating crypto tfm";
Milan Broz33d2f092017-03-16 15:39:40 +01002838 kfree(cipher_api);
2839 return ret;
Alasdair G Kergon028867a2007-07-12 17:26:32 +01002840 }
Jeffy Chenbd86e322017-09-27 20:28:57 +08002841 kfree(cipher_api);
Mikulas Patocka647c7db2009-06-22 10:12:23 +01002842
Milan Broz33d2f092017-03-16 15:39:40 +01002843 return 0;
2844bad_mem:
2845 ti->error = "Cannot allocate cipher strings";
2846 return -ENOMEM;
2847}
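
/*
 * Worked example for the legacy parser: "aes:64-cbc-lmk" splits into
 * cipher "aes", keycount 64 (64 tfms, one key part each), chainmode
 * "cbc" and ivmode "lmk"; the crypto API string built above is then
 * "cbc(aes)". A bare "aes" falls back to "cbc(aes)" with a plain IV.
 */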
2848
2849static int crypt_ctr_cipher(struct dm_target *ti, char *cipher_in, char *key)
2850{
2851 struct crypt_config *cc = ti->private;
2852 char *ivmode = NULL, *ivopts = NULL;
2853 int ret;
2854
2855 cc->cipher_string = kstrdup(cipher_in, GFP_KERNEL);
2856 if (!cc->cipher_string) {
2857 ti->error = "Cannot allocate cipher strings";
2858 return -ENOMEM;
2859 }
2860
2861 if (strstarts(cipher_in, "capi:"))
2862 ret = crypt_ctr_cipher_new(ti, cipher_in, key, &ivmode, &ivopts);
2863 else
2864 ret = crypt_ctr_cipher_old(ti, cipher_in, key, &ivmode, &ivopts);
2865 if (ret)
2866 return ret;
2867
Mikulas Patocka647c7db2009-06-22 10:12:23 +01002868 /* Initialize IV */
Milan Broze889f972017-03-16 15:39:39 +01002869 ret = crypt_ctr_ivmode(ti, ivmode);
2870 if (ret < 0)
Milan Broz33d2f092017-03-16 15:39:40 +01002871 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002872
Milan Brozda31a072013-10-28 23:21:03 +01002873 /* Initialize and set key */
2874 ret = crypt_set_key(cc, key);
2875 if (ret < 0) {
2876 ti->error = "Error decoding and setting key";
Milan Broz33d2f092017-03-16 15:39:40 +01002877 return ret;
Milan Brozda31a072013-10-28 23:21:03 +01002878 }
2879
Linus Torvalds1da177e2005-04-16 15:20:36 -07002880 /* Allocate IV */
2881 if (cc->iv_gen_ops && cc->iv_gen_ops->ctr) {
2882 ret = cc->iv_gen_ops->ctr(cc, ti, ivopts);
2883 if (ret < 0) {
2884 ti->error = "Error creating IV";
Milan Broz33d2f092017-03-16 15:39:40 +01002885 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002886 }
2887 }
2888
2889 /* Initialize IV (set keys for ESSIV etc) */
2890 if (cc->iv_gen_ops && cc->iv_gen_ops->init) {
2891 ret = cc->iv_gen_ops->init(cc);
2892 if (ret < 0) {
2893 ti->error = "Error initialising IV";
Milan Broz33d2f092017-03-16 15:39:40 +01002894 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002895 }
2896 }
2897
Ondrej Kozinadc949022018-01-12 16:30:32 +01002898 /* wipe the kernel key payload copy */
2899 if (cc->key_string)
2900 memset(cc->key, 0, cc->key_size * sizeof(u8));
2901
Linus Torvalds1da177e2005-04-16 15:20:36 -07002902 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002903}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002904
Milan Brozef43aa32017-01-04 20:23:54 +01002905static int crypt_ctr_optional(struct dm_target *ti, unsigned int argc, char **argv)
2906{
2907 struct crypt_config *cc = ti->private;
2908 struct dm_arg_set as;
Eric Biggers5916a222017-06-22 11:32:45 -07002909 static const struct dm_arg _args[] = {
Ignat Korchagin39d42fa2020-07-06 18:37:31 +01002910 {0, 8, "Invalid number of feature args"},
Milan Brozef43aa32017-01-04 20:23:54 +01002911 };
2912 unsigned int opt_params, val;
2913 const char *opt_string, *sval;
Milan Broz8f0009a2017-03-16 15:39:44 +01002914 char dummy;
Milan Brozef43aa32017-01-04 20:23:54 +01002915 int ret;
2916
2917 /* Optional parameters */
2918 as.argc = argc;
2919 as.argv = argv;
2920
2921 ret = dm_read_arg_group(_args, &as, &opt_params, &ti->error);
2922 if (ret)
2923 return ret;
2924
2925 while (opt_params--) {
2926 opt_string = dm_shift_arg(&as);
2927 if (!opt_string) {
2928 ti->error = "Not enough feature arguments";
2929 return -EINVAL;
2930 }
2931
2932 if (!strcasecmp(opt_string, "allow_discards"))
2933 ti->num_discard_bios = 1;
2934
2935 else if (!strcasecmp(opt_string, "same_cpu_crypt"))
2936 set_bit(DM_CRYPT_SAME_CPU, &cc->flags);
2937
2938 else if (!strcasecmp(opt_string, "submit_from_crypt_cpus"))
2939 set_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags);
Ignat Korchagin39d42fa2020-07-06 18:37:31 +01002940 else if (!strcasecmp(opt_string, "no_read_workqueue"))
2941 set_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags);
2942 else if (!strcasecmp(opt_string, "no_write_workqueue"))
2943 set_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags);
Milan Brozef43aa32017-01-04 20:23:54 +01002944 else if (sscanf(opt_string, "integrity:%u:", &val) == 1) {
2945 if (val == 0 || val > MAX_TAG_SIZE) {
2946 ti->error = "Invalid integrity arguments";
2947 return -EINVAL;
2948 }
2949 cc->on_disk_tag_size = val;
2950 sval = strchr(opt_string + strlen("integrity:"), ':') + 1;
2951 if (!strcasecmp(sval, "aead")) {
2952 set_bit(CRYPT_MODE_INTEGRITY_AEAD, &cc->cipher_flags);
Milan Brozef43aa32017-01-04 20:23:54 +01002953 } else if (strcasecmp(sval, "none")) {
2954 ti->error = "Unknown integrity profile";
2955 return -EINVAL;
2956 }
2957
2958 cc->cipher_auth = kstrdup(sval, GFP_KERNEL);
2959 if (!cc->cipher_auth)
2960 return -ENOMEM;
Mikulas Patockaff3af922017-03-23 10:23:14 -04002961 } else if (sscanf(opt_string, "sector_size:%hu%c", &cc->sector_size, &dummy) == 1) {
Milan Broz8f0009a2017-03-16 15:39:44 +01002962 if (cc->sector_size < (1 << SECTOR_SHIFT) ||
2963 cc->sector_size > 4096 ||
Mikulas Patockaff3af922017-03-23 10:23:14 -04002964 (cc->sector_size & (cc->sector_size - 1))) {
Milan Broz8f0009a2017-03-16 15:39:44 +01002965 ti->error = "Invalid feature value for sector_size";
2966 return -EINVAL;
2967 }
Milan Broz783874b2017-09-13 15:45:56 +02002968 if (ti->len & ((cc->sector_size >> SECTOR_SHIFT) - 1)) {
2969 ti->error = "Device size is not multiple of sector_size feature";
2970 return -EINVAL;
2971 }
Mikulas Patockaff3af922017-03-23 10:23:14 -04002972 cc->sector_shift = __ffs(cc->sector_size) - SECTOR_SHIFT;
Milan Broz8f0009a2017-03-16 15:39:44 +01002973 } else if (!strcasecmp(opt_string, "iv_large_sectors"))
2974 set_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags);
2975 else {
Milan Brozef43aa32017-01-04 20:23:54 +01002976 ti->error = "Invalid feature arguments";
2977 return -EINVAL;
2978 }
2979 }
2980
2981 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002982}
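
/*
 * Example of an optional argument block as parsed above, at the end of
 * a table line (the leading count must match the number of feature
 * arguments that follow):
 *
 *	... 3 allow_discards sector_size:4096 iv_large_sectors
 */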
2983
Damien Le Moal8e225f02020-07-08 18:28:08 +09002984#ifdef CONFIG_BLK_DEV_ZONED
2985
2986static int crypt_report_zones(struct dm_target *ti,
2987 struct dm_report_zones_args *args, unsigned int nr_zones)
2988{
2989 struct crypt_config *cc = ti->private;
2990 sector_t sector = cc->start + dm_target_offset(ti, args->next_sector);
2991
2992 args->start = cc->start;
2993 return blkdev_report_zones(cc->dev->bdev, sector, nr_zones,
2994 dm_report_zones_cb, args);
2995}
2996
2997#endif
2998
Linus Torvalds1da177e2005-04-16 15:20:36 -07002999/*
3000 * Construct an encryption mapping:
Ondrej Kozinac538f6e2016-11-21 15:58:51 +01003001 * <cipher> [<key>|:<key_size>:<user|logon>:<key_description>] <iv_offset> <dev_path> <start>
Linus Torvalds1da177e2005-04-16 15:20:36 -07003002 */
3003static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
3004{
3005 struct crypt_config *cc;
Michał Mirosławed0302e2018-10-09 22:13:43 +02003006 const char *devname = dm_table_device_name(ti->table);
Ondrej Kozinac538f6e2016-11-21 15:58:51 +01003007 int key_size;
Milan Brozef43aa32017-01-04 20:23:54 +01003008 unsigned int align_mask;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003009 unsigned long long tmpll;
3010 int ret;
Milan Brozef43aa32017-01-04 20:23:54 +01003011 size_t iv_size_padding, additional_req_size;
Mikulas Patocka31998ef2012-03-28 18:41:26 +01003012 char dummy;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003013
Milan Broz772ae5f2011-08-02 12:32:08 +01003014 if (argc < 5) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003015 ti->error = "Not enough arguments";
3016 return -EINVAL;
3017 }
3018
Ondrej Kozinac538f6e2016-11-21 15:58:51 +01003019 key_size = get_key_size(&argv[1]);
3020 if (key_size < 0) {
3021 ti->error = "Cannot parse key size";
3022 return -EINVAL;
3023 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003024
Zhengyuan Liu9c81c992019-06-12 14:14:45 +08003025 cc = kzalloc(struct_size(cc, key, key_size), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003026 if (!cc) {
3027 ti->error = "Cannot allocate encryption context";
3028 return -ENOMEM;
3029 }
3030 cc->key_size = key_size;
Milan Broz8f0009a2017-03-16 15:39:44 +01003031 cc->sector_size = (1 << SECTOR_SHIFT);
Mikulas Patockaff3af922017-03-23 10:23:14 -04003032 cc->sector_shift = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003033
3034 ti->private = cc;
Milan Brozef43aa32017-01-04 20:23:54 +01003035
Mikulas Patocka50593532017-08-13 22:45:08 -04003036 spin_lock(&dm_crypt_clients_lock);
3037 dm_crypt_clients_n++;
3038 crypt_calculate_pages_per_client();
3039 spin_unlock(&dm_crypt_clients_lock);
3040
3041 ret = percpu_counter_init(&cc->n_allocated_pages, 0, GFP_KERNEL);
3042 if (ret < 0)
3043 goto bad;
3044
Milan Brozef43aa32017-01-04 20:23:54 +01003045 /* Optional parameters need to be read before cipher constructor */
3046 if (argc > 5) {
3047 ret = crypt_ctr_optional(ti, argc - 5, &argv[5]);
3048 if (ret)
3049 goto bad;
3050 }
3051
Linus Torvalds1da177e2005-04-16 15:20:36 -07003052 ret = crypt_ctr_cipher(ti, argv[0], argv[1]);
3053 if (ret < 0)
3054 goto bad;
3055
Milan Broz33d2f092017-03-16 15:39:40 +01003056 if (crypt_integrity_aead(cc)) {
Milan Brozef43aa32017-01-04 20:23:54 +01003057 cc->dmreq_start = sizeof(struct aead_request);
3058 cc->dmreq_start += crypto_aead_reqsize(any_tfm_aead(cc));
3059 align_mask = crypto_aead_alignmask(any_tfm_aead(cc));
3060 } else {
3061 cc->dmreq_start = sizeof(struct skcipher_request);
3062 cc->dmreq_start += crypto_skcipher_reqsize(any_tfm(cc));
3063 align_mask = crypto_skcipher_alignmask(any_tfm(cc));
3064 }
Mikulas Patockad49ec522014-08-28 11:09:31 -04003065 cc->dmreq_start = ALIGN(cc->dmreq_start, __alignof__(struct dm_crypt_request));
3066
Milan Brozef43aa32017-01-04 20:23:54 +01003067 if (align_mask < CRYPTO_MINALIGN) {
Mikulas Patockad49ec522014-08-28 11:09:31 -04003068 /* Allocate the padding exactly */
3069 iv_size_padding = -(cc->dmreq_start + sizeof(struct dm_crypt_request))
Milan Brozef43aa32017-01-04 20:23:54 +01003070 & align_mask;
Mikulas Patockad49ec522014-08-28 11:09:31 -04003071 } else {
3072 /*
3073 * If the cipher requires greater alignment than kmalloc
3074 * alignment, we don't know the exact position of the
3075 * initialization vector. We must assume worst case.
3076 */
Milan Brozef43aa32017-01-04 20:23:54 +01003077 iv_size_padding = align_mask;
Mikulas Patockad49ec522014-08-28 11:09:31 -04003078 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003079
Milan Brozef43aa32017-01-04 20:23:54 +01003080 /* ...| IV + padding | original IV | original sec. number | bio tag offset | */
3081 additional_req_size = sizeof(struct dm_crypt_request) +
3082 iv_size_padding + cc->iv_size +
3083 cc->iv_size +
3084 sizeof(uint64_t) +
3085 sizeof(unsigned int);
3086
Kent Overstreet6f1c8192018-05-20 18:25:53 -04003087 ret = mempool_init_kmalloc_pool(&cc->req_pool, MIN_IOS, cc->dmreq_start + additional_req_size);
3088 if (ret) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003089 ti->error = "Cannot allocate crypt request mempool";
3090 goto bad;
3091 }
3092
Mike Snitzer30187e12016-01-31 13:28:26 -05003093 cc->per_bio_data_size = ti->per_io_data_size =
Milan Brozef43aa32017-01-04 20:23:54 +01003094 ALIGN(sizeof(struct dm_crypt_io) + cc->dmreq_start + additional_req_size,
Mikulas Patockad49ec522014-08-28 11:09:31 -04003095 ARCH_KMALLOC_MINALIGN);
Mikulas Patocka298a9fa2014-03-28 15:51:55 -04003096
Kent Overstreet6f1c8192018-05-20 18:25:53 -04003097 ret = mempool_init(&cc->page_pool, BIO_MAX_PAGES, crypt_page_alloc, crypt_page_free, cc);
3098 if (ret) {
Milan Broz8b004452006-10-03 01:15:37 -07003099 ti->error = "Cannot allocate page mempool";
Milan Broze48d4bb2006-10-03 01:15:37 -07003100 goto bad;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003101 }
Milan Broze48d4bb2006-10-03 01:15:37 -07003102
Kent Overstreet6f1c8192018-05-20 18:25:53 -04003103 ret = bioset_init(&cc->bs, MIN_IOS, 0, BIOSET_NEED_BVECS);
3104 if (ret) {
Milan Broz0c395b02008-02-08 02:10:54 +00003105 ti->error = "Cannot allocate crypt bioset";
Milan Brozcabf08e2007-10-19 22:38:58 +01003106 goto bad;
Milan Broz93e605c2006-10-03 01:15:38 -07003107 }
Milan Brozcabf08e2007-10-19 22:38:58 +01003108
Mikulas Patocka7145c242015-02-13 08:24:41 -05003109 mutex_init(&cc->bio_alloc_lock);
3110
Milan Brozcabf08e2007-10-19 22:38:58 +01003111 ret = -EINVAL;
Milan Broz8f0009a2017-03-16 15:39:44 +01003112 if ((sscanf(argv[2], "%llu%c", &tmpll, &dummy) != 1) ||
3113 (tmpll & ((cc->sector_size >> SECTOR_SHIFT) - 1))) {
Milan Brozcabf08e2007-10-19 22:38:58 +01003114 ti->error = "Invalid iv_offset sector";
3115 goto bad;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003116 }
Kiyoshi Uedad2a7ad22006-12-08 02:41:06 -08003117 cc->iv_offset = tmpll;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003118
Vivek Goyale80d1c82015-07-31 09:20:36 -04003119 ret = dm_get_device(ti, argv[3], dm_table_get_mode(ti->table), &cc->dev);
3120 if (ret) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003121 ti->error = "Device lookup failed";
3122 goto bad;
3123 }
3124
Vivek Goyale80d1c82015-07-31 09:20:36 -04003125 ret = -EINVAL;
Milan Brozef87bfc2018-11-07 22:24:55 +01003126 if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1 || tmpll != (sector_t)tmpll) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003127 ti->error = "Invalid device sector";
3128 goto bad;
3129 }
3130 cc->start = tmpll;
3131
Damien Le Moal8e225f02020-07-08 18:28:08 +09003132 /*
3133 * For zoned block devices, we need to preserve the issuer write
3134 * ordering. To do so, disable write workqueues and force inline
3135 * encryption completion.
3136 */
3137 if (bdev_is_zoned(cc->dev->bdev)) {
3138 set_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags);
3139 set_bit(DM_CRYPT_WRITE_INLINE, &cc->flags);
3140 }
3141
Milan Broz33d2f092017-03-16 15:39:40 +01003142 if (crypt_integrity_aead(cc) || cc->integrity_iv_size) {
Milan Brozef43aa32017-01-04 20:23:54 +01003143 ret = crypt_integrity_ctr(cc, ti);
Milan Broz772ae5f2011-08-02 12:32:08 +01003144 if (ret)
3145 goto bad;
3146
Milan Brozef43aa32017-01-04 20:23:54 +01003147 cc->tag_pool_max_sectors = POOL_ENTRY_SIZE / cc->on_disk_tag_size;
3148 if (!cc->tag_pool_max_sectors)
3149 cc->tag_pool_max_sectors = 1;
Milan Broz772ae5f2011-08-02 12:32:08 +01003150
Kent Overstreet6f1c8192018-05-20 18:25:53 -04003151 ret = mempool_init_kmalloc_pool(&cc->tag_pool, MIN_IOS,
Milan Brozef43aa32017-01-04 20:23:54 +01003152 cc->tag_pool_max_sectors * cc->on_disk_tag_size);
Kent Overstreet6f1c8192018-05-20 18:25:53 -04003153 if (ret) {
Milan Brozef43aa32017-01-04 20:23:54 +01003154 ti->error = "Cannot allocate integrity tags mempool";
3155 goto bad;
Milan Broz772ae5f2011-08-02 12:32:08 +01003156 }
Mikulas Patocka583fe742017-04-18 16:51:54 -04003157
3158 cc->tag_pool_max_sectors <<= cc->sector_shift;
Milan Broz772ae5f2011-08-02 12:32:08 +01003159 }
3160
Linus Torvalds1da177e2005-04-16 15:20:36 -07003161 ret = -ENOMEM;
Mike Snitzerf612b212019-11-20 17:27:39 -05003162 cc->io_queue = alloc_workqueue("kcryptd_io/%s", WQ_MEM_RECLAIM, 1, devname);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003163 if (!cc->io_queue) {
3164 ti->error = "Couldn't create kcryptd io queue";
3165 goto bad;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003166 }
Christophe Saout37af6562006-10-30 20:39:08 +01003167
Mikulas Patockaf3396c582015-02-13 08:23:09 -05003168 if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
Mike Snitzerf612b212019-11-20 17:27:39 -05003169 cc->crypt_queue = alloc_workqueue("kcryptd/%s", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM,
Michał Mirosławed0302e2018-10-09 22:13:43 +02003170 1, devname);
Mikulas Patockaf3396c582015-02-13 08:23:09 -05003171 else
Michał Mirosławed0302e2018-10-09 22:13:43 +02003172 cc->crypt_queue = alloc_workqueue("kcryptd/%s",
Mike Snitzerf612b212019-11-20 17:27:39 -05003173 WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND,
Michał Mirosławed0302e2018-10-09 22:13:43 +02003174 num_online_cpus(), devname);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003175 if (!cc->crypt_queue) {
3176 ti->error = "Couldn't create kcryptd queue";
3177 goto bad;
3178 }
3179
Mikulas Patockac7329ef2018-07-11 12:10:51 -04003180 spin_lock_init(&cc->write_thread_lock);
Mikulas Patockab3c5fd32015-02-13 08:27:41 -05003181 cc->write_tree = RB_ROOT;
Mikulas Patockadc267622015-02-13 08:25:59 -05003182
Michał Mirosławed0302e2018-10-09 22:13:43 +02003183 cc->write_thread = kthread_create(dmcrypt_write, cc, "dmcrypt_write/%s", devname);
Mikulas Patockadc267622015-02-13 08:25:59 -05003184 if (IS_ERR(cc->write_thread)) {
3185 ret = PTR_ERR(cc->write_thread);
3186 cc->write_thread = NULL;
3187 ti->error = "Couldn't spawn write thread";
3188 goto bad;
3189 }
3190 wake_up_process(cc->write_thread);
3191
Alasdair G Kergon55a62ee2013-03-01 22:45:47 +00003192 ti->num_flush_bios = 1;
Milan Broz983c7db2011-09-25 23:26:21 +01003193
Linus Torvalds1da177e2005-04-16 15:20:36 -07003194 return 0;
3195
3196bad:
3197 crypt_dtr(ti);
3198 return ret;
Mikulas Patocka647c7db2009-06-22 10:12:23 +01003199}
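
/*
 * Putting the constructor arguments together, an example table line for
 * a 512 MiB (1048576-sector) device; the key and device name are
 * illustrative only:
 *
 *	0 1048576 crypt aes-xts-plain64 e8cf8a0d...b7c21d0 0 /dev/sdb 0
 *
 * or, taking a 64-byte key from the kernel keyring instead of inline
 * hex:
 *
 *	0 1048576 crypt aes-xts-plain64 :64:logon:cryptkey 0 /dev/sdb 0
 */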
3200
Mikulas Patocka7de3ee52012-12-21 20:23:41 +00003201static int crypt_map(struct dm_target *ti, struct bio *bio)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003202{
3203 struct dm_crypt_io *io;
Alasdair G Kergon49a8a922012-07-27 15:08:05 +01003204 struct crypt_config *cc = ti->private;
Mikulas Patocka647c7db2009-06-22 10:12:23 +01003205
Milan Broz772ae5f2011-08-02 12:32:08 +01003206 /*
Mike Christie28a8f0d2016-06-05 14:32:25 -05003207 * If bio is REQ_PREFLUSH or REQ_OP_DISCARD, just bypass crypt queues.
3208 * - for REQ_PREFLUSH device-mapper core ensures that no IO is in-flight
Mike Christiee6047142016-06-05 14:32:04 -05003209 * - for REQ_OP_DISCARD caller must use flush if IO ordering matters
Milan Broz772ae5f2011-08-02 12:32:08 +01003210 */
Jens Axboe1eff9d32016-08-05 15:35:16 -06003211 if (unlikely(bio->bi_opf & REQ_PREFLUSH ||
Mike Christie28a8f0d2016-06-05 14:32:25 -05003212 bio_op(bio) == REQ_OP_DISCARD)) {
Christoph Hellwig74d46992017-08-23 19:10:32 +02003213 bio_set_dev(bio, cc->dev->bdev);
Milan Broz772ae5f2011-08-02 12:32:08 +01003214 if (bio_sectors(bio))
Kent Overstreet4f024f32013-10-11 15:44:27 -07003215 bio->bi_iter.bi_sector = cc->start +
3216 dm_target_offset(ti, bio->bi_iter.bi_sector);
Mikulas Patocka647c7db2009-06-22 10:12:23 +01003217 return DM_MAPIO_REMAPPED;
3218 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003219
Mikulas Patocka4e870e92016-08-30 16:38:42 -04003220 /*
3221 * Check if bio is too large, split as needed.
3222 */
3223 if (unlikely(bio->bi_iter.bi_size > (BIO_MAX_PAGES << PAGE_SHIFT)) &&
Milan Brozef43aa32017-01-04 20:23:54 +01003224 (bio_data_dir(bio) == WRITE || cc->on_disk_tag_size))
Mikulas Patocka4e870e92016-08-30 16:38:42 -04003225 dm_accept_partial_bio(bio, ((BIO_MAX_PAGES << PAGE_SHIFT) >> SECTOR_SHIFT));
3226
Milan Broz8f0009a2017-03-16 15:39:44 +01003227 /*
3228	 * Ensure that the bio is a multiple of the internal sector encryption size
3229	 * and is aligned to this size as defined in the IO hints.
3230 */
3231 if (unlikely((bio->bi_iter.bi_sector & ((cc->sector_size >> SECTOR_SHIFT) - 1)) != 0))
Christoph Hellwig846785e2017-06-03 09:38:02 +02003232 return DM_MAPIO_KILL;
Milan Broz8f0009a2017-03-16 15:39:44 +01003233
3234 if (unlikely(bio->bi_iter.bi_size & (cc->sector_size - 1)))
Christoph Hellwig846785e2017-06-03 09:38:02 +02003235 return DM_MAPIO_KILL;
Milan Broz8f0009a2017-03-16 15:39:44 +01003236
Mikulas Patocka298a9fa2014-03-28 15:51:55 -04003237 io = dm_per_bio_data(bio, cc->per_bio_data_size);
3238 crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));
Milan Brozef43aa32017-01-04 20:23:54 +01003239
3240 if (cc->on_disk_tag_size) {
Mikulas Patocka583fe742017-04-18 16:51:54 -04003241 unsigned tag_len = cc->on_disk_tag_size * (bio_sectors(bio) >> cc->sector_shift);
Milan Brozef43aa32017-01-04 20:23:54 +01003242
3243 if (unlikely(tag_len > KMALLOC_MAX_SIZE) ||
Mikulas Patocka583fe742017-04-18 16:51:54 -04003244 unlikely(!(io->integrity_metadata = kmalloc(tag_len,
Milan Brozef43aa32017-01-04 20:23:54 +01003245 GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN)))) {
3246 if (bio_sectors(bio) > cc->tag_pool_max_sectors)
3247 dm_accept_partial_bio(bio, cc->tag_pool_max_sectors);
Kent Overstreet6f1c8192018-05-20 18:25:53 -04003248 io->integrity_metadata = mempool_alloc(&cc->tag_pool, GFP_NOIO);
Milan Brozef43aa32017-01-04 20:23:54 +01003249 io->integrity_metadata_from_pool = true;
3250 }
3251 }
3252
Milan Broz33d2f092017-03-16 15:39:40 +01003253 if (crypt_integrity_aead(cc))
Milan Brozef43aa32017-01-04 20:23:54 +01003254 io->ctx.r.req_aead = (struct aead_request *)(io + 1);
3255 else
3256 io->ctx.r.req = (struct skcipher_request *)(io + 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003257
Milan Broz20c82532011-01-13 19:59:53 +00003258 if (bio_data_dir(io->base_bio) == READ) {
3259 if (kcryptd_io_read(io, GFP_NOWAIT))
Mikulas Patockadc267622015-02-13 08:25:59 -05003260 kcryptd_queue_read(io);
Milan Broz20c82532011-01-13 19:59:53 +00003261 } else
Andrew Morton4ee218c2006-03-27 01:17:48 -08003262 kcryptd_queue_crypt(io);
3263
Linus Torvalds1da177e2005-04-16 15:20:36 -07003264 return DM_MAPIO_SUBMITTED;
3265}
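
/*
 * Example of the sector_size checks above: with sector_size:4096 one
 * encryption unit spans 8 device sectors, so a bio starting at sector
 * 13, or one of 3 sectors (1536 bytes), is rejected with DM_MAPIO_KILL.
 * crypt_io_hints() below advertises matching queue limits so a
 * well-behaved stack never submits such bios.
 */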
3266
Mikulas Patockafd7c0922013-03-01 22:45:44 +00003267static void crypt_status(struct dm_target *ti, status_type_t type,
3268 unsigned status_flags, char *result, unsigned maxlen)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003269{
Milan Broz5ebaee62010-08-12 04:14:07 +01003270 struct crypt_config *cc = ti->private;
Mikulas Patockafd7c0922013-03-01 22:45:44 +00003271 unsigned i, sz = 0;
Mikulas Patockaf3396c582015-02-13 08:23:09 -05003272 int num_feature_args = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003273
3274 switch (type) {
3275 case STATUSTYPE_INFO:
3276 result[0] = '\0';
3277 break;
3278
3279 case STATUSTYPE_TABLE:
Milan Broz7dbcd132011-01-13 19:59:52 +00003280 DMEMIT("%s ", cc->cipher_string);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003281
Ondrej Kozinac538f6e2016-11-21 15:58:51 +01003282 if (cc->key_size > 0) {
3283 if (cc->key_string)
3284 DMEMIT(":%u:%s", cc->key_size, cc->key_string);
3285 else
3286 for (i = 0; i < cc->key_size; i++)
3287 DMEMIT("%02x", cc->key[i]);
3288 } else
Mikulas Patockafd7c0922013-03-01 22:45:44 +00003289 DMEMIT("-");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003290
3291 DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset,
3292 cc->dev->name, (unsigned long long)cc->start);
Milan Broz772ae5f2011-08-02 12:32:08 +01003293
Mikulas Patockaf3396c582015-02-13 08:23:09 -05003294 num_feature_args += !!ti->num_discard_bios;
3295 num_feature_args += test_bit(DM_CRYPT_SAME_CPU, &cc->flags);
Mikulas Patocka0f5d8e62015-02-13 08:27:08 -05003296 num_feature_args += test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags);
Ignat Korchagin39d42fa2020-07-06 18:37:31 +01003297 num_feature_args += test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags);
3298 num_feature_args += test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags);
Mikulas Patockaff3af922017-03-23 10:23:14 -04003299 num_feature_args += cc->sector_size != (1 << SECTOR_SHIFT);
Milan Broz8f0009a2017-03-16 15:39:44 +01003300 num_feature_args += test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags);
Milan Brozef43aa32017-01-04 20:23:54 +01003301 if (cc->on_disk_tag_size)
3302 num_feature_args++;
Mikulas Patockaf3396c582015-02-13 08:23:09 -05003303 if (num_feature_args) {
3304 DMEMIT(" %d", num_feature_args);
3305 if (ti->num_discard_bios)
3306 DMEMIT(" allow_discards");
3307 if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
3308 DMEMIT(" same_cpu_crypt");
Mikulas Patocka0f5d8e62015-02-13 08:27:08 -05003309 if (test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags))
3310 DMEMIT(" submit_from_crypt_cpus");
Ignat Korchagin39d42fa2020-07-06 18:37:31 +01003311 if (test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags))
3312 DMEMIT(" no_read_workqueue");
3313 if (test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags))
3314 DMEMIT(" no_write_workqueue");
Milan Brozef43aa32017-01-04 20:23:54 +01003315 if (cc->on_disk_tag_size)
3316 DMEMIT(" integrity:%u:%s", cc->on_disk_tag_size, cc->cipher_auth);
Milan Broz8f0009a2017-03-16 15:39:44 +01003317 if (cc->sector_size != (1 << SECTOR_SHIFT))
3318 DMEMIT(" sector_size:%d", cc->sector_size);
3319 if (test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags))
3320 DMEMIT(" iv_large_sectors");
Mikulas Patockaf3396c582015-02-13 08:23:09 -05003321 }
Milan Broz772ae5f2011-08-02 12:32:08 +01003322
Linus Torvalds1da177e2005-04-16 15:20:36 -07003323 break;
3324 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003325}
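
/*
 * Example STATUSTYPE_TABLE output as emitted above (what "dmsetup table"
 * shows; key abbreviated, values illustrative):
 *
 *	aes-xts-plain64 e8cf...21d0 0 /dev/sdb 0 2 allow_discards sector_size:4096
 */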
3326
Milan Broze48d4bb2006-10-03 01:15:37 -07003327static void crypt_postsuspend(struct dm_target *ti)
3328{
3329 struct crypt_config *cc = ti->private;
3330
3331 set_bit(DM_CRYPT_SUSPENDED, &cc->flags);
3332}
3333
3334static int crypt_preresume(struct dm_target *ti)
3335{
3336 struct crypt_config *cc = ti->private;
3337
3338 if (!test_bit(DM_CRYPT_KEY_VALID, &cc->flags)) {
3339 DMERR("aborting resume - crypt key is not set.");
3340 return -EAGAIN;
3341 }
3342
3343 return 0;
3344}
3345
3346static void crypt_resume(struct dm_target *ti)
3347{
3348 struct crypt_config *cc = ti->private;
3349
3350 clear_bit(DM_CRYPT_SUSPENDED, &cc->flags);
3351}
3352
3353/* Message interface
3354 * key set <key>
3355 * key wipe
3356 */
Mike Snitzer1eb5fa82018-02-28 15:59:59 -05003357static int crypt_message(struct dm_target *ti, unsigned argc, char **argv,
3358 char *result, unsigned maxlen)
Milan Broze48d4bb2006-10-03 01:15:37 -07003359{
3360 struct crypt_config *cc = ti->private;
Ondrej Kozinac538f6e2016-11-21 15:58:51 +01003361 int key_size, ret = -EINVAL;
Milan Broze48d4bb2006-10-03 01:15:37 -07003362
3363 if (argc < 2)
3364 goto error;
3365
Mike Snitzer498f0102011-08-02 12:32:04 +01003366 if (!strcasecmp(argv[0], "key")) {
Milan Broze48d4bb2006-10-03 01:15:37 -07003367 if (!test_bit(DM_CRYPT_SUSPENDED, &cc->flags)) {
3368 DMWARN("not suspended during key manipulation.");
3369 return -EINVAL;
3370 }
Mike Snitzer498f0102011-08-02 12:32:04 +01003371 if (argc == 3 && !strcasecmp(argv[1], "set")) {
Ondrej Kozinac538f6e2016-11-21 15:58:51 +01003372 /* The key size may not be changed. */
3373 key_size = get_key_size(&argv[2]);
3374 if (key_size < 0 || cc->key_size != key_size) {
3375 memset(argv[2], '0', strlen(argv[2]));
3376 return -EINVAL;
3377 }
3378
Milan Broz542da312009-12-10 23:51:57 +00003379 ret = crypt_set_key(cc, argv[2]);
3380 if (ret)
3381 return ret;
3382 if (cc->iv_gen_ops && cc->iv_gen_ops->init)
3383 ret = cc->iv_gen_ops->init(cc);
Ondrej Kozinadc949022018-01-12 16:30:32 +01003384 /* wipe the kernel key payload copy */
3385 if (cc->key_string)
3386 memset(cc->key, 0, cc->key_size * sizeof(u8));
Milan Broz542da312009-12-10 23:51:57 +00003387 return ret;
3388 }
Milan Broz4a52ffc2019-07-09 15:22:12 +02003389 if (argc == 2 && !strcasecmp(argv[1], "wipe"))
Milan Broze48d4bb2006-10-03 01:15:37 -07003390 return crypt_wipe_key(cc);
3391 }
3392
3393error:
3394 DMWARN("unrecognised message received.");
3395 return -EINVAL;
3396}
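
/*
 * Example message sequence driving the handler above (device name is
 * illustrative):
 *
 *	dmsetup suspend cryptdev
 *	dmsetup message cryptdev 0 key wipe
 *	dmsetup message cryptdev 0 key set <hexkey or :size:type:desc>
 *	dmsetup resume cryptdev
 *
 * Key manipulation is refused unless the device is suspended, and
 * crypt_preresume() above keeps the device from resuming until a valid
 * key is set again.
 */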
3397
Mike Snitzeraf4874e2009-06-22 10:12:33 +01003398static int crypt_iterate_devices(struct dm_target *ti,
3399 iterate_devices_callout_fn fn, void *data)
3400{
3401 struct crypt_config *cc = ti->private;
3402
Mike Snitzer5dea2712009-07-23 20:30:42 +01003403 return fn(ti, cc->dev, cc->start, ti->len, data);
Mike Snitzeraf4874e2009-06-22 10:12:33 +01003404}
3405
Mike Snitzer586b2862015-09-09 21:34:51 -04003406static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
3407{
Milan Broz8f0009a2017-03-16 15:39:44 +01003408 struct crypt_config *cc = ti->private;
3409
Mike Snitzer586b2862015-09-09 21:34:51 -04003410 /*
3411 * Unfortunate constraint that is required to avoid the potential
3412 * for exceeding underlying device's max_segments limits -- due to
3413 * crypt_alloc_buffer() possibly allocating pages for the encryption
3414 * bio that are not as physically contiguous as the original bio.
3415 */
3416 limits->max_segment_size = PAGE_SIZE;
Milan Broz8f0009a2017-03-16 15:39:44 +01003417
Mikulas Patockabc9e9cf2018-08-10 11:23:56 -04003418 limits->logical_block_size =
Eric Biggers64611a152020-06-04 12:01:26 -07003419 max_t(unsigned, limits->logical_block_size, cc->sector_size);
Mikulas Patockabc9e9cf2018-08-10 11:23:56 -04003420 limits->physical_block_size =
3421 max_t(unsigned, limits->physical_block_size, cc->sector_size);
3422 limits->io_min = max_t(unsigned, limits->io_min, cc->sector_size);
Mike Snitzer586b2862015-09-09 21:34:51 -04003423}
3424
Linus Torvalds1da177e2005-04-16 15:20:36 -07003425static struct target_type crypt_target = {
3426 .name = "crypt",
Ignat Korchagin39d42fa2020-07-06 18:37:31 +01003427 .version = {1, 22, 0},
Linus Torvalds1da177e2005-04-16 15:20:36 -07003428 .module = THIS_MODULE,
3429 .ctr = crypt_ctr,
3430 .dtr = crypt_dtr,
Damien Le Moal8e225f02020-07-08 18:28:08 +09003431#ifdef CONFIG_BLK_DEV_ZONED
3432 .features = DM_TARGET_ZONED_HM,
3433 .report_zones = crypt_report_zones,
3434#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07003435 .map = crypt_map,
3436 .status = crypt_status,
Milan Broze48d4bb2006-10-03 01:15:37 -07003437 .postsuspend = crypt_postsuspend,
3438 .preresume = crypt_preresume,
3439 .resume = crypt_resume,
3440 .message = crypt_message,
Mike Snitzeraf4874e2009-06-22 10:12:33 +01003441 .iterate_devices = crypt_iterate_devices,
Mike Snitzer586b2862015-09-09 21:34:51 -04003442 .io_hints = crypt_io_hints,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003443};
3444
3445static int __init dm_crypt_init(void)
3446{
3447 int r;
3448
Linus Torvalds1da177e2005-04-16 15:20:36 -07003449 r = dm_register_target(&crypt_target);
Mikulas Patocka94f5e022015-02-13 08:25:26 -05003450 if (r < 0)
Alasdair G Kergon72d94862006-06-26 00:27:35 -07003451 DMERR("register failed %d", r);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003452
Linus Torvalds1da177e2005-04-16 15:20:36 -07003453 return r;
3454}
3455
3456static void __exit dm_crypt_exit(void)
3457{
Mikulas Patocka10d3bd02009-01-06 03:04:58 +00003458 dm_unregister_target(&crypt_target);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003459}
3460
3461module_init(dm_crypt_init);
3462module_exit(dm_crypt_exit);
3463
Jana Saoutbf142992014-06-24 14:27:04 -04003464MODULE_AUTHOR("Jana Saout <jana@saout.de>");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003465MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption");
3466MODULE_LICENSE("GPL");