/*
 * Copyright (C) 2003 Jana Saout <jana@saout.de>
 * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org>
 * Copyright (C) 2006-2020 Red Hat, Inc. All rights reserved.
 * Copyright (C) 2013-2020 Milan Broz <gmazyland@gmail.com>
 *
 * This file is released under the GPL.
 */

#include <linux/completion.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/key.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-integrity.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/backing-dev.h>
#include <linux/atomic.h>
#include <linux/scatterlist.h>
#include <linux/rbtree.h>
#include <linux/ctype.h>
#include <asm/page.h>
#include <asm/unaligned.h>
#include <crypto/hash.h>
#include <crypto/md5.h>
#include <crypto/algapi.h>
#include <crypto/skcipher.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/rtnetlink.h> /* for struct rtattr and RTA macros only */
#include <linux/key-type.h>
#include <keys/user-type.h>
#include <keys/encrypted-type.h>
#include <keys/trusted-type.h>

#include <linux/device-mapper.h>

#include "dm-audit.h"

#define DM_MSG_PREFIX "crypt"

/*
 * context holding the current state of a multi-part conversion
 */
struct convert_context {
	struct completion restart;
	struct bio *bio_in;
	struct bio *bio_out;
	struct bvec_iter iter_in;
	struct bvec_iter iter_out;
	u64 cc_sector;
	atomic_t cc_pending;
	union {
		struct skcipher_request *req;
		struct aead_request *req_aead;
	} r;

};

/*
 * per bio private data
 */
struct dm_crypt_io {
	struct crypt_config *cc;
	struct bio *base_bio;
	u8 *integrity_metadata;
	bool integrity_metadata_from_pool;
	struct work_struct work;
	struct tasklet_struct tasklet;

	struct convert_context ctx;

	atomic_t io_pending;
	blk_status_t error;
	sector_t sector;

	struct rb_node rb_node;
} CRYPTO_MINALIGN_ATTR;

struct dm_crypt_request {
	struct convert_context *ctx;
	struct scatterlist sg_in[4];
	struct scatterlist sg_out[4];
	u64 iv_sector;
};

struct crypt_config;

struct crypt_iv_operations {
	int (*ctr)(struct crypt_config *cc, struct dm_target *ti,
		   const char *opts);
	void (*dtr)(struct crypt_config *cc);
	int (*init)(struct crypt_config *cc);
	int (*wipe)(struct crypt_config *cc);
	int (*generator)(struct crypt_config *cc, u8 *iv,
			 struct dm_crypt_request *dmreq);
	int (*post)(struct crypt_config *cc, u8 *iv,
		    struct dm_crypt_request *dmreq);
};

struct iv_benbi_private {
	int shift;
};

#define LMK_SEED_SIZE 64 /* hash + 0 */
struct iv_lmk_private {
	struct crypto_shash *hash_tfm;
	u8 *seed;
};

#define TCW_WHITENING_SIZE 16
struct iv_tcw_private {
	struct crypto_shash *crc32_tfm;
	u8 *iv_seed;
	u8 *whitening;
};

#define ELEPHANT_MAX_KEY_SIZE 32
struct iv_elephant_private {
	struct crypto_skcipher *tfm;
};

/*
 * Crypt: maps a linear range of a block device
 * and encrypts / decrypts at the same time.
 */
enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID,
	     DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD,
	     DM_CRYPT_NO_READ_WORKQUEUE, DM_CRYPT_NO_WRITE_WORKQUEUE,
	     DM_CRYPT_WRITE_INLINE };

enum cipher_flags {
	CRYPT_MODE_INTEGRITY_AEAD,	/* Use authenticated mode for cipher */
	CRYPT_IV_LARGE_SECTORS,		/* Calculate IV from sector_size, not 512B sectors */
	CRYPT_ENCRYPT_PREPROCESS,	/* Must preprocess data for encryption (elephant) */
};

/*
 * The fields in here must be read only after initialization.
 */
struct crypt_config {
	struct dm_dev *dev;
	sector_t start;

	struct percpu_counter n_allocated_pages;

	struct workqueue_struct *io_queue;
	struct workqueue_struct *crypt_queue;

	spinlock_t write_thread_lock;
	struct task_struct *write_thread;
	struct rb_root write_tree;

	char *cipher_string;
	char *cipher_auth;
	char *key_string;

	const struct crypt_iv_operations *iv_gen_ops;
	union {
		struct iv_benbi_private benbi;
		struct iv_lmk_private lmk;
		struct iv_tcw_private tcw;
		struct iv_elephant_private elephant;
	} iv_gen_private;
	u64 iv_offset;
	unsigned int iv_size;
	unsigned short int sector_size;
	unsigned char sector_shift;

	union {
		struct crypto_skcipher **tfms;
		struct crypto_aead **tfms_aead;
	} cipher_tfm;
	unsigned tfms_count;
	unsigned long cipher_flags;

	/*
	 * Layout of each crypto request:
	 *
	 *   struct skcipher_request
	 *      context
	 *      padding
	 *   struct dm_crypt_request
	 *      padding
	 *   IV
	 *
	 * The padding is added so that dm_crypt_request and the IV are
	 * correctly aligned.
	 */
	unsigned int dmreq_start;

	unsigned int per_bio_data_size;

	unsigned long flags;
	unsigned int key_size;
	unsigned int key_parts;      /* independent parts in key buffer */
	unsigned int key_extra_size; /* additional keys length */
	unsigned int key_mac_size;   /* MAC key size for authenc(...) */

	unsigned int integrity_tag_size;
	unsigned int integrity_iv_size;
	unsigned int on_disk_tag_size;

	/*
	 * pool for per bio private data, crypto requests,
	 * encryption requests/buffer pages and integrity tags
	 */
	unsigned tag_pool_max_sectors;
	mempool_t tag_pool;
	mempool_t req_pool;
	mempool_t page_pool;

	struct bio_set bs;
	struct mutex bio_alloc_lock;

	u8 *authenc_key; /* space for keys in authenc() format (if used) */
	u8 key[];
};
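
/*
 * Illustrative sketch (not part of the driver logic) of how the request
 * layout documented in crypt_config is addressed at runtime; the helpers
 * dmreq_of_req() and iv_of_dmreq() further below implement exactly this:
 *
 *	req   = mempool_alloc(&cc->req_pool, GFP_NOIO);
 *	dmreq = (struct dm_crypt_request *)((char *)req + cc->dmreq_start);
 *	iv    = (u8 *)ALIGN((unsigned long)(dmreq + 1),
 *			    crypto_skcipher_alignmask(any_tfm(cc)) + 1);
 *
 * cc->dmreq_start is rounded up so that struct dm_crypt_request, and the
 * IV placed behind it, both end up correctly aligned for the cipher.
 */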

#define MIN_IOS		64
#define MAX_TAG_SIZE	480
#define POOL_ENTRY_SIZE	512

static DEFINE_SPINLOCK(dm_crypt_clients_lock);
static unsigned dm_crypt_clients_n = 0;
static volatile unsigned long dm_crypt_pages_per_client;
#define DM_CRYPT_MEMORY_PERCENT			2
#define DM_CRYPT_MIN_PAGES_PER_CLIENT		(BIO_MAX_VECS * 16)

static void clone_init(struct dm_crypt_io *, struct bio *);
static void kcryptd_queue_crypt(struct dm_crypt_io *io);
static struct scatterlist *crypt_get_sg_data(struct crypt_config *cc,
					     struct scatterlist *sg);

static bool crypt_integrity_aead(struct crypt_config *cc);

/*
 * Use this to access cipher attributes that are independent of the key.
 */
static struct crypto_skcipher *any_tfm(struct crypt_config *cc)
{
	return cc->cipher_tfm.tfms[0];
}

static struct crypto_aead *any_tfm_aead(struct crypt_config *cc)
{
	return cc->cipher_tfm.tfms_aead[0];
}

/*
 * Different IV generation algorithms:
 *
 * plain: the initial vector is the 32-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * plain64: the initial vector is the 64-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * plain64be: the initial vector is the 64-bit big-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * essiv: "encrypted sector|salt initial vector", the sector number is
 *        encrypted with the bulk cipher using a salt as key. The salt
 *        should be derived from the bulk cipher's key via hashing.
 *
 * benbi: the 64-bit "big-endian 'narrow block'-count", starting at 1
 *        (needed for LRW-32-AES and possible other narrow block modes)
 *
 * null: the initial vector is always zero.  Provides compatibility with
 *       obsolete loop_fish2 devices.  Do not use for new devices.
 *
 * lmk:  Compatible implementation of the block chaining mode used
 *       by the Loop-AES block device encryption system
 *       designed by Jari Ruusu. See http://loop-aes.sourceforge.net/
 *       It operates on full 512 byte sectors and uses CBC
 *       with an IV derived from the sector number, the data and
 *       optionally extra IV seed.
 *       This means that after decryption the first block
 *       of sector must be tweaked according to decrypted data.
 *       Loop-AES can use three encryption schemes:
 *         version 1: is plain aes-cbc mode
 *         version 2: uses 64 multikey scheme with lmk IV generator
 *         version 3: the same as version 2 with additional IV seed
 *                    (it uses 65 keys, last key is used as IV seed)
 *
 * tcw:  Compatible implementation of the block chaining mode used
 *       by the TrueCrypt device encryption system (prior to version 4.1).
 *       For more info see: https://gitlab.com/cryptsetup/cryptsetup/wikis/TrueCryptOnDiskFormat
 *       It operates on full 512 byte sectors and uses CBC
 *       with an IV derived from initial key and the sector number.
 *       In addition, whitening value is applied on every sector, whitening
 *       is calculated from initial key, sector number and mixed using CRC32.
 *       Note that this encryption scheme is vulnerable to watermarking attacks
 *       and should be used for old compatible containers access only.
 *
 * eboiv: Encrypted byte-offset IV (used in Bitlocker in CBC mode)
 *        The IV is encrypted little-endian byte-offset (with the same key
 *        and cipher as the volume).
 *
 * elephant: The extended version of eboiv with additional Elephant diffuser
 *           used with Bitlocker CBC mode.
 *           This mode was used in older Windows systems
 *           https://download.microsoft.com/download/0/2/3/0238acaf-d3bf-4a6d-b3d6-0a0be4bbb36e/bitlockercipher200608.pdf
 */

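/*
 * Usage sketch (examples only, not an exhaustive list): the IV generator
 * is selected by the last element of the cipher specification in the
 * mapping table, e.g. "aes-cbc-essiv:sha256", "aes-xts-plain64" or
 * "aes-cbc-lmk".  A hypothetical dmsetup invocation could look like:
 *
 *	dmsetup create cryptdev --table \
 *	  "0 <sectors> crypt aes-cbc-essiv:sha256 <hex key> 0 <device> 0"
 */
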
static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv,
			      struct dm_crypt_request *dmreq)
{
	memset(iv, 0, cc->iv_size);
	*(__le32 *)iv = cpu_to_le32(dmreq->iv_sector & 0xffffffff);

	return 0;
}

static int crypt_iv_plain64_gen(struct crypt_config *cc, u8 *iv,
				struct dm_crypt_request *dmreq)
{
	memset(iv, 0, cc->iv_size);
	*(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);

	return 0;
}

static int crypt_iv_plain64be_gen(struct crypt_config *cc, u8 *iv,
				  struct dm_crypt_request *dmreq)
{
	memset(iv, 0, cc->iv_size);
	/* iv_size is at least of size u64; usually it is 16 bytes */
	*(__be64 *)&iv[cc->iv_size - sizeof(u64)] = cpu_to_be64(dmreq->iv_sector);

	return 0;
}

static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv,
			      struct dm_crypt_request *dmreq)
{
	/*
	 * ESSIV encryption of the IV is now handled by the crypto API,
	 * so just pass the plain sector number here.
	 */
	memset(iv, 0, cc->iv_size);
	*(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);

	return 0;
}

static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	unsigned bs;
	int log;

	if (crypt_integrity_aead(cc))
		bs = crypto_aead_blocksize(any_tfm_aead(cc));
	else
		bs = crypto_skcipher_blocksize(any_tfm(cc));
	log = ilog2(bs);

	/*
	 * We need to calculate how far we must shift the sector count
	 * to get the cipher block count; this shift is used in _gen.
	 */

	if (1 << log != bs) {
		ti->error = "cipher blocksize is not a power of 2";
		return -EINVAL;
	}

	if (log > 9) {
		ti->error = "cipher blocksize is > 512";
		return -EINVAL;
	}

	cc->iv_gen_private.benbi.shift = 9 - log;

	return 0;
}

static void crypt_iv_benbi_dtr(struct crypt_config *cc)
{
}

static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv,
			      struct dm_crypt_request *dmreq)
{
	__be64 val;

	memset(iv, 0, cc->iv_size - sizeof(u64)); /* rest is cleared below */

	val = cpu_to_be64(((u64)dmreq->iv_sector << cc->iv_gen_private.benbi.shift) + 1);
	put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64)));

	return 0;
}
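
/*
 * Worked example (hypothetical numbers) for the benbi shift computed in
 * crypt_iv_benbi_ctr() above: with a 16-byte cipher block, log = 4 and
 * shift = 9 - 4 = 5, so a 512-byte sector holds 2^5 = 32 narrow blocks
 * and sector n yields the 1-based block count (n << 5) + 1 used as IV.
 */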

static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv,
			     struct dm_crypt_request *dmreq)
{
	memset(iv, 0, cc->iv_size);

	return 0;
}

static void crypt_iv_lmk_dtr(struct crypt_config *cc)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;

	if (lmk->hash_tfm && !IS_ERR(lmk->hash_tfm))
		crypto_free_shash(lmk->hash_tfm);
	lmk->hash_tfm = NULL;

	kfree_sensitive(lmk->seed);
	lmk->seed = NULL;
}

static int crypt_iv_lmk_ctr(struct crypt_config *cc, struct dm_target *ti,
			    const char *opts)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;

	if (cc->sector_size != (1 << SECTOR_SHIFT)) {
		ti->error = "Unsupported sector size for LMK";
		return -EINVAL;
	}

	lmk->hash_tfm = crypto_alloc_shash("md5", 0,
					   CRYPTO_ALG_ALLOCATES_MEMORY);
	if (IS_ERR(lmk->hash_tfm)) {
		ti->error = "Error initializing LMK hash";
		return PTR_ERR(lmk->hash_tfm);
	}

	/* No seed in LMK version 2 */
	if (cc->key_parts == cc->tfms_count) {
		lmk->seed = NULL;
		return 0;
	}

	lmk->seed = kzalloc(LMK_SEED_SIZE, GFP_KERNEL);
	if (!lmk->seed) {
		crypt_iv_lmk_dtr(cc);
		ti->error = "Error kmallocing seed storage in LMK";
		return -ENOMEM;
	}

	return 0;
}

static int crypt_iv_lmk_init(struct crypt_config *cc)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
	int subkey_size = cc->key_size / cc->key_parts;

	/* LMK seed is on the position of LMK_KEYS + 1 key */
	if (lmk->seed)
		memcpy(lmk->seed, cc->key + (cc->tfms_count * subkey_size),
		       crypto_shash_digestsize(lmk->hash_tfm));

	return 0;
}

static int crypt_iv_lmk_wipe(struct crypt_config *cc)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;

	if (lmk->seed)
		memset(lmk->seed, 0, LMK_SEED_SIZE);

	return 0;
}

static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv,
			    struct dm_crypt_request *dmreq,
			    u8 *data)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
	SHASH_DESC_ON_STACK(desc, lmk->hash_tfm);
	struct md5_state md5state;
	__le32 buf[4];
	int i, r;

	desc->tfm = lmk->hash_tfm;

	r = crypto_shash_init(desc);
	if (r)
		return r;

	if (lmk->seed) {
		r = crypto_shash_update(desc, lmk->seed, LMK_SEED_SIZE);
		if (r)
			return r;
	}

	/* Sector is always 512B, block size 16, add data of blocks 1-31 */
	r = crypto_shash_update(desc, data + 16, 16 * 31);
	if (r)
		return r;

	/* Sector is cropped to 56 bits here */
	buf[0] = cpu_to_le32(dmreq->iv_sector & 0xFFFFFFFF);
	buf[1] = cpu_to_le32((((u64)dmreq->iv_sector >> 32) & 0x00FFFFFF) | 0x80000000);
	buf[2] = cpu_to_le32(4024);
	buf[3] = 0;
	r = crypto_shash_update(desc, (u8 *)buf, sizeof(buf));
	if (r)
		return r;

	/* No MD5 padding here */
	r = crypto_shash_export(desc, &md5state);
	if (r)
		return r;

	for (i = 0; i < MD5_HASH_WORDS; i++)
		__cpu_to_le32s(&md5state.hash[i]);
	memcpy(iv, &md5state.hash, cc->iv_size);

	return 0;
}

static int crypt_iv_lmk_gen(struct crypt_config *cc, u8 *iv,
			    struct dm_crypt_request *dmreq)
{
	struct scatterlist *sg;
	u8 *src;
	int r = 0;

	if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
		sg = crypt_get_sg_data(cc, dmreq->sg_in);
		src = kmap_atomic(sg_page(sg));
		r = crypt_iv_lmk_one(cc, iv, dmreq, src + sg->offset);
		kunmap_atomic(src);
	} else
		memset(iv, 0, cc->iv_size);

	return r;
}

static int crypt_iv_lmk_post(struct crypt_config *cc, u8 *iv,
			     struct dm_crypt_request *dmreq)
{
	struct scatterlist *sg;
	u8 *dst;
	int r;

	if (bio_data_dir(dmreq->ctx->bio_in) == WRITE)
		return 0;

	sg = crypt_get_sg_data(cc, dmreq->sg_out);
	dst = kmap_atomic(sg_page(sg));
	r = crypt_iv_lmk_one(cc, iv, dmreq, dst + sg->offset);

	/* Tweak the first block of plaintext sector */
	if (!r)
		crypto_xor(dst + sg->offset, iv, cc->iv_size);

	kunmap_atomic(dst);
	return r;
}

static void crypt_iv_tcw_dtr(struct crypt_config *cc)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;

	kfree_sensitive(tcw->iv_seed);
	tcw->iv_seed = NULL;
	kfree_sensitive(tcw->whitening);
	tcw->whitening = NULL;

	if (tcw->crc32_tfm && !IS_ERR(tcw->crc32_tfm))
		crypto_free_shash(tcw->crc32_tfm);
	tcw->crc32_tfm = NULL;
}

static int crypt_iv_tcw_ctr(struct crypt_config *cc, struct dm_target *ti,
			    const char *opts)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;

	if (cc->sector_size != (1 << SECTOR_SHIFT)) {
		ti->error = "Unsupported sector size for TCW";
		return -EINVAL;
	}

	if (cc->key_size <= (cc->iv_size + TCW_WHITENING_SIZE)) {
		ti->error = "Wrong key size for TCW";
		return -EINVAL;
	}

	tcw->crc32_tfm = crypto_alloc_shash("crc32", 0,
					    CRYPTO_ALG_ALLOCATES_MEMORY);
	if (IS_ERR(tcw->crc32_tfm)) {
		ti->error = "Error initializing CRC32 in TCW";
		return PTR_ERR(tcw->crc32_tfm);
	}

	tcw->iv_seed = kzalloc(cc->iv_size, GFP_KERNEL);
	tcw->whitening = kzalloc(TCW_WHITENING_SIZE, GFP_KERNEL);
	if (!tcw->iv_seed || !tcw->whitening) {
		crypt_iv_tcw_dtr(cc);
		ti->error = "Error allocating seed storage in TCW";
		return -ENOMEM;
	}

	return 0;
}

static int crypt_iv_tcw_init(struct crypt_config *cc)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
	int key_offset = cc->key_size - cc->iv_size - TCW_WHITENING_SIZE;

	memcpy(tcw->iv_seed, &cc->key[key_offset], cc->iv_size);
	memcpy(tcw->whitening, &cc->key[key_offset + cc->iv_size],
	       TCW_WHITENING_SIZE);

	return 0;
}

static int crypt_iv_tcw_wipe(struct crypt_config *cc)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;

	memset(tcw->iv_seed, 0, cc->iv_size);
	memset(tcw->whitening, 0, TCW_WHITENING_SIZE);

	return 0;
}

static int crypt_iv_tcw_whitening(struct crypt_config *cc,
				  struct dm_crypt_request *dmreq,
				  u8 *data)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
	__le64 sector = cpu_to_le64(dmreq->iv_sector);
	u8 buf[TCW_WHITENING_SIZE];
	SHASH_DESC_ON_STACK(desc, tcw->crc32_tfm);
	int i, r;

	/* xor whitening with sector number */
	crypto_xor_cpy(buf, tcw->whitening, (u8 *)&sector, 8);
	crypto_xor_cpy(&buf[8], tcw->whitening + 8, (u8 *)&sector, 8);

	/* calculate crc32 for every 32bit part and xor it */
	desc->tfm = tcw->crc32_tfm;
	for (i = 0; i < 4; i++) {
		r = crypto_shash_init(desc);
		if (r)
			goto out;
		r = crypto_shash_update(desc, &buf[i * 4], 4);
		if (r)
			goto out;
		r = crypto_shash_final(desc, &buf[i * 4]);
		if (r)
			goto out;
	}
	crypto_xor(&buf[0], &buf[12], 4);
	crypto_xor(&buf[4], &buf[8], 4);

	/* apply whitening (8 bytes) to whole sector */
	for (i = 0; i < ((1 << SECTOR_SHIFT) / 8); i++)
		crypto_xor(data + i * 8, buf, 8);
out:
	memzero_explicit(buf, sizeof(buf));
	return r;
}
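
/*
 * A compact restatement (sketch only) of the whitening computed above,
 * with w = the 16-byte whitening seed and s = the sector number as le64:
 *
 *	buf[0..15] = w ^ (s || s)
 *	buf[i*4..] = crc32(buf[i*4 .. i*4+3])	for i = 0..3
 *	wh[0..7]   = (buf[0..3]^buf[12..15]) || (buf[4..7]^buf[8..11])
 *
 * and wh is then XORed over each 8-byte chunk of the 512-byte sector.
 */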

static int crypt_iv_tcw_gen(struct crypt_config *cc, u8 *iv,
			    struct dm_crypt_request *dmreq)
{
	struct scatterlist *sg;
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
	__le64 sector = cpu_to_le64(dmreq->iv_sector);
	u8 *src;
	int r = 0;

	/* Remove whitening from ciphertext */
	if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) {
		sg = crypt_get_sg_data(cc, dmreq->sg_in);
		src = kmap_atomic(sg_page(sg));
		r = crypt_iv_tcw_whitening(cc, dmreq, src + sg->offset);
		kunmap_atomic(src);
	}

	/* Calculate IV */
	crypto_xor_cpy(iv, tcw->iv_seed, (u8 *)&sector, 8);
	if (cc->iv_size > 8)
		crypto_xor_cpy(&iv[8], tcw->iv_seed + 8, (u8 *)&sector,
			       cc->iv_size - 8);

	return r;
}

static int crypt_iv_tcw_post(struct crypt_config *cc, u8 *iv,
			     struct dm_crypt_request *dmreq)
{
	struct scatterlist *sg;
	u8 *dst;
	int r;

	if (bio_data_dir(dmreq->ctx->bio_in) != WRITE)
		return 0;

	/* Apply whitening on ciphertext */
	sg = crypt_get_sg_data(cc, dmreq->sg_out);
	dst = kmap_atomic(sg_page(sg));
	r = crypt_iv_tcw_whitening(cc, dmreq, dst + sg->offset);
	kunmap_atomic(dst);

	return r;
}

static int crypt_iv_random_gen(struct crypt_config *cc, u8 *iv,
			       struct dm_crypt_request *dmreq)
{
	/* Used only for writes; there must be additional space to store the IV */
	get_random_bytes(iv, cc->iv_size);
	return 0;
}

static int crypt_iv_eboiv_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	if (crypt_integrity_aead(cc)) {
		ti->error = "AEAD transforms not supported for EBOIV";
		return -EINVAL;
	}

	if (crypto_skcipher_blocksize(any_tfm(cc)) != cc->iv_size) {
		ti->error = "Block size of EBOIV cipher does "
			    "not match IV size of block cipher";
		return -EINVAL;
	}

	return 0;
}

static int crypt_iv_eboiv_gen(struct crypt_config *cc, u8 *iv,
			      struct dm_crypt_request *dmreq)
{
	u8 buf[MAX_CIPHER_BLOCKSIZE] __aligned(__alignof__(__le64));
	struct skcipher_request *req;
	struct scatterlist src, dst;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	req = skcipher_request_alloc(any_tfm(cc), GFP_NOIO);
	if (!req)
		return -ENOMEM;

	memset(buf, 0, cc->iv_size);
	*(__le64 *)buf = cpu_to_le64(dmreq->iv_sector * cc->sector_size);

	sg_init_one(&src, page_address(ZERO_PAGE(0)), cc->iv_size);
	sg_init_one(&dst, iv, cc->iv_size);
	skcipher_request_set_crypt(req, &src, &dst, cc->iv_size, buf);
	skcipher_request_set_callback(req, 0, crypto_req_done, &wait);
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
	skcipher_request_free(req);

	return err;
}
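
/*
 * Sketch of the computation above: a zero block is encrypted with the
 * volume key, using le64(dmreq->iv_sector * cc->sector_size) - the byte
 * offset of the sector - as the chaining IV.  For a CBC bulk cipher this
 * reduces to IV(n) = E_K(le64(n * sector_size)), i.e. the "encrypted
 * byte-offset IV" the documentation comment describes.
 */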

static void crypt_iv_elephant_dtr(struct crypt_config *cc)
{
	struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant;

	crypto_free_skcipher(elephant->tfm);
	elephant->tfm = NULL;
}

static int crypt_iv_elephant_ctr(struct crypt_config *cc, struct dm_target *ti,
				 const char *opts)
{
	struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant;
	int r;

	elephant->tfm = crypto_alloc_skcipher("ecb(aes)", 0,
					      CRYPTO_ALG_ALLOCATES_MEMORY);
	if (IS_ERR(elephant->tfm)) {
		r = PTR_ERR(elephant->tfm);
		elephant->tfm = NULL;
		return r;
	}

	r = crypt_iv_eboiv_ctr(cc, ti, NULL);
	if (r)
		crypt_iv_elephant_dtr(cc);
	return r;
}

static void diffuser_disk_to_cpu(u32 *d, size_t n)
{
#ifndef __LITTLE_ENDIAN
	int i;

	for (i = 0; i < n; i++)
		d[i] = le32_to_cpu((__le32)d[i]);
#endif
}

static void diffuser_cpu_to_disk(__le32 *d, size_t n)
{
#ifndef __LITTLE_ENDIAN
	int i;

	for (i = 0; i < n; i++)
		d[i] = cpu_to_le32((u32)d[i]);
#endif
}

static void diffuser_a_decrypt(u32 *d, size_t n)
{
	int i, i1, i2, i3;

	for (i = 0; i < 5; i++) {
		i1 = 0;
		i2 = n - 2;
		i3 = n - 5;

		while (i1 < (n - 1)) {
			d[i1] += d[i2] ^ (d[i3] << 9 | d[i3] >> 23);
			i1++; i2++; i3++;

			if (i3 >= n)
				i3 -= n;

			d[i1] += d[i2] ^ d[i3];
			i1++; i2++; i3++;

			if (i2 >= n)
				i2 -= n;

			d[i1] += d[i2] ^ (d[i3] << 13 | d[i3] >> 19);
			i1++; i2++; i3++;

			d[i1] += d[i2] ^ d[i3];
			i1++; i2++; i3++;
		}
	}
}

static void diffuser_a_encrypt(u32 *d, size_t n)
{
	int i, i1, i2, i3;

	for (i = 0; i < 5; i++) {
		i1 = n - 1;
		i2 = n - 2 - 1;
		i3 = n - 5 - 1;

		while (i1 > 0) {
			d[i1] -= d[i2] ^ d[i3];
			i1--; i2--; i3--;

			d[i1] -= d[i2] ^ (d[i3] << 13 | d[i3] >> 19);
			i1--; i2--; i3--;

			if (i2 < 0)
				i2 += n;

			d[i1] -= d[i2] ^ d[i3];
			i1--; i2--; i3--;

			if (i3 < 0)
				i3 += n;

			d[i1] -= d[i2] ^ (d[i3] << 9 | d[i3] >> 23);
			i1--; i2--; i3--;
		}
	}
}

static void diffuser_b_decrypt(u32 *d, size_t n)
{
	int i, i1, i2, i3;

	for (i = 0; i < 3; i++) {
		i1 = 0;
		i2 = 2;
		i3 = 5;

		while (i1 < (n - 1)) {
			d[i1] += d[i2] ^ d[i3];
			i1++; i2++; i3++;

			d[i1] += d[i2] ^ (d[i3] << 10 | d[i3] >> 22);
			i1++; i2++; i3++;

			if (i2 >= n)
				i2 -= n;

			d[i1] += d[i2] ^ d[i3];
			i1++; i2++; i3++;

			if (i3 >= n)
				i3 -= n;

			d[i1] += d[i2] ^ (d[i3] << 25 | d[i3] >> 7);
			i1++; i2++; i3++;
		}
	}
}

static void diffuser_b_encrypt(u32 *d, size_t n)
{
	int i, i1, i2, i3;

	for (i = 0; i < 3; i++) {
		i1 = n - 1;
		i2 = 2 - 1;
		i3 = 5 - 1;

		while (i1 > 0) {
			d[i1] -= d[i2] ^ (d[i3] << 25 | d[i3] >> 7);
			i1--; i2--; i3--;

			if (i3 < 0)
				i3 += n;

			d[i1] -= d[i2] ^ d[i3];
			i1--; i2--; i3--;

			if (i2 < 0)
				i2 += n;

			d[i1] -= d[i2] ^ (d[i3] << 10 | d[i3] >> 22);
			i1--; i2--; i3--;

			d[i1] -= d[i2] ^ d[i3];
			i1--; i2--; i3--;
		}
	}
}

static int crypt_iv_elephant(struct crypt_config *cc, struct dm_crypt_request *dmreq)
{
	struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant;
	u8 *es, *ks, *data, *data2, *data_offset;
	struct skcipher_request *req;
	struct scatterlist *sg, *sg2, src, dst;
	DECLARE_CRYPTO_WAIT(wait);
	int i, r;

	req = skcipher_request_alloc(elephant->tfm, GFP_NOIO);
	es = kzalloc(16, GFP_NOIO); /* Key for AES */
	ks = kzalloc(32, GFP_NOIO); /* Elephant sector key */

	if (!req || !es || !ks) {
		r = -ENOMEM;
		goto out;
	}

	*(__le64 *)es = cpu_to_le64(dmreq->iv_sector * cc->sector_size);

	/* E(Ks, e(s)) */
	sg_init_one(&src, es, 16);
	sg_init_one(&dst, ks, 16);
	skcipher_request_set_crypt(req, &src, &dst, 16, NULL);
	skcipher_request_set_callback(req, 0, crypto_req_done, &wait);
	r = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
	if (r)
		goto out;

	/* E(Ks, e'(s)) */
	es[15] = 0x80;
	sg_init_one(&dst, &ks[16], 16);
	r = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
	if (r)
		goto out;

	sg = crypt_get_sg_data(cc, dmreq->sg_out);
	data = kmap_atomic(sg_page(sg));
	data_offset = data + sg->offset;

	/* Cannot modify original bio, copy to sg_out and apply Elephant to it */
	if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
		sg2 = crypt_get_sg_data(cc, dmreq->sg_in);
		data2 = kmap_atomic(sg_page(sg2));
		memcpy(data_offset, data2 + sg2->offset, cc->sector_size);
		kunmap_atomic(data2);
	}

	if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) {
		diffuser_disk_to_cpu((u32 *)data_offset, cc->sector_size / sizeof(u32));
		diffuser_b_decrypt((u32 *)data_offset, cc->sector_size / sizeof(u32));
		diffuser_a_decrypt((u32 *)data_offset, cc->sector_size / sizeof(u32));
		diffuser_cpu_to_disk((__le32 *)data_offset, cc->sector_size / sizeof(u32));
	}

	for (i = 0; i < (cc->sector_size / 32); i++)
		crypto_xor(data_offset + i * 32, ks, 32);

	if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
		diffuser_disk_to_cpu((u32 *)data_offset, cc->sector_size / sizeof(u32));
		diffuser_a_encrypt((u32 *)data_offset, cc->sector_size / sizeof(u32));
		diffuser_b_encrypt((u32 *)data_offset, cc->sector_size / sizeof(u32));
		diffuser_cpu_to_disk((__le32 *)data_offset, cc->sector_size / sizeof(u32));
	}

	kunmap_atomic(data);
out:
	kfree_sensitive(ks);
	kfree_sensitive(es);
	skcipher_request_free(req);
	return r;
}

static int crypt_iv_elephant_gen(struct crypt_config *cc, u8 *iv,
				 struct dm_crypt_request *dmreq)
{
	int r;

	if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
		r = crypt_iv_elephant(cc, dmreq);
		if (r)
			return r;
	}

	return crypt_iv_eboiv_gen(cc, iv, dmreq);
}

static int crypt_iv_elephant_post(struct crypt_config *cc, u8 *iv,
				  struct dm_crypt_request *dmreq)
{
	if (bio_data_dir(dmreq->ctx->bio_in) != WRITE)
		return crypt_iv_elephant(cc, dmreq);

	return 0;
}

static int crypt_iv_elephant_init(struct crypt_config *cc)
{
	struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant;
	int key_offset = cc->key_size - cc->key_extra_size;

	return crypto_skcipher_setkey(elephant->tfm, &cc->key[key_offset], cc->key_extra_size);
}

static int crypt_iv_elephant_wipe(struct crypt_config *cc)
{
	struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant;
	u8 key[ELEPHANT_MAX_KEY_SIZE];

	memset(key, 0, cc->key_extra_size);
	return crypto_skcipher_setkey(elephant->tfm, key, cc->key_extra_size);
}

static const struct crypt_iv_operations crypt_iv_plain_ops = {
	.generator = crypt_iv_plain_gen
};

static const struct crypt_iv_operations crypt_iv_plain64_ops = {
	.generator = crypt_iv_plain64_gen
};

static const struct crypt_iv_operations crypt_iv_plain64be_ops = {
	.generator = crypt_iv_plain64be_gen
};

static const struct crypt_iv_operations crypt_iv_essiv_ops = {
	.generator = crypt_iv_essiv_gen
};

static const struct crypt_iv_operations crypt_iv_benbi_ops = {
	.ctr	   = crypt_iv_benbi_ctr,
	.dtr	   = crypt_iv_benbi_dtr,
	.generator = crypt_iv_benbi_gen
};

static const struct crypt_iv_operations crypt_iv_null_ops = {
	.generator = crypt_iv_null_gen
};

static const struct crypt_iv_operations crypt_iv_lmk_ops = {
	.ctr	   = crypt_iv_lmk_ctr,
	.dtr	   = crypt_iv_lmk_dtr,
	.init	   = crypt_iv_lmk_init,
	.wipe	   = crypt_iv_lmk_wipe,
	.generator = crypt_iv_lmk_gen,
	.post	   = crypt_iv_lmk_post
};

static const struct crypt_iv_operations crypt_iv_tcw_ops = {
	.ctr	   = crypt_iv_tcw_ctr,
	.dtr	   = crypt_iv_tcw_dtr,
	.init	   = crypt_iv_tcw_init,
	.wipe	   = crypt_iv_tcw_wipe,
	.generator = crypt_iv_tcw_gen,
	.post	   = crypt_iv_tcw_post
};

static const struct crypt_iv_operations crypt_iv_random_ops = {
	.generator = crypt_iv_random_gen
};

static const struct crypt_iv_operations crypt_iv_eboiv_ops = {
	.ctr	   = crypt_iv_eboiv_ctr,
	.generator = crypt_iv_eboiv_gen
};

static const struct crypt_iv_operations crypt_iv_elephant_ops = {
	.ctr	   = crypt_iv_elephant_ctr,
	.dtr	   = crypt_iv_elephant_dtr,
	.init	   = crypt_iv_elephant_init,
	.wipe	   = crypt_iv_elephant_wipe,
	.generator = crypt_iv_elephant_gen,
	.post	   = crypt_iv_elephant_post
};

/*
 * Integrity extensions
 */
static bool crypt_integrity_aead(struct crypt_config *cc)
{
	return test_bit(CRYPT_MODE_INTEGRITY_AEAD, &cc->cipher_flags);
}

static bool crypt_integrity_hmac(struct crypt_config *cc)
{
	return crypt_integrity_aead(cc) && cc->key_mac_size;
}

/* Get sg containing data */
static struct scatterlist *crypt_get_sg_data(struct crypt_config *cc,
					     struct scatterlist *sg)
{
	if (unlikely(crypt_integrity_aead(cc)))
		return &sg[2];

	return sg;
}

static int dm_crypt_integrity_io_alloc(struct dm_crypt_io *io, struct bio *bio)
{
	struct bio_integrity_payload *bip;
	unsigned int tag_len;
	int ret;

	if (!bio_sectors(bio) || !io->cc->on_disk_tag_size)
		return 0;

	bip = bio_integrity_alloc(bio, GFP_NOIO, 1);
	if (IS_ERR(bip))
		return PTR_ERR(bip);

	tag_len = io->cc->on_disk_tag_size * (bio_sectors(bio) >> io->cc->sector_shift);

	bip->bip_iter.bi_size = tag_len;
	bip->bip_iter.bi_sector = io->cc->start + io->sector;

	ret = bio_integrity_add_page(bio, virt_to_page(io->integrity_metadata),
				     tag_len, offset_in_page(io->integrity_metadata));
	if (unlikely(ret != tag_len))
		return -ENOMEM;

	return 0;
}

static int crypt_integrity_ctr(struct crypt_config *cc, struct dm_target *ti)
{
#ifdef CONFIG_BLK_DEV_INTEGRITY
	struct blk_integrity *bi = blk_get_integrity(cc->dev->bdev->bd_disk);
	struct mapped_device *md = dm_table_get_md(ti->table);

	/* From now on, we require an underlying device with our integrity profile */
	if (!bi || strcasecmp(bi->profile->name, "DM-DIF-EXT-TAG")) {
		ti->error = "Integrity profile not supported.";
		return -EINVAL;
	}

	if (bi->tag_size != cc->on_disk_tag_size ||
	    bi->tuple_size != cc->on_disk_tag_size) {
		ti->error = "Integrity profile tag size mismatch.";
		return -EINVAL;
	}
	if (1 << bi->interval_exp != cc->sector_size) {
		ti->error = "Integrity profile sector size mismatch.";
		return -EINVAL;
	}

	if (crypt_integrity_aead(cc)) {
		cc->integrity_tag_size = cc->on_disk_tag_size - cc->integrity_iv_size;
		DMDEBUG("%s: Integrity AEAD, tag size %u, IV size %u.", dm_device_name(md),
		       cc->integrity_tag_size, cc->integrity_iv_size);

		if (crypto_aead_setauthsize(any_tfm_aead(cc), cc->integrity_tag_size)) {
			ti->error = "Integrity AEAD auth tag size is not supported.";
			return -EINVAL;
		}
	} else if (cc->integrity_iv_size)
		DMDEBUG("%s: Additional per-sector space %u bytes for IV.", dm_device_name(md),
		       cc->integrity_iv_size);

	if ((cc->integrity_tag_size + cc->integrity_iv_size) != bi->tag_size) {
		ti->error = "Not enough space for integrity tag in the profile.";
		return -EINVAL;
	}

	return 0;
#else
	ti->error = "Integrity profile not supported.";
	return -EINVAL;
#endif
}

static void crypt_convert_init(struct crypt_config *cc,
			       struct convert_context *ctx,
			       struct bio *bio_out, struct bio *bio_in,
			       sector_t sector)
{
	ctx->bio_in = bio_in;
	ctx->bio_out = bio_out;
	if (bio_in)
		ctx->iter_in = bio_in->bi_iter;
	if (bio_out)
		ctx->iter_out = bio_out->bi_iter;
	ctx->cc_sector = sector + cc->iv_offset;
	init_completion(&ctx->restart);
}

static struct dm_crypt_request *dmreq_of_req(struct crypt_config *cc,
					     void *req)
{
	return (struct dm_crypt_request *)((char *)req + cc->dmreq_start);
}

static void *req_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq)
{
	return (void *)((char *)dmreq - cc->dmreq_start);
}

static u8 *iv_of_dmreq(struct crypt_config *cc,
		       struct dm_crypt_request *dmreq)
{
	if (crypt_integrity_aead(cc))
		return (u8 *)ALIGN((unsigned long)(dmreq + 1),
			crypto_aead_alignmask(any_tfm_aead(cc)) + 1);
	else
		return (u8 *)ALIGN((unsigned long)(dmreq + 1),
			crypto_skcipher_alignmask(any_tfm(cc)) + 1);
}

static u8 *org_iv_of_dmreq(struct crypt_config *cc,
			   struct dm_crypt_request *dmreq)
{
	return iv_of_dmreq(cc, dmreq) + cc->iv_size;
}

static __le64 *org_sector_of_dmreq(struct crypt_config *cc,
				   struct dm_crypt_request *dmreq)
{
	u8 *ptr = iv_of_dmreq(cc, dmreq) + cc->iv_size + cc->iv_size;
	return (__le64 *) ptr;
}

static unsigned int *org_tag_of_dmreq(struct crypt_config *cc,
				      struct dm_crypt_request *dmreq)
{
	u8 *ptr = iv_of_dmreq(cc, dmreq) + cc->iv_size +
		  cc->iv_size + sizeof(uint64_t);
	return (unsigned int *)ptr;
}

static void *tag_from_dmreq(struct crypt_config *cc,
			    struct dm_crypt_request *dmreq)
{
	struct convert_context *ctx = dmreq->ctx;
	struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);

	return &io->integrity_metadata[*org_tag_of_dmreq(cc, dmreq) *
		cc->on_disk_tag_size];
}

static void *iv_tag_from_dmreq(struct crypt_config *cc,
			       struct dm_crypt_request *dmreq)
{
	return tag_from_dmreq(cc, dmreq) + cc->integrity_tag_size;
}

static int crypt_convert_block_aead(struct crypt_config *cc,
				    struct convert_context *ctx,
				    struct aead_request *req,
				    unsigned int tag_offset)
{
	struct bio_vec bv_in = bio_iter_iovec(ctx->bio_in, ctx->iter_in);
	struct bio_vec bv_out = bio_iter_iovec(ctx->bio_out, ctx->iter_out);
	struct dm_crypt_request *dmreq;
	u8 *iv, *org_iv, *tag_iv, *tag;
	__le64 *sector;
	int r = 0;

	BUG_ON(cc->integrity_iv_size && cc->integrity_iv_size != cc->iv_size);

	/* Reject unexpected unaligned bio. */
	if (unlikely(bv_in.bv_len & (cc->sector_size - 1)))
		return -EIO;

	dmreq = dmreq_of_req(cc, req);
	dmreq->iv_sector = ctx->cc_sector;
	if (test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags))
		dmreq->iv_sector >>= cc->sector_shift;
	dmreq->ctx = ctx;

	*org_tag_of_dmreq(cc, dmreq) = tag_offset;

	sector = org_sector_of_dmreq(cc, dmreq);
	*sector = cpu_to_le64(ctx->cc_sector - cc->iv_offset);

	iv = iv_of_dmreq(cc, dmreq);
	org_iv = org_iv_of_dmreq(cc, dmreq);
	tag = tag_from_dmreq(cc, dmreq);
	tag_iv = iv_tag_from_dmreq(cc, dmreq);

	/* AEAD request:
	 * |----- AAD -------|------ DATA -------|-- AUTH TAG --|
	 * | (authenticated) | (auth+encryption) |              |
	 * | sector_LE |  IV |  sector in/out    |  tag in/out  |
	 */
	sg_init_table(dmreq->sg_in, 4);
	sg_set_buf(&dmreq->sg_in[0], sector, sizeof(uint64_t));
	sg_set_buf(&dmreq->sg_in[1], org_iv, cc->iv_size);
	sg_set_page(&dmreq->sg_in[2], bv_in.bv_page, cc->sector_size, bv_in.bv_offset);
	sg_set_buf(&dmreq->sg_in[3], tag, cc->integrity_tag_size);

	sg_init_table(dmreq->sg_out, 4);
	sg_set_buf(&dmreq->sg_out[0], sector, sizeof(uint64_t));
	sg_set_buf(&dmreq->sg_out[1], org_iv, cc->iv_size);
	sg_set_page(&dmreq->sg_out[2], bv_out.bv_page, cc->sector_size, bv_out.bv_offset);
	sg_set_buf(&dmreq->sg_out[3], tag, cc->integrity_tag_size);

	if (cc->iv_gen_ops) {
		/* For READs use IV stored in integrity metadata */
		if (cc->integrity_iv_size && bio_data_dir(ctx->bio_in) != WRITE) {
			memcpy(org_iv, tag_iv, cc->iv_size);
		} else {
			r = cc->iv_gen_ops->generator(cc, org_iv, dmreq);
			if (r < 0)
				return r;
			/* Store generated IV in integrity metadata */
			if (cc->integrity_iv_size)
				memcpy(tag_iv, org_iv, cc->iv_size);
		}
		/* Working copy of IV, to be modified in crypto API */
		memcpy(iv, org_iv, cc->iv_size);
	}

	aead_request_set_ad(req, sizeof(uint64_t) + cc->iv_size);
	if (bio_data_dir(ctx->bio_in) == WRITE) {
		aead_request_set_crypt(req, dmreq->sg_in, dmreq->sg_out,
				       cc->sector_size, iv);
		r = crypto_aead_encrypt(req);
		if (cc->integrity_tag_size + cc->integrity_iv_size != cc->on_disk_tag_size)
			memset(tag + cc->integrity_tag_size + cc->integrity_iv_size, 0,
			       cc->on_disk_tag_size - (cc->integrity_tag_size + cc->integrity_iv_size));
	} else {
		aead_request_set_crypt(req, dmreq->sg_in, dmreq->sg_out,
				       cc->sector_size + cc->integrity_tag_size, iv);
		r = crypto_aead_decrypt(req);
	}

	if (r == -EBADMSG) {
		char b[BDEVNAME_SIZE];
		sector_t s = le64_to_cpu(*sector);

		DMERR_LIMIT("%s: INTEGRITY AEAD ERROR, sector %llu",
			    bio_devname(ctx->bio_in, b), s);
		dm_audit_log_bio(DM_MSG_PREFIX, "integrity-aead",
				 ctx->bio_in, s, 0);
	}

	if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
		r = cc->iv_gen_ops->post(cc, org_iv, dmreq);

	bio_advance_iter(ctx->bio_in, &ctx->iter_in, cc->sector_size);
	bio_advance_iter(ctx->bio_out, &ctx->iter_out, cc->sector_size);

	return r;
}
1384
1385static int crypt_convert_block_skcipher(struct crypt_config *cc,
1386 struct convert_context *ctx,
1387 struct skcipher_request *req,
1388 unsigned int tag_offset)
1389{
1390 struct bio_vec bv_in = bio_iter_iovec(ctx->bio_in, ctx->iter_in);
1391 struct bio_vec bv_out = bio_iter_iovec(ctx->bio_out, ctx->iter_out);
1392 struct scatterlist *sg_in, *sg_out;
1393 struct dm_crypt_request *dmreq;
Milan Brozef43aa32017-01-04 20:23:54 +01001394 u8 *iv, *org_iv, *tag_iv;
Christoph Hellwigc13b5482019-04-04 18:33:34 +02001395 __le64 *sector;
Milan Brozef43aa32017-01-04 20:23:54 +01001396 int r = 0;
1397
Milan Broz8f0009a2017-03-16 15:39:44 +01001398 /* Reject unexpected unaligned bio. */
Mikulas Patocka0440d5c2017-11-07 10:35:57 -05001399 if (unlikely(bv_in.bv_len & (cc->sector_size - 1)))
Milan Broz8f0009a2017-03-16 15:39:44 +01001400 return -EIO;
1401
Milan Brozef43aa32017-01-04 20:23:54 +01001402 dmreq = dmreq_of_req(cc, req);
1403 dmreq->iv_sector = ctx->cc_sector;
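	/*
	 * With the iv_large_sectors flag one IV covers a whole cc->sector_size
	 * chunk, e.g. for 4096-byte sectors sector_shift is 3 and eight
	 * 512-byte sectors share one IV.
	 */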
Milan Broz8f0009a2017-03-16 15:39:44 +01001404 if (test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags))
Mikulas Patockaff3af922017-03-23 10:23:14 -04001405 dmreq->iv_sector >>= cc->sector_shift;
Milan Brozef43aa32017-01-04 20:23:54 +01001406 dmreq->ctx = ctx;
1407
1408 *org_tag_of_dmreq(cc, dmreq) = tag_offset;
1409
1410 iv = iv_of_dmreq(cc, dmreq);
1411 org_iv = org_iv_of_dmreq(cc, dmreq);
1412 tag_iv = iv_tag_from_dmreq(cc, dmreq);
1413
1414 sector = org_sector_of_dmreq(cc, dmreq);
1415 *sector = cpu_to_le64(ctx->cc_sector - cc->iv_offset);
1416
1417 /* For skcipher we use only the first sg item */
1418 sg_in = &dmreq->sg_in[0];
1419 sg_out = &dmreq->sg_out[0];
1420
1421 sg_init_table(sg_in, 1);
Milan Broz8f0009a2017-03-16 15:39:44 +01001422 sg_set_page(sg_in, bv_in.bv_page, cc->sector_size, bv_in.bv_offset);
Milan Brozef43aa32017-01-04 20:23:54 +01001423
1424 sg_init_table(sg_out, 1);
Milan Broz8f0009a2017-03-16 15:39:44 +01001425 sg_set_page(sg_out, bv_out.bv_page, cc->sector_size, bv_out.bv_offset);
Milan Brozef43aa32017-01-04 20:23:54 +01001426
1427 if (cc->iv_gen_ops) {
1428 /* For READs use IV stored in integrity metadata */
1429 if (cc->integrity_iv_size && bio_data_dir(ctx->bio_in) != WRITE) {
1430 memcpy(org_iv, tag_iv, cc->integrity_iv_size);
1431 } else {
1432 r = cc->iv_gen_ops->generator(cc, org_iv, dmreq);
1433 if (r < 0)
1434 return r;
Milan Brozbbb16582020-01-03 09:20:22 +01001435 /* Data may already have been preprocessed by the generator */
1436 if (test_bit(CRYPT_ENCRYPT_PREPROCESS, &cc->cipher_flags))
1437 sg_in = sg_out;
Milan Brozef43aa32017-01-04 20:23:54 +01001438 /* Store generated IV in integrity metadata */
1439 if (cc->integrity_iv_size)
1440 memcpy(tag_iv, org_iv, cc->integrity_iv_size);
1441 }
1442 /* Working copy of IV, to be modified in crypto API */
1443 memcpy(iv, org_iv, cc->iv_size);
1444 }
1445
Milan Broz8f0009a2017-03-16 15:39:44 +01001446 skcipher_request_set_crypt(req, sg_in, sg_out, cc->sector_size, iv);
Milan Broz3a7f6c92008-02-08 02:11:14 +00001447
1448 if (bio_data_dir(ctx->bio_in) == WRITE)
Herbert Xubbdb23b2016-01-24 21:16:36 +08001449 r = crypto_skcipher_encrypt(req);
Milan Broz3a7f6c92008-02-08 02:11:14 +00001450 else
Herbert Xubbdb23b2016-01-24 21:16:36 +08001451 r = crypto_skcipher_decrypt(req);
Milan Broz3a7f6c92008-02-08 02:11:14 +00001452
Milan Broz2dc53272011-01-13 19:59:54 +00001453 if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
Milan Brozef43aa32017-01-04 20:23:54 +01001454 r = cc->iv_gen_ops->post(cc, org_iv, dmreq);
1455
Milan Broz8f0009a2017-03-16 15:39:44 +01001456 bio_advance_iter(ctx->bio_in, &ctx->iter_in, cc->sector_size);
1457 bio_advance_iter(ctx->bio_out, &ctx->iter_out, cc->sector_size);
Milan Broz2dc53272011-01-13 19:59:54 +00001458
Milan Broz3a7f6c92008-02-08 02:11:14 +00001459 return r;
Milan Broz01482b72008-02-08 02:11:04 +00001460}
1461
Milan Broz95497a92008-02-08 02:11:12 +00001462static void kcryptd_async_done(struct crypto_async_request *async_req,
1463 int error);
Andi Kleenc0297722011-01-13 19:59:53 +00001464
Ignat Korchagind68b2952021-01-04 14:59:48 +00001465static int crypt_alloc_req_skcipher(struct crypt_config *cc,
Milan Brozef43aa32017-01-04 20:23:54 +01001466 struct convert_context *ctx)
Milan Brozddd42ed2008-02-08 02:11:07 +00001467{
Mikulas Patockac66029f2012-07-27 15:08:05 +01001468 unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1);
Andi Kleenc0297722011-01-13 19:59:53 +00001469
Ignat Korchagind68b2952021-01-04 14:59:48 +00001470 if (!ctx->r.req) {
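		/* GFP_ATOMIC because this may run in softirq (tasklet) context. */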
1471 ctx->r.req = mempool_alloc(&cc->req_pool, in_interrupt() ? GFP_ATOMIC : GFP_NOIO);
1472 if (!ctx->r.req)
1473 return -ENOMEM;
1474 }
Andi Kleenc0297722011-01-13 19:59:53 +00001475
Milan Brozef43aa32017-01-04 20:23:54 +01001476 skcipher_request_set_tfm(ctx->r.req, cc->cipher_tfm.tfms[key_index]);
Milan Broz54cea3f2015-05-15 17:00:25 +02001477
1478 /*
1479 * Use REQ_MAY_BACKLOG so a cipher driver internally backlogs
1480 * requests if driver request queue is full.
1481 */
Milan Brozef43aa32017-01-04 20:23:54 +01001482 skcipher_request_set_callback(ctx->r.req,
Mikulas Patocka432061b2018-09-05 09:17:45 -04001483 CRYPTO_TFM_REQ_MAY_BACKLOG,
Milan Brozef43aa32017-01-04 20:23:54 +01001484 kcryptd_async_done, dmreq_of_req(cc, ctx->r.req));
Ignat Korchagind68b2952021-01-04 14:59:48 +00001485
1486 return 0;
Milan Brozddd42ed2008-02-08 02:11:07 +00001487}
1488
Ignat Korchagind68b2952021-01-04 14:59:48 +00001489static int crypt_alloc_req_aead(struct crypt_config *cc,
Milan Brozef43aa32017-01-04 20:23:54 +01001490 struct convert_context *ctx)
1491{
Ignat Korchagin004b8ae2021-01-19 20:40:15 +00001492 if (!ctx->r.req_aead) {
1493 ctx->r.req_aead = mempool_alloc(&cc->req_pool, in_interrupt() ? GFP_ATOMIC : GFP_NOIO);
1494 if (!ctx->r.req_aead)
Ignat Korchagind68b2952021-01-04 14:59:48 +00001495 return -ENOMEM;
1496 }
Milan Brozef43aa32017-01-04 20:23:54 +01001497
1498 aead_request_set_tfm(ctx->r.req_aead, cc->cipher_tfm.tfms_aead[0]);
1499
1500 /*
1501 * Use REQ_MAY_BACKLOG so a cipher driver internally backlogs
1502 * requests if driver request queue is full.
1503 */
1504 aead_request_set_callback(ctx->r.req_aead,
Mikulas Patocka432061b2018-09-05 09:17:45 -04001505 CRYPTO_TFM_REQ_MAY_BACKLOG,
Milan Brozef43aa32017-01-04 20:23:54 +01001506 kcryptd_async_done, dmreq_of_req(cc, ctx->r.req_aead));
Ignat Korchagind68b2952021-01-04 14:59:48 +00001507
1508 return 0;
Milan Brozef43aa32017-01-04 20:23:54 +01001509}
1510
Ignat Korchagind68b2952021-01-04 14:59:48 +00001511static int crypt_alloc_req(struct crypt_config *cc,
Milan Brozef43aa32017-01-04 20:23:54 +01001512 struct convert_context *ctx)
1513{
Milan Broz33d2f092017-03-16 15:39:40 +01001514 if (crypt_integrity_aead(cc))
Ignat Korchagind68b2952021-01-04 14:59:48 +00001515 return crypt_alloc_req_aead(cc, ctx);
Milan Brozef43aa32017-01-04 20:23:54 +01001516 else
Ignat Korchagind68b2952021-01-04 14:59:48 +00001517 return crypt_alloc_req_skcipher(cc, ctx);
Milan Brozef43aa32017-01-04 20:23:54 +01001518}
1519
1520static void crypt_free_req_skcipher(struct crypt_config *cc,
1521 struct skcipher_request *req, struct bio *base_bio)
Mikulas Patocka298a9fa2014-03-28 15:51:55 -04001522{
1523 struct dm_crypt_io *io = dm_per_bio_data(base_bio, cc->per_bio_data_size);
1524
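	/*
	 * The request may be embedded right after struct dm_crypt_io;
	 * only mempool-allocated requests are freed here.
	 */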
Herbert Xubbdb23b2016-01-24 21:16:36 +08001525 if ((struct skcipher_request *)(io + 1) != req)
Kent Overstreet6f1c8192018-05-20 18:25:53 -04001526 mempool_free(req, &cc->req_pool);
Mikulas Patocka298a9fa2014-03-28 15:51:55 -04001527}
1528
Milan Brozef43aa32017-01-04 20:23:54 +01001529static void crypt_free_req_aead(struct crypt_config *cc,
1530 struct aead_request *req, struct bio *base_bio)
1531{
1532 struct dm_crypt_io *io = dm_per_bio_data(base_bio, cc->per_bio_data_size);
1533
1534 if ((struct aead_request *)(io + 1) != req)
Kent Overstreet6f1c8192018-05-20 18:25:53 -04001535 mempool_free(req, &cc->req_pool);
Milan Brozef43aa32017-01-04 20:23:54 +01001536}
1537
1538static void crypt_free_req(struct crypt_config *cc, void *req, struct bio *base_bio)
1539{
Milan Broz33d2f092017-03-16 15:39:40 +01001540 if (crypt_integrity_aead(cc))
Milan Brozef43aa32017-01-04 20:23:54 +01001541 crypt_free_req_aead(cc, req, base_bio);
1542 else
1543 crypt_free_req_skcipher(cc, req, base_bio);
1544}
1545
Linus Torvalds1da177e2005-04-16 15:20:36 -07001546/*
1547 * Encrypt / decrypt data from one bio to another (the two may be the same bio)
1548 */
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +02001549static blk_status_t crypt_convert(struct crypt_config *cc,
Ignat Korchagin8abec362021-01-04 14:59:47 +00001550 struct convert_context *ctx, bool atomic, bool reset_pending)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001551{
Milan Brozef43aa32017-01-04 20:23:54 +01001552 unsigned int tag_offset = 0;
Mikulas Patockaff3af922017-03-23 10:23:14 -04001553 unsigned int sector_step = cc->sector_size >> SECTOR_SHIFT;
Milan Broz3f1e9072008-03-28 14:16:07 -07001554 int r;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001555
Ignat Korchagin8abec362021-01-04 14:59:47 +00001556 /*
1557 * If reset_pending is set, we are dealing with the bio for the first time;
1558 * otherwise we are continuing work on the previous bio, so do not touch
1559 * the cc_pending counter.
1560 */
1561 if (reset_pending)
1562 atomic_set(&ctx->cc_pending, 1);
Milan Brozc8081612008-10-10 13:37:08 +01001563
Kent Overstreet003b5c52013-10-11 15:45:43 -07001564 while (ctx->iter_in.bi_size && ctx->iter_out.bi_size) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001565
Ignat Korchagind68b2952021-01-04 14:59:48 +00001566 r = crypt_alloc_req(cc, ctx);
1567 if (r) {
1568 complete(&ctx->restart);
1569 return BLK_STS_DEV_RESOURCE;
1570 }
1571
Mikulas Patocka40b62292012-07-27 15:08:04 +01001572 atomic_inc(&ctx->cc_pending);
Milan Broz3f1e9072008-03-28 14:16:07 -07001573
Milan Broz33d2f092017-03-16 15:39:40 +01001574 if (crypt_integrity_aead(cc))
Milan Brozef43aa32017-01-04 20:23:54 +01001575 r = crypt_convert_block_aead(cc, ctx, ctx->r.req_aead, tag_offset);
1576 else
1577 r = crypt_convert_block_skcipher(cc, ctx, ctx->r.req, tag_offset);
Milan Broz3a7f6c92008-02-08 02:11:14 +00001578
1579 switch (r) {
Milan Broz54cea3f2015-05-15 17:00:25 +02001580 /*
1581 * The request was queued by a crypto driver,
1582 * but the driver's request queue is full; let's wait.
1583 */
Milan Broz3a7f6c92008-02-08 02:11:14 +00001584 case -EBUSY:
Ignat Korchagin8abec362021-01-04 14:59:47 +00001585 if (in_interrupt()) {
1586 if (try_wait_for_completion(&ctx->restart)) {
1587 /*
1588 * we don't have to block to wait for completion,
1589 * so proceed
1590 */
1591 } else {
1592 /*
1593 * we can't wait for completion without blocking;
1594 * exit and continue processing in a workqueue
1595 */
1596 ctx->r.req = NULL;
1597 ctx->cc_sector += sector_step;
1598 tag_offset++;
1599 return BLK_STS_DEV_RESOURCE;
1600 }
1601 } else {
1602 wait_for_completion(&ctx->restart);
1603 }
Wolfram Sang16735d02013-11-14 14:32:02 -08001604 reinit_completion(&ctx->restart);
Gustavo A. R. Silvadf561f662020-08-23 17:36:59 -05001605 fallthrough;
Milan Broz54cea3f2015-05-15 17:00:25 +02001606 /*
1607 * The request is queued and processed asynchronously,
1608 * completion function kcryptd_async_done() will be called.
1609 */
Rabin Vincentc0403ec2015-05-05 15:15:56 +02001610 case -EINPROGRESS:
Milan Brozef43aa32017-01-04 20:23:54 +01001611 ctx->r.req = NULL;
Milan Broz8f0009a2017-03-16 15:39:44 +01001612 ctx->cc_sector += sector_step;
Mikulas Patocka583fe742017-04-18 16:51:54 -04001613 tag_offset++;
Milan Broz3a7f6c92008-02-08 02:11:14 +00001614 continue;
Milan Broz54cea3f2015-05-15 17:00:25 +02001615 /*
1616 * The request was already processed (synchronously).
1617 */
Milan Broz3f1e9072008-03-28 14:16:07 -07001618 case 0:
Mikulas Patocka40b62292012-07-27 15:08:04 +01001619 atomic_dec(&ctx->cc_pending);
Milan Broz8f0009a2017-03-16 15:39:44 +01001620 ctx->cc_sector += sector_step;
Mikulas Patocka583fe742017-04-18 16:51:54 -04001621 tag_offset++;
Ignat Korchagin39d42fa2020-07-06 18:37:31 +01001622 if (!atomic)
1623 cond_resched();
Milan Broz3f1e9072008-03-28 14:16:07 -07001624 continue;
Milan Brozef43aa32017-01-04 20:23:54 +01001625 /*
1626 * There was a data integrity error.
1627 */
1628 case -EBADMSG:
1629 atomic_dec(&ctx->cc_pending);
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +02001630 return BLK_STS_PROTECTION;
Milan Brozef43aa32017-01-04 20:23:54 +01001631 /*
1632 * There was an error while processing the request.
1633 */
Milan Broz3f1e9072008-03-28 14:16:07 -07001634 default:
Mikulas Patocka40b62292012-07-27 15:08:04 +01001635 atomic_dec(&ctx->cc_pending);
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +02001636 return BLK_STS_IOERR;
Milan Broz3f1e9072008-03-28 14:16:07 -07001637 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001638 }
1639
Milan Broz3f1e9072008-03-28 14:16:07 -07001640 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001641}
1642
Mikulas Patockacf2f1ab2015-02-13 08:23:52 -05001643static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone);
1644
Linus Torvalds1da177e2005-04-16 15:20:36 -07001645/*
1646 * Generate a new unfragmented bio with the given size
Mike Snitzer586b2862015-09-09 21:34:51 -04001647 * This should never violate the device limitations (but only because
1648 * max_segment_size is being constrained to PAGE_SIZE).
Mikulas Patocka7145c242015-02-13 08:24:41 -05001649 *
1650 * This function may be called concurrently. If we allocate from the mempool
1651 * concurrently, there is a possibility of deadlock. For example, if two
1652 * processes, each wanting 256 pages, allocate from a mempool of 256 pages
1653 * concurrently, they may deadlock in a situation where both processes
1654 * have allocated 128 pages and the mempool is exhausted.
1655 *
1656 * In order to avoid this scenario we allocate the pages under a mutex.
1657 *
1658 * In order to not degrade performance with excessive locking, we try
1659 * non-blocking allocations without a mutex first but on failure we fallback
1660 * to blocking allocations with a mutex.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001661 */
Mikulas Patockacf2f1ab2015-02-13 08:23:52 -05001662static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001663{
Alasdair G Kergon49a8a922012-07-27 15:08:05 +01001664 struct crypt_config *cc = io->cc;
Milan Broz8b004452006-10-03 01:15:37 -07001665 struct bio *clone;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001666 unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
Mikulas Patocka7145c242015-02-13 08:24:41 -05001667 gfp_t gfp_mask = GFP_NOWAIT | __GFP_HIGHMEM;
1668 unsigned i, len, remaining_size;
Milan Broz91e10622007-12-13 14:16:10 +00001669 struct page *page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001670
Mikulas Patocka7145c242015-02-13 08:24:41 -05001671retry:
Mel Gormand0164ad2015-11-06 16:28:21 -08001672 if (unlikely(gfp_mask & __GFP_DIRECT_RECLAIM))
Mikulas Patocka7145c242015-02-13 08:24:41 -05001673 mutex_lock(&cc->bio_alloc_lock);
1674
Kent Overstreet6f1c8192018-05-20 18:25:53 -04001675 clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, &cc->bs);
Milan Broz8b004452006-10-03 01:15:37 -07001676 if (!clone)
Milan Brozef43aa32017-01-04 20:23:54 +01001677 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001678
Olaf Kirch027581f2007-05-09 02:32:52 -07001679 clone_init(io, clone);
Milan Broz6a24c712006-10-03 01:15:40 -07001680
Mikulas Patocka7145c242015-02-13 08:24:41 -05001681 remaining_size = size;
1682
Olaf Kirchf97380b2007-05-09 02:32:54 -07001683 for (i = 0; i < nr_iovecs; i++) {
Kent Overstreet6f1c8192018-05-20 18:25:53 -04001684 page = mempool_alloc(&cc->page_pool, gfp_mask);
Mikulas Patocka7145c242015-02-13 08:24:41 -05001685 if (!page) {
1686 crypt_free_buffer_pages(cc, clone);
1687 bio_put(clone);
Mel Gormand0164ad2015-11-06 16:28:21 -08001688 gfp_mask |= __GFP_DIRECT_RECLAIM;
Mikulas Patocka7145c242015-02-13 08:24:41 -05001689 goto retry;
1690 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001691
Mikulas Patocka7145c242015-02-13 08:24:41 -05001692 len = (remaining_size > PAGE_SIZE) ? PAGE_SIZE : remaining_size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001693
Ming Lei0dae7fe2016-10-29 16:08:06 +08001694 bio_add_page(clone, page, len, 0);
Milan Broz91e10622007-12-13 14:16:10 +00001695
Mikulas Patocka7145c242015-02-13 08:24:41 -05001696 remaining_size -= len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001697 }
1698
Milan Brozef43aa32017-01-04 20:23:54 +01001699 /* Allocate space for integrity tags */
1700 if (dm_crypt_integrity_io_alloc(io, clone)) {
1701 crypt_free_buffer_pages(cc, clone);
1702 bio_put(clone);
1703 clone = NULL;
1704 }
1705out:
Mel Gormand0164ad2015-11-06 16:28:21 -08001706 if (unlikely(gfp_mask & __GFP_DIRECT_RECLAIM))
Mikulas Patocka7145c242015-02-13 08:24:41 -05001707 mutex_unlock(&cc->bio_alloc_lock);
1708
Milan Broz8b004452006-10-03 01:15:37 -07001709 return clone;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001710}
1711
Neil Brown644bd2f2007-10-16 13:48:46 +02001712static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001713{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001714 struct bio_vec *bv;
Ming Lei6dc4f102019-02-15 19:13:19 +08001715 struct bvec_iter_all iter_all;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001716
Christoph Hellwig2b070cf2019-04-25 09:03:00 +02001717 bio_for_each_segment_all(bv, clone, iter_all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001718 BUG_ON(!bv->bv_page);
Kent Overstreet6f1c8192018-05-20 18:25:53 -04001719 mempool_free(bv->bv_page, &cc->page_pool);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001720 }
1721}
1722
Mikulas Patocka298a9fa2014-03-28 15:51:55 -04001723static void crypt_io_init(struct dm_crypt_io *io, struct crypt_config *cc,
1724 struct bio *bio, sector_t sector)
Milan Brozdc440d1e2008-10-10 13:37:03 +01001725{
Alasdair G Kergon49a8a922012-07-27 15:08:05 +01001726 io->cc = cc;
Milan Brozdc440d1e2008-10-10 13:37:03 +01001727 io->base_bio = bio;
1728 io->sector = sector;
1729 io->error = 0;
Milan Brozef43aa32017-01-04 20:23:54 +01001730 io->ctx.r.req = NULL;
1731 io->integrity_metadata = NULL;
1732 io->integrity_metadata_from_pool = false;
Mikulas Patocka40b62292012-07-27 15:08:04 +01001733 atomic_set(&io->io_pending, 0);
Milan Brozdc440d1e2008-10-10 13:37:03 +01001734}
1735
Milan Broz3e1a8bd2008-10-10 13:37:02 +01001736static void crypt_inc_pending(struct dm_crypt_io *io)
1737{
Mikulas Patocka40b62292012-07-27 15:08:04 +01001738 atomic_inc(&io->io_pending);
Milan Broz3e1a8bd2008-10-10 13:37:02 +01001739}
1740
Ignat Korchagin8e14f612021-01-09 15:17:06 +00001741static void kcryptd_io_bio_endio(struct work_struct *work)
1742{
1743 struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
1744 bio_endio(io->base_bio);
1745}
1746
Linus Torvalds1da177e2005-04-16 15:20:36 -07001747/*
1748 * One of the bios was finished. Check for completion of
1749 * the whole request and correctly clean up the buffer.
1750 */
Milan Broz5742fd72008-02-08 02:10:43 +00001751static void crypt_dec_pending(struct dm_crypt_io *io)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001752{
Alasdair G Kergon49a8a922012-07-27 15:08:05 +01001753 struct crypt_config *cc = io->cc;
Milan Brozb35f8ca2009-03-16 17:44:36 +00001754 struct bio *base_bio = io->base_bio;
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +02001755 blk_status_t error = io->error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001756
Mikulas Patocka40b62292012-07-27 15:08:04 +01001757 if (!atomic_dec_and_test(&io->io_pending))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001758 return;
1759
Milan Brozef43aa32017-01-04 20:23:54 +01001760 if (io->ctx.r.req)
1761 crypt_free_req(cc, io->ctx.r.req, base_bio);
1762
1763 if (unlikely(io->integrity_metadata_from_pool))
Kent Overstreet6f1c8192018-05-20 18:25:53 -04001764 mempool_free(io->integrity_metadata, &io->cc->tag_pool);
Milan Brozef43aa32017-01-04 20:23:54 +01001765 else
1766 kfree(io->integrity_metadata);
Milan Brozb35f8ca2009-03-16 17:44:36 +00001767
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +02001768 base_bio->bi_status = error;
Ignat Korchagin8e14f612021-01-09 15:17:06 +00001769
1770 /*
1771 * If we are running this function from our tasklet,
1772 * we can't call bio_endio() here, because it will call
1773 * clone_endio() from dm.c, which in turn will
1774 * free the current struct dm_crypt_io structure with
1775 * our tasklet. In this case we need to delay bio_endio()
1776 * execution to after the tasklet is done and dequeued.
1777 */
1778 if (tasklet_trylock(&io->tasklet)) {
1779 tasklet_unlock(&io->tasklet);
1780 bio_endio(base_bio);
1781 return;
1782 }
1783
1784 INIT_WORK(&io->work, kcryptd_io_bio_endio);
1785 queue_work(cc->io_queue, &io->work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001786}
1787
1788/*
Milan Brozcabf08e2007-10-19 22:38:58 +01001789 * kcryptd/kcryptd_io:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001790 *
1791 * Needed because it would be very unwise to do decryption in an
Milan Broz23541d22006-10-03 01:15:39 -07001792 * interrupt context.
Milan Brozcabf08e2007-10-19 22:38:58 +01001793 *
1794 * kcryptd performs the actual encryption or decryption.
1795 *
1796 * kcryptd_io performs the IO submission.
1797 *
1798 * They must be separated as otherwise the final stages could be
1799 * starved by new requests which can block in the first stages due
1800 * to memory allocation.
Andi Kleenc0297722011-01-13 19:59:53 +00001801 *
1802 * The work is done per-CPU, globally for all dm-crypt instances.
1803 * They should not depend on each other and do not block.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001804 */
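/*
 * Read path as implemented below: kcryptd_queue_read() ->
 * kcryptd_io_read() submits the clone bio; crypt_endio() then calls
 * kcryptd_queue_crypt() -> kcryptd_crypt_read_convert() to decrypt.
 */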
Christoph Hellwig4246a0b2015-07-20 15:29:37 +02001805static void crypt_endio(struct bio *clone)
Milan Broz8b004452006-10-03 01:15:37 -07001806{
Alasdair G Kergon028867a2007-07-12 17:26:32 +01001807 struct dm_crypt_io *io = clone->bi_private;
Alasdair G Kergon49a8a922012-07-27 15:08:05 +01001808 struct crypt_config *cc = io->cc;
Milan Brozee7a4912008-02-08 02:10:46 +00001809 unsigned rw = bio_data_dir(clone);
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +02001810 blk_status_t error;
Milan Broz8b004452006-10-03 01:15:37 -07001811
1812 /*
NeilBrown6712ecf2007-09-27 12:47:43 +02001813 * free the processed pages
Milan Broz8b004452006-10-03 01:15:37 -07001814 */
Milan Brozee7a4912008-02-08 02:10:46 +00001815 if (rw == WRITE)
Neil Brown644bd2f2007-10-16 13:48:46 +02001816 crypt_free_buffer_pages(cc, clone);
Milan Brozee7a4912008-02-08 02:10:46 +00001817
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +02001818 error = clone->bi_status;
Milan Brozee7a4912008-02-08 02:10:46 +00001819 bio_put(clone);
1820
Sasha Levin9b81c842015-08-10 19:05:18 -04001821 if (rw == READ && !error) {
Milan Brozee7a4912008-02-08 02:10:46 +00001822 kcryptd_queue_crypt(io);
1823 return;
NeilBrown6712ecf2007-09-27 12:47:43 +02001824 }
Milan Broz8b004452006-10-03 01:15:37 -07001825
Sasha Levin9b81c842015-08-10 19:05:18 -04001826 if (unlikely(error))
1827 io->error = error;
Milan Broz5742fd72008-02-08 02:10:43 +00001828
1829 crypt_dec_pending(io);
Milan Broz8b004452006-10-03 01:15:37 -07001830}
1831
Alasdair G Kergon028867a2007-07-12 17:26:32 +01001832static void clone_init(struct dm_crypt_io *io, struct bio *clone)
Milan Broz8b004452006-10-03 01:15:37 -07001833{
Alasdair G Kergon49a8a922012-07-27 15:08:05 +01001834 struct crypt_config *cc = io->cc;
Milan Broz8b004452006-10-03 01:15:37 -07001835
1836 clone->bi_private = io;
1837 clone->bi_end_io = crypt_endio;
Christoph Hellwig74d46992017-08-23 19:10:32 +02001838 bio_set_dev(clone, cc->dev->bdev);
Christoph Hellwigef295ec2016-10-28 08:48:16 -06001839 clone->bi_opf = io->base_bio->bi_opf;
Milan Broz8b004452006-10-03 01:15:37 -07001840}
1841
Milan Broz20c82532011-01-13 19:59:53 +00001842static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
Milan Broz8b004452006-10-03 01:15:37 -07001843{
Alasdair G Kergon49a8a922012-07-27 15:08:05 +01001844 struct crypt_config *cc = io->cc;
Milan Broz8b004452006-10-03 01:15:37 -07001845 struct bio *clone;
Milan Broz93e605c2006-10-03 01:15:38 -07001846
Milan Broz8b004452006-10-03 01:15:37 -07001847 /*
Mike Snitzer59779072015-04-09 16:53:24 -04001848 * We need the original biovec array in order to decrypt
1849 * the whole bio data *afterwards* -- thanks to immutable
1850 * biovecs we don't need to worry about the block layer
1851 * modifying the biovec array; so leverage bio_clone_fast().
Milan Broz8b004452006-10-03 01:15:37 -07001852 */
Kent Overstreet6f1c8192018-05-20 18:25:53 -04001853 clone = bio_clone_fast(io->base_bio, gfp, &cc->bs);
Jens Axboe7eaceac2011-03-10 08:52:07 +01001854 if (!clone)
Milan Broz20c82532011-01-13 19:59:53 +00001855 return 1;
Milan Broz8b004452006-10-03 01:15:37 -07001856
Milan Broz20c82532011-01-13 19:59:53 +00001857 crypt_inc_pending(io);
1858
Milan Broz8b004452006-10-03 01:15:37 -07001859 clone_init(io, clone);
Kent Overstreet4f024f32013-10-11 15:44:27 -07001860 clone->bi_iter.bi_sector = cc->start + io->sector;
Milan Broz8b004452006-10-03 01:15:37 -07001861
Milan Brozef43aa32017-01-04 20:23:54 +01001862 if (dm_crypt_integrity_io_alloc(io, clone)) {
1863 crypt_dec_pending(io);
1864 bio_put(clone);
1865 return 1;
1866 }
1867
Christoph Hellwiged00aab2020-07-01 10:59:44 +02001868 submit_bio_noacct(clone);
Milan Broz20c82532011-01-13 19:59:53 +00001869 return 0;
Milan Broz8b004452006-10-03 01:15:37 -07001870}
1871
Mikulas Patockadc267622015-02-13 08:25:59 -05001872static void kcryptd_io_read_work(struct work_struct *work)
Alasdair G Kergon395b1672008-02-08 02:10:52 +00001873{
1874 struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
1875
Mikulas Patockadc267622015-02-13 08:25:59 -05001876 crypt_inc_pending(io);
1877 if (kcryptd_io_read(io, GFP_NOIO))
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +02001878 io->error = BLK_STS_RESOURCE;
Mikulas Patockadc267622015-02-13 08:25:59 -05001879 crypt_dec_pending(io);
Alasdair G Kergon395b1672008-02-08 02:10:52 +00001880}
1881
Mikulas Patockadc267622015-02-13 08:25:59 -05001882static void kcryptd_queue_read(struct dm_crypt_io *io)
Alasdair G Kergon395b1672008-02-08 02:10:52 +00001883{
Alasdair G Kergon49a8a922012-07-27 15:08:05 +01001884 struct crypt_config *cc = io->cc;
Alasdair G Kergon395b1672008-02-08 02:10:52 +00001885
Mikulas Patockadc267622015-02-13 08:25:59 -05001886 INIT_WORK(&io->work, kcryptd_io_read_work);
Alasdair G Kergon395b1672008-02-08 02:10:52 +00001887 queue_work(cc->io_queue, &io->work);
1888}
1889
Mikulas Patockadc267622015-02-13 08:25:59 -05001890static void kcryptd_io_write(struct dm_crypt_io *io)
1891{
1892 struct bio *clone = io->ctx.bio_out;
1893
Christoph Hellwiged00aab2020-07-01 10:59:44 +02001894 submit_bio_noacct(clone);
Mikulas Patockadc267622015-02-13 08:25:59 -05001895}
1896
Mikulas Patockab3c5fd32015-02-13 08:27:41 -05001897#define crypt_io_from_node(node) rb_entry((node), struct dm_crypt_io, rb_node)
1898
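/*
 * Dedicated per-target write thread: dequeues encrypted bios and submits
 * them in sector order (via the rb-tree) so the block layer can merge
 * adjacent writes.
 */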
Mikulas Patockadc267622015-02-13 08:25:59 -05001899static int dmcrypt_write(void *data)
1900{
1901 struct crypt_config *cc = data;
Mikulas Patockab3c5fd32015-02-13 08:27:41 -05001902 struct dm_crypt_io *io;
1903
Mikulas Patockadc267622015-02-13 08:25:59 -05001904 while (1) {
Mikulas Patockab3c5fd32015-02-13 08:27:41 -05001905 struct rb_root write_tree;
Mikulas Patockadc267622015-02-13 08:25:59 -05001906 struct blk_plug plug;
1907
Mikulas Patockac7329ef2018-07-11 12:10:51 -04001908 spin_lock_irq(&cc->write_thread_lock);
Mikulas Patockadc267622015-02-13 08:25:59 -05001909continue_locked:
1910
Mikulas Patockab3c5fd32015-02-13 08:27:41 -05001911 if (!RB_EMPTY_ROOT(&cc->write_tree))
Mikulas Patockadc267622015-02-13 08:25:59 -05001912 goto pop_from_list;
1913
Rabin Vincentf659b102016-09-21 16:22:29 +02001914 set_current_state(TASK_INTERRUPTIBLE);
Mikulas Patockadc267622015-02-13 08:25:59 -05001915
Mikulas Patockac7329ef2018-07-11 12:10:51 -04001916 spin_unlock_irq(&cc->write_thread_lock);
Mikulas Patockadc267622015-02-13 08:25:59 -05001917
Rabin Vincentf659b102016-09-21 16:22:29 +02001918 if (unlikely(kthread_should_stop())) {
Davidlohr Bueso642fa442017-01-03 13:43:14 -08001919 set_current_state(TASK_RUNNING);
Rabin Vincentf659b102016-09-21 16:22:29 +02001920 break;
1921 }
1922
Mikulas Patockadc267622015-02-13 08:25:59 -05001923 schedule();
1924
Davidlohr Bueso642fa442017-01-03 13:43:14 -08001925 set_current_state(TASK_RUNNING);
Mikulas Patockac7329ef2018-07-11 12:10:51 -04001926 spin_lock_irq(&cc->write_thread_lock);
Mikulas Patockadc267622015-02-13 08:25:59 -05001927 goto continue_locked;
1928
1929pop_from_list:
Mikulas Patockab3c5fd32015-02-13 08:27:41 -05001930 write_tree = cc->write_tree;
1931 cc->write_tree = RB_ROOT;
Mikulas Patockac7329ef2018-07-11 12:10:51 -04001932 spin_unlock_irq(&cc->write_thread_lock);
Mikulas Patockadc267622015-02-13 08:25:59 -05001933
Mikulas Patockab3c5fd32015-02-13 08:27:41 -05001934 BUG_ON(rb_parent(write_tree.rb_node));
1935
1936 /*
1937 * Note: we cannot walk the tree here with rb_next because
1938 * the structures may be freed when kcryptd_io_write is called.
1939 */
Mikulas Patockadc267622015-02-13 08:25:59 -05001940 blk_start_plug(&plug);
1941 do {
Mikulas Patockab3c5fd32015-02-13 08:27:41 -05001942 io = crypt_io_from_node(rb_first(&write_tree));
1943 rb_erase(&io->rb_node, &write_tree);
Mikulas Patockadc267622015-02-13 08:25:59 -05001944 kcryptd_io_write(io);
Mikulas Patockab3c5fd32015-02-13 08:27:41 -05001945 } while (!RB_EMPTY_ROOT(&write_tree));
Mikulas Patockadc267622015-02-13 08:25:59 -05001946 blk_finish_plug(&plug);
1947 }
1948 return 0;
1949}
1950
Mikulas Patocka72c6e7a2012-03-28 18:41:22 +01001951static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
Milan Broz4e4eef62008-02-08 02:10:49 +00001952{
Milan Brozdec1ced2008-02-08 02:10:57 +00001953 struct bio *clone = io->ctx.bio_out;
Alasdair G Kergon49a8a922012-07-27 15:08:05 +01001954 struct crypt_config *cc = io->cc;
Mikulas Patockadc267622015-02-13 08:25:59 -05001955 unsigned long flags;
Mikulas Patockab3c5fd32015-02-13 08:27:41 -05001956 sector_t sector;
1957 struct rb_node **rbp, *parent;
Milan Brozdec1ced2008-02-08 02:10:57 +00001958
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +02001959 if (unlikely(io->error)) {
Milan Brozdec1ced2008-02-08 02:10:57 +00001960 crypt_free_buffer_pages(cc, clone);
1961 bio_put(clone);
Milan Broz6c031f42008-10-10 13:37:06 +01001962 crypt_dec_pending(io);
Milan Brozdec1ced2008-02-08 02:10:57 +00001963 return;
1964 }
1965
1966 /* crypt_convert should have filled the clone bio */
Kent Overstreet003b5c52013-10-11 15:45:43 -07001967 BUG_ON(io->ctx.iter_out.bi_size);
Milan Brozdec1ced2008-02-08 02:10:57 +00001968
Kent Overstreet4f024f32013-10-11 15:44:27 -07001969 clone->bi_iter.bi_sector = cc->start + io->sector;
Milan Broz899c95d2008-02-08 02:11:02 +00001970
Ignat Korchagin39d42fa2020-07-06 18:37:31 +01001971 if ((likely(!async) && test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags)) ||
1972 test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags)) {
Christoph Hellwiged00aab2020-07-01 10:59:44 +02001973 submit_bio_noacct(clone);
Mikulas Patocka0f5d8e62015-02-13 08:27:08 -05001974 return;
1975 }
1976
Mikulas Patockac7329ef2018-07-11 12:10:51 -04001977 spin_lock_irqsave(&cc->write_thread_lock, flags);
1978 if (RB_EMPTY_ROOT(&cc->write_tree))
1979 wake_up_process(cc->write_thread);
Mikulas Patockab3c5fd32015-02-13 08:27:41 -05001980 rbp = &cc->write_tree.rb_node;
1981 parent = NULL;
1982 sector = io->sector;
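	/* Insert io into the rb-tree, keeping writes sorted by sector. */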
1983 while (*rbp) {
1984 parent = *rbp;
1985 if (sector < crypt_io_from_node(parent)->sector)
1986 rbp = &(*rbp)->rb_left;
1987 else
1988 rbp = &(*rbp)->rb_right;
1989 }
1990 rb_link_node(&io->rb_node, parent, rbp);
1991 rb_insert_color(&io->rb_node, &cc->write_tree);
Mikulas Patockac7329ef2018-07-11 12:10:51 -04001992 spin_unlock_irqrestore(&cc->write_thread_lock, flags);
Milan Broz4e4eef62008-02-08 02:10:49 +00001993}
1994
Damien Le Moal8e225f02020-07-08 18:28:08 +09001995static bool kcryptd_crypt_write_inline(struct crypt_config *cc,
1996 struct convert_context *ctx)
1997
1998{
1999 if (!test_bit(DM_CRYPT_WRITE_INLINE, &cc->flags))
2000 return false;
2001
2002 /*
2003 * Note: zone append writes (REQ_OP_ZONE_APPEND) do not have ordering
2004 * constraints so they do not need to be issued inline by
2005 * kcryptd_crypt_write_convert().
2006 */
2007 switch (bio_op(ctx->bio_in)) {
2008 case REQ_OP_WRITE:
2009 case REQ_OP_WRITE_SAME:
2010 case REQ_OP_WRITE_ZEROES:
2011 return true;
2012 default:
2013 return false;
2014 }
2015}
2016
Ignat Korchagin8abec362021-01-04 14:59:47 +00002017static void kcryptd_crypt_write_continue(struct work_struct *work)
2018{
2019 struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
2020 struct crypt_config *cc = io->cc;
2021 struct convert_context *ctx = &io->ctx;
2022 int crypt_finished;
2023 sector_t sector = io->sector;
2024 blk_status_t r;
2025
2026 wait_for_completion(&ctx->restart);
2027 reinit_completion(&ctx->restart);
2028
2029 r = crypt_convert(cc, &io->ctx, true, false);
2030 if (r)
2031 io->error = r;
2032 crypt_finished = atomic_dec_and_test(&ctx->cc_pending);
2033 if (!crypt_finished && kcryptd_crypt_write_inline(cc, ctx)) {
2034 /* Wait for completion signaled by kcryptd_async_done() */
2035 wait_for_completion(&ctx->restart);
2036 crypt_finished = 1;
2037 }
2038
2039 /* Encryption was already finished, submit io now */
2040 if (crypt_finished) {
2041 kcryptd_crypt_write_io_submit(io, 0);
2042 io->sector = sector;
2043 }
2044
2045 crypt_dec_pending(io);
2046}
2047
Milan Brozfc5a5e92008-10-10 13:37:04 +01002048static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
Milan Broz8b004452006-10-03 01:15:37 -07002049{
Alasdair G Kergon49a8a922012-07-27 15:08:05 +01002050 struct crypt_config *cc = io->cc;
Damien Le Moal8e225f02020-07-08 18:28:08 +09002051 struct convert_context *ctx = &io->ctx;
Milan Broz8b004452006-10-03 01:15:37 -07002052 struct bio *clone;
Milan Brozc8081612008-10-10 13:37:08 +01002053 int crypt_finished;
Milan Brozb635b002008-10-21 17:45:00 +01002054 sector_t sector = io->sector;
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +02002055 blk_status_t r;
Milan Broz8b004452006-10-03 01:15:37 -07002056
Milan Broz93e605c2006-10-03 01:15:38 -07002057 /*
Milan Brozfc5a5e92008-10-10 13:37:04 +01002058 * Prevent io from disappearing until this function completes.
2059 */
2060 crypt_inc_pending(io);
Damien Le Moal8e225f02020-07-08 18:28:08 +09002061 crypt_convert_init(cc, ctx, NULL, io->base_bio, sector);
Milan Brozfc5a5e92008-10-10 13:37:04 +01002062
Mikulas Patockacf2f1ab2015-02-13 08:23:52 -05002063 clone = crypt_alloc_buffer(io, io->base_bio->bi_iter.bi_size);
2064 if (unlikely(!clone)) {
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +02002065 io->error = BLK_STS_IOERR;
Mikulas Patockacf2f1ab2015-02-13 08:23:52 -05002066 goto dec;
Milan Broz8b004452006-10-03 01:15:37 -07002067 }
Milan Broz899c95d2008-02-08 02:11:02 +00002068
Mikulas Patockacf2f1ab2015-02-13 08:23:52 -05002069 io->ctx.bio_out = clone;
2070 io->ctx.iter_out = clone->bi_iter;
2071
2072 sector += bio_sectors(clone);
2073
2074 crypt_inc_pending(io);
Damien Le Moal8e225f02020-07-08 18:28:08 +09002075 r = crypt_convert(cc, ctx,
Ignat Korchagin8abec362021-01-04 14:59:47 +00002076 test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags), true);
2077 /*
2078 * Crypto API backlogged the request because its queue was full
2079 * and we're in softirq context, so continue from a workqueue
2080 * (TODO: is it actually possible to be in softirq in the write path?)
2081 */
2082 if (r == BLK_STS_DEV_RESOURCE) {
2083 INIT_WORK(&io->work, kcryptd_crypt_write_continue);
2084 queue_work(cc->crypt_queue, &io->work);
2085 return;
2086 }
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +02002087 if (r)
Milan Brozef43aa32017-01-04 20:23:54 +01002088 io->error = r;
Damien Le Moal8e225f02020-07-08 18:28:08 +09002089 crypt_finished = atomic_dec_and_test(&ctx->cc_pending);
2090 if (!crypt_finished && kcryptd_crypt_write_inline(cc, ctx)) {
2091 /* Wait for completion signaled by kcryptd_async_done() */
2092 wait_for_completion(&ctx->restart);
2093 crypt_finished = 1;
2094 }
Mikulas Patockacf2f1ab2015-02-13 08:23:52 -05002095
2096 /* Encryption was already finished, submit io now */
2097 if (crypt_finished) {
2098 kcryptd_crypt_write_io_submit(io, 0);
2099 io->sector = sector;
2100 }
2101
2102dec:
Milan Broz899c95d2008-02-08 02:11:02 +00002103 crypt_dec_pending(io);
Milan Broz84131db2008-02-08 02:10:59 +00002104}
2105
Mikulas Patocka72c6e7a2012-03-28 18:41:22 +01002106static void kcryptd_crypt_read_done(struct dm_crypt_io *io)
Milan Broz5742fd72008-02-08 02:10:43 +00002107{
Milan Broz5742fd72008-02-08 02:10:43 +00002108 crypt_dec_pending(io);
2109}
2110
Ignat Korchagin8abec362021-01-04 14:59:47 +00002111static void kcryptd_crypt_read_continue(struct work_struct *work)
2112{
2113 struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
2114 struct crypt_config *cc = io->cc;
2115 blk_status_t r;
2116
2117 wait_for_completion(&io->ctx.restart);
2118 reinit_completion(&io->ctx.restart);
2119
2120 r = crypt_convert(cc, &io->ctx, true, false);
2121 if (r)
2122 io->error = r;
2123
2124 if (atomic_dec_and_test(&io->ctx.cc_pending))
2125 kcryptd_crypt_read_done(io);
2126
2127 crypt_dec_pending(io);
2128}
2129
Milan Broz4e4eef62008-02-08 02:10:49 +00002130static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
Milan Broz8b004452006-10-03 01:15:37 -07002131{
Alasdair G Kergon49a8a922012-07-27 15:08:05 +01002132 struct crypt_config *cc = io->cc;
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +02002133 blk_status_t r;
Milan Broz8b004452006-10-03 01:15:37 -07002134
Milan Broz3e1a8bd2008-10-10 13:37:02 +01002135 crypt_inc_pending(io);
Milan Broz3a7f6c92008-02-08 02:11:14 +00002136
Milan Broz53017032008-02-08 02:10:38 +00002137 crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
Milan Broz0c395b02008-02-08 02:10:54 +00002138 io->sector);
Milan Broz8b004452006-10-03 01:15:37 -07002139
Ignat Korchagin39d42fa2020-07-06 18:37:31 +01002140 r = crypt_convert(cc, &io->ctx,
Ignat Korchagin8abec362021-01-04 14:59:47 +00002141 test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags), true);
2142 /*
2143 * Crypto API backlogged the request because its queue was full
2144 * and we're in softirq context, so continue from a workqueue
2145 */
2146 if (r == BLK_STS_DEV_RESOURCE) {
2147 INIT_WORK(&io->work, kcryptd_crypt_read_continue);
2148 queue_work(cc->crypt_queue, &io->work);
2149 return;
2150 }
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +02002151 if (r)
Milan Brozef43aa32017-01-04 20:23:54 +01002152 io->error = r;
Milan Broz5742fd72008-02-08 02:10:43 +00002153
Mikulas Patocka40b62292012-07-27 15:08:04 +01002154 if (atomic_dec_and_test(&io->ctx.cc_pending))
Mikulas Patocka72c6e7a2012-03-28 18:41:22 +01002155 kcryptd_crypt_read_done(io);
Milan Broz3a7f6c92008-02-08 02:11:14 +00002156
2157 crypt_dec_pending(io);
Milan Broz8b004452006-10-03 01:15:37 -07002158}
2159
Milan Broz95497a92008-02-08 02:11:12 +00002160static void kcryptd_async_done(struct crypto_async_request *async_req,
2161 int error)
2162{
Huang Yingb2174ee2009-03-16 17:44:33 +00002163 struct dm_crypt_request *dmreq = async_req->data;
2164 struct convert_context *ctx = dmreq->ctx;
Milan Broz95497a92008-02-08 02:11:12 +00002165 struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
Alasdair G Kergon49a8a922012-07-27 15:08:05 +01002166 struct crypt_config *cc = io->cc;
Milan Broz95497a92008-02-08 02:11:12 +00002167
Milan Broz54cea3f2015-05-15 17:00:25 +02002168 /*
2169 * A request from crypto driver backlog is going to be processed now,
2170 * finish the completion and continue in crypt_convert().
2171 * (Callback will be called for the second time for this request.)
2172 */
Rabin Vincentc0403ec2015-05-05 15:15:56 +02002173 if (error == -EINPROGRESS) {
2174 complete(&ctx->restart);
Milan Broz95497a92008-02-08 02:11:12 +00002175 return;
Rabin Vincentc0403ec2015-05-05 15:15:56 +02002176 }
Milan Broz95497a92008-02-08 02:11:12 +00002177
Milan Broz2dc53272011-01-13 19:59:54 +00002178 if (!error && cc->iv_gen_ops && cc->iv_gen_ops->post)
Milan Brozef43aa32017-01-04 20:23:54 +01002179 error = cc->iv_gen_ops->post(cc, org_iv_of_dmreq(cc, dmreq), dmreq);
Milan Broz2dc53272011-01-13 19:59:54 +00002180
Milan Brozef43aa32017-01-04 20:23:54 +01002181 if (error == -EBADMSG) {
Milan Brozf7101262019-05-15 16:22:30 +02002182 char b[BDEVNAME_SIZE];
Michael Weiß58d0f182021-09-04 11:59:30 +02002183 sector_t s = le64_to_cpu(*org_sector_of_dmreq(cc, dmreq));
2184
2185 DMERR_LIMIT("%s: INTEGRITY AEAD ERROR, sector %llu",
2186 bio_devname(ctx->bio_in, b), s);
2187 dm_audit_log_bio(DM_MSG_PREFIX, "integrity-aead",
2188 ctx->bio_in, s, 0);
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +02002189 io->error = BLK_STS_PROTECTION;
Milan Brozef43aa32017-01-04 20:23:54 +01002190 } else if (error < 0)
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +02002191 io->error = BLK_STS_IOERR;
Mikulas Patocka72c6e7a2012-03-28 18:41:22 +01002192
Mikulas Patocka298a9fa2014-03-28 15:51:55 -04002193 crypt_free_req(cc, req_of_dmreq(cc, dmreq), io->base_bio);
Milan Broz95497a92008-02-08 02:11:12 +00002194
Mikulas Patocka40b62292012-07-27 15:08:04 +01002195 if (!atomic_dec_and_test(&ctx->cc_pending))
Rabin Vincentc0403ec2015-05-05 15:15:56 +02002196 return;
Milan Broz95497a92008-02-08 02:11:12 +00002197
Damien Le Moal8e225f02020-07-08 18:28:08 +09002198 /*
2199 * The request is fully completed: for inline writes, let
2200 * kcryptd_crypt_write_convert() do the IO submission.
2201 */
2202 if (bio_data_dir(io->base_bio) == READ) {
Mikulas Patocka72c6e7a2012-03-28 18:41:22 +01002203 kcryptd_crypt_read_done(io);
Damien Le Moal8e225f02020-07-08 18:28:08 +09002204 return;
2205 }
2206
2207 if (kcryptd_crypt_write_inline(cc, ctx)) {
2208 complete(&ctx->restart);
2209 return;
2210 }
2211
2212 kcryptd_crypt_write_io_submit(io, 1);
Milan Broz95497a92008-02-08 02:11:12 +00002213}
2214
Milan Broz4e4eef62008-02-08 02:10:49 +00002215static void kcryptd_crypt(struct work_struct *work)
2216{
2217 struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
2218
2219 if (bio_data_dir(io->base_bio) == READ)
2220 kcryptd_crypt_read_convert(io);
2221 else
2222 kcryptd_crypt_write_convert(io);
Milan Broz8b004452006-10-03 01:15:37 -07002223}
2224
Ignat Korchagin39d42fa2020-07-06 18:37:31 +01002225static void kcryptd_crypt_tasklet(unsigned long work)
2226{
2227 kcryptd_crypt((struct work_struct *)work);
2228}
2229
Alasdair G Kergon395b1672008-02-08 02:10:52 +00002230static void kcryptd_queue_crypt(struct dm_crypt_io *io)
2231{
Alasdair G Kergon49a8a922012-07-27 15:08:05 +01002232 struct crypt_config *cc = io->cc;
Alasdair G Kergon395b1672008-02-08 02:10:52 +00002233
Ignat Korchagin39d42fa2020-07-06 18:37:31 +01002234 if ((bio_data_dir(io->base_bio) == READ && test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags)) ||
2235 (bio_data_dir(io->base_bio) == WRITE && test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags))) {
Ignat Korchaginc87a95d2021-01-13 19:17:17 +00002236 /*
Changbin Dud3703ef2021-08-14 09:09:09 +08002237 * in_hardirq(): Crypto API's skcipher_walk_first() refuses to work in hard IRQ context.
Ignat Korchaginc87a95d2021-01-13 19:17:17 +00002238 * irqs_disabled(): the kernel may run some IO completion from the idle thread, but
2239 * it is being executed with irqs disabled.
2240 */
Changbin Dud3703ef2021-08-14 09:09:09 +08002241 if (in_hardirq() || irqs_disabled()) {
Ignat Korchagin39d42fa2020-07-06 18:37:31 +01002242 tasklet_init(&io->tasklet, kcryptd_crypt_tasklet, (unsigned long)&io->work);
2243 tasklet_schedule(&io->tasklet);
2244 return;
2245 }
2246
2247 kcryptd_crypt(&io->work);
2248 return;
2249 }
2250
Alasdair G Kergon395b1672008-02-08 02:10:52 +00002251 INIT_WORK(&io->work, kcryptd_crypt);
2252 queue_work(cc->crypt_queue, &io->work);
2253}
2254
Milan Brozef43aa32017-01-04 20:23:54 +01002255static void crypt_free_tfms_aead(struct crypt_config *cc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002256{
Milan Brozef43aa32017-01-04 20:23:54 +01002257 if (!cc->cipher_tfm.tfms_aead)
2258 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002259
Milan Brozef43aa32017-01-04 20:23:54 +01002260 if (cc->cipher_tfm.tfms_aead[0] && !IS_ERR(cc->cipher_tfm.tfms_aead[0])) {
2261 crypto_free_aead(cc->cipher_tfm.tfms_aead[0]);
2262 cc->cipher_tfm.tfms_aead[0] = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002263 }
2264
Milan Brozef43aa32017-01-04 20:23:54 +01002265 kfree(cc->cipher_tfm.tfms_aead);
2266 cc->cipher_tfm.tfms_aead = NULL;
2267}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002268
Milan Brozef43aa32017-01-04 20:23:54 +01002269static void crypt_free_tfms_skcipher(struct crypt_config *cc)
Milan Brozd1f96422011-01-13 19:59:54 +00002270{
Milan Brozd1f96422011-01-13 19:59:54 +00002271 unsigned i;
2272
Milan Brozef43aa32017-01-04 20:23:54 +01002273 if (!cc->cipher_tfm.tfms)
Mikulas Patockafd2d2312012-07-27 15:08:05 +01002274 return;
2275
Milan Brozd1f96422011-01-13 19:59:54 +00002276 for (i = 0; i < cc->tfms_count; i++)
Milan Brozef43aa32017-01-04 20:23:54 +01002277 if (cc->cipher_tfm.tfms[i] && !IS_ERR(cc->cipher_tfm.tfms[i])) {
2278 crypto_free_skcipher(cc->cipher_tfm.tfms[i]);
2279 cc->cipher_tfm.tfms[i] = NULL;
Milan Brozd1f96422011-01-13 19:59:54 +00002280 }
Mikulas Patockafd2d2312012-07-27 15:08:05 +01002281
Milan Brozef43aa32017-01-04 20:23:54 +01002282 kfree(cc->cipher_tfm.tfms);
2283 cc->cipher_tfm.tfms = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002284}
2285
2286static void crypt_free_tfms(struct crypt_config *cc)
2287{
Milan Broz33d2f092017-03-16 15:39:40 +01002288 if (crypt_integrity_aead(cc))
Milan Brozef43aa32017-01-04 20:23:54 +01002289 crypt_free_tfms_aead(cc);
2290 else
2291 crypt_free_tfms_skcipher(cc);
Milan Brozd1f96422011-01-13 19:59:54 +00002292}
2293
Milan Brozef43aa32017-01-04 20:23:54 +01002294static int crypt_alloc_tfms_skcipher(struct crypt_config *cc, char *ciphermode)
Milan Brozd1f96422011-01-13 19:59:54 +00002295{
Milan Brozd1f96422011-01-13 19:59:54 +00002296 unsigned i;
2297 int err;
2298
Kees Cook6396bb22018-06-12 14:03:40 -07002299 cc->cipher_tfm.tfms = kcalloc(cc->tfms_count,
2300 sizeof(struct crypto_skcipher *),
2301 GFP_KERNEL);
Milan Brozef43aa32017-01-04 20:23:54 +01002302 if (!cc->cipher_tfm.tfms)
Mikulas Patockafd2d2312012-07-27 15:08:05 +01002303 return -ENOMEM;
2304
Milan Brozd1f96422011-01-13 19:59:54 +00002305 for (i = 0; i < cc->tfms_count; i++) {
Mikulas Patockacd746932020-07-09 23:20:42 -07002306 cc->cipher_tfm.tfms[i] = crypto_alloc_skcipher(ciphermode, 0,
2307 CRYPTO_ALG_ALLOCATES_MEMORY);
Milan Brozef43aa32017-01-04 20:23:54 +01002308 if (IS_ERR(cc->cipher_tfm.tfms[i])) {
2309 err = PTR_ERR(cc->cipher_tfm.tfms[i]);
Mikulas Patockafd2d2312012-07-27 15:08:05 +01002310 crypt_free_tfms(cc);
Milan Brozd1f96422011-01-13 19:59:54 +00002311 return err;
2312 }
2313 }
2314
Eric Biggersaf331eb2018-12-05 20:53:00 -08002315 /*
2316 * dm-crypt performance can vary greatly depending on which crypto
2317 * algorithm implementation is used. Help people debug performance
2318 * problems by logging the ->cra_driver_name.
2319 */
Milan Broz7a1cd722019-05-15 16:23:43 +02002320 DMDEBUG_LIMIT("%s using implementation \"%s\"", ciphermode,
Eric Biggersaf331eb2018-12-05 20:53:00 -08002321 crypto_skcipher_alg(any_tfm(cc))->base.cra_driver_name);
Milan Brozd1f96422011-01-13 19:59:54 +00002322 return 0;
2323}
2324
Milan Brozef43aa32017-01-04 20:23:54 +01002325static int crypt_alloc_tfms_aead(struct crypt_config *cc, char *ciphermode)
2326{
Milan Brozef43aa32017-01-04 20:23:54 +01002327 int err;
2328
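	/* tfms and tfms_aead share the cipher_tfm union, so this also sets tfms_aead. */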
2329 cc->cipher_tfm.tfms = kmalloc(sizeof(struct crypto_aead *), GFP_KERNEL);
2330 if (!cc->cipher_tfm.tfms)
2331 return -ENOMEM;
2332
Mikulas Patockacd746932020-07-09 23:20:42 -07002333 cc->cipher_tfm.tfms_aead[0] = crypto_alloc_aead(ciphermode, 0,
2334 CRYPTO_ALG_ALLOCATES_MEMORY);
Milan Brozef43aa32017-01-04 20:23:54 +01002335 if (IS_ERR(cc->cipher_tfm.tfms_aead[0])) {
2336 err = PTR_ERR(cc->cipher_tfm.tfms_aead[0]);
2337 crypt_free_tfms(cc);
2338 return err;
2339 }
2340
Milan Broz7a1cd722019-05-15 16:23:43 +02002341 DMDEBUG_LIMIT("%s using implementation \"%s\"", ciphermode,
Eric Biggersaf331eb2018-12-05 20:53:00 -08002342 crypto_aead_alg(any_tfm_aead(cc))->base.cra_driver_name);
Milan Brozef43aa32017-01-04 20:23:54 +01002343 return 0;
2344}
2345
2346static int crypt_alloc_tfms(struct crypt_config *cc, char *ciphermode)
2347{
Milan Broz33d2f092017-03-16 15:39:40 +01002348 if (crypt_integrity_aead(cc))
Milan Brozef43aa32017-01-04 20:23:54 +01002349 return crypt_alloc_tfms_aead(cc, ciphermode);
2350 else
2351 return crypt_alloc_tfms_skcipher(cc, ciphermode);
2352}
2353
2354static unsigned crypt_subkey_size(struct crypt_config *cc)
2355{
2356 return (cc->key_size - cc->key_extra_size) >> ilog2(cc->tfms_count);
2357}
2358
2359static unsigned crypt_authenckey_size(struct crypt_config *cc)
2360{
2361 return crypt_subkey_size(cc) + RTA_SPACE(sizeof(struct crypto_authenc_key_param));
2362}
2363
2364/*
2365 * If AEAD is composed like authenc(hmac(sha256),xts(aes)),
2366 * the key must be passed to the crypto API in a special format.
2367 * This function converts cc->key to that special format.
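 *
 * Resulting layout, as expected by the authenc() template:
 *   | rtattr CRYPTO_AUTHENC_KEYA_PARAM (enckeylen) | auth key | enc key |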
2368 */
2369static void crypt_copy_authenckey(char *p, const void *key,
2370 unsigned enckeylen, unsigned authkeylen)
2371{
2372 struct crypto_authenc_key_param *param;
2373 struct rtattr *rta;
2374
2375 rta = (struct rtattr *)p;
2376 param = RTA_DATA(rta);
2377 param->enckeylen = cpu_to_be32(enckeylen);
2378 rta->rta_len = RTA_LENGTH(sizeof(*param));
2379 rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
2380 p += RTA_SPACE(sizeof(*param));
2381 memcpy(p, key + enckeylen, authkeylen);
2382 p += authkeylen;
2383 memcpy(p, key, enckeylen);
2384}
2385
Mikulas Patocka671ea6b2016-08-25 07:12:54 -04002386static int crypt_setkey(struct crypt_config *cc)
Andi Kleenc0297722011-01-13 19:59:53 +00002387{
Milan Brozda31a072013-10-28 23:21:03 +01002388 unsigned subkey_size;
Mikulas Patockafd2d2312012-07-27 15:08:05 +01002389 int err = 0, i, r;
Andi Kleenc0297722011-01-13 19:59:53 +00002390
Milan Brozda31a072013-10-28 23:21:03 +01002391 /* Ignore extra keys (which are used for IV etc) */
Milan Brozef43aa32017-01-04 20:23:54 +01002392 subkey_size = crypt_subkey_size(cc);
Milan Brozda31a072013-10-28 23:21:03 +01002393
Milan Broz27c70032018-01-03 22:48:59 +01002394 if (crypt_integrity_hmac(cc)) {
2395 if (subkey_size < cc->key_mac_size)
2396 return -EINVAL;
2397
Milan Brozef43aa32017-01-04 20:23:54 +01002398 crypt_copy_authenckey(cc->authenc_key, cc->key,
2399 subkey_size - cc->key_mac_size,
2400 cc->key_mac_size);
Milan Broz27c70032018-01-03 22:48:59 +01002401 }
2402
Mikulas Patockafd2d2312012-07-27 15:08:05 +01002403 for (i = 0; i < cc->tfms_count; i++) {
Milan Broz33d2f092017-03-16 15:39:40 +01002404 if (crypt_integrity_hmac(cc))
Milan Brozef43aa32017-01-04 20:23:54 +01002405 r = crypto_aead_setkey(cc->cipher_tfm.tfms_aead[i],
2406 cc->authenc_key, crypt_authenckey_size(cc));
Milan Broz33d2f092017-03-16 15:39:40 +01002407 else if (crypt_integrity_aead(cc))
2408 r = crypto_aead_setkey(cc->cipher_tfm.tfms_aead[i],
2409 cc->key + (i * subkey_size),
2410 subkey_size);
Milan Brozef43aa32017-01-04 20:23:54 +01002411 else
2412 r = crypto_skcipher_setkey(cc->cipher_tfm.tfms[i],
2413 cc->key + (i * subkey_size),
2414 subkey_size);
Mikulas Patockafd2d2312012-07-27 15:08:05 +01002415 if (r)
2416 err = r;
Andi Kleenc0297722011-01-13 19:59:53 +00002417 }
2418
Milan Brozef43aa32017-01-04 20:23:54 +01002419 if (crypt_integrity_hmac(cc))
2420 memzero_explicit(cc->authenc_key, crypt_authenckey_size(cc));
2421
Andi Kleenc0297722011-01-13 19:59:53 +00002422 return err;
2423}
2424
Ondrej Kozinac538f6e2016-11-21 15:58:51 +01002425#ifdef CONFIG_KEYS
2426
Ondrej Kozina027c4312016-12-01 18:20:52 +01002427static bool contains_whitespace(const char *str)
2428{
2429 while (*str)
2430 if (isspace(*str++))
2431 return true;
2432 return false;
2433}
2434
Dmitry Baryshkov27f54112020-04-20 16:46:59 +03002435static int set_key_user(struct crypt_config *cc, struct key *key)
2436{
2437 const struct user_key_payload *ukp;
2438
2439 ukp = user_key_payload_locked(key);
2440 if (!ukp)
2441 return -EKEYREVOKED;
2442
2443 if (cc->key_size != ukp->datalen)
2444 return -EINVAL;
2445
2446 memcpy(cc->key, ukp->data, cc->key_size);
2447
2448 return 0;
2449}
2450
Dmitry Baryshkov27f54112020-04-20 16:46:59 +03002451static int set_key_encrypted(struct crypt_config *cc, struct key *key)
2452{
2453 const struct encrypted_key_payload *ekp;
2454
2455 ekp = key->payload.data[0];
2456 if (!ekp)
2457 return -EKEYREVOKED;
2458
2459 if (cc->key_size != ekp->decrypted_datalen)
2460 return -EINVAL;
2461
2462 memcpy(cc->key, ekp->decrypted_data, cc->key_size);
2463
2464 return 0;
2465}
Dmitry Baryshkov27f54112020-04-20 16:46:59 +03002466
Ahmad Fatoum363880c2021-01-22 09:43:21 +01002467static int set_key_trusted(struct crypt_config *cc, struct key *key)
2468{
2469 const struct trusted_key_payload *tkp;
2470
2471 tkp = key->payload.data[0];
2472 if (!tkp)
2473 return -EKEYREVOKED;
2474
2475 if (cc->key_size != tkp->key_len)
2476 return -EINVAL;
2477
2478 memcpy(cc->key, tkp->key, cc->key_size);
2479
2480 return 0;
2481}
2482
Ondrej Kozinac538f6e2016-11-21 15:58:51 +01002483static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string)
2484{
2485 char *new_key_string, *key_desc;
2486 int ret;
Dmitry Baryshkov27f54112020-04-20 16:46:59 +03002487 struct key_type *type;
Ondrej Kozinac538f6e2016-11-21 15:58:51 +01002488 struct key *key;
Dmitry Baryshkov27f54112020-04-20 16:46:59 +03002489 int (*set_key)(struct crypt_config *cc, struct key *key);
Ondrej Kozinac538f6e2016-11-21 15:58:51 +01002490
Ondrej Kozina027c4312016-12-01 18:20:52 +01002491 /*
2492 * Reject key_string with whitespace. dm core currently lacks code for
2493 * proper whitespace escaping in arguments on DM_TABLE_STATUS path.
2494 */
2495 if (contains_whitespace(key_string)) {
2496 DMERR("whitespace chars not allowed in key string");
2497 return -EINVAL;
2498 }
2499
Ondrej Kozinac538f6e2016-11-21 15:58:51 +01002500 /* look for next ':' separating key_type from key_description */
2501 key_desc = strpbrk(key_string, ":");
2502 if (!key_desc || key_desc == key_string || !strlen(key_desc + 1))
2503 return -EINVAL;
2504
Dmitry Baryshkov27f54112020-04-20 16:46:59 +03002505 if (!strncmp(key_string, "logon:", key_desc - key_string + 1)) {
2506 type = &key_type_logon;
2507 set_key = set_key_user;
2508 } else if (!strncmp(key_string, "user:", key_desc - key_string + 1)) {
2509 type = &key_type_user;
2510 set_key = set_key_user;
Ahmad Fatoum831475c2021-01-22 09:43:20 +01002511 } else if (IS_ENABLED(CONFIG_ENCRYPTED_KEYS) &&
2512 !strncmp(key_string, "encrypted:", key_desc - key_string + 1)) {
Dmitry Baryshkov27f54112020-04-20 16:46:59 +03002513 type = &key_type_encrypted;
2514 set_key = set_key_encrypted;
Ahmad Fatoum363880c2021-01-22 09:43:21 +01002515 } else if (IS_ENABLED(CONFIG_TRUSTED_KEYS) &&
2516 !strncmp(key_string, "trusted:", key_desc - key_string + 1)) {
2517 type = &key_type_trusted;
2518 set_key = set_key_trusted;
Dmitry Baryshkov27f54112020-04-20 16:46:59 +03002519 } else {
Ondrej Kozinac538f6e2016-11-21 15:58:51 +01002520 return -EINVAL;
Dmitry Baryshkov27f54112020-04-20 16:46:59 +03002521 }
Ondrej Kozinac538f6e2016-11-21 15:58:51 +01002522
2523 new_key_string = kstrdup(key_string, GFP_KERNEL);
2524 if (!new_key_string)
2525 return -ENOMEM;
2526
Dmitry Baryshkov27f54112020-04-20 16:46:59 +03002527 key = request_key(type, key_desc + 1, NULL);
Ondrej Kozinac538f6e2016-11-21 15:58:51 +01002528 if (IS_ERR(key)) {
Waiman Long453431a2020-08-06 23:18:13 -07002529 kfree_sensitive(new_key_string);
Ondrej Kozinac538f6e2016-11-21 15:58:51 +01002530 return PTR_ERR(key);
2531 }
2532
Ondrej Kozinaf5b0cba2017-01-31 15:47:11 +01002533 down_read(&key->sem);
Ondrej Kozinac538f6e2016-11-21 15:58:51 +01002534
Dmitry Baryshkov27f54112020-04-20 16:46:59 +03002535 ret = set_key(cc, key);
2536 if (ret < 0) {
Ondrej Kozinaf5b0cba2017-01-31 15:47:11 +01002537 up_read(&key->sem);
Ondrej Kozinac538f6e2016-11-21 15:58:51 +01002538 key_put(key);
Waiman Long453431a2020-08-06 23:18:13 -07002539 kfree_sensitive(new_key_string);
Dmitry Baryshkov27f54112020-04-20 16:46:59 +03002540 return ret;
Ondrej Kozinac538f6e2016-11-21 15:58:51 +01002541 }
2542
Ondrej Kozinaf5b0cba2017-01-31 15:47:11 +01002543 up_read(&key->sem);
Ondrej Kozinac538f6e2016-11-21 15:58:51 +01002544 key_put(key);
2545
2546 /* clear the flag since following operations may invalidate previously valid key */
2547 clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
2548
2549 ret = crypt_setkey(cc);
2550
Ondrej Kozinac538f6e2016-11-21 15:58:51 +01002551 if (!ret) {
2552 set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
Waiman Long453431a2020-08-06 23:18:13 -07002553 kfree_sensitive(cc->key_string);
Ondrej Kozinac538f6e2016-11-21 15:58:51 +01002554 cc->key_string = new_key_string;
2555 } else
Waiman Long453431a2020-08-06 23:18:13 -07002556 kfree_sensitive(new_key_string);
Ondrej Kozinac538f6e2016-11-21 15:58:51 +01002557
2558 return ret;
2559}
2560
2561static int get_key_size(char **key_string)
2562{
2563 char *colon, dummy;
2564 int ret;
2565
2566 if (*key_string[0] != ':')
2567 return strlen(*key_string) >> 1;
2568
2569 /* look for next ':' in key string */
2570 colon = strpbrk(*key_string + 1, ":");
2571 if (!colon)
2572 return -EINVAL;
2573
2574 if (sscanf(*key_string + 1, "%u%c", &ret, &dummy) != 2 || dummy != ':')
2575 return -EINVAL;
2576
2577 *key_string = colon;
2578
2579 /* remaining key string should be :<logon|user>:<key_desc> */
2580
2581 return ret;
2582}

#else

static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string)
{
	return -EINVAL;
}

static int get_key_size(char **key_string)
{
	return (*key_string[0] == ':') ? -EINVAL : strlen(*key_string) >> 1;
}

#endif /* CONFIG_KEYS */

static int crypt_set_key(struct crypt_config *cc, char *key)
{
	int r = -EINVAL;
	int key_string_len = strlen(key);

	/* Hyphen (which gives a key_size of zero) means there is no key. */
	if (!cc->key_size && strcmp(key, "-"))
		goto out;

	/* ':' means the key is in kernel keyring, short-circuit normal key processing */
	if (key[0] == ':') {
		r = crypt_set_keyring_key(cc, key + 1);
		goto out;
	}

	/* clear the flag since following operations may invalidate previously valid key */
	clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);

	/* wipe references to any kernel keyring key */
	kfree_sensitive(cc->key_string);
	cc->key_string = NULL;

	/* Decode key from its hex representation. */
	if (cc->key_size && hex2bin(cc->key, key, cc->key_size) < 0)
		goto out;

	r = crypt_setkey(cc);
	if (!r)
		set_bit(DM_CRYPT_KEY_VALID, &cc->flags);

out:
	/* Hex key string not needed after here, so wipe it. */
	memset(key, '0', key_string_len);

	return r;
}

static int crypt_wipe_key(struct crypt_config *cc)
{
	int r;

	clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
	get_random_bytes(&cc->key, cc->key_size);

	/* Wipe IV private keys */
	if (cc->iv_gen_ops && cc->iv_gen_ops->wipe) {
		r = cc->iv_gen_ops->wipe(cc);
		if (r)
			return r;
	}

	kfree_sensitive(cc->key_string);
	cc->key_string = NULL;
	r = crypt_setkey(cc);
	memset(&cc->key, 0, cc->key_size * sizeof(u8));

	return r;
}

static void crypt_calculate_pages_per_client(void)
{
	unsigned long pages = (totalram_pages() - totalhigh_pages()) * DM_CRYPT_MEMORY_PERCENT / 100;

	if (!dm_crypt_clients_n)
		return;

	pages /= dm_crypt_clients_n;
	if (pages < DM_CRYPT_MIN_PAGES_PER_CLIENT)
		pages = DM_CRYPT_MIN_PAGES_PER_CLIENT;
	dm_crypt_pages_per_client = pages;
}

static void *crypt_page_alloc(gfp_t gfp_mask, void *pool_data)
{
	struct crypt_config *cc = pool_data;
	struct page *page;

	/*
	 * Note, percpu_counter_read_positive() may over (and under) estimate
	 * the current usage by at most (batch - 1) * num_online_cpus() pages,
	 * but avoids potential spinlock contention of an exact result.
	 */
	if (unlikely(percpu_counter_read_positive(&cc->n_allocated_pages) >= dm_crypt_pages_per_client) &&
	    likely(gfp_mask & __GFP_NORETRY))
		return NULL;

	page = alloc_page(gfp_mask);
	if (likely(page != NULL))
		percpu_counter_add(&cc->n_allocated_pages, 1);

	return page;
}

static void crypt_page_free(void *page, void *pool_data)
{
	struct crypt_config *cc = pool_data;

	__free_page(page);
	percpu_counter_sub(&cc->n_allocated_pages, 1);
}

static void crypt_dtr(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	ti->private = NULL;

	if (!cc)
		return;

	if (cc->write_thread)
		kthread_stop(cc->write_thread);

	if (cc->io_queue)
		destroy_workqueue(cc->io_queue);
	if (cc->crypt_queue)
		destroy_workqueue(cc->crypt_queue);

	crypt_free_tfms(cc);

	bioset_exit(&cc->bs);

	mempool_exit(&cc->page_pool);
	mempool_exit(&cc->req_pool);
	mempool_exit(&cc->tag_pool);

	WARN_ON(percpu_counter_sum(&cc->n_allocated_pages) != 0);
	percpu_counter_destroy(&cc->n_allocated_pages);

	if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
		cc->iv_gen_ops->dtr(cc);

	if (cc->dev)
		dm_put_device(ti, cc->dev);

	kfree_sensitive(cc->cipher_string);
	kfree_sensitive(cc->key_string);
	kfree_sensitive(cc->cipher_auth);
	kfree_sensitive(cc->authenc_key);

	mutex_destroy(&cc->bio_alloc_lock);

	/* Must zero key material before freeing */
	kfree_sensitive(cc);

	spin_lock(&dm_crypt_clients_lock);
	WARN_ON(!dm_crypt_clients_n);
	dm_crypt_clients_n--;
	crypt_calculate_pages_per_client();
	spin_unlock(&dm_crypt_clients_lock);

	dm_audit_log_dtr(DM_MSG_PREFIX, ti, 1);
}

static int crypt_ctr_ivmode(struct dm_target *ti, const char *ivmode)
{
	struct crypt_config *cc = ti->private;

	if (crypt_integrity_aead(cc))
		cc->iv_size = crypto_aead_ivsize(any_tfm_aead(cc));
	else
		cc->iv_size = crypto_skcipher_ivsize(any_tfm(cc));

	if (cc->iv_size)
		/* at least a 64 bit sector number should fit in our buffer */
		cc->iv_size = max(cc->iv_size,
				  (unsigned int)(sizeof(u64) / sizeof(u8)));
	else if (ivmode) {
		DMWARN("Selected cipher does not support IVs");
		ivmode = NULL;
	}

	/* Choose ivmode, see comments at iv code. */
	if (ivmode == NULL)
		cc->iv_gen_ops = NULL;
	else if (strcmp(ivmode, "plain") == 0)
		cc->iv_gen_ops = &crypt_iv_plain_ops;
	else if (strcmp(ivmode, "plain64") == 0)
		cc->iv_gen_ops = &crypt_iv_plain64_ops;
	else if (strcmp(ivmode, "plain64be") == 0)
		cc->iv_gen_ops = &crypt_iv_plain64be_ops;
	else if (strcmp(ivmode, "essiv") == 0)
		cc->iv_gen_ops = &crypt_iv_essiv_ops;
	else if (strcmp(ivmode, "benbi") == 0)
		cc->iv_gen_ops = &crypt_iv_benbi_ops;
	else if (strcmp(ivmode, "null") == 0)
		cc->iv_gen_ops = &crypt_iv_null_ops;
	else if (strcmp(ivmode, "eboiv") == 0)
		cc->iv_gen_ops = &crypt_iv_eboiv_ops;
	else if (strcmp(ivmode, "elephant") == 0) {
		cc->iv_gen_ops = &crypt_iv_elephant_ops;
		cc->key_parts = 2;
		cc->key_extra_size = cc->key_size / 2;
		if (cc->key_extra_size > ELEPHANT_MAX_KEY_SIZE)
			return -EINVAL;
		set_bit(CRYPT_ENCRYPT_PREPROCESS, &cc->cipher_flags);
	} else if (strcmp(ivmode, "lmk") == 0) {
		cc->iv_gen_ops = &crypt_iv_lmk_ops;
		/*
		 * Versions 2 and 3 are recognised according
		 * to the length of the provided multi-key string.
		 * If present (version 3), the last key is used as IV seed.
		 * All keys (including IV seed) are always the same size.
		 */
		if (cc->key_size % cc->key_parts) {
			cc->key_parts++;
			cc->key_extra_size = cc->key_size / cc->key_parts;
		}
	} else if (strcmp(ivmode, "tcw") == 0) {
		cc->iv_gen_ops = &crypt_iv_tcw_ops;
		cc->key_parts += 2; /* IV + whitening */
		cc->key_extra_size = cc->iv_size + TCW_WHITENING_SIZE;
	} else if (strcmp(ivmode, "random") == 0) {
		cc->iv_gen_ops = &crypt_iv_random_ops;
		/* Need storage space in integrity fields. */
		cc->integrity_iv_size = cc->iv_size;
	} else {
		ti->error = "Invalid IV mode";
		return -EINVAL;
	}

	return 0;
}

/*
 * Workaround to parse the HMAC algorithm from an AEAD crypto API spec.
 * The HMAC is needed to calculate tag size (HMAC digest size).
 * This should probably be done by crypto-api calls (once available...)
 */
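/*
 * As an illustration of the parsing below: given the (hypothetical) spec
 * "authenc(hmac(sha256),cbc(aes))", the substring between the first '('
 * and the first ',' is "hmac(sha256)", whose digest size then determines
 * cc->key_mac_size.
 */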
static int crypt_ctr_auth_cipher(struct crypt_config *cc, char *cipher_api)
{
	char *start, *end, *mac_alg = NULL;
	struct crypto_ahash *mac;

	if (!strstarts(cipher_api, "authenc("))
		return 0;

	start = strchr(cipher_api, '(');
	end = strchr(cipher_api, ',');
	if (!start || !end || ++start > end)
		return -EINVAL;

	mac_alg = kzalloc(end - start + 1, GFP_KERNEL);
	if (!mac_alg)
		return -ENOMEM;
	strncpy(mac_alg, start, end - start);

	mac = crypto_alloc_ahash(mac_alg, 0, CRYPTO_ALG_ALLOCATES_MEMORY);
	kfree(mac_alg);

	if (IS_ERR(mac))
		return PTR_ERR(mac);

	cc->key_mac_size = crypto_ahash_digestsize(mac);
	crypto_free_ahash(mac);

	cc->authenc_key = kmalloc(crypt_authenckey_size(cc), GFP_KERNEL);
	if (!cc->authenc_key)
		return -ENOMEM;

	return 0;
}

static int crypt_ctr_cipher_new(struct dm_target *ti, char *cipher_in, char *key,
				char **ivmode, char **ivopts)
{
	struct crypt_config *cc = ti->private;
	char *tmp, *cipher_api, buf[CRYPTO_MAX_ALG_NAME];
	int ret = -EINVAL;

	cc->tfms_count = 1;

	/*
	 * New format (capi: prefix)
	 * capi:cipher_api_spec-iv:ivopts
	 */
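	/*
	 * Illustrative specs of this form (examples, not values used here):
	 *   capi:cbc(aes)-essiv:sha256
	 *   capi:authenc(hmac(sha256),xts(aes))-random
	 */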
	tmp = &cipher_in[strlen("capi:")];

	/* Separate IV options if present, it can contain another '-' in hash name */
	*ivopts = strrchr(tmp, ':');
	if (*ivopts) {
		**ivopts = '\0';
		(*ivopts)++;
	}
	/* Parse IV mode */
	*ivmode = strrchr(tmp, '-');
	if (*ivmode) {
		**ivmode = '\0';
		(*ivmode)++;
	}
	/* The rest is crypto API spec */
	cipher_api = tmp;

	/* Alloc AEAD, can be used only in new format. */
	if (crypt_integrity_aead(cc)) {
		ret = crypt_ctr_auth_cipher(cc, cipher_api);
		if (ret < 0) {
			ti->error = "Invalid AEAD cipher spec";
			return -ENOMEM;
		}
	}

	if (*ivmode && !strcmp(*ivmode, "lmk"))
		cc->tfms_count = 64;

	if (*ivmode && !strcmp(*ivmode, "essiv")) {
		if (!*ivopts) {
			ti->error = "Digest algorithm missing for ESSIV mode";
			return -EINVAL;
		}
		ret = snprintf(buf, CRYPTO_MAX_ALG_NAME, "essiv(%s,%s)",
			       cipher_api, *ivopts);
		if (ret < 0 || ret >= CRYPTO_MAX_ALG_NAME) {
			ti->error = "Cannot allocate cipher string";
			return -ENOMEM;
		}
		cipher_api = buf;
	}

	cc->key_parts = cc->tfms_count;

	/* Allocate cipher */
	ret = crypt_alloc_tfms(cc, cipher_api);
	if (ret < 0) {
		ti->error = "Error allocating crypto tfm";
		return ret;
	}

	if (crypt_integrity_aead(cc))
		cc->iv_size = crypto_aead_ivsize(any_tfm_aead(cc));
	else
		cc->iv_size = crypto_skcipher_ivsize(any_tfm(cc));

	return 0;
}

static int crypt_ctr_cipher_old(struct dm_target *ti, char *cipher_in, char *key,
				char **ivmode, char **ivopts)
{
	struct crypt_config *cc = ti->private;
	char *tmp, *cipher, *chainmode, *keycount;
	char *cipher_api = NULL;
	int ret = -EINVAL;
	char dummy;

	if (strchr(cipher_in, '(') || crypt_integrity_aead(cc)) {
		ti->error = "Bad cipher specification";
		return -EINVAL;
	}

	/*
	 * Legacy dm-crypt cipher specification
	 * cipher[:keycount]-mode-iv:ivopts
	 */
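	/*
	 * Illustrative specs of the legacy form (examples, not values used
	 * here): "aes-cbc-essiv:sha256", or "aes:64-cbc-lmk" for a 64-key
	 * multi-key setup.
	 */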
	tmp = cipher_in;
	keycount = strsep(&tmp, "-");
	cipher = strsep(&keycount, ":");

	if (!keycount)
		cc->tfms_count = 1;
	else if (sscanf(keycount, "%u%c", &cc->tfms_count, &dummy) != 1 ||
		 !is_power_of_2(cc->tfms_count)) {
		ti->error = "Bad cipher key count specification";
		return -EINVAL;
	}
	cc->key_parts = cc->tfms_count;

	chainmode = strsep(&tmp, "-");
	*ivmode = strsep(&tmp, ":");
	*ivopts = tmp;

	/*
	 * For compatibility with the original dm-crypt mapping format, if
	 * only the cipher name is supplied, use cbc-plain.
	 */
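	/*
	 * e.g. a hypothetical bare "aes" reaches this point with no chainmode
	 * and no ivmode, and is treated below as "aes-cbc-plain".
	 */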
	if (!chainmode || (!strcmp(chainmode, "plain") && !*ivmode)) {
		chainmode = "cbc";
		*ivmode = "plain";
	}

	if (strcmp(chainmode, "ecb") && !*ivmode) {
		ti->error = "IV mechanism required";
		return -EINVAL;
	}

	cipher_api = kmalloc(CRYPTO_MAX_ALG_NAME, GFP_KERNEL);
	if (!cipher_api)
		goto bad_mem;

	if (*ivmode && !strcmp(*ivmode, "essiv")) {
		if (!*ivopts) {
			ti->error = "Digest algorithm missing for ESSIV mode";
			kfree(cipher_api);
			return -EINVAL;
		}
		ret = snprintf(cipher_api, CRYPTO_MAX_ALG_NAME,
			       "essiv(%s(%s),%s)", chainmode, cipher, *ivopts);
	} else {
		ret = snprintf(cipher_api, CRYPTO_MAX_ALG_NAME,
			       "%s(%s)", chainmode, cipher);
	}
	if (ret < 0 || ret >= CRYPTO_MAX_ALG_NAME) {
		kfree(cipher_api);
		goto bad_mem;
	}

	/* Allocate cipher */
	ret = crypt_alloc_tfms(cc, cipher_api);
	if (ret < 0) {
		ti->error = "Error allocating crypto tfm";
		kfree(cipher_api);
		return ret;
	}
	kfree(cipher_api);

	return 0;
bad_mem:
	ti->error = "Cannot allocate cipher strings";
	return -ENOMEM;
}

static int crypt_ctr_cipher(struct dm_target *ti, char *cipher_in, char *key)
{
	struct crypt_config *cc = ti->private;
	char *ivmode = NULL, *ivopts = NULL;
	int ret;

	cc->cipher_string = kstrdup(cipher_in, GFP_KERNEL);
	if (!cc->cipher_string) {
		ti->error = "Cannot allocate cipher strings";
		return -ENOMEM;
	}

	if (strstarts(cipher_in, "capi:"))
		ret = crypt_ctr_cipher_new(ti, cipher_in, key, &ivmode, &ivopts);
	else
		ret = crypt_ctr_cipher_old(ti, cipher_in, key, &ivmode, &ivopts);
	if (ret)
		return ret;

	/* Initialize IV */
	ret = crypt_ctr_ivmode(ti, ivmode);
	if (ret < 0)
		return ret;

	/* Initialize and set key */
	ret = crypt_set_key(cc, key);
	if (ret < 0) {
		ti->error = "Error decoding and setting key";
		return ret;
	}

	/* Allocate IV */
	if (cc->iv_gen_ops && cc->iv_gen_ops->ctr) {
		ret = cc->iv_gen_ops->ctr(cc, ti, ivopts);
		if (ret < 0) {
			ti->error = "Error creating IV";
			return ret;
		}
	}

	/* Initialize IV (set keys for ESSIV etc) */
	if (cc->iv_gen_ops && cc->iv_gen_ops->init) {
		ret = cc->iv_gen_ops->init(cc);
		if (ret < 0) {
			ti->error = "Error initialising IV";
			return ret;
		}
	}

	/* wipe the kernel key payload copy */
	if (cc->key_string)
		memset(cc->key, 0, cc->key_size * sizeof(u8));

	return ret;
}

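/*
 * Optional feature arguments follow the five fixed table arguments as
 * "<#args> <arg>...", e.g. the illustrative tail
 * "3 allow_discards sector_size:4096 iv_large_sectors".
 */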
static int crypt_ctr_optional(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct crypt_config *cc = ti->private;
	struct dm_arg_set as;
	static const struct dm_arg _args[] = {
		{0, 8, "Invalid number of feature args"},
	};
	unsigned int opt_params, val;
	const char *opt_string, *sval;
	char dummy;
	int ret;

	/* Optional parameters */
	as.argc = argc;
	as.argv = argv;

	ret = dm_read_arg_group(_args, &as, &opt_params, &ti->error);
	if (ret)
		return ret;

	while (opt_params--) {
		opt_string = dm_shift_arg(&as);
		if (!opt_string) {
			ti->error = "Not enough feature arguments";
			return -EINVAL;
		}

		if (!strcasecmp(opt_string, "allow_discards"))
			ti->num_discard_bios = 1;

		else if (!strcasecmp(opt_string, "same_cpu_crypt"))
			set_bit(DM_CRYPT_SAME_CPU, &cc->flags);

		else if (!strcasecmp(opt_string, "submit_from_crypt_cpus"))
			set_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags);
		else if (!strcasecmp(opt_string, "no_read_workqueue"))
			set_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags);
		else if (!strcasecmp(opt_string, "no_write_workqueue"))
			set_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags);
		else if (sscanf(opt_string, "integrity:%u:", &val) == 1) {
			if (val == 0 || val > MAX_TAG_SIZE) {
				ti->error = "Invalid integrity arguments";
				return -EINVAL;
			}
			cc->on_disk_tag_size = val;
			sval = strchr(opt_string + strlen("integrity:"), ':') + 1;
			if (!strcasecmp(sval, "aead")) {
				set_bit(CRYPT_MODE_INTEGRITY_AEAD, &cc->cipher_flags);
			} else if (strcasecmp(sval, "none")) {
				ti->error = "Unknown integrity profile";
				return -EINVAL;
			}

			cc->cipher_auth = kstrdup(sval, GFP_KERNEL);
			if (!cc->cipher_auth)
				return -ENOMEM;
		} else if (sscanf(opt_string, "sector_size:%hu%c", &cc->sector_size, &dummy) == 1) {
			if (cc->sector_size < (1 << SECTOR_SHIFT) ||
			    cc->sector_size > 4096 ||
			    (cc->sector_size & (cc->sector_size - 1))) {
				ti->error = "Invalid feature value for sector_size";
				return -EINVAL;
			}
			if (ti->len & ((cc->sector_size >> SECTOR_SHIFT) - 1)) {
				ti->error = "Device size is not multiple of sector_size feature";
				return -EINVAL;
			}
			cc->sector_shift = __ffs(cc->sector_size) - SECTOR_SHIFT;
		} else if (!strcasecmp(opt_string, "iv_large_sectors"))
			set_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags);
		else {
			ti->error = "Invalid feature arguments";
			return -EINVAL;
		}
	}

	return 0;
}

#ifdef CONFIG_BLK_DEV_ZONED
static int crypt_report_zones(struct dm_target *ti,
		struct dm_report_zones_args *args, unsigned int nr_zones)
{
	struct crypt_config *cc = ti->private;

	return dm_report_zones(cc->dev->bdev, cc->start,
			cc->start + dm_target_offset(ti, args->next_sector),
			args, nr_zones);
}
#else
#define crypt_report_zones NULL
#endif

/*
 * Construct an encryption mapping:
 * <cipher> [<key>|:<key_size>:<user|logon>:<key_description>] <iv_offset> <dev_path> <start>
 */
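/*
 * An illustrative table line of this shape (example values only):
 *   0 409600 crypt aes-cbc-essiv:sha256 <64 hex chars> 0 /dev/sdb 0
 */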
static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct crypt_config *cc;
	const char *devname = dm_table_device_name(ti->table);
	int key_size;
	unsigned int align_mask;
	unsigned long long tmpll;
	int ret;
	size_t iv_size_padding, additional_req_size;
	char dummy;

	if (argc < 5) {
		ti->error = "Not enough arguments";
		return -EINVAL;
	}

	key_size = get_key_size(&argv[1]);
	if (key_size < 0) {
		ti->error = "Cannot parse key size";
		return -EINVAL;
	}

	cc = kzalloc(struct_size(cc, key, key_size), GFP_KERNEL);
	if (!cc) {
		ti->error = "Cannot allocate encryption context";
		return -ENOMEM;
	}
	cc->key_size = key_size;
	cc->sector_size = (1 << SECTOR_SHIFT);
	cc->sector_shift = 0;

	ti->private = cc;

	spin_lock(&dm_crypt_clients_lock);
	dm_crypt_clients_n++;
	crypt_calculate_pages_per_client();
	spin_unlock(&dm_crypt_clients_lock);

	ret = percpu_counter_init(&cc->n_allocated_pages, 0, GFP_KERNEL);
	if (ret < 0)
		goto bad;

	/* Optional parameters need to be read before cipher constructor */
	if (argc > 5) {
		ret = crypt_ctr_optional(ti, argc - 5, &argv[5]);
		if (ret)
			goto bad;
	}

	ret = crypt_ctr_cipher(ti, argv[0], argv[1]);
	if (ret < 0)
		goto bad;

	if (crypt_integrity_aead(cc)) {
		cc->dmreq_start = sizeof(struct aead_request);
		cc->dmreq_start += crypto_aead_reqsize(any_tfm_aead(cc));
		align_mask = crypto_aead_alignmask(any_tfm_aead(cc));
	} else {
		cc->dmreq_start = sizeof(struct skcipher_request);
		cc->dmreq_start += crypto_skcipher_reqsize(any_tfm(cc));
		align_mask = crypto_skcipher_alignmask(any_tfm(cc));
	}
	cc->dmreq_start = ALIGN(cc->dmreq_start, __alignof__(struct dm_crypt_request));

	if (align_mask < CRYPTO_MINALIGN) {
		/* Allocate the padding exactly */
		iv_size_padding = -(cc->dmreq_start + sizeof(struct dm_crypt_request))
				& align_mask;
	} else {
		/*
		 * If the cipher requires greater alignment than kmalloc
		 * alignment, we don't know the exact position of the
		 * initialization vector. We must assume worst case.
		 */
		iv_size_padding = align_mask;
	}

	/* ...| IV + padding | original IV | original sec. number | bio tag offset | */
	additional_req_size = sizeof(struct dm_crypt_request) +
		iv_size_padding + cc->iv_size +
		cc->iv_size +
		sizeof(uint64_t) +
		sizeof(unsigned int);

	ret = mempool_init_kmalloc_pool(&cc->req_pool, MIN_IOS, cc->dmreq_start + additional_req_size);
	if (ret) {
		ti->error = "Cannot allocate crypt request mempool";
		goto bad;
	}

	cc->per_bio_data_size = ti->per_io_data_size =
		ALIGN(sizeof(struct dm_crypt_io) + cc->dmreq_start + additional_req_size,
		      ARCH_KMALLOC_MINALIGN);

	ret = mempool_init(&cc->page_pool, BIO_MAX_VECS, crypt_page_alloc, crypt_page_free, cc);
	if (ret) {
		ti->error = "Cannot allocate page mempool";
		goto bad;
	}

	ret = bioset_init(&cc->bs, MIN_IOS, 0, BIOSET_NEED_BVECS);
	if (ret) {
		ti->error = "Cannot allocate crypt bioset";
		goto bad;
	}

	mutex_init(&cc->bio_alloc_lock);

	ret = -EINVAL;
	if ((sscanf(argv[2], "%llu%c", &tmpll, &dummy) != 1) ||
	    (tmpll & ((cc->sector_size >> SECTOR_SHIFT) - 1))) {
		ti->error = "Invalid iv_offset sector";
		goto bad;
	}
	cc->iv_offset = tmpll;

	ret = dm_get_device(ti, argv[3], dm_table_get_mode(ti->table), &cc->dev);
	if (ret) {
		ti->error = "Device lookup failed";
		goto bad;
	}

	ret = -EINVAL;
	if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1 || tmpll != (sector_t)tmpll) {
		ti->error = "Invalid device sector";
		goto bad;
	}
	cc->start = tmpll;

	if (bdev_is_zoned(cc->dev->bdev)) {
		/*
		 * For zoned block devices, we need to preserve the issuer write
		 * ordering. To do so, disable write workqueues and force inline
		 * encryption completion.
		 */
		set_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags);
		set_bit(DM_CRYPT_WRITE_INLINE, &cc->flags);

		/*
		 * All zone append writes to a zone of a zoned block device will
		 * have the same BIO sector, the start of the zone. When the
		 * cipher IV mode uses sector values, all data targeting a
		 * zone will be encrypted using the first sector numbers of the
		 * zone. This will not result in write errors but will
		 * cause most reads to fail as reads will use the sector values
		 * for the actual data locations, resulting in IV mismatch.
		 * To avoid this problem, ask DM core to emulate zone append
		 * operations with regular writes.
		 */
		DMDEBUG("Zone append operations will be emulated");
		ti->emulate_zone_append = true;
	}

	if (crypt_integrity_aead(cc) || cc->integrity_iv_size) {
		ret = crypt_integrity_ctr(cc, ti);
		if (ret)
			goto bad;

		cc->tag_pool_max_sectors = POOL_ENTRY_SIZE / cc->on_disk_tag_size;
		if (!cc->tag_pool_max_sectors)
			cc->tag_pool_max_sectors = 1;

		ret = mempool_init_kmalloc_pool(&cc->tag_pool, MIN_IOS,
			cc->tag_pool_max_sectors * cc->on_disk_tag_size);
		if (ret) {
			ti->error = "Cannot allocate integrity tags mempool";
			goto bad;
		}

		cc->tag_pool_max_sectors <<= cc->sector_shift;
	}

	ret = -ENOMEM;
	cc->io_queue = alloc_workqueue("kcryptd_io/%s", WQ_MEM_RECLAIM, 1, devname);
	if (!cc->io_queue) {
		ti->error = "Couldn't create kcryptd io queue";
		goto bad;
	}

	if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
		cc->crypt_queue = alloc_workqueue("kcryptd/%s", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM,
						  1, devname);
	else
		cc->crypt_queue = alloc_workqueue("kcryptd/%s",
						  WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND,
						  num_online_cpus(), devname);
	if (!cc->crypt_queue) {
		ti->error = "Couldn't create kcryptd queue";
		goto bad;
	}

	spin_lock_init(&cc->write_thread_lock);
	cc->write_tree = RB_ROOT;

	cc->write_thread = kthread_run(dmcrypt_write, cc, "dmcrypt_write/%s", devname);
	if (IS_ERR(cc->write_thread)) {
		ret = PTR_ERR(cc->write_thread);
		cc->write_thread = NULL;
		ti->error = "Couldn't spawn write thread";
		goto bad;
	}

	ti->num_flush_bios = 1;
	ti->limit_swap_bios = true;

	dm_audit_log_ctr(DM_MSG_PREFIX, ti, 1);
	return 0;

bad:
	dm_audit_log_ctr(DM_MSG_PREFIX, ti, 0);
	crypt_dtr(ti);
	return ret;
}

static int crypt_map(struct dm_target *ti, struct bio *bio)
{
	struct dm_crypt_io *io;
	struct crypt_config *cc = ti->private;

	/*
	 * If bio is REQ_PREFLUSH or REQ_OP_DISCARD, just bypass crypt queues.
	 * - for REQ_PREFLUSH device-mapper core ensures that no IO is in-flight
	 * - for REQ_OP_DISCARD caller must use flush if IO ordering matters
	 */
	if (unlikely(bio->bi_opf & REQ_PREFLUSH ||
	    bio_op(bio) == REQ_OP_DISCARD)) {
		bio_set_dev(bio, cc->dev->bdev);
		if (bio_sectors(bio))
			bio->bi_iter.bi_sector = cc->start +
				dm_target_offset(ti, bio->bi_iter.bi_sector);
		return DM_MAPIO_REMAPPED;
	}

	/*
	 * Check if bio is too large, split as needed.
	 */
	if (unlikely(bio->bi_iter.bi_size > (BIO_MAX_VECS << PAGE_SHIFT)) &&
	    (bio_data_dir(bio) == WRITE || cc->on_disk_tag_size))
		dm_accept_partial_bio(bio, ((BIO_MAX_VECS << PAGE_SHIFT) >> SECTOR_SHIFT));

	/*
	 * Ensure that bio is a multiple of internal sector encryption size
	 * and is aligned to this size as defined in IO hints.
	 */
	if (unlikely((bio->bi_iter.bi_sector & ((cc->sector_size >> SECTOR_SHIFT) - 1)) != 0))
		return DM_MAPIO_KILL;

	if (unlikely(bio->bi_iter.bi_size & (cc->sector_size - 1)))
		return DM_MAPIO_KILL;

	io = dm_per_bio_data(bio, cc->per_bio_data_size);
	crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));

	if (cc->on_disk_tag_size) {
		unsigned tag_len = cc->on_disk_tag_size * (bio_sectors(bio) >> cc->sector_shift);

		if (unlikely(tag_len > KMALLOC_MAX_SIZE) ||
		    unlikely(!(io->integrity_metadata = kmalloc(tag_len,
				GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN)))) {
			if (bio_sectors(bio) > cc->tag_pool_max_sectors)
				dm_accept_partial_bio(bio, cc->tag_pool_max_sectors);
			io->integrity_metadata = mempool_alloc(&cc->tag_pool, GFP_NOIO);
			io->integrity_metadata_from_pool = true;
		}
	}

	if (crypt_integrity_aead(cc))
		io->ctx.r.req_aead = (struct aead_request *)(io + 1);
	else
		io->ctx.r.req = (struct skcipher_request *)(io + 1);

	if (bio_data_dir(io->base_bio) == READ) {
		if (kcryptd_io_read(io, GFP_NOWAIT))
			kcryptd_queue_read(io);
	} else
		kcryptd_queue_crypt(io);

	return DM_MAPIO_SUBMITTED;
}

static void crypt_status(struct dm_target *ti, status_type_t type,
			 unsigned status_flags, char *result, unsigned maxlen)
{
	struct crypt_config *cc = ti->private;
	unsigned i, sz = 0;
	int num_feature_args = 0;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		DMEMIT("%s ", cc->cipher_string);

		if (cc->key_size > 0) {
			if (cc->key_string)
				DMEMIT(":%u:%s", cc->key_size, cc->key_string);
			else
				for (i = 0; i < cc->key_size; i++)
					DMEMIT("%02x", cc->key[i]);
		} else
			DMEMIT("-");

		DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset,
				cc->dev->name, (unsigned long long)cc->start);

		num_feature_args += !!ti->num_discard_bios;
		num_feature_args += test_bit(DM_CRYPT_SAME_CPU, &cc->flags);
		num_feature_args += test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags);
		num_feature_args += test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags);
		num_feature_args += test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags);
		num_feature_args += cc->sector_size != (1 << SECTOR_SHIFT);
		num_feature_args += test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags);
		if (cc->on_disk_tag_size)
			num_feature_args++;
		if (num_feature_args) {
			DMEMIT(" %d", num_feature_args);
			if (ti->num_discard_bios)
				DMEMIT(" allow_discards");
			if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
				DMEMIT(" same_cpu_crypt");
			if (test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags))
				DMEMIT(" submit_from_crypt_cpus");
			if (test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags))
				DMEMIT(" no_read_workqueue");
			if (test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags))
				DMEMIT(" no_write_workqueue");
			if (cc->on_disk_tag_size)
				DMEMIT(" integrity:%u:%s", cc->on_disk_tag_size, cc->cipher_auth);
			if (cc->sector_size != (1 << SECTOR_SHIFT))
				DMEMIT(" sector_size:%d", cc->sector_size);
			if (test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags))
				DMEMIT(" iv_large_sectors");
		}
		break;

	case STATUSTYPE_IMA:
		DMEMIT_TARGET_NAME_VERSION(ti->type);
		DMEMIT(",allow_discards=%c", ti->num_discard_bios ? 'y' : 'n');
		DMEMIT(",same_cpu_crypt=%c", test_bit(DM_CRYPT_SAME_CPU, &cc->flags) ? 'y' : 'n');
		DMEMIT(",submit_from_crypt_cpus=%c", test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags) ?
		       'y' : 'n');
		DMEMIT(",no_read_workqueue=%c", test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags) ?
		       'y' : 'n');
		DMEMIT(",no_write_workqueue=%c", test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags) ?
		       'y' : 'n');
		DMEMIT(",iv_large_sectors=%c", test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags) ?
		       'y' : 'n');

		if (cc->on_disk_tag_size)
			DMEMIT(",integrity_tag_size=%u,cipher_auth=%s",
			       cc->on_disk_tag_size, cc->cipher_auth);
		if (cc->sector_size != (1 << SECTOR_SHIFT))
			DMEMIT(",sector_size=%d", cc->sector_size);
		if (cc->cipher_string)
			DMEMIT(",cipher_string=%s", cc->cipher_string);

		DMEMIT(",key_size=%u", cc->key_size);
		DMEMIT(",key_parts=%u", cc->key_parts);
		DMEMIT(",key_extra_size=%u", cc->key_extra_size);
		DMEMIT(",key_mac_size=%u", cc->key_mac_size);
		DMEMIT(";");
		break;
	}
}

static void crypt_postsuspend(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	set_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}

static int crypt_preresume(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	if (!test_bit(DM_CRYPT_KEY_VALID, &cc->flags)) {
		DMERR("aborting resume - crypt key is not set.");
		return -EAGAIN;
	}

	return 0;
}

static void crypt_resume(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	clear_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}

/* Message interface
 *	key set <key>
 *	key wipe
 */
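/*
 * e.g. via dmsetup (illustrative): "dmsetup message <dev_name> 0 key wipe",
 * issued while the device is suspended.
 */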
static int crypt_message(struct dm_target *ti, unsigned argc, char **argv,
			 char *result, unsigned maxlen)
{
	struct crypt_config *cc = ti->private;
	int key_size, ret = -EINVAL;

	if (argc < 2)
		goto error;

	if (!strcasecmp(argv[0], "key")) {
		if (!test_bit(DM_CRYPT_SUSPENDED, &cc->flags)) {
			DMWARN("not suspended during key manipulation.");
			return -EINVAL;
		}
		if (argc == 3 && !strcasecmp(argv[1], "set")) {
			/* The key size may not be changed. */
			key_size = get_key_size(&argv[2]);
			if (key_size < 0 || cc->key_size != key_size) {
				memset(argv[2], '0', strlen(argv[2]));
				return -EINVAL;
			}

			ret = crypt_set_key(cc, argv[2]);
			if (ret)
				return ret;
			if (cc->iv_gen_ops && cc->iv_gen_ops->init)
				ret = cc->iv_gen_ops->init(cc);
			/* wipe the kernel key payload copy */
			if (cc->key_string)
				memset(cc->key, 0, cc->key_size * sizeof(u8));
			return ret;
		}
		if (argc == 2 && !strcasecmp(argv[1], "wipe"))
			return crypt_wipe_key(cc);
	}

error:
	DMWARN("unrecognised message received.");
	return -EINVAL;
}

static int crypt_iterate_devices(struct dm_target *ti,
				 iterate_devices_callout_fn fn, void *data)
{
	struct crypt_config *cc = ti->private;

	return fn(ti, cc->dev, cc->start, ti->len, data);
}

static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	struct crypt_config *cc = ti->private;

	/*
	 * Unfortunate constraint that is required to avoid the potential
	 * for exceeding underlying device's max_segments limits -- due to
	 * crypt_alloc_buffer() possibly allocating pages for the encryption
	 * bio that are not as physically contiguous as the original bio.
	 */
	limits->max_segment_size = PAGE_SIZE;

	limits->logical_block_size =
		max_t(unsigned, limits->logical_block_size, cc->sector_size);
	limits->physical_block_size =
		max_t(unsigned, limits->physical_block_size, cc->sector_size);
	limits->io_min = max_t(unsigned, limits->io_min, cc->sector_size);
}

static struct target_type crypt_target = {
	.name   = "crypt",
	.version = {1, 23, 0},
	.module = THIS_MODULE,
	.ctr    = crypt_ctr,
	.dtr    = crypt_dtr,
	.features = DM_TARGET_ZONED_HM,
	.report_zones = crypt_report_zones,
	.map    = crypt_map,
	.status = crypt_status,
	.postsuspend = crypt_postsuspend,
	.preresume = crypt_preresume,
	.resume = crypt_resume,
	.message = crypt_message,
	.iterate_devices = crypt_iterate_devices,
	.io_hints = crypt_io_hints,
};

static int __init dm_crypt_init(void)
{
	int r;

	r = dm_register_target(&crypt_target);
	if (r < 0)
		DMERR("register failed %d", r);

	return r;
}

static void __exit dm_crypt_exit(void)
{
	dm_unregister_target(&crypt_target);
}

module_init(dm_crypt_init);
module_exit(dm_crypt_exit);

MODULE_AUTHOR("Jana Saout <jana@saout.de>");
MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption");
MODULE_LICENSE("GPL");