/*
 * Copyright (C) 2016-2017 Red Hat, Inc. All rights reserved.
 * Copyright (C) 2016-2017 Milan Broz
 * Copyright (C) 2016-2017 Mikulas Patocka
 *
 * This file is released under the GPL.
 */

#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/vmalloc.h>
#include <linux/sort.h>
#include <linux/rbtree.h>
#include <linux/delay.h>
#include <linux/random.h>
#include <crypto/hash.h>
#include <crypto/skcipher.h>
#include <linux/async_tx.h>
#include <linux/dm-bufio.h>

#define DM_MSG_PREFIX "integrity"

#define DEFAULT_INTERLEAVE_SECTORS	32768
#define DEFAULT_JOURNAL_SIZE_FACTOR	7
#define DEFAULT_BUFFER_SECTORS		128
#define DEFAULT_JOURNAL_WATERMARK	50
#define DEFAULT_SYNC_MSEC		10000
#define DEFAULT_MAX_JOURNAL_SECTORS	131072
#define MIN_LOG2_INTERLEAVE_SECTORS	3
#define MAX_LOG2_INTERLEAVE_SECTORS	31
#define METADATA_WORKQUEUE_MAX_ACTIVE	16
#define RECALC_SECTORS			8192
#define RECALC_WRITE_SUPER		16

/*
 * Warning - DEBUG_PRINT prints security-sensitive data to the log,
 * so it should not be enabled in the official kernel
 */
//#define DEBUG_PRINT
//#define INTERNAL_VERIFY

/*
 * On disk structures
 */

#define SB_MAGIC			"integrt"
#define SB_VERSION_1			1
#define SB_VERSION_2			2
#define SB_SECTORS			8
#define MAX_SECTORS_PER_BLOCK		8

struct superblock {
	__u8 magic[8];
	__u8 version;
	__u8 log2_interleave_sectors;
	__u16 integrity_tag_size;
	__u32 journal_sections;
	__u64 provided_data_sectors;	/* userspace uses this value */
	__u32 flags;
	__u8 log2_sectors_per_block;
	__u8 pad[3];
	__u64 recalc_sector;
};
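
/*
 * A rough sketch of the on-disk layout that these fields describe, for the
 * interleaved case (no separate metadata device):
 *
 *	| superblock (8 sectors) | journal | tags | data | tags | data | ...
 *
 * Each "tags" run holds the integrity tags for the 2^log2_interleave_sectors
 * data sectors that follow it; get_data_sector() and
 * get_metadata_sector_and_offset() below implement the exact arithmetic.
 */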

#define SB_FLAG_HAVE_JOURNAL_MAC	0x1
#define SB_FLAG_RECALCULATING		0x2

#define	JOURNAL_ENTRY_ROUNDUP		8

typedef __u64 commit_id_t;
#define JOURNAL_MAC_PER_SECTOR		8

struct journal_entry {
	union {
		struct {
			__u32 sector_lo;
			__u32 sector_hi;
		} s;
		__u64 sector;
	} u;
	commit_id_t last_bytes[0];
	/* __u8 tag[0]; */
};

#define journal_entry_tag(ic, je)		((__u8 *)&(je)->last_bytes[(ic)->sectors_per_block])

#if BITS_PER_LONG == 64
#define journal_entry_set_sector(je, x)		do { smp_wmb(); WRITE_ONCE((je)->u.sector, cpu_to_le64(x)); } while (0)
#define journal_entry_get_sector(je)		le64_to_cpu((je)->u.sector)
#elif defined(CONFIG_LBDAF)
#define journal_entry_set_sector(je, x)		do { (je)->u.s.sector_lo = cpu_to_le32(x); smp_wmb(); WRITE_ONCE((je)->u.s.sector_hi, cpu_to_le32((x) >> 32)); } while (0)
#define journal_entry_get_sector(je)		le64_to_cpu((je)->u.sector)
#else
#define journal_entry_set_sector(je, x)		do { (je)->u.s.sector_lo = cpu_to_le32(x); smp_wmb(); WRITE_ONCE((je)->u.s.sector_hi, cpu_to_le32(0)); } while (0)
#define journal_entry_get_sector(je)		le32_to_cpu((je)->u.s.sector_lo)
#endif
#define journal_entry_is_unused(je)		((je)->u.s.sector_hi == cpu_to_le32(-1))
#define journal_entry_set_unused(je)		do { ((je)->u.s.sector_hi = cpu_to_le32(-1)); } while (0)
#define journal_entry_is_inprogress(je)		((je)->u.s.sector_hi == cpu_to_le32(-2))
#define journal_entry_set_inprogress(je)	do { ((je)->u.s.sector_hi = cpu_to_le32(-2)); } while (0)
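
/*
 * sector_hi doubles as the entry state: cpu_to_le32(-1) marks an unused
 * entry and cpu_to_le32(-2) an entry that is being filled in. This is why
 * journal_entry_set_sector() publishes the sector with smp_wmb() +
 * WRITE_ONCE(): a reader that sees a valid sector_hi (paired with
 * smp_rmb()) also sees the previously written entry payload.
 */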

#define JOURNAL_BLOCK_SECTORS		8
#define JOURNAL_SECTOR_DATA		((1 << SECTOR_SHIFT) - sizeof(commit_id_t))
#define JOURNAL_MAC_SIZE		(JOURNAL_MAC_PER_SECTOR * JOURNAL_BLOCK_SECTORS)

struct journal_sector {
	__u8 entries[JOURNAL_SECTOR_DATA - JOURNAL_MAC_PER_SECTOR];
	__u8 mac[JOURNAL_MAC_PER_SECTOR];
	commit_id_t commit_id;
};

#define MAX_TAG_SIZE			(JOURNAL_SECTOR_DATA - JOURNAL_MAC_PER_SECTOR - offsetof(struct journal_entry, last_bytes[MAX_SECTORS_PER_BLOCK]))

#define METADATA_PADDING_SECTORS	8

#define N_COMMIT_IDS			4

static unsigned char prev_commit_seq(unsigned char seq)
{
	return (seq + N_COMMIT_IDS - 1) % N_COMMIT_IDS;
}

static unsigned char next_commit_seq(unsigned char seq)
{
	return (seq + 1) % N_COMMIT_IDS;
}

/*
 * In-memory structures
 */

struct journal_node {
	struct rb_node node;
	sector_t sector;
};

struct alg_spec {
	char *alg_string;
	char *key_string;
	__u8 *key;
	unsigned key_size;
};

struct dm_integrity_c {
	struct dm_dev *dev;
	struct dm_dev *meta_dev;
	unsigned tag_size;
	__s8 log2_tag_size;
	sector_t start;
	mempool_t journal_io_mempool;
	struct dm_io_client *io;
	struct dm_bufio_client *bufio;
	struct workqueue_struct *metadata_wq;
	struct superblock *sb;
	unsigned journal_pages;
	struct page_list *journal;
	struct page_list *journal_io;
	struct page_list *journal_xor;

	struct crypto_skcipher *journal_crypt;
	struct scatterlist **journal_scatterlist;
	struct scatterlist **journal_io_scatterlist;
	struct skcipher_request **sk_requests;

	struct crypto_shash *journal_mac;

	struct journal_node *journal_tree;
	struct rb_root journal_tree_root;

	sector_t provided_data_sectors;

	unsigned short journal_entry_size;
	unsigned char journal_entries_per_sector;
	unsigned char journal_section_entries;
	unsigned short journal_section_sectors;
	unsigned journal_sections;
	unsigned journal_entries;
	sector_t data_device_sectors;
	sector_t meta_device_sectors;
	unsigned initial_sectors;
	unsigned metadata_run;
	__s8 log2_metadata_run;
	__u8 log2_buffer_sectors;
	__u8 sectors_per_block;

	unsigned char mode;
	int suspending;

	int failed;

	struct crypto_shash *internal_hash;

	/* these variables are locked with endio_wait.lock */
	struct rb_root in_progress;
	struct list_head wait_list;
	wait_queue_head_t endio_wait;
	struct workqueue_struct *wait_wq;

	unsigned char commit_seq;
	commit_id_t commit_ids[N_COMMIT_IDS];

	unsigned committed_section;
	unsigned n_committed_sections;

	unsigned uncommitted_section;
	unsigned n_uncommitted_sections;

	unsigned free_section;
	unsigned char free_section_entry;
	unsigned free_sectors;

	unsigned free_sectors_threshold;

	struct workqueue_struct *commit_wq;
	struct work_struct commit_work;

	struct workqueue_struct *writer_wq;
	struct work_struct writer_work;

	struct workqueue_struct *recalc_wq;
	struct work_struct recalc_work;
	u8 *recalc_buffer;
	u8 *recalc_tags;

	struct bio_list flush_bio_list;

	unsigned long autocommit_jiffies;
	struct timer_list autocommit_timer;
	unsigned autocommit_msec;

	wait_queue_head_t copy_to_journal_wait;

	struct completion crypto_backoff;

	bool journal_uptodate;
	bool just_formatted;

	struct alg_spec internal_hash_alg;
	struct alg_spec journal_crypt_alg;
	struct alg_spec journal_mac_alg;

	atomic64_t number_of_mismatches;
};

struct dm_integrity_range {
	sector_t logical_sector;
	unsigned n_sectors;
	bool waiting;
	union {
		struct rb_node node;
		struct {
			struct task_struct *task;
			struct list_head wait_entry;
		};
	};
};

struct dm_integrity_io {
	struct work_struct work;

	struct dm_integrity_c *ic;
	bool write;
	bool fua;

	struct dm_integrity_range range;

	sector_t metadata_block;
	unsigned metadata_offset;

	atomic_t in_flight;
	blk_status_t bi_status;

	struct completion *completion;

	struct gendisk *orig_bi_disk;
	u8 orig_bi_partno;
	bio_end_io_t *orig_bi_end_io;
	struct bio_integrity_payload *orig_bi_integrity;
	struct bvec_iter orig_bi_iter;
};

struct journal_completion {
	struct dm_integrity_c *ic;
	atomic_t in_flight;
	struct completion comp;
};

struct journal_io {
	struct dm_integrity_range range;
	struct journal_completion *comp;
};

static struct kmem_cache *journal_io_cache;

#define JOURNAL_IO_MEMPOOL	32

#ifdef DEBUG_PRINT
#define DEBUG_print(x, ...)	printk(KERN_DEBUG x, ##__VA_ARGS__)
static void __DEBUG_bytes(__u8 *bytes, size_t len, const char *msg, ...)
{
	va_list args;
	va_start(args, msg);
	vprintk(msg, args);
	va_end(args);
	if (len)
		pr_cont(":");
	while (len) {
		pr_cont(" %02x", *bytes);
		bytes++;
		len--;
	}
	pr_cont("\n");
}
#define DEBUG_bytes(bytes, len, msg, ...)	__DEBUG_bytes(bytes, len, KERN_DEBUG msg, ##__VA_ARGS__)
#else
#define DEBUG_print(x, ...)			do { } while (0)
#define DEBUG_bytes(bytes, len, msg, ...)	do { } while (0)
#endif

/*
 * DM Integrity profile; protection is performed by the layer above (dm-crypt)
 */
static const struct blk_integrity_profile dm_integrity_profile = {
	.name			= "DM-DIF-EXT-TAG",
	.generate_fn		= NULL,
	.verify_fn		= NULL,
};

static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map);
static void integrity_bio_wait(struct work_struct *w);
static void dm_integrity_dtr(struct dm_target *ti);

static void dm_integrity_io_error(struct dm_integrity_c *ic, const char *msg, int err)
{
	if (err == -EILSEQ)
		atomic64_inc(&ic->number_of_mismatches);
	if (!cmpxchg(&ic->failed, 0, err))
		DMERR("Error on %s: %d", msg, err);
}

static int dm_integrity_failed(struct dm_integrity_c *ic)
{
	return READ_ONCE(ic->failed);
}

static commit_id_t dm_integrity_commit_id(struct dm_integrity_c *ic, unsigned i,
					  unsigned j, unsigned char seq)
{
	/*
	 * Xor the number with the section and sector, so that if a piece of
	 * journal is written at the wrong place, it is detected.
	 */
	return ic->commit_ids[seq] ^ cpu_to_le64(((__u64)i << 32) ^ j);
}
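
/*
 * For example (a sketch, not normative): if commit_ids[seq] == C, sector j
 * of section i stores C ^ (((__u64)i << 32) ^ j) on disk, so a journal
 * block that ends up at some other (i', j') fails the commit_id check
 * during replay.
 */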

static void get_area_and_offset(struct dm_integrity_c *ic, sector_t data_sector,
				sector_t *area, sector_t *offset)
{
	if (!ic->meta_dev) {
		__u8 log2_interleave_sectors = ic->sb->log2_interleave_sectors;
		*area = data_sector >> log2_interleave_sectors;
		*offset = (unsigned)data_sector & ((1U << log2_interleave_sectors) - 1);
	} else {
		*area = 0;
		*offset = data_sector;
	}
}

#define sector_to_block(ic, n)						\
do {									\
	BUG_ON((n) & (unsigned)((ic)->sectors_per_block - 1));		\
	(n) >>= (ic)->sb->log2_sectors_per_block;			\
} while (0)

static __u64 get_metadata_sector_and_offset(struct dm_integrity_c *ic, sector_t area,
					    sector_t offset, unsigned *metadata_offset)
{
	__u64 ms;
	unsigned mo;

	ms = area << ic->sb->log2_interleave_sectors;
	if (likely(ic->log2_metadata_run >= 0))
		ms += area << ic->log2_metadata_run;
	else
		ms += area * ic->metadata_run;
	ms >>= ic->log2_buffer_sectors;

	sector_to_block(ic, offset);

	if (likely(ic->log2_tag_size >= 0)) {
		ms += offset >> (SECTOR_SHIFT + ic->log2_buffer_sectors - ic->log2_tag_size);
		mo = (offset << ic->log2_tag_size) & ((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - 1);
	} else {
		ms += (__u64)offset * ic->tag_size >> (SECTOR_SHIFT + ic->log2_buffer_sectors);
		mo = (offset * ic->tag_size) & ((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - 1);
	}
	*metadata_offset = mo;
	return ms;
}

static sector_t get_data_sector(struct dm_integrity_c *ic, sector_t area, sector_t offset)
{
	sector_t result;

	if (ic->meta_dev)
		return offset;

	result = area << ic->sb->log2_interleave_sectors;
	if (likely(ic->log2_metadata_run >= 0))
		result += (area + 1) << ic->log2_metadata_run;
	else
		result += (area + 1) * ic->metadata_run;

	result += (sector_t)ic->initial_sectors + offset;
	result += ic->start;

	return result;
}
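
/*
 * A worked form of the interleaved layout (assuming log2_metadata_run >= 0
 * and no meta_dev):
 *
 *	get_data_sector(area, offset) =
 *		ic->start + ic->initial_sectors
 *		+ area * (2^log2_interleave_sectors + 2^log2_metadata_run)
 *		+ 2^log2_metadata_run + offset
 *
 * i.e. every data area is preceded by the metadata run holding its tags,
 * and get_metadata_sector_and_offset() addresses that run in buffer-sized
 * units of 2^log2_buffer_sectors sectors, returning the byte offset within
 * the buffer separately.
 */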

static void wraparound_section(struct dm_integrity_c *ic, unsigned *sec_ptr)
{
	if (unlikely(*sec_ptr >= ic->journal_sections))
		*sec_ptr -= ic->journal_sections;
}

static void sb_set_version(struct dm_integrity_c *ic)
{
	if (ic->meta_dev || ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
		ic->sb->version = SB_VERSION_2;
	else
		ic->sb->version = SB_VERSION_1;
}

static int sync_rw_sb(struct dm_integrity_c *ic, int op, int op_flags)
{
	struct dm_io_request io_req;
	struct dm_io_region io_loc;

	io_req.bi_op = op;
	io_req.bi_op_flags = op_flags;
	io_req.mem.type = DM_IO_KMEM;
	io_req.mem.ptr.addr = ic->sb;
	io_req.notify.fn = NULL;
	io_req.client = ic->io;
	io_loc.bdev = ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev;
	io_loc.sector = ic->start;
	io_loc.count = SB_SECTORS;

	return dm_io(&io_req, 1, &io_loc, NULL);
}

static void access_journal_check(struct dm_integrity_c *ic, unsigned section, unsigned offset,
				 bool e, const char *function)
{
#if defined(CONFIG_DM_DEBUG) || defined(INTERNAL_VERIFY)
	unsigned limit = e ? ic->journal_section_entries : ic->journal_section_sectors;

	if (unlikely(section >= ic->journal_sections) ||
	    unlikely(offset >= limit)) {
		printk(KERN_CRIT "%s: invalid access at (%u,%u), limit (%u,%u)\n",
		       function, section, offset, ic->journal_sections, limit);
		BUG();
	}
#endif
}

static void page_list_location(struct dm_integrity_c *ic, unsigned section, unsigned offset,
			       unsigned *pl_index, unsigned *pl_offset)
{
	unsigned sector;

	access_journal_check(ic, section, offset, false, "page_list_location");

	sector = section * ic->journal_section_sectors + offset;

	*pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
	*pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);
}

static struct journal_sector *access_page_list(struct dm_integrity_c *ic, struct page_list *pl,
					       unsigned section, unsigned offset, unsigned *n_sectors)
{
	unsigned pl_index, pl_offset;
	char *va;

	page_list_location(ic, section, offset, &pl_index, &pl_offset);

	if (n_sectors)
		*n_sectors = (PAGE_SIZE - pl_offset) >> SECTOR_SHIFT;

	va = lowmem_page_address(pl[pl_index].page);

	return (struct journal_sector *)(va + pl_offset);
}

static struct journal_sector *access_journal(struct dm_integrity_c *ic, unsigned section, unsigned offset)
{
	return access_page_list(ic, ic->journal, section, offset, NULL);
}

static struct journal_entry *access_journal_entry(struct dm_integrity_c *ic, unsigned section, unsigned n)
{
	unsigned rel_sector, offset;
	struct journal_sector *js;

	access_journal_check(ic, section, n, true, "access_journal_entry");

	rel_sector = n % JOURNAL_BLOCK_SECTORS;
	offset = n / JOURNAL_BLOCK_SECTORS;

	js = access_journal(ic, section, rel_sector);
	return (struct journal_entry *)((char *)js + offset * ic->journal_entry_size);
}

static struct journal_sector *access_journal_data(struct dm_integrity_c *ic, unsigned section, unsigned n)
{
	n <<= ic->sb->log2_sectors_per_block;

	n += JOURNAL_BLOCK_SECTORS;

	access_journal_check(ic, section, n, false, "access_journal_data");

	return access_journal(ic, section, n);
}

static void section_mac(struct dm_integrity_c *ic, unsigned section, __u8 result[JOURNAL_MAC_SIZE])
{
	SHASH_DESC_ON_STACK(desc, ic->journal_mac);
	int r;
	unsigned j, size;

	desc->tfm = ic->journal_mac;
	desc->flags = 0;

	r = crypto_shash_init(desc);
	if (unlikely(r)) {
		dm_integrity_io_error(ic, "crypto_shash_init", r);
		goto err;
	}

	for (j = 0; j < ic->journal_section_entries; j++) {
		struct journal_entry *je = access_journal_entry(ic, section, j);
		r = crypto_shash_update(desc, (__u8 *)&je->u.sector, sizeof je->u.sector);
		if (unlikely(r)) {
			dm_integrity_io_error(ic, "crypto_shash_update", r);
			goto err;
		}
	}

	size = crypto_shash_digestsize(ic->journal_mac);

	if (likely(size <= JOURNAL_MAC_SIZE)) {
		r = crypto_shash_final(desc, result);
		if (unlikely(r)) {
			dm_integrity_io_error(ic, "crypto_shash_final", r);
			goto err;
		}
		memset(result + size, 0, JOURNAL_MAC_SIZE - size);
	} else {
		__u8 digest[HASH_MAX_DIGESTSIZE];

		if (WARN_ON(size > sizeof(digest))) {
			dm_integrity_io_error(ic, "digest_size", -EINVAL);
			goto err;
		}
		r = crypto_shash_final(desc, digest);
		if (unlikely(r)) {
			dm_integrity_io_error(ic, "crypto_shash_final", r);
			goto err;
		}
		memcpy(result, digest, JOURNAL_MAC_SIZE);
	}

	return;
err:
	memset(result, 0, JOURNAL_MAC_SIZE);
}
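
/*
 * Note that the section MAC covers only the sector numbers of the section's
 * journal entries, not the data payload; rw_section_mac() below stores or
 * verifies it split across the section's first JOURNAL_BLOCK_SECTORS
 * sectors, JOURNAL_MAC_PER_SECTOR bytes in each.
 */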

static void rw_section_mac(struct dm_integrity_c *ic, unsigned section, bool wr)
{
	__u8 result[JOURNAL_MAC_SIZE];
	unsigned j;

	if (!ic->journal_mac)
		return;

	section_mac(ic, section, result);

	for (j = 0; j < JOURNAL_BLOCK_SECTORS; j++) {
		struct journal_sector *js = access_journal(ic, section, j);

		if (likely(wr))
			memcpy(&js->mac, result + (j * JOURNAL_MAC_PER_SECTOR), JOURNAL_MAC_PER_SECTOR);
		else {
			if (memcmp(&js->mac, result + (j * JOURNAL_MAC_PER_SECTOR), JOURNAL_MAC_PER_SECTOR))
				dm_integrity_io_error(ic, "journal mac", -EILSEQ);
		}
	}
}

static void complete_journal_op(void *context)
{
	struct journal_completion *comp = context;
	BUG_ON(!atomic_read(&comp->in_flight));
	if (likely(atomic_dec_and_test(&comp->in_flight)))
		complete(&comp->comp);
}

static void xor_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section,
			unsigned n_sections, struct journal_completion *comp)
{
	struct async_submit_ctl submit;
	size_t n_bytes = (size_t)(n_sections * ic->journal_section_sectors) << SECTOR_SHIFT;
	unsigned pl_index, pl_offset, section_index;
	struct page_list *source_pl, *target_pl;

	if (likely(encrypt)) {
		source_pl = ic->journal;
		target_pl = ic->journal_io;
	} else {
		source_pl = ic->journal_io;
		target_pl = ic->journal;
	}

	page_list_location(ic, section, 0, &pl_index, &pl_offset);

	atomic_add(roundup(pl_offset + n_bytes, PAGE_SIZE) >> PAGE_SHIFT, &comp->in_flight);

	init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST, NULL, complete_journal_op, comp, NULL);

	section_index = pl_index;

	do {
		size_t this_step;
		struct page *src_pages[2];
		struct page *dst_page;

		while (unlikely(pl_index == section_index)) {
			unsigned dummy;
			if (likely(encrypt))
				rw_section_mac(ic, section, true);
			section++;
			n_sections--;
			if (!n_sections)
				break;
			page_list_location(ic, section, 0, &section_index, &dummy);
		}

		this_step = min(n_bytes, (size_t)PAGE_SIZE - pl_offset);
		dst_page = target_pl[pl_index].page;
		src_pages[0] = source_pl[pl_index].page;
		src_pages[1] = ic->journal_xor[pl_index].page;

		async_xor(dst_page, src_pages, pl_offset, 2, this_step, &submit);

		pl_index++;
		pl_offset = 0;
		n_bytes -= this_step;
	} while (n_bytes);

	BUG_ON(n_sections);

	async_tx_issue_pending_all();
}
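
/*
 * A note on this XOR path: ic->journal_xor is a page list prepared by the
 * constructor (not part of this excerpt); XORing it into the journal pages
 * amounts to encryption with a precomputed keystream, which is why the
 * work can be handed to the async_tx XOR engines instead of the skcipher.
 */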

static void complete_journal_encrypt(struct crypto_async_request *req, int err)
{
	struct journal_completion *comp = req->data;
	if (unlikely(err)) {
		if (likely(err == -EINPROGRESS)) {
			complete(&comp->ic->crypto_backoff);
			return;
		}
		dm_integrity_io_error(comp->ic, "asynchronous encrypt", err);
	}
	complete_journal_op(comp);
}

static bool do_crypt(bool encrypt, struct skcipher_request *req, struct journal_completion *comp)
{
	int r;
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      complete_journal_encrypt, comp);
	if (likely(encrypt))
		r = crypto_skcipher_encrypt(req);
	else
		r = crypto_skcipher_decrypt(req);
	if (likely(!r))
		return false;
	if (likely(r == -EINPROGRESS))
		return true;
	if (likely(r == -EBUSY)) {
		wait_for_completion(&comp->ic->crypto_backoff);
		reinit_completion(&comp->ic->crypto_backoff);
		return true;
	}
	dm_integrity_io_error(comp->ic, "encrypt", r);
	return false;
}

static void crypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section,
			  unsigned n_sections, struct journal_completion *comp)
{
	struct scatterlist **source_sg;
	struct scatterlist **target_sg;

	atomic_add(2, &comp->in_flight);

	if (likely(encrypt)) {
		source_sg = ic->journal_scatterlist;
		target_sg = ic->journal_io_scatterlist;
	} else {
		source_sg = ic->journal_io_scatterlist;
		target_sg = ic->journal_scatterlist;
	}

	do {
		struct skcipher_request *req;
		unsigned ivsize;
		char *iv;

		if (likely(encrypt))
			rw_section_mac(ic, section, true);

		req = ic->sk_requests[section];
		ivsize = crypto_skcipher_ivsize(ic->journal_crypt);
		iv = req->iv;

		memcpy(iv, iv + ivsize, ivsize);

		req->src = source_sg[section];
		req->dst = target_sg[section];

		if (unlikely(do_crypt(encrypt, req, comp)))
			atomic_inc(&comp->in_flight);

		section++;
		n_sections--;
	} while (n_sections);

	atomic_dec(&comp->in_flight);
	complete_journal_op(comp);
}

static void encrypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section,
			    unsigned n_sections, struct journal_completion *comp)
{
	if (ic->journal_xor)
		return xor_journal(ic, encrypt, section, n_sections, comp);
	else
		return crypt_journal(ic, encrypt, section, n_sections, comp);
}

static void complete_journal_io(unsigned long error, void *context)
{
	struct journal_completion *comp = context;
	if (unlikely(error != 0))
		dm_integrity_io_error(comp->ic, "writing journal", -EIO);
	complete_journal_op(comp);
}

static void rw_journal(struct dm_integrity_c *ic, int op, int op_flags, unsigned section,
		       unsigned n_sections, struct journal_completion *comp)
{
	struct dm_io_request io_req;
	struct dm_io_region io_loc;
	unsigned sector, n_sectors, pl_index, pl_offset;
	int r;

	if (unlikely(dm_integrity_failed(ic))) {
		if (comp)
			complete_journal_io(-1UL, comp);
		return;
	}

	sector = section * ic->journal_section_sectors;
	n_sectors = n_sections * ic->journal_section_sectors;

	pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
	pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);

	io_req.bi_op = op;
	io_req.bi_op_flags = op_flags;
	io_req.mem.type = DM_IO_PAGE_LIST;
	if (ic->journal_io)
		io_req.mem.ptr.pl = &ic->journal_io[pl_index];
	else
		io_req.mem.ptr.pl = &ic->journal[pl_index];
	io_req.mem.offset = pl_offset;
	if (likely(comp != NULL)) {
		io_req.notify.fn = complete_journal_io;
		io_req.notify.context = comp;
	} else {
		io_req.notify.fn = NULL;
	}
	io_req.client = ic->io;
	io_loc.bdev = ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev;
	io_loc.sector = ic->start + SB_SECTORS + sector;
	io_loc.count = n_sectors;

	r = dm_io(&io_req, 1, &io_loc, NULL);
	if (unlikely(r)) {
		dm_integrity_io_error(ic, op == REQ_OP_READ ? "reading journal" : "writing journal", r);
		if (comp) {
			WARN_ONCE(1, "asynchronous dm_io failed: %d", r);
			complete_journal_io(-1UL, comp);
		}
	}
}

static void write_journal(struct dm_integrity_c *ic, unsigned commit_start, unsigned commit_sections)
{
	struct journal_completion io_comp;
	struct journal_completion crypt_comp_1;
	struct journal_completion crypt_comp_2;
	unsigned i;

	io_comp.ic = ic;
	init_completion(&io_comp.comp);

	if (commit_start + commit_sections <= ic->journal_sections) {
		io_comp.in_flight = (atomic_t)ATOMIC_INIT(1);
		if (ic->journal_io) {
			crypt_comp_1.ic = ic;
			init_completion(&crypt_comp_1.comp);
			crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0);
			encrypt_journal(ic, true, commit_start, commit_sections, &crypt_comp_1);
			wait_for_completion_io(&crypt_comp_1.comp);
		} else {
			for (i = 0; i < commit_sections; i++)
				rw_section_mac(ic, commit_start + i, true);
		}
		rw_journal(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, commit_start,
			   commit_sections, &io_comp);
	} else {
		unsigned to_end;
		io_comp.in_flight = (atomic_t)ATOMIC_INIT(2);
		to_end = ic->journal_sections - commit_start;
		if (ic->journal_io) {
			crypt_comp_1.ic = ic;
			init_completion(&crypt_comp_1.comp);
			crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0);
			encrypt_journal(ic, true, commit_start, to_end, &crypt_comp_1);
			if (try_wait_for_completion(&crypt_comp_1.comp)) {
				rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp);
				reinit_completion(&crypt_comp_1.comp);
				crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0);
				encrypt_journal(ic, true, 0, commit_sections - to_end, &crypt_comp_1);
				wait_for_completion_io(&crypt_comp_1.comp);
			} else {
				crypt_comp_2.ic = ic;
				init_completion(&crypt_comp_2.comp);
				crypt_comp_2.in_flight = (atomic_t)ATOMIC_INIT(0);
				encrypt_journal(ic, true, 0, commit_sections - to_end, &crypt_comp_2);
				wait_for_completion_io(&crypt_comp_1.comp);
				rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp);
				wait_for_completion_io(&crypt_comp_2.comp);
			}
		} else {
			for (i = 0; i < to_end; i++)
				rw_section_mac(ic, commit_start + i, true);
			rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp);
			for (i = 0; i < commit_sections - to_end; i++)
				rw_section_mac(ic, i, true);
		}
		rw_journal(ic, REQ_OP_WRITE, REQ_FUA, 0, commit_sections - to_end, &io_comp);
	}

	wait_for_completion_io(&io_comp.comp);
}
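
/*
 * When the committed range wraps around the end of the circular journal,
 * the branch above splits it into two dm-io requests (the tail of the
 * journal, then the wrapped head); io_comp starts with in_flight == 2 so
 * the completion fires only after both writes finish.
 */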

static void copy_from_journal(struct dm_integrity_c *ic, unsigned section, unsigned offset,
			      unsigned n_sectors, sector_t target, io_notify_fn fn, void *data)
{
	struct dm_io_request io_req;
	struct dm_io_region io_loc;
	int r;
	unsigned sector, pl_index, pl_offset;

	BUG_ON((target | n_sectors | offset) & (unsigned)(ic->sectors_per_block - 1));

	if (unlikely(dm_integrity_failed(ic))) {
		fn(-1UL, data);
		return;
	}

	sector = section * ic->journal_section_sectors + JOURNAL_BLOCK_SECTORS + offset;

	pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
	pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);

	io_req.bi_op = REQ_OP_WRITE;
	io_req.bi_op_flags = 0;
	io_req.mem.type = DM_IO_PAGE_LIST;
	io_req.mem.ptr.pl = &ic->journal[pl_index];
	io_req.mem.offset = pl_offset;
	io_req.notify.fn = fn;
	io_req.notify.context = data;
	io_req.client = ic->io;
	io_loc.bdev = ic->dev->bdev;
	io_loc.sector = target;
	io_loc.count = n_sectors;

	r = dm_io(&io_req, 1, &io_loc, NULL);
	if (unlikely(r)) {
		WARN_ONCE(1, "asynchronous dm_io failed: %d", r);
		fn(-1UL, data);
	}
}

static bool ranges_overlap(struct dm_integrity_range *range1, struct dm_integrity_range *range2)
{
	return range1->logical_sector < range2->logical_sector + range2->n_sectors &&
	       range1->logical_sector + range1->n_sectors > range2->logical_sector;
}

static bool add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range, bool check_waiting)
{
	struct rb_node **n = &ic->in_progress.rb_node;
	struct rb_node *parent;

	BUG_ON((new_range->logical_sector | new_range->n_sectors) & (unsigned)(ic->sectors_per_block - 1));

	if (likely(check_waiting)) {
		struct dm_integrity_range *range;
		list_for_each_entry(range, &ic->wait_list, wait_entry) {
			if (unlikely(ranges_overlap(range, new_range)))
				return false;
		}
	}

	parent = NULL;

	while (*n) {
		struct dm_integrity_range *range = container_of(*n, struct dm_integrity_range, node);

		parent = *n;
		if (new_range->logical_sector + new_range->n_sectors <= range->logical_sector) {
			n = &range->node.rb_left;
		} else if (new_range->logical_sector >= range->logical_sector + range->n_sectors) {
			n = &range->node.rb_right;
		} else {
			return false;
		}
	}

	rb_link_node(&new_range->node, parent, n);
	rb_insert_color(&new_range->node, &ic->in_progress);

	return true;
}

static void remove_range_unlocked(struct dm_integrity_c *ic, struct dm_integrity_range *range)
{
	rb_erase(&range->node, &ic->in_progress);
	while (unlikely(!list_empty(&ic->wait_list))) {
		struct dm_integrity_range *last_range =
			list_first_entry(&ic->wait_list, struct dm_integrity_range, wait_entry);
		struct task_struct *last_range_task;
		last_range_task = last_range->task;
		list_del(&last_range->wait_entry);
		if (!add_new_range(ic, last_range, false)) {
			last_range->task = last_range_task;
			list_add(&last_range->wait_entry, &ic->wait_list);
			break;
		}
		last_range->waiting = false;
		wake_up_process(last_range_task);
	}
}

static void remove_range(struct dm_integrity_c *ic, struct dm_integrity_range *range)
{
	unsigned long flags;

	spin_lock_irqsave(&ic->endio_wait.lock, flags);
	remove_range_unlocked(ic, range);
	spin_unlock_irqrestore(&ic->endio_wait.lock, flags);
}

static void wait_and_add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range)
{
	new_range->waiting = true;
	list_add_tail(&new_range->wait_entry, &ic->wait_list);
	new_range->task = current;
	do {
		__set_current_state(TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&ic->endio_wait.lock);
		io_schedule();
		spin_lock_irq(&ic->endio_wait.lock);
	} while (unlikely(new_range->waiting));
}
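
/*
 * Range locking in brief: in-flight ranges live in the in_progress rbtree;
 * a request that overlaps an in-flight range or a queued waiter parks
 * itself on wait_list, and remove_range_unlocked() promotes waiters in
 * FIFO order, stopping at the first one that still conflicts, so
 * overlapping requests are neither starved nor reordered. Everything here
 * is serialized by endio_wait.lock.
 */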

static void init_journal_node(struct journal_node *node)
{
	RB_CLEAR_NODE(&node->node);
	node->sector = (sector_t)-1;
}

static void add_journal_node(struct dm_integrity_c *ic, struct journal_node *node, sector_t sector)
{
	struct rb_node **link;
	struct rb_node *parent;

	node->sector = sector;
	BUG_ON(!RB_EMPTY_NODE(&node->node));

	link = &ic->journal_tree_root.rb_node;
	parent = NULL;

	while (*link) {
		struct journal_node *j;
		parent = *link;
		j = container_of(parent, struct journal_node, node);
		if (sector < j->sector)
			link = &j->node.rb_left;
		else
			link = &j->node.rb_right;
	}

	rb_link_node(&node->node, parent, link);
	rb_insert_color(&node->node, &ic->journal_tree_root);
}

static void remove_journal_node(struct dm_integrity_c *ic, struct journal_node *node)
{
	BUG_ON(RB_EMPTY_NODE(&node->node));
	rb_erase(&node->node, &ic->journal_tree_root);
	init_journal_node(node);
}

#define NOT_FOUND	(-1U)

static unsigned find_journal_node(struct dm_integrity_c *ic, sector_t sector, sector_t *next_sector)
{
	struct rb_node *n = ic->journal_tree_root.rb_node;
	unsigned found = NOT_FOUND;
	*next_sector = (sector_t)-1;
	while (n) {
		struct journal_node *j = container_of(n, struct journal_node, node);
		if (sector == j->sector) {
			found = j - ic->journal_tree;
		}
		if (sector < j->sector) {
			*next_sector = j->sector;
			n = j->node.rb_left;
		} else {
			n = j->node.rb_right;
		}
	}

	return found;
}

static bool test_journal_node(struct dm_integrity_c *ic, unsigned pos, sector_t sector)
{
	struct journal_node *node, *next_node;
	struct rb_node *next;

	if (unlikely(pos >= ic->journal_entries))
		return false;
	node = &ic->journal_tree[pos];
	if (unlikely(RB_EMPTY_NODE(&node->node)))
		return false;
	if (unlikely(node->sector != sector))
		return false;

	next = rb_next(&node->node);
	if (unlikely(!next))
		return true;

	next_node = container_of(next, struct journal_node, node);
	return next_node->sector != sector;
}

static bool find_newer_committed_node(struct dm_integrity_c *ic, struct journal_node *node)
{
	struct rb_node *next;
	struct journal_node *next_node;
	unsigned next_section;

	BUG_ON(RB_EMPTY_NODE(&node->node));

	next = rb_next(&node->node);
	if (unlikely(!next))
		return false;

	next_node = container_of(next, struct journal_node, node);

	if (next_node->sector != node->sector)
		return false;

	next_section = (unsigned)(next_node - ic->journal_tree) / ic->journal_section_entries;
	if (next_section >= ic->committed_section &&
	    next_section < ic->committed_section + ic->n_committed_sections)
		return true;
	if (next_section + ic->journal_sections < ic->committed_section + ic->n_committed_sections)
		return true;

	return false;
}
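
/*
 * The journal tree maps logical sectors to journal entries: journal_tree[]
 * is indexed like the journal itself and duplicate sectors are permitted.
 * find_journal_node() keeps walking right on an equal key, so it returns
 * the newest entry for a sector, and test_journal_node() checks whether a
 * given entry is still that newest one.
 */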

#define TAG_READ	0
#define TAG_WRITE	1
#define TAG_CMP		2

static int dm_integrity_rw_tag(struct dm_integrity_c *ic, unsigned char *tag, sector_t *metadata_block,
			       unsigned *metadata_offset, unsigned total_size, int op)
{
	do {
		unsigned char *data, *dp;
		struct dm_buffer *b;
		unsigned to_copy;
		int r;

		r = dm_integrity_failed(ic);
		if (unlikely(r))
			return r;

		data = dm_bufio_read(ic->bufio, *metadata_block, &b);
		if (IS_ERR(data))
			return PTR_ERR(data);

		to_copy = min((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - *metadata_offset, total_size);
		dp = data + *metadata_offset;
		if (op == TAG_READ) {
			memcpy(tag, dp, to_copy);
		} else if (op == TAG_WRITE) {
			memcpy(dp, tag, to_copy);
			dm_bufio_mark_partial_buffer_dirty(b, *metadata_offset, *metadata_offset + to_copy);
		} else {
			/* e.g.: op == TAG_CMP */
			if (unlikely(memcmp(dp, tag, to_copy))) {
				unsigned i;

				for (i = 0; i < to_copy; i++) {
					if (dp[i] != tag[i])
						break;
					total_size--;
				}
				dm_bufio_release(b);
				return total_size;
			}
		}
		dm_bufio_release(b);

		tag += to_copy;
		*metadata_offset += to_copy;
		if (unlikely(*metadata_offset == 1U << SECTOR_SHIFT << ic->log2_buffer_sectors)) {
			(*metadata_block)++;
			*metadata_offset = 0;
		}
		total_size -= to_copy;
	} while (unlikely(total_size));

	return 0;
}
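
/*
 * The return convention is worth spelling out: 0 on success, a negative
 * errno on I/O error, and for TAG_CMP a positive count of tag bytes from
 * the first mismatching byte to the end of the request, which the caller
 * uses to compute the sector where the checksum failed.
 */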

static void dm_integrity_flush_buffers(struct dm_integrity_c *ic)
{
	int r;
	r = dm_bufio_write_dirty_buffers(ic->bufio);
	if (unlikely(r))
		dm_integrity_io_error(ic, "writing tags", r);
}

static void sleep_on_endio_wait(struct dm_integrity_c *ic)
{
	DECLARE_WAITQUEUE(wait, current);
	__add_wait_queue(&ic->endio_wait, &wait);
	__set_current_state(TASK_UNINTERRUPTIBLE);
	spin_unlock_irq(&ic->endio_wait.lock);
	io_schedule();
	spin_lock_irq(&ic->endio_wait.lock);
	__remove_wait_queue(&ic->endio_wait, &wait);
}

static void autocommit_fn(struct timer_list *t)
{
	struct dm_integrity_c *ic = from_timer(ic, t, autocommit_timer);

	if (likely(!dm_integrity_failed(ic)))
		queue_work(ic->commit_wq, &ic->commit_work);
}

static void schedule_autocommit(struct dm_integrity_c *ic)
{
	if (!timer_pending(&ic->autocommit_timer))
		mod_timer(&ic->autocommit_timer, jiffies + ic->autocommit_jiffies);
}

static void submit_flush_bio(struct dm_integrity_c *ic, struct dm_integrity_io *dio)
{
	struct bio *bio;
	unsigned long flags;

	spin_lock_irqsave(&ic->endio_wait.lock, flags);
	bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
	bio_list_add(&ic->flush_bio_list, bio);
	spin_unlock_irqrestore(&ic->endio_wait.lock, flags);

	queue_work(ic->commit_wq, &ic->commit_work);
}

static void do_endio(struct dm_integrity_c *ic, struct bio *bio)
{
	int r = dm_integrity_failed(ic);
	if (unlikely(r) && !bio->bi_status)
		bio->bi_status = errno_to_blk_status(r);
	bio_endio(bio);
}

static void do_endio_flush(struct dm_integrity_c *ic, struct dm_integrity_io *dio)
{
	struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));

	if (unlikely(dio->fua) && likely(!bio->bi_status) && likely(!dm_integrity_failed(ic)))
		submit_flush_bio(ic, dio);
	else
		do_endio(ic, bio);
}

static void dec_in_flight(struct dm_integrity_io *dio)
{
	if (atomic_dec_and_test(&dio->in_flight)) {
		struct dm_integrity_c *ic = dio->ic;
		struct bio *bio;

		remove_range(ic, &dio->range);

		if (unlikely(dio->write))
			schedule_autocommit(ic);

		bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));

		if (unlikely(dio->bi_status) && !bio->bi_status)
			bio->bi_status = dio->bi_status;
		if (likely(!bio->bi_status) && unlikely(bio_sectors(bio) != dio->range.n_sectors)) {
			dio->range.logical_sector += dio->range.n_sectors;
			bio_advance(bio, dio->range.n_sectors << SECTOR_SHIFT);
			INIT_WORK(&dio->work, integrity_bio_wait);
			queue_work(ic->wait_wq, &dio->work);
			return;
		}
		do_endio_flush(ic, dio);
	}
}

static void integrity_end_io(struct bio *bio)
{
	struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));

	bio->bi_iter = dio->orig_bi_iter;
	bio->bi_disk = dio->orig_bi_disk;
	bio->bi_partno = dio->orig_bi_partno;
	if (dio->orig_bi_integrity) {
		bio->bi_integrity = dio->orig_bi_integrity;
		bio->bi_opf |= REQ_INTEGRITY;
	}
	bio->bi_end_io = dio->orig_bi_end_io;

	if (dio->completion)
		complete(dio->completion);

	dec_in_flight(dio);
}

static void integrity_sector_checksum(struct dm_integrity_c *ic, sector_t sector,
				      const char *data, char *result)
{
	__u64 sector_le = cpu_to_le64(sector);
	SHASH_DESC_ON_STACK(req, ic->internal_hash);
	int r;
	unsigned digest_size;

	req->tfm = ic->internal_hash;
	req->flags = 0;

	r = crypto_shash_init(req);
	if (unlikely(r < 0)) {
		dm_integrity_io_error(ic, "crypto_shash_init", r);
		goto failed;
	}

	r = crypto_shash_update(req, (const __u8 *)&sector_le, sizeof sector_le);
	if (unlikely(r < 0)) {
		dm_integrity_io_error(ic, "crypto_shash_update", r);
		goto failed;
	}

	r = crypto_shash_update(req, data, ic->sectors_per_block << SECTOR_SHIFT);
	if (unlikely(r < 0)) {
		dm_integrity_io_error(ic, "crypto_shash_update", r);
		goto failed;
	}

	r = crypto_shash_final(req, result);
	if (unlikely(r < 0)) {
		dm_integrity_io_error(ic, "crypto_shash_final", r);
		goto failed;
	}

	digest_size = crypto_shash_digestsize(ic->internal_hash);
	if (unlikely(digest_size < ic->tag_size))
		memset(result + digest_size, 0, ic->tag_size - digest_size);

	return;

failed:
	/* this shouldn't happen anyway, the hash functions have no reason to fail */
	get_random_bytes(result, ic->tag_size);
}
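
/*
 * So a block's tag is hash(le64(logical_sector) || data), zero-padded or
 * effectively truncated to tag_size; mixing in the sector number means a
 * block copied verbatim to another location no longer verifies. On the
 * (unexpected) failure path the result is randomized so the subsequent tag
 * comparison fails safely.
 */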

static void integrity_metadata(struct work_struct *w)
{
	struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work);
	struct dm_integrity_c *ic = dio->ic;

	int r;

	if (ic->internal_hash) {
		struct bvec_iter iter;
		struct bio_vec bv;
		unsigned digest_size = crypto_shash_digestsize(ic->internal_hash);
		struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
		char *checksums;
		unsigned extra_space = unlikely(digest_size > ic->tag_size) ? digest_size - ic->tag_size : 0;
		char checksums_onstack[HASH_MAX_DIGESTSIZE];
		unsigned sectors_to_process = dio->range.n_sectors;
		sector_t sector = dio->range.logical_sector;

		if (unlikely(ic->mode == 'R'))
			goto skip_io;

		checksums = kmalloc((PAGE_SIZE >> SECTOR_SHIFT >> ic->sb->log2_sectors_per_block) * ic->tag_size + extra_space,
				    GFP_NOIO | __GFP_NORETRY | __GFP_NOWARN);
		if (!checksums) {
			checksums = checksums_onstack;
			if (WARN_ON(extra_space &&
				    digest_size > sizeof(checksums_onstack))) {
				r = -EINVAL;
				goto error;
			}
		}

		__bio_for_each_segment(bv, bio, iter, dio->orig_bi_iter) {
			unsigned pos;
			char *mem, *checksums_ptr;

again:
			mem = (char *)kmap_atomic(bv.bv_page) + bv.bv_offset;
			pos = 0;
			checksums_ptr = checksums;
			do {
				integrity_sector_checksum(ic, sector, mem + pos, checksums_ptr);
				checksums_ptr += ic->tag_size;
				sectors_to_process -= ic->sectors_per_block;
				pos += ic->sectors_per_block << SECTOR_SHIFT;
				sector += ic->sectors_per_block;
			} while (pos < bv.bv_len && sectors_to_process && checksums != checksums_onstack);
			kunmap_atomic(mem);

			r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset,
						checksums_ptr - checksums, !dio->write ? TAG_CMP : TAG_WRITE);
			if (unlikely(r)) {
				if (r > 0) {
					DMERR_LIMIT("Checksum failed at sector 0x%llx",
						    (unsigned long long)(sector - ((r + ic->tag_size - 1) / ic->tag_size)));
					r = -EILSEQ;
					atomic64_inc(&ic->number_of_mismatches);
				}
				if (likely(checksums != checksums_onstack))
					kfree(checksums);
				goto error;
			}

			if (!sectors_to_process)
				break;

			if (unlikely(pos < bv.bv_len)) {
				bv.bv_offset += pos;
				bv.bv_len -= pos;
				goto again;
			}
		}

		if (likely(checksums != checksums_onstack))
			kfree(checksums);
	} else {
		struct bio_integrity_payload *bip = dio->orig_bi_integrity;

		if (bip) {
			struct bio_vec biv;
			struct bvec_iter iter;
			unsigned data_to_process = dio->range.n_sectors;
			sector_to_block(ic, data_to_process);
			data_to_process *= ic->tag_size;

			bip_for_each_vec(biv, bip, iter) {
				unsigned char *tag;
				unsigned this_len;

				BUG_ON(PageHighMem(biv.bv_page));
				tag = lowmem_page_address(biv.bv_page) + biv.bv_offset;
				this_len = min(biv.bv_len, data_to_process);
				r = dm_integrity_rw_tag(ic, tag, &dio->metadata_block, &dio->metadata_offset,
							this_len, !dio->write ? TAG_READ : TAG_WRITE);
				if (unlikely(r))
					goto error;
				data_to_process -= this_len;
				if (!data_to_process)
					break;
			}
		}
	}
skip_io:
	dec_in_flight(dio);
	return;
error:
	dio->bi_status = errno_to_blk_status(r);
	dec_in_flight(dio);
}

static int dm_integrity_map(struct dm_target *ti, struct bio *bio)
{
	struct dm_integrity_c *ic = ti->private;
	struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
	struct bio_integrity_payload *bip;

	sector_t area, offset;

	dio->ic = ic;
	dio->bi_status = 0;

	if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
		submit_flush_bio(ic, dio);
		return DM_MAPIO_SUBMITTED;
	}

	dio->range.logical_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);
	dio->write = bio_op(bio) == REQ_OP_WRITE;
	dio->fua = dio->write && bio->bi_opf & REQ_FUA;
	if (unlikely(dio->fua)) {
		/*
		 * Don't pass down the FUA flag because we have to flush
		 * disk cache anyway.
		 */
		bio->bi_opf &= ~REQ_FUA;
	}
	if (unlikely(dio->range.logical_sector + bio_sectors(bio) > ic->provided_data_sectors)) {
		DMERR("Too big sector number: 0x%llx + 0x%x > 0x%llx",
		      (unsigned long long)dio->range.logical_sector, bio_sectors(bio),
		      (unsigned long long)ic->provided_data_sectors);
		return DM_MAPIO_KILL;
	}
	if (unlikely((dio->range.logical_sector | bio_sectors(bio)) & (unsigned)(ic->sectors_per_block - 1))) {
		DMERR("Bio not aligned on %u sectors: 0x%llx, 0x%x",
		      ic->sectors_per_block,
		      (unsigned long long)dio->range.logical_sector, bio_sectors(bio));
		return DM_MAPIO_KILL;
	}

	if (ic->sectors_per_block > 1) {
		struct bvec_iter iter;
		struct bio_vec bv;
		bio_for_each_segment(bv, bio, iter) {
			if (unlikely(bv.bv_len & ((ic->sectors_per_block << SECTOR_SHIFT) - 1))) {
				DMERR("Bio vector (%u,%u) is not aligned on %u-sector boundary",
				      bv.bv_offset, bv.bv_len, ic->sectors_per_block);
				return DM_MAPIO_KILL;
			}
		}
	}

	bip = bio_integrity(bio);
	if (!ic->internal_hash) {
		if (bip) {
			unsigned wanted_tag_size = bio_sectors(bio) >> ic->sb->log2_sectors_per_block;
			if (ic->log2_tag_size >= 0)
				wanted_tag_size <<= ic->log2_tag_size;
			else
				wanted_tag_size *= ic->tag_size;
			if (unlikely(wanted_tag_size != bip->bip_iter.bi_size)) {
				DMERR("Invalid integrity data size %u, expected %u", bip->bip_iter.bi_size, wanted_tag_size);
				return DM_MAPIO_KILL;
			}
		}
	} else {
		if (unlikely(bip != NULL)) {
			DMERR("Unexpected integrity data when using internal hash");
			return DM_MAPIO_KILL;
		}
	}

	if (unlikely(ic->mode == 'R') && unlikely(dio->write))
		return DM_MAPIO_KILL;

	get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
	dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, &dio->metadata_offset);
	bio->bi_iter.bi_sector = get_data_sector(ic, area, offset);

	dm_integrity_map_continue(dio, true);
	return DM_MAPIO_SUBMITTED;
}

static bool __journal_read_write(struct dm_integrity_io *dio, struct bio *bio,
				 unsigned journal_section, unsigned journal_entry)
{
	struct dm_integrity_c *ic = dio->ic;
	sector_t logical_sector;
	unsigned n_sectors;

	logical_sector = dio->range.logical_sector;
	n_sectors = dio->range.n_sectors;
	do {
		struct bio_vec bv = bio_iovec(bio);
		char *mem;

		if (unlikely(bv.bv_len >> SECTOR_SHIFT > n_sectors))
			bv.bv_len = n_sectors << SECTOR_SHIFT;
		n_sectors -= bv.bv_len >> SECTOR_SHIFT;
		bio_advance_iter(bio, &bio->bi_iter, bv.bv_len);
retry_kmap:
		mem = kmap_atomic(bv.bv_page);
		if (likely(dio->write))
			flush_dcache_page(bv.bv_page);

		do {
			struct journal_entry *je = access_journal_entry(ic, journal_section, journal_entry);

			if (unlikely(!dio->write)) {
				struct journal_sector *js;
				char *mem_ptr;
				unsigned s;

				if (unlikely(journal_entry_is_inprogress(je))) {
					flush_dcache_page(bv.bv_page);
					kunmap_atomic(mem);

					__io_wait_event(ic->copy_to_journal_wait, !journal_entry_is_inprogress(je));
					goto retry_kmap;
				}
				smp_rmb();
				BUG_ON(journal_entry_get_sector(je) != logical_sector);
				js = access_journal_data(ic, journal_section, journal_entry);
				mem_ptr = mem + bv.bv_offset;
				s = 0;
				do {
					memcpy(mem_ptr, js, JOURNAL_SECTOR_DATA);
					*(commit_id_t *)(mem_ptr + JOURNAL_SECTOR_DATA) = je->last_bytes[s];
					js++;
					mem_ptr += 1 << SECTOR_SHIFT;
				} while (++s < ic->sectors_per_block);
#ifdef INTERNAL_VERIFY
				if (ic->internal_hash) {
					char checksums_onstack[max(HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];

					integrity_sector_checksum(ic, logical_sector, mem + bv.bv_offset, checksums_onstack);
					if (unlikely(memcmp(checksums_onstack, journal_entry_tag(ic, je), ic->tag_size))) {
						DMERR_LIMIT("Checksum failed when reading from journal, at sector 0x%llx",
							    (unsigned long long)logical_sector);
					}
				}
#endif
			}

			if (!ic->internal_hash) {
				struct bio_integrity_payload *bip = bio_integrity(bio);
				unsigned tag_todo = ic->tag_size;
				char *tag_ptr = journal_entry_tag(ic, je);

				if (bip) do {
					struct bio_vec biv = bvec_iter_bvec(bip->bip_vec, bip->bip_iter);
					unsigned tag_now = min(biv.bv_len, tag_todo);
					char *tag_addr;
					BUG_ON(PageHighMem(biv.bv_page));
					tag_addr = lowmem_page_address(biv.bv_page) + biv.bv_offset;
					if (likely(dio->write))
						memcpy(tag_ptr, tag_addr, tag_now);
					else
						memcpy(tag_addr, tag_ptr, tag_now);
					bvec_iter_advance(bip->bip_vec, &bip->bip_iter, tag_now);
					tag_ptr += tag_now;
					tag_todo -= tag_now;
				} while (unlikely(tag_todo)); else {
					if (likely(dio->write))
						memset(tag_ptr, 0, tag_todo);
				}
			}

			if (likely(dio->write)) {
				struct journal_sector *js;
				unsigned s;

				js = access_journal_data(ic, journal_section, journal_entry);
				memcpy(js, mem + bv.bv_offset, ic->sectors_per_block << SECTOR_SHIFT);

				s = 0;
				do {
					je->last_bytes[s] = js[s].commit_id;
				} while (++s < ic->sectors_per_block);

				if (ic->internal_hash) {
					unsigned digest_size = crypto_shash_digestsize(ic->internal_hash);
					if (unlikely(digest_size > ic->tag_size)) {
						char checksums_onstack[HASH_MAX_DIGESTSIZE];
						integrity_sector_checksum(ic, logical_sector, (char *)js, checksums_onstack);
						memcpy(journal_entry_tag(ic, je), checksums_onstack, ic->tag_size);
					} else
						integrity_sector_checksum(ic, logical_sector, (char *)js, journal_entry_tag(ic, je));
				}

				journal_entry_set_sector(je, logical_sector);
			}
			logical_sector += ic->sectors_per_block;

			journal_entry++;
			if (unlikely(journal_entry == ic->journal_section_entries)) {
				journal_entry = 0;
				journal_section++;
				wraparound_section(ic, &journal_section);
			}

			bv.bv_offset += ic->sectors_per_block << SECTOR_SHIFT;
		} while (bv.bv_len -= ic->sectors_per_block << SECTOR_SHIFT);

		if (unlikely(!dio->write))
			flush_dcache_page(bv.bv_page);
		kunmap_atomic(mem);
	} while (n_sectors);

	if (likely(dio->write)) {
		smp_mb();
		if (unlikely(waitqueue_active(&ic->copy_to_journal_wait)))
			wake_up(&ic->copy_to_journal_wait);
		if (READ_ONCE(ic->free_sectors) <= ic->free_sectors_threshold) {
			queue_work(ic->commit_wq, &ic->commit_work);
		} else {
			schedule_autocommit(ic);
		}
	} else {
		remove_range(ic, &dio->range);
	}

	if (unlikely(bio->bi_iter.bi_size)) {
		sector_t area, offset;

		dio->range.logical_sector = logical_sector;
		get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
		dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, &dio->metadata_offset);
		return true;
	}

	return false;
}
1658
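/*
 * The main request-processing routine, called either directly from the
 * map function or from a workqueue. In 'J' mode it allocates journal
 * entries for writes and serves reads from the journal if the data is
 * still there; otherwise it remaps the bio to the data device and
 * verifies or updates the tags.
 */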
static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map)
{
	struct dm_integrity_c *ic = dio->ic;
	struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
	unsigned journal_section, journal_entry;
	unsigned journal_read_pos;
	struct completion read_comp;
	bool need_sync_io = ic->internal_hash && !dio->write;

	if (need_sync_io && from_map) {
		INIT_WORK(&dio->work, integrity_bio_wait);
		queue_work(ic->metadata_wq, &dio->work);
		return;
	}

lock_retry:
	spin_lock_irq(&ic->endio_wait.lock);
retry:
	if (unlikely(dm_integrity_failed(ic))) {
		spin_unlock_irq(&ic->endio_wait.lock);
		do_endio(ic, bio);
		return;
	}
	dio->range.n_sectors = bio_sectors(bio);
	journal_read_pos = NOT_FOUND;
	if (likely(ic->mode == 'J')) {
		if (dio->write) {
			unsigned next_entry, i, pos;
			unsigned ws, we, range_sectors;

			dio->range.n_sectors = min(dio->range.n_sectors,
						   ic->free_sectors << ic->sb->log2_sectors_per_block);
			if (unlikely(!dio->range.n_sectors)) {
				if (from_map)
					goto offload_to_thread;
				sleep_on_endio_wait(ic);
				goto retry;
			}
			range_sectors = dio->range.n_sectors >> ic->sb->log2_sectors_per_block;
			ic->free_sectors -= range_sectors;
			journal_section = ic->free_section;
			journal_entry = ic->free_section_entry;

			next_entry = ic->free_section_entry + range_sectors;
			ic->free_section_entry = next_entry % ic->journal_section_entries;
			ic->free_section += next_entry / ic->journal_section_entries;
			ic->n_uncommitted_sections += next_entry / ic->journal_section_entries;
			wraparound_section(ic, &ic->free_section);

			pos = journal_section * ic->journal_section_entries + journal_entry;
			ws = journal_section;
			we = journal_entry;
			i = 0;
			do {
				struct journal_entry *je;

				add_journal_node(ic, &ic->journal_tree[pos], dio->range.logical_sector + i);
				pos++;
				if (unlikely(pos >= ic->journal_entries))
					pos = 0;

				je = access_journal_entry(ic, ws, we);
				BUG_ON(!journal_entry_is_unused(je));
				journal_entry_set_inprogress(je);
				we++;
				if (unlikely(we == ic->journal_section_entries)) {
					we = 0;
					ws++;
					wraparound_section(ic, &ws);
				}
			} while ((i += ic->sectors_per_block) < dio->range.n_sectors);

			spin_unlock_irq(&ic->endio_wait.lock);
			goto journal_read_write;
		} else {
			sector_t next_sector;
			journal_read_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
			if (likely(journal_read_pos == NOT_FOUND)) {
				if (unlikely(dio->range.n_sectors > next_sector - dio->range.logical_sector))
					dio->range.n_sectors = next_sector - dio->range.logical_sector;
			} else {
				unsigned i;
				unsigned jp = journal_read_pos + 1;
				for (i = ic->sectors_per_block; i < dio->range.n_sectors; i += ic->sectors_per_block, jp++) {
					if (!test_journal_node(ic, jp, dio->range.logical_sector + i))
						break;
				}
				dio->range.n_sectors = i;
			}
		}
	}
	if (unlikely(!add_new_range(ic, &dio->range, true))) {
		/*
		 * We must not sleep in the request routine because it could
		 * stall bios on current->bio_list.
		 * So, we offload the bio to a workqueue if we have to sleep.
		 */
		if (from_map) {
offload_to_thread:
			spin_unlock_irq(&ic->endio_wait.lock);
			INIT_WORK(&dio->work, integrity_bio_wait);
			queue_work(ic->wait_wq, &dio->work);
			return;
		}
		wait_and_add_new_range(ic, &dio->range);
	}
	spin_unlock_irq(&ic->endio_wait.lock);

	if (unlikely(journal_read_pos != NOT_FOUND)) {
		journal_section = journal_read_pos / ic->journal_section_entries;
		journal_entry = journal_read_pos % ic->journal_section_entries;
		goto journal_read_write;
	}

	dio->in_flight = (atomic_t)ATOMIC_INIT(2);

	if (need_sync_io) {
		init_completion(&read_comp);
		dio->completion = &read_comp;
	} else
		dio->completion = NULL;

	dio->orig_bi_iter = bio->bi_iter;

	dio->orig_bi_disk = bio->bi_disk;
	dio->orig_bi_partno = bio->bi_partno;
	bio_set_dev(bio, ic->dev->bdev);

	dio->orig_bi_integrity = bio_integrity(bio);
	bio->bi_integrity = NULL;
	bio->bi_opf &= ~REQ_INTEGRITY;

	dio->orig_bi_end_io = bio->bi_end_io;
	bio->bi_end_io = integrity_end_io;

	bio->bi_iter.bi_size = dio->range.n_sectors << SECTOR_SHIFT;
	generic_make_request(bio);

	if (need_sync_io) {
		wait_for_completion_io(&read_comp);
		if (unlikely(ic->recalc_wq != NULL) &&
		    ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) &&
		    dio->range.logical_sector + dio->range.n_sectors > le64_to_cpu(ic->sb->recalc_sector))
			goto skip_check;
		if (likely(!bio->bi_status))
			integrity_metadata(&dio->work);
		else
skip_check:
			dec_in_flight(dio);

	} else {
		INIT_WORK(&dio->work, integrity_metadata);
		queue_work(ic->metadata_wq, &dio->work);
	}

	return;

journal_read_write:
	if (unlikely(__journal_read_write(dio, bio, journal_section, journal_entry)))
		goto lock_retry;

	do_endio_flush(ic, dio);
}


static void integrity_bio_wait(struct work_struct *w)
{
	struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work);

	dm_integrity_map_continue(dio, false);
}

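/*
 * Skip the unused entries of a partially filled journal section, so that
 * commit always operates on whole sections. The WARN_ON checks the
 * accounting invariant between free, uncommitted and committed sectors.
 */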
static void pad_uncommitted(struct dm_integrity_c *ic)
{
	if (ic->free_section_entry) {
		ic->free_sectors -= ic->journal_section_entries - ic->free_section_entry;
		ic->free_section_entry = 0;
		ic->free_section++;
		wraparound_section(ic, &ic->free_section);
		ic->n_uncommitted_sections++;
	}
	WARN_ON(ic->journal_sections * ic->journal_section_entries !=
		(ic->n_uncommitted_sections + ic->n_committed_sections) * ic->journal_section_entries + ic->free_sectors);
}

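/*
 * Commit work: wait until in-progress journal entries are filled in by
 * the submitting threads, stamp the sections with the current commit id,
 * write them to the journal area and finally complete any queued flush
 * bios.
 */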
static void integrity_commit(struct work_struct *w)
{
	struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, commit_work);
	unsigned commit_start, commit_sections;
	unsigned i, j, n;
	struct bio *flushes;

	del_timer(&ic->autocommit_timer);

	spin_lock_irq(&ic->endio_wait.lock);
	flushes = bio_list_get(&ic->flush_bio_list);
	if (unlikely(ic->mode != 'J')) {
		spin_unlock_irq(&ic->endio_wait.lock);
		dm_integrity_flush_buffers(ic);
		goto release_flush_bios;
	}

	pad_uncommitted(ic);
	commit_start = ic->uncommitted_section;
	commit_sections = ic->n_uncommitted_sections;
	spin_unlock_irq(&ic->endio_wait.lock);

	if (!commit_sections)
		goto release_flush_bios;

	i = commit_start;
	for (n = 0; n < commit_sections; n++) {
		for (j = 0; j < ic->journal_section_entries; j++) {
			struct journal_entry *je;
			je = access_journal_entry(ic, i, j);
			io_wait_event(ic->copy_to_journal_wait, !journal_entry_is_inprogress(je));
		}
		for (j = 0; j < ic->journal_section_sectors; j++) {
			struct journal_sector *js;
			js = access_journal(ic, i, j);
			js->commit_id = dm_integrity_commit_id(ic, i, j, ic->commit_seq);
		}
		i++;
		if (unlikely(i >= ic->journal_sections))
			ic->commit_seq = next_commit_seq(ic->commit_seq);
		wraparound_section(ic, &i);
	}
	smp_rmb();

	write_journal(ic, commit_start, commit_sections);

	spin_lock_irq(&ic->endio_wait.lock);
	ic->uncommitted_section += commit_sections;
	wraparound_section(ic, &ic->uncommitted_section);
	ic->n_uncommitted_sections -= commit_sections;
	ic->n_committed_sections += commit_sections;
	spin_unlock_irq(&ic->endio_wait.lock);

	if (READ_ONCE(ic->free_sectors) <= ic->free_sectors_threshold)
		queue_work(ic->writer_wq, &ic->writer_work);

release_flush_bios:
	while (flushes) {
		struct bio *next = flushes->bi_next;
		flushes->bi_next = NULL;
		do_endio(ic, flushes);
		flushes = next;
	}
}

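/*
 * dm-io callback for copy_from_journal: release the range lock, free the
 * per-I/O structure and account the completed operation.
 */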
static void complete_copy_from_journal(unsigned long error, void *context)
{
	struct journal_io *io = context;
	struct journal_completion *comp = io->comp;
	struct dm_integrity_c *ic = comp->ic;
	remove_range(ic, &io->range);
	mempool_free(io, &ic->journal_io_mempool);
	if (unlikely(error != 0))
		dm_integrity_io_error(ic, "copying from journal", -EIO);
	complete_journal_op(comp);
}

static void restore_last_bytes(struct dm_integrity_c *ic, struct journal_sector *js,
			       struct journal_entry *je)
{
	unsigned s = 0;
	do {
		js->commit_id = je->last_bytes[s];
		js++;
	} while (++s < ic->sectors_per_block);
}

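/*
 * Write committed journal sections back to their final location on the
 * data device. Adjacent entries are merged into one copy operation, tags
 * are written to the metadata area and, outside of replay, entries
 * superseded by a newer committed sector are skipped. Also used to
 * replay the journal during activation.
 */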
static void do_journal_write(struct dm_integrity_c *ic, unsigned write_start,
			     unsigned write_sections, bool from_replay)
{
	unsigned i, j, n;
	struct journal_completion comp;
	struct blk_plug plug;

	blk_start_plug(&plug);

	comp.ic = ic;
	comp.in_flight = (atomic_t)ATOMIC_INIT(1);
	init_completion(&comp.comp);

	i = write_start;
	for (n = 0; n < write_sections; n++, i++, wraparound_section(ic, &i)) {
#ifndef INTERNAL_VERIFY
		if (unlikely(from_replay))
#endif
			rw_section_mac(ic, i, false);
		for (j = 0; j < ic->journal_section_entries; j++) {
			struct journal_entry *je = access_journal_entry(ic, i, j);
			sector_t sec, area, offset;
			unsigned k, l, next_loop;
			sector_t metadata_block;
			unsigned metadata_offset;
			struct journal_io *io;

			if (journal_entry_is_unused(je))
				continue;
			BUG_ON(unlikely(journal_entry_is_inprogress(je)) && !from_replay);
			sec = journal_entry_get_sector(je);
			if (unlikely(from_replay)) {
				if (unlikely(sec & (unsigned)(ic->sectors_per_block - 1))) {
					dm_integrity_io_error(ic, "invalid sector in journal", -EIO);
					sec &= ~(sector_t)(ic->sectors_per_block - 1);
				}
			}
			get_area_and_offset(ic, sec, &area, &offset);
			restore_last_bytes(ic, access_journal_data(ic, i, j), je);
			for (k = j + 1; k < ic->journal_section_entries; k++) {
				struct journal_entry *je2 = access_journal_entry(ic, i, k);
				sector_t sec2, area2, offset2;
				if (journal_entry_is_unused(je2))
					break;
				BUG_ON(unlikely(journal_entry_is_inprogress(je2)) && !from_replay);
				sec2 = journal_entry_get_sector(je2);
				get_area_and_offset(ic, sec2, &area2, &offset2);
				if (area2 != area || offset2 != offset + ((k - j) << ic->sb->log2_sectors_per_block))
					break;
				restore_last_bytes(ic, access_journal_data(ic, i, k), je2);
			}
			next_loop = k - 1;

			io = mempool_alloc(&ic->journal_io_mempool, GFP_NOIO);
			io->comp = &comp;
			io->range.logical_sector = sec;
			io->range.n_sectors = (k - j) << ic->sb->log2_sectors_per_block;

			spin_lock_irq(&ic->endio_wait.lock);
			if (unlikely(!add_new_range(ic, &io->range, true)))
				wait_and_add_new_range(ic, &io->range);

			if (likely(!from_replay)) {
				struct journal_node *section_node = &ic->journal_tree[i * ic->journal_section_entries];

				/* don't write if there is newer committed sector */
				while (j < k && find_newer_committed_node(ic, &section_node[j])) {
					struct journal_entry *je2 = access_journal_entry(ic, i, j);

					journal_entry_set_unused(je2);
					remove_journal_node(ic, &section_node[j]);
					j++;
					sec += ic->sectors_per_block;
					offset += ic->sectors_per_block;
				}
				while (j < k && find_newer_committed_node(ic, &section_node[k - 1])) {
					struct journal_entry *je2 = access_journal_entry(ic, i, k - 1);

					journal_entry_set_unused(je2);
					remove_journal_node(ic, &section_node[k - 1]);
					k--;
				}
				if (j == k) {
					remove_range_unlocked(ic, &io->range);
					spin_unlock_irq(&ic->endio_wait.lock);
					mempool_free(io, &ic->journal_io_mempool);
					goto skip_io;
				}
				for (l = j; l < k; l++) {
					remove_journal_node(ic, &section_node[l]);
				}
			}
			spin_unlock_irq(&ic->endio_wait.lock);

			metadata_block = get_metadata_sector_and_offset(ic, area, offset, &metadata_offset);
			for (l = j; l < k; l++) {
				int r;
				struct journal_entry *je2 = access_journal_entry(ic, i, l);

				if (
#ifndef INTERNAL_VERIFY
				    unlikely(from_replay) &&
#endif
				    ic->internal_hash) {
					char test_tag[max_t(size_t, HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];

					integrity_sector_checksum(ic, sec + ((l - j) << ic->sb->log2_sectors_per_block),
								  (char *)access_journal_data(ic, i, l), test_tag);
					if (unlikely(memcmp(test_tag, journal_entry_tag(ic, je2), ic->tag_size)))
						dm_integrity_io_error(ic, "tag mismatch when replaying journal", -EILSEQ);
				}

				journal_entry_set_unused(je2);
				r = dm_integrity_rw_tag(ic, journal_entry_tag(ic, je2), &metadata_block, &metadata_offset,
							ic->tag_size, TAG_WRITE);
				if (unlikely(r)) {
					dm_integrity_io_error(ic, "writing tags", r);
				}
			}

			atomic_inc(&comp.in_flight);
			copy_from_journal(ic, i, j << ic->sb->log2_sectors_per_block,
					  (k - j) << ic->sb->log2_sectors_per_block,
					  get_data_sector(ic, area, offset),
					  complete_copy_from_journal, io);
skip_io:
			j = next_loop;
		}
	}

	dm_bufio_write_dirty_buffers_async(ic->bufio);

	blk_finish_plug(&plug);

	complete_journal_op(&comp);
	wait_for_completion_io(&comp.comp);

	dm_integrity_flush_buffers(ic);
}

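/*
 * Writer work: write back all committed sections and return their
 * entries to the free pool, waking up writers that were blocked on a
 * full journal.
 */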
static void integrity_writer(struct work_struct *w)
{
	struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, writer_work);
	unsigned write_start, write_sections;
	unsigned prev_free_sectors;

	/* the following test is not needed, but it tests the replay code */
	if (READ_ONCE(ic->suspending) && !ic->meta_dev)
		return;

	spin_lock_irq(&ic->endio_wait.lock);
	write_start = ic->committed_section;
	write_sections = ic->n_committed_sections;
	spin_unlock_irq(&ic->endio_wait.lock);

	if (!write_sections)
		return;

	do_journal_write(ic, write_start, write_sections, false);

	spin_lock_irq(&ic->endio_wait.lock);

	ic->committed_section += write_sections;
	wraparound_section(ic, &ic->committed_section);
	ic->n_committed_sections -= write_sections;

	prev_free_sectors = ic->free_sectors;
	ic->free_sectors += write_sections * ic->journal_section_entries;
	if (unlikely(!prev_free_sectors))
		wake_up_locked(&ic->endio_wait);

	spin_unlock_irq(&ic->endio_wait.lock);
}

static void recalc_write_super(struct dm_integrity_c *ic)
{
	int r;

	dm_integrity_flush_buffers(ic);
	if (dm_integrity_failed(ic))
		return;

	sb_set_version(ic);
	r = sync_rw_sb(ic, REQ_OP_WRITE, 0);
	if (unlikely(r))
		dm_integrity_io_error(ic, "writing superblock", r);
}

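/*
 * Background recalculation work: walk the device in chunks of up to
 * RECALC_SECTORS, read the data, compute the tags and store them in the
 * metadata area, periodically saving the progress in the superblock.
 */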
static void integrity_recalc(struct work_struct *w)
{
	struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, recalc_work);
	struct dm_integrity_range range;
	struct dm_io_request io_req;
	struct dm_io_region io_loc;
	sector_t area, offset;
	sector_t metadata_block;
	unsigned metadata_offset;
	__u8 *t;
	unsigned i;
	int r;
	unsigned super_counter = 0;

	spin_lock_irq(&ic->endio_wait.lock);

next_chunk:

	if (unlikely(READ_ONCE(ic->suspending)))
		goto unlock_ret;

	range.logical_sector = le64_to_cpu(ic->sb->recalc_sector);
	if (unlikely(range.logical_sector >= ic->provided_data_sectors))
		goto unlock_ret;

	get_area_and_offset(ic, range.logical_sector, &area, &offset);
	range.n_sectors = min((sector_t)RECALC_SECTORS, ic->provided_data_sectors - range.logical_sector);
	if (!ic->meta_dev)
		range.n_sectors = min(range.n_sectors, (1U << ic->sb->log2_interleave_sectors) - (unsigned)offset);

	if (unlikely(!add_new_range(ic, &range, true)))
		wait_and_add_new_range(ic, &range);

	spin_unlock_irq(&ic->endio_wait.lock);

	if (unlikely(++super_counter == RECALC_WRITE_SUPER)) {
		recalc_write_super(ic);
		super_counter = 0;
	}

	if (unlikely(dm_integrity_failed(ic)))
		goto err;

	io_req.bi_op = REQ_OP_READ;
	io_req.bi_op_flags = 0;
	io_req.mem.type = DM_IO_VMA;
	io_req.mem.ptr.addr = ic->recalc_buffer;
	io_req.notify.fn = NULL;
	io_req.client = ic->io;
	io_loc.bdev = ic->dev->bdev;
	io_loc.sector = get_data_sector(ic, area, offset);
	io_loc.count = range.n_sectors;

	r = dm_io(&io_req, 1, &io_loc, NULL);
	if (unlikely(r)) {
		dm_integrity_io_error(ic, "reading data", r);
		goto err;
	}

	t = ic->recalc_tags;
	for (i = 0; i < range.n_sectors; i += ic->sectors_per_block) {
		integrity_sector_checksum(ic, range.logical_sector + i, ic->recalc_buffer + (i << SECTOR_SHIFT), t);
		t += ic->tag_size;
	}

	metadata_block = get_metadata_sector_and_offset(ic, area, offset, &metadata_offset);

	r = dm_integrity_rw_tag(ic, ic->recalc_tags, &metadata_block, &metadata_offset, t - ic->recalc_tags, TAG_WRITE);
	if (unlikely(r)) {
		dm_integrity_io_error(ic, "writing tags", r);
		goto err;
	}

	spin_lock_irq(&ic->endio_wait.lock);
	remove_range_unlocked(ic, &range);
	ic->sb->recalc_sector = cpu_to_le64(range.logical_sector + range.n_sectors);
	goto next_chunk;

err:
	remove_range(ic, &range);
	return;

unlock_ret:
	spin_unlock_irq(&ic->endio_wait.lock);

	recalc_write_super(ic);
}

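/*
 * Erase a range of journal sections: zero the entries, stamp each sector
 * with the given commit sequence and write the result out.
 */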
static void init_journal(struct dm_integrity_c *ic, unsigned start_section,
			 unsigned n_sections, unsigned char commit_seq)
{
	unsigned i, j, n;

	if (!n_sections)
		return;

	for (n = 0; n < n_sections; n++) {
		i = start_section + n;
		wraparound_section(ic, &i);
		for (j = 0; j < ic->journal_section_sectors; j++) {
			struct journal_sector *js = access_journal(ic, i, j);
			memset(&js->entries, 0, JOURNAL_SECTOR_DATA);
			js->commit_id = dm_integrity_commit_id(ic, i, j, commit_seq);
		}
		for (j = 0; j < ic->journal_section_entries; j++) {
			struct journal_entry *je = access_journal_entry(ic, i, j);
			journal_entry_set_unused(je);
		}
	}

	write_journal(ic, start_section, n_sections);
}

static int find_commit_seq(struct dm_integrity_c *ic, unsigned i, unsigned j, commit_id_t id)
{
	unsigned char k;
	for (k = 0; k < N_COMMIT_IDS; k++) {
		if (dm_integrity_commit_id(ic, i, j, k) == id)
			return k;
	}
	dm_integrity_io_error(ic, "journal commit id", -EIO);
	return -EIO;
}

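/*
 * Examine the journal during activation: find the most recent consistent
 * commit sequence, replay sections that were committed but possibly not
 * yet written back, and erase the journal if it is inconsistent.
 */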
static void replay_journal(struct dm_integrity_c *ic)
{
	unsigned i, j;
	bool used_commit_ids[N_COMMIT_IDS];
	unsigned max_commit_id_sections[N_COMMIT_IDS];
	unsigned write_start, write_sections;
	unsigned continue_section;
	bool journal_empty;
	unsigned char unused, last_used, want_commit_seq;

	if (ic->mode == 'R')
		return;

	if (ic->journal_uptodate)
		return;

	last_used = 0;
	write_start = 0;

	if (!ic->just_formatted) {
		DEBUG_print("reading journal\n");
		rw_journal(ic, REQ_OP_READ, 0, 0, ic->journal_sections, NULL);
		if (ic->journal_io)
			DEBUG_bytes(lowmem_page_address(ic->journal_io[0].page), 64, "read journal");
		if (ic->journal_io) {
			struct journal_completion crypt_comp;
			crypt_comp.ic = ic;
			init_completion(&crypt_comp.comp);
			crypt_comp.in_flight = (atomic_t)ATOMIC_INIT(0);
			encrypt_journal(ic, false, 0, ic->journal_sections, &crypt_comp);
			wait_for_completion(&crypt_comp.comp);
		}
		DEBUG_bytes(lowmem_page_address(ic->journal[0].page), 64, "decrypted journal");
	}

	if (dm_integrity_failed(ic))
		goto clear_journal;

	journal_empty = true;
	memset(used_commit_ids, 0, sizeof used_commit_ids);
	memset(max_commit_id_sections, 0, sizeof max_commit_id_sections);
	for (i = 0; i < ic->journal_sections; i++) {
		for (j = 0; j < ic->journal_section_sectors; j++) {
			int k;
			struct journal_sector *js = access_journal(ic, i, j);
			k = find_commit_seq(ic, i, j, js->commit_id);
			if (k < 0)
				goto clear_journal;
			used_commit_ids[k] = true;
			max_commit_id_sections[k] = i;
		}
		if (journal_empty) {
			for (j = 0; j < ic->journal_section_entries; j++) {
				struct journal_entry *je = access_journal_entry(ic, i, j);
				if (!journal_entry_is_unused(je)) {
					journal_empty = false;
					break;
				}
			}
		}
	}

	if (!used_commit_ids[N_COMMIT_IDS - 1]) {
		unused = N_COMMIT_IDS - 1;
		while (unused && !used_commit_ids[unused - 1])
			unused--;
	} else {
		for (unused = 0; unused < N_COMMIT_IDS; unused++)
			if (!used_commit_ids[unused])
				break;
		if (unused == N_COMMIT_IDS) {
			dm_integrity_io_error(ic, "journal commit ids", -EIO);
			goto clear_journal;
		}
	}
	DEBUG_print("first unused commit seq %d [%d,%d,%d,%d]\n",
		    unused, used_commit_ids[0], used_commit_ids[1],
		    used_commit_ids[2], used_commit_ids[3]);

	last_used = prev_commit_seq(unused);
	want_commit_seq = prev_commit_seq(last_used);

	if (!used_commit_ids[want_commit_seq] && used_commit_ids[prev_commit_seq(want_commit_seq)])
		journal_empty = true;

	write_start = max_commit_id_sections[last_used] + 1;
	if (unlikely(write_start >= ic->journal_sections))
		want_commit_seq = next_commit_seq(want_commit_seq);
	wraparound_section(ic, &write_start);

	i = write_start;
	for (write_sections = 0; write_sections < ic->journal_sections; write_sections++) {
		for (j = 0; j < ic->journal_section_sectors; j++) {
			struct journal_sector *js = access_journal(ic, i, j);

			if (js->commit_id != dm_integrity_commit_id(ic, i, j, want_commit_seq)) {
				/*
				 * This could be caused by crash during writing.
				 * We won't replay the inconsistent part of the
				 * journal.
				 */
				DEBUG_print("commit id mismatch at position (%u, %u): %d != %d\n",
					    i, j, find_commit_seq(ic, i, j, js->commit_id), want_commit_seq);
				goto brk;
			}
		}
		i++;
		if (unlikely(i >= ic->journal_sections))
			want_commit_seq = next_commit_seq(want_commit_seq);
		wraparound_section(ic, &i);
	}
brk:

	if (!journal_empty) {
		DEBUG_print("replaying %u sections, starting at %u, commit seq %d\n",
			    write_sections, write_start, want_commit_seq);
		do_journal_write(ic, write_start, write_sections, true);
	}

	if (write_sections == ic->journal_sections && (ic->mode == 'J' || journal_empty)) {
		continue_section = write_start;
		ic->commit_seq = want_commit_seq;
		DEBUG_print("continuing from section %u, commit seq %d\n", write_start, ic->commit_seq);
	} else {
		unsigned s;
		unsigned char erase_seq;
clear_journal:
		DEBUG_print("clearing journal\n");

		erase_seq = prev_commit_seq(prev_commit_seq(last_used));
		s = write_start;
		init_journal(ic, s, 1, erase_seq);
		s++;
		wraparound_section(ic, &s);
		if (ic->journal_sections >= 2) {
			init_journal(ic, s, ic->journal_sections - 2, erase_seq);
			s += ic->journal_sections - 2;
			wraparound_section(ic, &s);
			init_journal(ic, s, 1, erase_seq);
		}

		continue_section = 0;
		ic->commit_seq = next_commit_seq(erase_seq);
	}

	ic->committed_section = continue_section;
	ic->n_committed_sections = 0;

	ic->uncommitted_section = continue_section;
	ic->n_uncommitted_sections = 0;

	ic->free_section = continue_section;
	ic->free_section_entry = 0;
	ic->free_sectors = ic->journal_entries;

	ic->journal_tree_root = RB_ROOT;
	for (i = 0; i < ic->journal_entries; i++)
		init_journal_node(&ic->journal_tree[i]);
}

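/*
 * On suspend, drain the recalculation, commit and writer work. With a
 * separate metadata device the journal is written back here; otherwise
 * it is intentionally left to be replayed on resume (which exercises the
 * replay code).
 */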
static void dm_integrity_postsuspend(struct dm_target *ti)
{
	struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private;

	del_timer_sync(&ic->autocommit_timer);

	WRITE_ONCE(ic->suspending, 1);

	if (ic->recalc_wq)
		drain_workqueue(ic->recalc_wq);

	queue_work(ic->commit_wq, &ic->commit_work);
	drain_workqueue(ic->commit_wq);

	if (ic->mode == 'J') {
		if (ic->meta_dev)
			queue_work(ic->writer_wq, &ic->writer_work);
		drain_workqueue(ic->writer_wq);
		dm_integrity_flush_buffers(ic);
	}

	WRITE_ONCE(ic->suspending, 0);

	BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress));

	ic->journal_uptodate = true;
}

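/*
 * On resume, replay the journal and restart tag recalculation if the
 * superblock indicates it has not finished.
 */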
static void dm_integrity_resume(struct dm_target *ti)
{
	struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private;

	replay_journal(ic);

	if (ic->recalc_wq && ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
		__u64 recalc_pos = le64_to_cpu(ic->sb->recalc_sector);
		if (recalc_pos < ic->provided_data_sectors) {
			queue_work(ic->recalc_wq, &ic->recalc_work);
		} else if (recalc_pos > ic->provided_data_sectors) {
			ic->sb->recalc_sector = cpu_to_le64(ic->provided_data_sectors);
			recalc_write_super(ic);
		}
	}
}

static void dm_integrity_status(struct dm_target *ti, status_type_t type,
				unsigned status_flags, char *result, unsigned maxlen)
{
	struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private;
	unsigned arg_count;
	size_t sz = 0;

	switch (type) {
	case STATUSTYPE_INFO:
		DMEMIT("%llu %llu",
		       (unsigned long long)atomic64_read(&ic->number_of_mismatches),
		       (unsigned long long)ic->provided_data_sectors);
		if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
			DMEMIT(" %llu", (unsigned long long)le64_to_cpu(ic->sb->recalc_sector));
		else
			DMEMIT(" -");
		break;

	case STATUSTYPE_TABLE: {
		__u64 watermark_percentage = (__u64)(ic->journal_entries - ic->free_sectors_threshold) * 100;
		watermark_percentage += ic->journal_entries / 2;
		do_div(watermark_percentage, ic->journal_entries);
		arg_count = 3;
		arg_count += !!ic->meta_dev;
		arg_count += ic->sectors_per_block != 1;
		arg_count += !!(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING));
		arg_count += ic->mode == 'J';
		arg_count += ic->mode == 'J';
		arg_count += !!ic->internal_hash_alg.alg_string;
		arg_count += !!ic->journal_crypt_alg.alg_string;
		arg_count += !!ic->journal_mac_alg.alg_string;
		DMEMIT("%s %llu %u %c %u", ic->dev->name, (unsigned long long)ic->start,
		       ic->tag_size, ic->mode, arg_count);
		if (ic->meta_dev)
			DMEMIT(" meta_device:%s", ic->meta_dev->name);
		if (ic->sectors_per_block != 1)
			DMEMIT(" block_size:%u", ic->sectors_per_block << SECTOR_SHIFT);
		if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
			DMEMIT(" recalculate");
		DMEMIT(" journal_sectors:%u", ic->initial_sectors - SB_SECTORS);
		DMEMIT(" interleave_sectors:%u", 1U << ic->sb->log2_interleave_sectors);
		DMEMIT(" buffer_sectors:%u", 1U << ic->log2_buffer_sectors);
		if (ic->mode == 'J') {
			DMEMIT(" journal_watermark:%u", (unsigned)watermark_percentage);
			DMEMIT(" commit_time:%u", ic->autocommit_msec);
		}

#define EMIT_ALG(a, n)							\
		do {							\
			if (ic->a.alg_string) {				\
				DMEMIT(" %s:%s", n, ic->a.alg_string);	\
				if (ic->a.key_string)			\
					DMEMIT(":%s", ic->a.key_string);\
			}						\
		} while (0)
		EMIT_ALG(internal_hash_alg, "internal_hash");
		EMIT_ALG(journal_crypt_alg, "journal_crypt");
		EMIT_ALG(journal_mac_alg, "journal_mac");
		break;
	}
	}
}

static int dm_integrity_iterate_devices(struct dm_target *ti,
					iterate_devices_callout_fn fn, void *data)
{
	struct dm_integrity_c *ic = ti->private;

	if (!ic->meta_dev)
		return fn(ti, ic->dev, ic->start + ic->initial_sectors + ic->metadata_run, ti->len, data);
	else
		return fn(ti, ic->dev, 0, ti->len, data);
}

static void dm_integrity_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	struct dm_integrity_c *ic = ti->private;

	if (ic->sectors_per_block > 1) {
		limits->logical_block_size = ic->sectors_per_block << SECTOR_SHIFT;
		limits->physical_block_size = ic->sectors_per_block << SECTOR_SHIFT;
		blk_limits_io_min(limits, ic->sectors_per_block << SECTOR_SHIFT);
	}
}

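/*
 * Derive the journal geometry from the superblock: how many entries fit
 * in a journal sector (leaving room for the optional per-sector MAC) and
 * how many sectors and entries make up a section.
 */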
static void calculate_journal_section_size(struct dm_integrity_c *ic)
{
	unsigned sector_space = JOURNAL_SECTOR_DATA;

	ic->journal_sections = le32_to_cpu(ic->sb->journal_sections);
	ic->journal_entry_size = roundup(offsetof(struct journal_entry, last_bytes[ic->sectors_per_block]) + ic->tag_size,
					 JOURNAL_ENTRY_ROUNDUP);

	if (ic->sb->flags & cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC))
		sector_space -= JOURNAL_MAC_PER_SECTOR;
	ic->journal_entries_per_sector = sector_space / ic->journal_entry_size;
	ic->journal_section_entries = ic->journal_entries_per_sector * JOURNAL_BLOCK_SECTORS;
	ic->journal_section_sectors = (ic->journal_section_entries << ic->sb->log2_sectors_per_block) + JOURNAL_BLOCK_SECTORS;
	ic->journal_entries = ic->journal_section_entries * ic->journal_sections;
}

static int calculate_device_limits(struct dm_integrity_c *ic)
{
	__u64 initial_sectors;

	calculate_journal_section_size(ic);
	initial_sectors = SB_SECTORS + (__u64)ic->journal_section_sectors * ic->journal_sections;
	if (initial_sectors + METADATA_PADDING_SECTORS >= ic->meta_device_sectors || initial_sectors > UINT_MAX)
		return -EINVAL;
	ic->initial_sectors = initial_sectors;

	if (!ic->meta_dev) {
		sector_t last_sector, last_area, last_offset;

		ic->metadata_run = roundup((__u64)ic->tag_size << (ic->sb->log2_interleave_sectors - ic->sb->log2_sectors_per_block),
					   (__u64)(1 << SECTOR_SHIFT << METADATA_PADDING_SECTORS)) >> SECTOR_SHIFT;
		if (!(ic->metadata_run & (ic->metadata_run - 1)))
			ic->log2_metadata_run = __ffs(ic->metadata_run);
		else
			ic->log2_metadata_run = -1;

		get_area_and_offset(ic, ic->provided_data_sectors - 1, &last_area, &last_offset);
		last_sector = get_data_sector(ic, last_area, last_offset);
		if (last_sector < ic->start || last_sector >= ic->meta_device_sectors)
			return -EINVAL;
	} else {
		__u64 meta_size = (ic->provided_data_sectors >> ic->sb->log2_sectors_per_block) * ic->tag_size;
		meta_size = (meta_size + ((1U << (ic->log2_buffer_sectors + SECTOR_SHIFT)) - 1))
				>> (ic->log2_buffer_sectors + SECTOR_SHIFT);
		meta_size <<= ic->log2_buffer_sectors;
		if (ic->initial_sectors + meta_size < ic->initial_sectors ||
		    ic->initial_sectors + meta_size > ic->meta_device_sectors)
			return -EINVAL;
		ic->metadata_run = 1;
		ic->log2_metadata_run = 0;
	}

	return 0;
}

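/*
 * Format a fresh superblock. In interleaved mode the maximum usable
 * provided_data_sectors is found by setting bits from the top down and
 * checking each candidate with calculate_device_limits; with a separate
 * metadata device the journal size is maximized the same way.
 */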
static int initialize_superblock(struct dm_integrity_c *ic, unsigned journal_sectors, unsigned interleave_sectors)
{
	unsigned journal_sections;
	int test_bit;

	memset(ic->sb, 0, SB_SECTORS << SECTOR_SHIFT);
	memcpy(ic->sb->magic, SB_MAGIC, 8);
	ic->sb->integrity_tag_size = cpu_to_le16(ic->tag_size);
	ic->sb->log2_sectors_per_block = __ffs(ic->sectors_per_block);
	if (ic->journal_mac_alg.alg_string)
		ic->sb->flags |= cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC);

	calculate_journal_section_size(ic);
	journal_sections = journal_sectors / ic->journal_section_sectors;
	if (!journal_sections)
		journal_sections = 1;

	if (!ic->meta_dev) {
		ic->sb->journal_sections = cpu_to_le32(journal_sections);
		if (!interleave_sectors)
			interleave_sectors = DEFAULT_INTERLEAVE_SECTORS;
		ic->sb->log2_interleave_sectors = __fls(interleave_sectors);
		ic->sb->log2_interleave_sectors = max((__u8)MIN_LOG2_INTERLEAVE_SECTORS, ic->sb->log2_interleave_sectors);
		ic->sb->log2_interleave_sectors = min((__u8)MAX_LOG2_INTERLEAVE_SECTORS, ic->sb->log2_interleave_sectors);

		ic->provided_data_sectors = 0;
		for (test_bit = fls64(ic->meta_device_sectors) - 1; test_bit >= 3; test_bit--) {
			__u64 prev_data_sectors = ic->provided_data_sectors;

			ic->provided_data_sectors |= (sector_t)1 << test_bit;
			if (calculate_device_limits(ic))
				ic->provided_data_sectors = prev_data_sectors;
		}
		if (!ic->provided_data_sectors)
			return -EINVAL;
	} else {
		ic->sb->log2_interleave_sectors = 0;
		ic->provided_data_sectors = ic->data_device_sectors;
		ic->provided_data_sectors &= ~(sector_t)(ic->sectors_per_block - 1);

try_smaller_buffer:
		ic->sb->journal_sections = cpu_to_le32(0);
		for (test_bit = fls(journal_sections) - 1; test_bit >= 0; test_bit--) {
			__u32 prev_journal_sections = le32_to_cpu(ic->sb->journal_sections);
			__u32 test_journal_sections = prev_journal_sections | (1U << test_bit);
			if (test_journal_sections > journal_sections)
				continue;
			ic->sb->journal_sections = cpu_to_le32(test_journal_sections);
			if (calculate_device_limits(ic))
				ic->sb->journal_sections = cpu_to_le32(prev_journal_sections);
		}
		if (!le32_to_cpu(ic->sb->journal_sections)) {
			if (ic->log2_buffer_sectors > 3) {
				ic->log2_buffer_sectors--;
				goto try_smaller_buffer;
			}
			return -EINVAL;
		}
	}

	ic->sb->provided_data_sectors = cpu_to_le64(ic->provided_data_sectors);

	sb_set_version(ic);

	return 0;
}

static void dm_integrity_set(struct dm_target *ti, struct dm_integrity_c *ic)
{
	struct gendisk *disk = dm_disk(dm_table_get_md(ti->table));
	struct blk_integrity bi;

	memset(&bi, 0, sizeof(bi));
	bi.profile = &dm_integrity_profile;
	bi.tuple_size = ic->tag_size;
	bi.tag_size = bi.tuple_size;
	bi.interval_exp = ic->sb->log2_sectors_per_block + SECTOR_SHIFT;

	blk_integrity_register(disk, &bi);
	blk_queue_max_integrity_segments(disk->queue, UINT_MAX);
}

static void dm_integrity_free_page_list(struct dm_integrity_c *ic, struct page_list *pl)
{
	unsigned i;

	if (!pl)
		return;
	for (i = 0; i < ic->journal_pages; i++)
		if (pl[i].page)
			__free_page(pl[i].page);
	kvfree(pl);
}

static struct page_list *dm_integrity_alloc_page_list(struct dm_integrity_c *ic)
{
	size_t page_list_desc_size = ic->journal_pages * sizeof(struct page_list);
	struct page_list *pl;
	unsigned i;

	pl = kvmalloc(page_list_desc_size, GFP_KERNEL | __GFP_ZERO);
	if (!pl)
		return NULL;

	for (i = 0; i < ic->journal_pages; i++) {
		pl[i].page = alloc_page(GFP_KERNEL);
		if (!pl[i].page) {
			dm_integrity_free_page_list(ic, pl);
			return NULL;
		}
		if (i)
			pl[i - 1].next = &pl[i];
	}

	return pl;
}

static void dm_integrity_free_journal_scatterlist(struct dm_integrity_c *ic, struct scatterlist **sl)
{
	unsigned i;
	for (i = 0; i < ic->journal_sections; i++)
		kvfree(sl[i]);
	kvfree(sl);
}

static struct scatterlist **dm_integrity_alloc_journal_scatterlist(struct dm_integrity_c *ic, struct page_list *pl)
{
	struct scatterlist **sl;
	unsigned i;

	sl = kvmalloc_array(ic->journal_sections,
			    sizeof(struct scatterlist *),
			    GFP_KERNEL | __GFP_ZERO);
	if (!sl)
		return NULL;

	for (i = 0; i < ic->journal_sections; i++) {
		struct scatterlist *s;
		unsigned start_index, start_offset;
		unsigned end_index, end_offset;
		unsigned n_pages;
		unsigned idx;

		page_list_location(ic, i, 0, &start_index, &start_offset);
		page_list_location(ic, i, ic->journal_section_sectors - 1, &end_index, &end_offset);

		n_pages = (end_index - start_index + 1);

		s = kvmalloc_array(n_pages, sizeof(struct scatterlist),
				   GFP_KERNEL);
		if (!s) {
			dm_integrity_free_journal_scatterlist(ic, sl);
			return NULL;
		}

		sg_init_table(s, n_pages);
		for (idx = start_index; idx <= end_index; idx++) {
			char *va = lowmem_page_address(pl[idx].page);
			unsigned start = 0, end = PAGE_SIZE;
			if (idx == start_index)
				start = start_offset;
			if (idx == end_index)
				end = end_offset + (1 << SECTOR_SHIFT);
			sg_set_buf(&s[idx - start_index], va + start, end - start);
		}

		sl[i] = s;
	}

	return sl;
}

static void free_alg(struct alg_spec *a)
{
	kzfree(a->alg_string);
	kzfree(a->key);
	memset(a, 0, sizeof *a);
}

static int get_alg_and_key(const char *arg, struct alg_spec *a, char **error, char *error_inval)
{
	char *k;

	free_alg(a);

	a->alg_string = kstrdup(strchr(arg, ':') + 1, GFP_KERNEL);
	if (!a->alg_string)
		goto nomem;

	k = strchr(a->alg_string, ':');
	if (k) {
		*k = 0;
		a->key_string = k + 1;
		if (strlen(a->key_string) & 1)
			goto inval;

		a->key_size = strlen(a->key_string) / 2;
		a->key = kmalloc(a->key_size, GFP_KERNEL);
		if (!a->key)
			goto nomem;
		if (hex2bin(a->key, a->key_string, a->key_size))
			goto inval;
	}

	return 0;
inval:
	*error = error_inval;
	return -EINVAL;
nomem:
	*error = "Out of memory for an argument";
	return -ENOMEM;
}

static int get_mac(struct crypto_shash **hash, struct alg_spec *a, char **error,
		   char *error_alg, char *error_key)
{
	int r;

	if (a->alg_string) {
		*hash = crypto_alloc_shash(a->alg_string, 0, 0);
		if (IS_ERR(*hash)) {
			*error = error_alg;
			r = PTR_ERR(*hash);
			*hash = NULL;
			return r;
		}

		if (a->key) {
			r = crypto_shash_setkey(*hash, a->key, a->key_size);
			if (r) {
				*error = error_key;
				return r;
			}
		} else if (crypto_shash_get_flags(*hash) & CRYPTO_TFM_NEED_KEY) {
			*error = error_key;
			return -ENOKEY;
		}
	}

	return 0;
}

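/*
 * Allocate the in-memory journal and set up the optional journal
 * encryption: a cipher with block size 1 is used to generate a keystream
 * that is XORed with the journal (journal_xor); other ciphers get one
 * preallocated skcipher request per section with an IV derived from the
 * section number.
 */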
Mike Snitzer1aa0efd2017-03-17 14:56:17 -04002832static int create_journal(struct dm_integrity_c *ic, char **error)
2833{
2834 int r = 0;
2835 unsigned i;
2836 __u64 journal_pages, journal_desc_size, journal_tree_size;
Mikulas Patocka717f4b12018-01-10 09:32:47 -05002837 unsigned char *crypt_data = NULL, *crypt_iv = NULL;
2838 struct skcipher_request *req = NULL;
Mikulas Patocka56b67a42017-04-18 16:51:50 -04002839
2840 ic->commit_ids[0] = cpu_to_le64(0x1111111111111111ULL);
2841 ic->commit_ids[1] = cpu_to_le64(0x2222222222222222ULL);
2842 ic->commit_ids[2] = cpu_to_le64(0x3333333333333333ULL);
2843 ic->commit_ids[3] = cpu_to_le64(0x4444444444444444ULL);
Mike Snitzer1aa0efd2017-03-17 14:56:17 -04002844
2845 journal_pages = roundup((__u64)ic->journal_sections * ic->journal_section_sectors,
2846 PAGE_SIZE >> SECTOR_SHIFT) >> (PAGE_SHIFT - SECTOR_SHIFT);
2847 journal_desc_size = journal_pages * sizeof(struct page_list);
Arun KSca79b0c2018-12-28 00:34:29 -08002848 if (journal_pages >= totalram_pages() - totalhigh_pages() || journal_desc_size > ULONG_MAX) {
Mike Snitzer1aa0efd2017-03-17 14:56:17 -04002849 *error = "Journal doesn't fit into memory";
2850 r = -ENOMEM;
2851 goto bad;
2852 }
2853 ic->journal_pages = journal_pages;
2854
2855 ic->journal = dm_integrity_alloc_page_list(ic);
2856 if (!ic->journal) {
2857 *error = "Could not allocate memory for journal";
2858 r = -ENOMEM;
2859 goto bad;
2860 }
2861 if (ic->journal_crypt_alg.alg_string) {
2862 unsigned ivsize, blocksize;
2863 struct journal_completion comp;
2864
2865 comp.ic = ic;
2866 ic->journal_crypt = crypto_alloc_skcipher(ic->journal_crypt_alg.alg_string, 0, 0);
2867 if (IS_ERR(ic->journal_crypt)) {
2868 *error = "Invalid journal cipher";
2869 r = PTR_ERR(ic->journal_crypt);
2870 ic->journal_crypt = NULL;
2871 goto bad;
2872 }
2873 ivsize = crypto_skcipher_ivsize(ic->journal_crypt);
2874 blocksize = crypto_skcipher_blocksize(ic->journal_crypt);
2875
2876 if (ic->journal_crypt_alg.key) {
2877 r = crypto_skcipher_setkey(ic->journal_crypt, ic->journal_crypt_alg.key,
2878 ic->journal_crypt_alg.key_size);
2879 if (r) {
2880 *error = "Error setting encryption key";
2881 goto bad;
2882 }
2883 }
2884 DEBUG_print("cipher %s, block size %u iv size %u\n",
2885 ic->journal_crypt_alg.alg_string, blocksize, ivsize);
2886
2887 ic->journal_io = dm_integrity_alloc_page_list(ic);
2888 if (!ic->journal_io) {
2889 *error = "Could not allocate memory for journal io";
2890 r = -ENOMEM;
2891 goto bad;
2892 }
2893
2894 if (blocksize == 1) {
2895 struct scatterlist *sg;
Mikulas Patocka717f4b12018-01-10 09:32:47 -05002896
2897 req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
2898 if (!req) {
2899 *error = "Could not allocate crypt request";
2900 r = -ENOMEM;
2901 goto bad;
2902 }
2903
2904 crypt_iv = kmalloc(ivsize, GFP_KERNEL);
2905 if (!crypt_iv) {
2906 *error = "Could not allocate iv";
2907 r = -ENOMEM;
2908 goto bad;
2909 }
Mike Snitzer1aa0efd2017-03-17 14:56:17 -04002910
2911 ic->journal_xor = dm_integrity_alloc_page_list(ic);
2912 if (!ic->journal_xor) {
2913 *error = "Could not allocate memory for journal xor";
2914 r = -ENOMEM;
2915 goto bad;
2916 }
2917
Kees Cook344476e2018-06-12 14:04:32 -07002918 sg = kvmalloc_array(ic->journal_pages + 1,
2919 sizeof(struct scatterlist),
2920 GFP_KERNEL);
Mike Snitzer1aa0efd2017-03-17 14:56:17 -04002921 if (!sg) {
2922 *error = "Unable to allocate sg list";
2923 r = -ENOMEM;
2924 goto bad;
2925 }
2926 sg_init_table(sg, ic->journal_pages + 1);
2927 for (i = 0; i < ic->journal_pages; i++) {
2928 char *va = lowmem_page_address(ic->journal_xor[i].page);
2929 clear_page(va);
2930 sg_set_buf(&sg[i], va, PAGE_SIZE);
2931 }
2932 sg_set_buf(&sg[i], &ic->commit_ids, sizeof ic->commit_ids);
Mikulas Patocka717f4b12018-01-10 09:32:47 -05002933 memset(crypt_iv, 0x00, ivsize);
Mike Snitzer1aa0efd2017-03-17 14:56:17 -04002934
Mikulas Patocka717f4b12018-01-10 09:32:47 -05002935 skcipher_request_set_crypt(req, sg, sg, PAGE_SIZE * ic->journal_pages + sizeof ic->commit_ids, crypt_iv);
Arnd Bergmannb5e8ad92017-08-15 17:11:59 +02002936 init_completion(&comp.comp);
Mike Snitzer1aa0efd2017-03-17 14:56:17 -04002937 comp.in_flight = (atomic_t)ATOMIC_INIT(1);
2938 if (do_crypt(true, req, &comp))
2939 wait_for_completion(&comp.comp);
2940 kvfree(sg);
2941 r = dm_integrity_failed(ic);
2942 if (r) {
2943 *error = "Unable to encrypt journal";
2944 goto bad;
2945 }
2946 DEBUG_bytes(lowmem_page_address(ic->journal_xor[0].page), 64, "xor data");
2947
2948 crypto_free_skcipher(ic->journal_crypt);
2949 ic->journal_crypt = NULL;
2950 } else {
Mike Snitzer1aa0efd2017-03-17 14:56:17 -04002951 unsigned crypt_len = roundup(ivsize, blocksize);
Mikulas Patocka56b67a42017-04-18 16:51:50 -04002952
Mikulas Patocka717f4b12018-01-10 09:32:47 -05002953 req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
2954 if (!req) {
2955 *error = "Could not allocate crypt request";
2956 r = -ENOMEM;
2957 goto bad;
2958 }
2959
2960 crypt_iv = kmalloc(ivsize, GFP_KERNEL);
2961 if (!crypt_iv) {
2962 *error = "Could not allocate iv";
2963 r = -ENOMEM;
2964 goto bad;
2965 }
2966
Mikulas Patocka56b67a42017-04-18 16:51:50 -04002967 crypt_data = kmalloc(crypt_len, GFP_KERNEL);
2968 if (!crypt_data) {
2969 *error = "Unable to allocate crypt data";
2970 r = -ENOMEM;
2971 goto bad;
2972 }
Mike Snitzer1aa0efd2017-03-17 14:56:17 -04002973
Mike Snitzer1aa0efd2017-03-17 14:56:17 -04002974 ic->journal_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal);
2975 if (!ic->journal_scatterlist) {
2976 *error = "Unable to allocate sg list";
2977 r = -ENOMEM;
2978 goto bad;
2979 }
2980 ic->journal_io_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal_io);
2981 if (!ic->journal_io_scatterlist) {
2982 *error = "Unable to allocate sg list";
2983 r = -ENOMEM;
2984 goto bad;
2985 }
Kees Cook344476e2018-06-12 14:04:32 -07002986 ic->sk_requests = kvmalloc_array(ic->journal_sections,
2987 sizeof(struct skcipher_request *),
2988 GFP_KERNEL | __GFP_ZERO);
Mike Snitzer1aa0efd2017-03-17 14:56:17 -04002989 if (!ic->sk_requests) {
2990 *error = "Unable to allocate sk requests";
2991 r = -ENOMEM;
2992 goto bad;
2993 }
2994 for (i = 0; i < ic->journal_sections; i++) {
2995 struct scatterlist sg;
2996 struct skcipher_request *section_req;
2997 __u32 section_le = cpu_to_le32(i);
2998
Mikulas Patocka717f4b12018-01-10 09:32:47 -05002999 memset(crypt_iv, 0x00, ivsize);
Mike Snitzer1aa0efd2017-03-17 14:56:17 -04003000 memset(crypt_data, 0x00, crypt_len);
3001 memcpy(crypt_data, &section_le, min((size_t)crypt_len, sizeof(section_le)));
3002
3003 sg_init_one(&sg, crypt_data, crypt_len);
Mikulas Patocka717f4b12018-01-10 09:32:47 -05003004 skcipher_request_set_crypt(req, &sg, &sg, crypt_len, crypt_iv);
Arnd Bergmannb5e8ad92017-08-15 17:11:59 +02003005 init_completion(&comp.comp);
Mike Snitzer1aa0efd2017-03-17 14:56:17 -04003006 comp.in_flight = (atomic_t)ATOMIC_INIT(1);
3007 if (do_crypt(true, req, &comp))
3008 wait_for_completion(&comp.comp);
3009
3010 r = dm_integrity_failed(ic);
3011 if (r) {
3012 *error = "Unable to generate iv";
3013 goto bad;
3014 }
3015
3016 section_req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
3017 if (!section_req) {
3018 *error = "Unable to allocate crypt request";
3019 r = -ENOMEM;
3020 goto bad;
3021 }
				section_req->iv = kmalloc_array(ivsize, 2, GFP_KERNEL);
				if (!section_req->iv) {
					skcipher_request_free(section_req);
					*error = "Unable to allocate iv";
					r = -ENOMEM;
					goto bad;
				}
				memcpy(section_req->iv + ivsize, crypt_data, ivsize);
				section_req->cryptlen = (size_t)ic->journal_section_sectors << SECTOR_SHIFT;
				ic->sk_requests[i] = section_req;
				DEBUG_bytes(crypt_data, ivsize, "iv(%u)", i);
			}
		}
	}

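	/*
	 * All N_COMMIT_IDS commit ids must be pairwise distinct: bump any
	 * duplicate until they differ, so the commit id stored in a journal
	 * sector unambiguously identifies the generation it was written in.
	 */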
	for (i = 0; i < N_COMMIT_IDS; i++) {
		unsigned j;
retest_commit_id:
		for (j = 0; j < i; j++) {
			if (ic->commit_ids[j] == ic->commit_ids[i]) {
				ic->commit_ids[i] = cpu_to_le64(le64_to_cpu(ic->commit_ids[i]) + 1);
				goto retest_commit_id;
			}
		}
		DEBUG_print("commit id %u: %016llx\n", i, ic->commit_ids[i]);
	}

	journal_tree_size = (__u64)ic->journal_entries * sizeof(struct journal_node);
	if (journal_tree_size > ULONG_MAX) {
		*error = "Journal doesn't fit into memory";
		r = -ENOMEM;
		goto bad;
	}
	ic->journal_tree = kvmalloc(journal_tree_size, GFP_KERNEL);
	if (!ic->journal_tree) {
		*error = "Could not allocate memory for journal tree";
		r = -ENOMEM;
	}
bad:
	kfree(crypt_data);
	kfree(crypt_iv);
	skcipher_request_free(req);

	return r;
}

/*
 * Construct an integrity mapping
 *
 * Arguments:
 *	device
 *	offset from the start of the device
 *	tag size
 *	D - direct writes, J - journal writes, R - recovery mode
 *	number of optional arguments
 *	optional arguments:
 *		journal_sectors
 *		interleave_sectors
 *		buffer_sectors
 *		journal_watermark
 *		commit_time
 *		meta_device
 *		block_size
 *		internal_hash
 *		journal_crypt
 *		journal_mac
 *		recalculate
 */
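/*
 * For illustration only (this example is not part of the driver): a
 * hypothetical dmsetup table line using journaled writes, 4096-byte
 * blocks and an internal crc32c hash could look like
 *
 *	0 1953792 integrity /dev/sdb 0 - J 2 block_size:4096 internal_hash:crc32c
 *
 * where the tag size "-" lets it default to the internal hash digest size.
 */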
static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
	struct dm_integrity_c *ic;
	char dummy;
	int r;
	unsigned extra_args;
	struct dm_arg_set as;
	static const struct dm_arg _args[] = {
		{0, 9, "Invalid number of feature args"},
	};
	unsigned journal_sectors, interleave_sectors, buffer_sectors, journal_watermark, sync_msec;
	bool recalculate;
	bool should_write_sb;
	__u64 threshold;
	unsigned long long start;

#define DIRECT_ARGUMENTS 4

	if (argc <= DIRECT_ARGUMENTS) {
		ti->error = "Invalid argument count";
		return -EINVAL;
	}

	ic = kzalloc(sizeof(struct dm_integrity_c), GFP_KERNEL);
	if (!ic) {
		ti->error = "Cannot allocate integrity context";
		return -ENOMEM;
	}
	ti->private = ic;
	ti->per_io_data_size = sizeof(struct dm_integrity_io);

	ic->in_progress = RB_ROOT;
	INIT_LIST_HEAD(&ic->wait_list);
	init_waitqueue_head(&ic->endio_wait);
	bio_list_init(&ic->flush_bio_list);
	init_waitqueue_head(&ic->copy_to_journal_wait);
	init_completion(&ic->crypto_backoff);
	atomic64_set(&ic->number_of_mismatches, 0);

	r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &ic->dev);
	if (r) {
		ti->error = "Device lookup failed";
		goto bad;
	}

	if (sscanf(argv[1], "%llu%c", &start, &dummy) != 1 || start != (sector_t)start) {
		ti->error = "Invalid starting offset";
		r = -EINVAL;
		goto bad;
	}
	ic->start = start;

	if (strcmp(argv[2], "-")) {
		if (sscanf(argv[2], "%u%c", &ic->tag_size, &dummy) != 1 || !ic->tag_size) {
			ti->error = "Invalid tag size";
			r = -EINVAL;
			goto bad;
		}
	}

	if (!strcmp(argv[3], "J") || !strcmp(argv[3], "D") || !strcmp(argv[3], "R")) {
		ic->mode = argv[3][0];
	} else {
		ti->error = "Invalid mode (expecting J, D, R)";
		r = -EINVAL;
		goto bad;
	}

	journal_sectors = 0;
	interleave_sectors = DEFAULT_INTERLEAVE_SECTORS;
	buffer_sectors = DEFAULT_BUFFER_SECTORS;
	journal_watermark = DEFAULT_JOURNAL_WATERMARK;
	sync_msec = DEFAULT_SYNC_MSEC;
	recalculate = false;
	ic->sectors_per_block = 1;

	as.argc = argc - DIRECT_ARGUMENTS;
	as.argv = argv + DIRECT_ARGUMENTS;
	r = dm_read_arg_group(_args, &as, &extra_args, &ti->error);
	if (r)
		goto bad;

	while (extra_args--) {
		const char *opt_string;
		unsigned val;
		opt_string = dm_shift_arg(&as);
		if (!opt_string) {
			r = -EINVAL;
			ti->error = "Not enough feature arguments";
			goto bad;
		}
		if (sscanf(opt_string, "journal_sectors:%u%c", &val, &dummy) == 1)
			journal_sectors = val ? val : 1;
		else if (sscanf(opt_string, "interleave_sectors:%u%c", &val, &dummy) == 1)
			interleave_sectors = val;
		else if (sscanf(opt_string, "buffer_sectors:%u%c", &val, &dummy) == 1)
			buffer_sectors = val;
		else if (sscanf(opt_string, "journal_watermark:%u%c", &val, &dummy) == 1 && val <= 100)
			journal_watermark = val;
		else if (sscanf(opt_string, "commit_time:%u%c", &val, &dummy) == 1)
			sync_msec = val;
		else if (!strncmp(opt_string, "meta_device:", strlen("meta_device:"))) {
			if (ic->meta_dev) {
				dm_put_device(ti, ic->meta_dev);
				ic->meta_dev = NULL;
			}
			r = dm_get_device(ti, strchr(opt_string, ':') + 1, dm_table_get_mode(ti->table), &ic->meta_dev);
			if (r) {
				ti->error = "Device lookup failed";
				goto bad;
			}
		} else if (sscanf(opt_string, "block_size:%u%c", &val, &dummy) == 1) {
			if (val < 1 << SECTOR_SHIFT ||
			    val > MAX_SECTORS_PER_BLOCK << SECTOR_SHIFT ||
			    (val & (val - 1))) {
				r = -EINVAL;
				ti->error = "Invalid block_size argument";
				goto bad;
			}
			ic->sectors_per_block = val >> SECTOR_SHIFT;
		} else if (!strncmp(opt_string, "internal_hash:", strlen("internal_hash:"))) {
			r = get_alg_and_key(opt_string, &ic->internal_hash_alg, &ti->error,
					    "Invalid internal_hash argument");
			if (r)
				goto bad;
		} else if (!strncmp(opt_string, "journal_crypt:", strlen("journal_crypt:"))) {
			r = get_alg_and_key(opt_string, &ic->journal_crypt_alg, &ti->error,
					    "Invalid journal_crypt argument");
			if (r)
				goto bad;
		} else if (!strncmp(opt_string, "journal_mac:", strlen("journal_mac:"))) {
			r = get_alg_and_key(opt_string, &ic->journal_mac_alg, &ti->error,
					    "Invalid journal_mac argument");
			if (r)
				goto bad;
		} else if (!strcmp(opt_string, "recalculate")) {
			recalculate = true;
		} else {
			r = -EINVAL;
			ti->error = "Invalid argument";
			goto bad;
		}
	}

	ic->data_device_sectors = i_size_read(ic->dev->bdev->bd_inode) >> SECTOR_SHIFT;
	if (!ic->meta_dev)
		ic->meta_device_sectors = ic->data_device_sectors;
	else
		ic->meta_device_sectors = i_size_read(ic->meta_dev->bdev->bd_inode) >> SECTOR_SHIFT;

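	/*
	 * If no journal size was requested, default to 1/128 of the data
	 * device (DEFAULT_JOURNAL_SIZE_FACTOR), capped at 64 MiB
	 * (DEFAULT_MAX_JOURNAL_SECTORS).
	 */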
	if (!journal_sectors) {
		journal_sectors = min((sector_t)DEFAULT_MAX_JOURNAL_SECTORS,
				      ic->data_device_sectors >> DEFAULT_JOURNAL_SIZE_FACTOR);
	}

	if (!buffer_sectors)
		buffer_sectors = 1;
	ic->log2_buffer_sectors = min((int)__fls(buffer_sectors), 31 - SECTOR_SHIFT);

	r = get_mac(&ic->internal_hash, &ic->internal_hash_alg, &ti->error,
		    "Invalid internal hash", "Error setting internal hash key");
	if (r)
		goto bad;

	r = get_mac(&ic->journal_mac, &ic->journal_mac_alg, &ti->error,
		    "Invalid journal mac", "Error setting journal mac key");
	if (r)
		goto bad;

	if (!ic->tag_size) {
		if (!ic->internal_hash) {
			ti->error = "Unknown tag size";
			r = -EINVAL;
			goto bad;
		}
		ic->tag_size = crypto_shash_digestsize(ic->internal_hash);
	}
	if (ic->tag_size > MAX_TAG_SIZE) {
		ti->error = "Too big tag size";
		r = -EINVAL;
		goto bad;
	}
	if (!(ic->tag_size & (ic->tag_size - 1)))
		ic->log2_tag_size = __ffs(ic->tag_size);
	else
		ic->log2_tag_size = -1;

	ic->autocommit_jiffies = msecs_to_jiffies(sync_msec);
	ic->autocommit_msec = sync_msec;
	timer_setup(&ic->autocommit_timer, autocommit_fn, 0);

	ic->io = dm_io_client_create();
	if (IS_ERR(ic->io)) {
		r = PTR_ERR(ic->io);
		ic->io = NULL;
		ti->error = "Cannot allocate dm io";
		goto bad;
	}

	r = mempool_init_slab_pool(&ic->journal_io_mempool, JOURNAL_IO_MEMPOOL, journal_io_cache);
	if (r) {
		ti->error = "Cannot allocate mempool";
		goto bad;
	}

	ic->metadata_wq = alloc_workqueue("dm-integrity-metadata",
					  WQ_MEM_RECLAIM, METADATA_WORKQUEUE_MAX_ACTIVE);
	if (!ic->metadata_wq) {
		ti->error = "Cannot allocate workqueue";
		r = -ENOMEM;
		goto bad;
	}

	/*
	 * If this workqueue were percpu, it would cause bio reordering
	 * and reduce performance.
	 */
	ic->wait_wq = alloc_workqueue("dm-integrity-wait", WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
	if (!ic->wait_wq) {
		ti->error = "Cannot allocate workqueue";
		r = -ENOMEM;
		goto bad;
	}

	ic->commit_wq = alloc_workqueue("dm-integrity-commit", WQ_MEM_RECLAIM, 1);
	if (!ic->commit_wq) {
		ti->error = "Cannot allocate workqueue";
		r = -ENOMEM;
		goto bad;
	}
	INIT_WORK(&ic->commit_work, integrity_commit);

	if (ic->mode == 'J') {
		ic->writer_wq = alloc_workqueue("dm-integrity-writer", WQ_MEM_RECLAIM, 1);
		if (!ic->writer_wq) {
			ti->error = "Cannot allocate workqueue";
			r = -ENOMEM;
			goto bad;
		}
		INIT_WORK(&ic->writer_work, integrity_writer);
	}

	ic->sb = alloc_pages_exact(SB_SECTORS << SECTOR_SHIFT, GFP_KERNEL);
	if (!ic->sb) {
		r = -ENOMEM;
		ti->error = "Cannot allocate superblock area";
		goto bad;
	}

	r = sync_rw_sb(ic, REQ_OP_READ, 0);
	if (r) {
		ti->error = "Error reading superblock";
		goto bad;
	}
	should_write_sb = false;
	if (memcmp(ic->sb->magic, SB_MAGIC, 8)) {
		if (ic->mode != 'R') {
			if (memchr_inv(ic->sb, 0, SB_SECTORS << SECTOR_SHIFT)) {
				r = -EINVAL;
				ti->error = "The device is not initialized";
				goto bad;
			}
		}

		r = initialize_superblock(ic, journal_sectors, interleave_sectors);
		if (r) {
			ti->error = "Could not initialize superblock";
			goto bad;
		}
		if (ic->mode != 'R')
			should_write_sb = true;
	}

	if (!ic->sb->version || ic->sb->version > SB_VERSION_2) {
		r = -EINVAL;
		ti->error = "Unknown version";
		goto bad;
	}
	if (le16_to_cpu(ic->sb->integrity_tag_size) != ic->tag_size) {
		r = -EINVAL;
		ti->error = "Tag size doesn't match the information in superblock";
		goto bad;
	}
	if (ic->sb->log2_sectors_per_block != __ffs(ic->sectors_per_block)) {
		r = -EINVAL;
		ti->error = "Block size doesn't match the information in superblock";
		goto bad;
	}
	if (!le32_to_cpu(ic->sb->journal_sections)) {
		r = -EINVAL;
		ti->error = "Corrupted superblock, journal_sections is 0";
		goto bad;
	}
	/* make sure that ti->max_io_len doesn't overflow */
	if (!ic->meta_dev) {
		if (ic->sb->log2_interleave_sectors < MIN_LOG2_INTERLEAVE_SECTORS ||
		    ic->sb->log2_interleave_sectors > MAX_LOG2_INTERLEAVE_SECTORS) {
			r = -EINVAL;
			ti->error = "Invalid interleave_sectors in the superblock";
			goto bad;
		}
	} else {
		if (ic->sb->log2_interleave_sectors) {
			r = -EINVAL;
			ti->error = "Invalid interleave_sectors in the superblock";
			goto bad;
		}
	}
	ic->provided_data_sectors = le64_to_cpu(ic->sb->provided_data_sectors);
	if (ic->provided_data_sectors != le64_to_cpu(ic->sb->provided_data_sectors)) {
		/* test for overflow */
		r = -EINVAL;
		ti->error = "The superblock has 64-bit device size, but the kernel was compiled with 32-bit sectors";
		goto bad;
	}
	if (!!(ic->sb->flags & cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC)) != !!ic->journal_mac_alg.alg_string) {
		r = -EINVAL;
		ti->error = "Journal mac mismatch";
		goto bad;
	}

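	/*
	 * With a separate metadata device the computed limits may not fit;
	 * in that case retry with progressively smaller buffers, down to
	 * 2^3 = 8 sectors, before giving up.
	 */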
try_smaller_buffer:
	r = calculate_device_limits(ic);
	if (r) {
		if (ic->meta_dev) {
			if (ic->log2_buffer_sectors > 3) {
				ic->log2_buffer_sectors--;
				goto try_smaller_buffer;
			}
		}
		ti->error = "The device is too small";
		goto bad;
	}
	if (!ic->meta_dev)
		ic->log2_buffer_sectors = min(ic->log2_buffer_sectors, (__u8)__ffs(ic->metadata_run));

	if (ti->len > ic->provided_data_sectors) {
		r = -EINVAL;
		ti->error = "Not enough provided sectors for requested mapping size";
		goto bad;
	}

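	/*
	 * Turn the journal watermark percentage into an absolute count of
	 * free journal entries (rounded to nearest); this threshold is used
	 * to decide when to start flushing the journal to the device.
	 */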
	threshold = (__u64)ic->journal_entries * (100 - journal_watermark);
	threshold += 50;
	do_div(threshold, 100);
	ic->free_sectors_threshold = threshold;

	DEBUG_print("initialized:\n");
	DEBUG_print(" integrity_tag_size %u\n", le16_to_cpu(ic->sb->integrity_tag_size));
	DEBUG_print(" journal_entry_size %u\n", ic->journal_entry_size);
	DEBUG_print(" journal_entries_per_sector %u\n", ic->journal_entries_per_sector);
	DEBUG_print(" journal_section_entries %u\n", ic->journal_section_entries);
	DEBUG_print(" journal_section_sectors %u\n", ic->journal_section_sectors);
	DEBUG_print(" journal_sections %u\n", (unsigned)le32_to_cpu(ic->sb->journal_sections));
	DEBUG_print(" journal_entries %u\n", ic->journal_entries);
	DEBUG_print(" log2_interleave_sectors %d\n", ic->sb->log2_interleave_sectors);
	DEBUG_print(" data_device_sectors 0x%llx\n", (unsigned long long)ic->data_device_sectors);
	DEBUG_print(" initial_sectors 0x%x\n", ic->initial_sectors);
	DEBUG_print(" metadata_run 0x%x\n", ic->metadata_run);
	DEBUG_print(" log2_metadata_run %d\n", ic->log2_metadata_run);
	DEBUG_print(" provided_data_sectors 0x%llx (%llu)\n", (unsigned long long)ic->provided_data_sectors,
		    (unsigned long long)ic->provided_data_sectors);
	DEBUG_print(" log2_buffer_sectors %u\n", ic->log2_buffer_sectors);

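	/*
	 * A new "recalculate" request starts tag recalculation from sector 0;
	 * if the flag is already set in the superblock, an interrupted
	 * recalculation is resumed from the stored position instead.
	 */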
	if (recalculate && !(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))) {
		ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
		ic->sb->recalc_sector = cpu_to_le64(0);
	}

	if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
		if (!ic->internal_hash) {
			r = -EINVAL;
			ti->error = "Recalculate is only valid with internal hash";
			goto bad;
		}
		ic->recalc_wq = alloc_workqueue("dm-integrity-recalc", WQ_MEM_RECLAIM, 1);
		if (!ic->recalc_wq) {
			ti->error = "Cannot allocate workqueue";
			r = -ENOMEM;
			goto bad;
		}
		INIT_WORK(&ic->recalc_work, integrity_recalc);
		ic->recalc_buffer = vmalloc(RECALC_SECTORS << SECTOR_SHIFT);
		if (!ic->recalc_buffer) {
			ti->error = "Cannot allocate buffer for recalculating";
			r = -ENOMEM;
			goto bad;
		}
		ic->recalc_tags = kvmalloc_array(RECALC_SECTORS >> ic->sb->log2_sectors_per_block,
						 ic->tag_size, GFP_KERNEL);
		if (!ic->recalc_tags) {
			ti->error = "Cannot allocate tags for recalculating";
			r = -ENOMEM;
			goto bad;
		}
	}

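	/*
	 * All metadata (tag) area I/O goes through dm-bufio, in buffers of
	 * 2^log2_buffer_sectors sectors, offset past the superblock and
	 * journal area.
	 */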
	ic->bufio = dm_bufio_client_create(ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev,
			1U << (SECTOR_SHIFT + ic->log2_buffer_sectors), 1, 0, NULL, NULL);
	if (IS_ERR(ic->bufio)) {
		r = PTR_ERR(ic->bufio);
		ti->error = "Cannot initialize dm-bufio";
		ic->bufio = NULL;
		goto bad;
	}
	dm_bufio_set_sector_offset(ic->bufio, ic->start + ic->initial_sectors);

	if (ic->mode != 'R') {
		r = create_journal(ic, &ti->error);
		if (r)
			goto bad;
	}

	if (should_write_sb) {
		init_journal(ic, 0, ic->journal_sections, 0);
		r = dm_integrity_failed(ic);
		if (unlikely(r)) {
			ti->error = "Error initializing journal";
			goto bad;
		}
		r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
		if (r) {
			ti->error = "Error initializing superblock";
			goto bad;
		}
		ic->just_formatted = true;
	}

	if (!ic->meta_dev) {
		r = dm_set_target_max_io_len(ti, 1U << ic->sb->log2_interleave_sectors);
		if (r)
			goto bad;
	}

	if (!ic->internal_hash)
		dm_integrity_set(ti, ic);

	ti->num_flush_bios = 1;
	ti->flush_supported = true;

	return 0;
bad:
	dm_integrity_dtr(ti);
	return r;
}

static void dm_integrity_dtr(struct dm_target *ti)
{
	struct dm_integrity_c *ic = ti->private;

	BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress));
	BUG_ON(!list_empty(&ic->wait_list));

	if (ic->metadata_wq)
		destroy_workqueue(ic->metadata_wq);
	if (ic->wait_wq)
		destroy_workqueue(ic->wait_wq);
	if (ic->commit_wq)
		destroy_workqueue(ic->commit_wq);
	if (ic->writer_wq)
		destroy_workqueue(ic->writer_wq);
	if (ic->recalc_wq)
		destroy_workqueue(ic->recalc_wq);
	vfree(ic->recalc_buffer);
	kvfree(ic->recalc_tags);
	if (ic->bufio)
		dm_bufio_client_destroy(ic->bufio);
	mempool_exit(&ic->journal_io_mempool);
	if (ic->io)
		dm_io_client_destroy(ic->io);
	if (ic->dev)
		dm_put_device(ti, ic->dev);
	if (ic->meta_dev)
		dm_put_device(ti, ic->meta_dev);
	dm_integrity_free_page_list(ic, ic->journal);
	dm_integrity_free_page_list(ic, ic->journal_io);
	dm_integrity_free_page_list(ic, ic->journal_xor);
	if (ic->journal_scatterlist)
		dm_integrity_free_journal_scatterlist(ic, ic->journal_scatterlist);
	if (ic->journal_io_scatterlist)
		dm_integrity_free_journal_scatterlist(ic, ic->journal_io_scatterlist);
	if (ic->sk_requests) {
		unsigned i;

		for (i = 0; i < ic->journal_sections; i++) {
			struct skcipher_request *req = ic->sk_requests[i];
			if (req) {
				kzfree(req->iv);
				skcipher_request_free(req);
			}
		}
		kvfree(ic->sk_requests);
	}
	kvfree(ic->journal_tree);
	if (ic->sb)
		free_pages_exact(ic->sb, SB_SECTORS << SECTOR_SHIFT);

	if (ic->internal_hash)
		crypto_free_shash(ic->internal_hash);
	free_alg(&ic->internal_hash_alg);

	if (ic->journal_crypt)
		crypto_free_skcipher(ic->journal_crypt);
	free_alg(&ic->journal_crypt_alg);

	if (ic->journal_mac)
		crypto_free_shash(ic->journal_mac);
	free_alg(&ic->journal_mac_alg);

	kfree(ic);
}

static struct target_type integrity_target = {
	.name			= "integrity",
	.version		= {1, 2, 0},
	.module			= THIS_MODULE,
	.features		= DM_TARGET_SINGLETON | DM_TARGET_INTEGRITY,
	.ctr			= dm_integrity_ctr,
	.dtr			= dm_integrity_dtr,
	.map			= dm_integrity_map,
	.postsuspend		= dm_integrity_postsuspend,
	.resume			= dm_integrity_resume,
	.status			= dm_integrity_status,
	.iterate_devices	= dm_integrity_iterate_devices,
	.io_hints		= dm_integrity_io_hints,
};

static int __init dm_integrity_init(void)
{
	int r;

	journal_io_cache = kmem_cache_create("integrity_journal_io",
					     sizeof(struct journal_io), 0, 0, NULL);
	if (!journal_io_cache) {
		DMERR("can't allocate journal io cache");
		return -ENOMEM;
	}

	r = dm_register_target(&integrity_target);

	if (r < 0)
		DMERR("register failed %d", r);

	return r;
}

static void __exit dm_integrity_exit(void)
{
	dm_unregister_target(&integrity_target);
	kmem_cache_destroy(journal_io_cache);
}

module_init(dm_integrity_init);
module_exit(dm_integrity_exit);

MODULE_AUTHOR("Milan Broz");
MODULE_AUTHOR("Mikulas Patocka");
MODULE_DESCRIPTION(DM_NAME " target for integrity tags extension");
MODULE_LICENSE("GPL");