/*
 * dm-snapshot.c
 *
 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
 *
 * This file is released under the GPL.
 */

#include <linux/blkdev.h>
#include <linux/device-mapper.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kdev_t.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/log2.h>
#include <linux/dm-kcopyd.h>

#include "dm.h"

#include "dm-exception-store.h"

#define DM_MSG_PREFIX "snapshots"

static const char dm_snapshot_merge_target_name[] = "snapshot-merge";

#define dm_target_is_snapshot_merge(ti) \
	((ti)->type->name == dm_snapshot_merge_target_name)

/*
 * The size of the mempool used to track chunks in use.
 */
#define MIN_IOS 256

#define DM_TRACKED_CHUNK_HASH_SIZE	16
#define DM_TRACKED_CHUNK_HASH(x)	((unsigned long)(x) & \
					 (DM_TRACKED_CHUNK_HASH_SIZE - 1))
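
/*
 * Illustrative note: with DM_TRACKED_CHUNK_HASH_SIZE == 16 only the low
 * four bits of the chunk number survive the mask, so e.g. chunk 0x1234
 * is tracked in bucket 0x4, and any 16 consecutive chunk numbers spread
 * across all 16 buckets before the numbering wraps.
 */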

struct dm_exception_table {
	uint32_t hash_mask;
	unsigned hash_shift;
	struct list_head *table;
};

struct dm_snapshot {
	struct mutex lock;

	struct dm_dev *origin;
	struct dm_dev *cow;

	struct dm_target *ti;

	/* List of snapshots per Origin */
	struct list_head list;

	/*
	 * You can't use a snapshot if this is 0 (e.g. if full).
	 * A snapshot-merge target never clears this.
	 */
	int valid;

	/*
	 * The snapshot overflowed because of a write to the snapshot device.
	 * We don't have to invalidate the snapshot in this case, but we need
	 * to prevent further writes.
	 */
	int snapshot_overflowed;

	/* Origin writes don't trigger exceptions until this is set */
	int active;

	atomic_t pending_exceptions_count;

	/* Protected by "lock" */
	sector_t exception_start_sequence;

	/* Protected by kcopyd single-threaded callback */
	sector_t exception_complete_sequence;

	/*
	 * A list of pending exceptions that completed out of order.
	 * Protected by kcopyd single-threaded callback.
	 */
	struct list_head out_of_order_list;

	mempool_t *pending_pool;

	struct dm_exception_table pending;
	struct dm_exception_table complete;

	/*
	 * pe_lock protects all pending_exception operations and access
	 * as well as the snapshot_bios list.
	 */
	spinlock_t pe_lock;

	/* Chunks with outstanding reads */
	spinlock_t tracked_chunk_lock;
	struct hlist_head tracked_chunk_hash[DM_TRACKED_CHUNK_HASH_SIZE];

	/* The on disk metadata handler */
	struct dm_exception_store *store;

	unsigned in_progress;
	wait_queue_head_t in_progress_wait;

	struct dm_kcopyd_client *kcopyd_client;

	/* Wait for events based on state_bits */
	unsigned long state_bits;

	/* Range of chunks currently being merged. */
	chunk_t first_merging_chunk;
	int num_merging_chunks;

	/*
	 * The merge operation failed if this flag is set.
	 * Failure modes are handled as follows:
	 * - I/O error reading the header
	 *   => don't load the target; abort.
	 * - Header does not have "valid" flag set
	 *   => use the origin; forget about the snapshot.
	 * - I/O error when reading exceptions
	 *   => don't load the target; abort.
	 *      (We can't use the intermediate origin state.)
	 * - I/O error while merging
	 *   => stop merging; set merge_failed; process I/O normally.
	 */
	int merge_failed;

	/*
	 * Incoming bios that overlap with chunks being merged must wait
	 * for them to be committed.
	 */
	struct bio_list bios_queued_during_merge;
};

/*
 * state_bits:
 *   RUNNING_MERGE  - Merge operation is in progress.
 *   SHUTDOWN_MERGE - Set to signal that merge needs to be stopped;
 *                    cleared afterwards.
 */
#define RUNNING_MERGE		0
#define SHUTDOWN_MERGE		1

/*
 * Maximum number of chunks being copied on write.
 *
 * The value was decided experimentally as a trade-off between memory
 * consumption, stalling the kernel's workqueues and maintaining a high enough
 * throughput.
 */
#define DEFAULT_COW_THRESHOLD 2048

static unsigned cow_threshold = DEFAULT_COW_THRESHOLD;
module_param_named(snapshot_cow_threshold, cow_threshold, uint, 0644);
MODULE_PARM_DESC(snapshot_cow_threshold, "Maximum number of chunks being copied on write");
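
/*
 * The parameter above is writable at runtime (mode 0644); on a typical
 * build it is expected to appear as
 * /sys/module/dm_snapshot/parameters/snapshot_cow_threshold, e.g.
 *   echo 4096 > /sys/module/dm_snapshot/parameters/snapshot_cow_threshold
 * (illustrative path, assuming the usual dm-snapshot module naming).
 */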

DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
		"A percentage of time allocated for copy on write");

struct dm_dev *dm_snap_origin(struct dm_snapshot *s)
{
	return s->origin;
}
EXPORT_SYMBOL(dm_snap_origin);

struct dm_dev *dm_snap_cow(struct dm_snapshot *s)
{
	return s->cow;
}
EXPORT_SYMBOL(dm_snap_cow);

static sector_t chunk_to_sector(struct dm_exception_store *store,
				chunk_t chunk)
{
	return chunk << store->chunk_shift;
}

static int bdev_equal(struct block_device *lhs, struct block_device *rhs)
{
	/*
	 * There is only ever one instance of a particular block
	 * device so we can compare pointers safely.
	 */
	return lhs == rhs;
}

struct dm_snap_pending_exception {
	struct dm_exception e;

	/*
	 * Origin buffers waiting for this to complete are held
	 * in a bio list
	 */
	struct bio_list origin_bios;
	struct bio_list snapshot_bios;

	/* Pointer back to snapshot context */
	struct dm_snapshot *snap;

	/*
	 * 1 indicates the exception has already been sent to
	 * kcopyd.
	 */
	int started;

	/* There was a copying error. */
	int copy_error;

	/* A sequence number, used for in-order completion. */
	sector_t exception_sequence;

	struct list_head out_of_order_entry;

	/*
	 * For writing a complete chunk, bypassing the copy.
	 */
	struct bio *full_bio;
	bio_end_io_t *full_bio_end_io;
};

/*
 * Hash table mapping origin volumes to lists of snapshots and
 * a lock to protect it
 */
static struct kmem_cache *exception_cache;
static struct kmem_cache *pending_cache;

struct dm_snap_tracked_chunk {
	struct hlist_node node;
	chunk_t chunk;
};
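
/*
 * Each bio with an outstanding read on a chunk is recorded in
 * s->tracked_chunk_hash (see track_chunk() below); __chunk_is_tracked()
 * and __check_for_conflicting_io() use this to wait for conflicting
 * reads to drain before a chunk is copied or merged.
 */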

static void init_tracked_chunk(struct bio *bio)
{
	struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
	INIT_HLIST_NODE(&c->node);
}

static bool is_bio_tracked(struct bio *bio)
{
	struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
	return !hlist_unhashed(&c->node);
}

static void track_chunk(struct dm_snapshot *s, struct bio *bio, chunk_t chunk)
{
	struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));

	c->chunk = chunk;

	spin_lock_irq(&s->tracked_chunk_lock);
	hlist_add_head(&c->node,
		       &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)]);
	spin_unlock_irq(&s->tracked_chunk_lock);
}

static void stop_tracking_chunk(struct dm_snapshot *s, struct bio *bio)
{
	struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
	unsigned long flags;

	spin_lock_irqsave(&s->tracked_chunk_lock, flags);
	hlist_del(&c->node);
	spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);
}

static int __chunk_is_tracked(struct dm_snapshot *s, chunk_t chunk)
{
	struct dm_snap_tracked_chunk *c;
	int found = 0;

	spin_lock_irq(&s->tracked_chunk_lock);

	hlist_for_each_entry(c,
	    &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)], node) {
		if (c->chunk == chunk) {
			found = 1;
			break;
		}
	}

	spin_unlock_irq(&s->tracked_chunk_lock);

	return found;
}

/*
 * This conflicting I/O is extremely improbable in the caller,
 * so msleep(1) is sufficient and there is no need for a wait queue.
 */
static void __check_for_conflicting_io(struct dm_snapshot *s, chunk_t chunk)
{
	while (__chunk_is_tracked(s, chunk))
		msleep(1);
}

/*
 * One of these per registered origin, held in the snapshot_origins hash
 */
struct origin {
	/* The origin device */
	struct block_device *bdev;

	struct list_head hash_list;

	/* List of snapshots for this origin */
	struct list_head snapshots;
};

/*
 * This structure is allocated for each origin target
 */
struct dm_origin {
	struct dm_dev *dev;
	struct dm_target *ti;
	unsigned split_boundary;
	struct list_head hash_list;
};

/*
 * Size of the hash table for origin volumes. If we make this
 * the size of the minors list then it should be nearly perfect.
 */
#define ORIGIN_HASH_SIZE 256
#define ORIGIN_MASK      0xFF
static struct list_head *_origins;
static struct list_head *_dm_origins;
static struct rw_semaphore _origins_lock;

static DECLARE_WAIT_QUEUE_HEAD(_pending_exceptions_done);
static DEFINE_SPINLOCK(_pending_exceptions_done_spinlock);
static uint64_t _pending_exceptions_done_count;

static int init_origin_hash(void)
{
	int i;

	_origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head),
			   GFP_KERNEL);
	if (!_origins) {
		DMERR("unable to allocate memory for _origins");
		return -ENOMEM;
	}
	for (i = 0; i < ORIGIN_HASH_SIZE; i++)
		INIT_LIST_HEAD(_origins + i);

	_dm_origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head),
			      GFP_KERNEL);
	if (!_dm_origins) {
		DMERR("unable to allocate memory for _dm_origins");
		kfree(_origins);
		return -ENOMEM;
	}
	for (i = 0; i < ORIGIN_HASH_SIZE; i++)
		INIT_LIST_HEAD(_dm_origins + i);

	init_rwsem(&_origins_lock);

	return 0;
}

static void exit_origin_hash(void)
{
	kfree(_origins);
	kfree(_dm_origins);
}

static unsigned origin_hash(struct block_device *bdev)
{
	return bdev->bd_dev & ORIGIN_MASK;
}

static struct origin *__lookup_origin(struct block_device *origin)
{
	struct list_head *ol;
	struct origin *o;

	ol = &_origins[origin_hash(origin)];
	list_for_each_entry (o, ol, hash_list)
		if (bdev_equal(o->bdev, origin))
			return o;

	return NULL;
}

static void __insert_origin(struct origin *o)
{
	struct list_head *sl = &_origins[origin_hash(o->bdev)];
	list_add_tail(&o->hash_list, sl);
}

static struct dm_origin *__lookup_dm_origin(struct block_device *origin)
{
	struct list_head *ol;
	struct dm_origin *o;

	ol = &_dm_origins[origin_hash(origin)];
	list_for_each_entry (o, ol, hash_list)
		if (bdev_equal(o->dev->bdev, origin))
			return o;

	return NULL;
}

static void __insert_dm_origin(struct dm_origin *o)
{
	struct list_head *sl = &_dm_origins[origin_hash(o->dev->bdev)];
	list_add_tail(&o->hash_list, sl);
}

static void __remove_dm_origin(struct dm_origin *o)
{
	list_del(&o->hash_list);
}

/*
 * _origins_lock must be held when calling this function.
 * Returns number of snapshots registered using the supplied cow device, plus:
 * snap_src - a snapshot suitable for use as a source of exception handover
 * snap_dest - a snapshot capable of receiving exception handover.
 * snap_merge - an existing snapshot-merge target linked to the same origin.
 *   There can be at most one snapshot-merge target. The parameter is optional.
 *
 * Possible return values and states of snap_src and snap_dest.
 *   0: NULL, NULL  - first new snapshot
 *   1: snap_src, NULL - normal snapshot
 *   2: snap_src, snap_dest  - waiting for handover
 *   2: snap_src, NULL - handed over, waiting for old to be deleted
 *   1: NULL, snap_dest - source got destroyed without handover
 */
static int __find_snapshots_sharing_cow(struct dm_snapshot *snap,
					struct dm_snapshot **snap_src,
					struct dm_snapshot **snap_dest,
					struct dm_snapshot **snap_merge)
{
	struct dm_snapshot *s;
	struct origin *o;
	int count = 0;
	int active;

	o = __lookup_origin(snap->origin->bdev);
	if (!o)
		goto out;

	list_for_each_entry(s, &o->snapshots, list) {
		if (dm_target_is_snapshot_merge(s->ti) && snap_merge)
			*snap_merge = s;
		if (!bdev_equal(s->cow->bdev, snap->cow->bdev))
			continue;

		mutex_lock(&s->lock);
		active = s->active;
		mutex_unlock(&s->lock);

		if (active) {
			if (snap_src)
				*snap_src = s;
		} else if (snap_dest)
			*snap_dest = s;

		count++;
	}

out:
	return count;
}

/*
 * On success, returns 1 if this snapshot is a handover destination,
 * otherwise returns 0.
 */
static int __validate_exception_handover(struct dm_snapshot *snap)
{
	struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
	struct dm_snapshot *snap_merge = NULL;

	/* Does snapshot need exceptions handed over to it? */
	if ((__find_snapshots_sharing_cow(snap, &snap_src, &snap_dest,
					  &snap_merge) == 2) ||
	    snap_dest) {
		snap->ti->error = "Snapshot cow pairing for exception "
				  "table handover failed";
		return -EINVAL;
	}

	/*
	 * If no snap_src was found, snap cannot become a handover
	 * destination.
	 */
	if (!snap_src)
		return 0;

	/*
	 * Non-snapshot-merge handover?
	 */
	if (!dm_target_is_snapshot_merge(snap->ti))
		return 1;

	/*
	 * Do not allow more than one merging snapshot.
	 */
	if (snap_merge) {
		snap->ti->error = "A snapshot is already merging.";
		return -EINVAL;
	}

	if (!snap_src->store->type->prepare_merge ||
	    !snap_src->store->type->commit_merge) {
		snap->ti->error = "Snapshot exception store does not "
				  "support snapshot-merge.";
		return -EINVAL;
	}

	return 1;
}

static void __insert_snapshot(struct origin *o, struct dm_snapshot *s)
{
	struct dm_snapshot *l;

	/* Sort the list according to chunk size, largest first, smallest last */
	list_for_each_entry(l, &o->snapshots, list)
		if (l->store->chunk_size < s->store->chunk_size)
			break;
	list_add_tail(&s->list, &l->list);
}

/*
 * Make a note of the snapshot and its origin so we can look it
 * up when the origin has a write on it.
 *
 * Also validate snapshot exception store handovers.
 * On success, returns 1 if this registration is a handover destination,
 * otherwise returns 0.
 */
static int register_snapshot(struct dm_snapshot *snap)
{
	struct origin *o, *new_o = NULL;
	struct block_device *bdev = snap->origin->bdev;
	int r = 0;

	new_o = kmalloc(sizeof(*new_o), GFP_KERNEL);
	if (!new_o)
		return -ENOMEM;

	down_write(&_origins_lock);

	r = __validate_exception_handover(snap);
	if (r < 0) {
		kfree(new_o);
		goto out;
	}

	o = __lookup_origin(bdev);
	if (o)
		kfree(new_o);
	else {
		/* New origin */
		o = new_o;

		/* Initialise the struct */
		INIT_LIST_HEAD(&o->snapshots);
		o->bdev = bdev;

		__insert_origin(o);
	}

	__insert_snapshot(o, snap);

out:
	up_write(&_origins_lock);

	return r;
}

/*
 * Move snapshot to correct place in list according to chunk size.
 */
static void reregister_snapshot(struct dm_snapshot *s)
{
	struct block_device *bdev = s->origin->bdev;

	down_write(&_origins_lock);

	list_del(&s->list);
	__insert_snapshot(__lookup_origin(bdev), s);

	up_write(&_origins_lock);
}

static void unregister_snapshot(struct dm_snapshot *s)
{
	struct origin *o;

	down_write(&_origins_lock);
	o = __lookup_origin(s->origin->bdev);

	list_del(&s->list);
	if (o && list_empty(&o->snapshots)) {
		list_del(&o->hash_list);
		kfree(o);
	}

	up_write(&_origins_lock);
}

/*
 * Implementation of the exception hash tables.
 * The lowest hash_shift bits of the chunk number are ignored, allowing
 * some consecutive chunks to be grouped together.
 */
static int dm_exception_table_init(struct dm_exception_table *et,
				   uint32_t size, unsigned hash_shift)
{
	unsigned int i;

	et->hash_shift = hash_shift;
	et->hash_mask = size - 1;
	et->table = dm_vcalloc(size, sizeof(struct list_head));
	if (!et->table)
		return -ENOMEM;

	for (i = 0; i < size; i++)
		INIT_LIST_HEAD(et->table + i);

	return 0;
}

static void dm_exception_table_exit(struct dm_exception_table *et,
				    struct kmem_cache *mem)
{
	struct list_head *slot;
	struct dm_exception *ex, *next;
	int i, size;

	size = et->hash_mask + 1;
	for (i = 0; i < size; i++) {
		slot = et->table + i;

		list_for_each_entry_safe (ex, next, slot, hash_list)
			kmem_cache_free(mem, ex);
	}

	vfree(et->table);
}

static uint32_t exception_hash(struct dm_exception_table *et, chunk_t chunk)
{
	return (chunk >> et->hash_shift) & et->hash_mask;
}
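
/*
 * Illustrative note: because the low hash_shift bits are dropped, chunk
 * numbers that differ only in those bits hash to the same bucket, and
 * dm_insert_exception() below only extends a range using entries found
 * on that same bucket's list, so a consecutive-chunk range never spans
 * hash buckets and a single lookup always finds it.
 */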

static void dm_remove_exception(struct dm_exception *e)
{
	list_del(&e->hash_list);
}

/*
 * Return the exception data for a sector, or NULL if not
 * remapped.
 */
static struct dm_exception *dm_lookup_exception(struct dm_exception_table *et,
						chunk_t chunk)
{
	struct list_head *slot;
	struct dm_exception *e;

	slot = &et->table[exception_hash(et, chunk)];
	list_for_each_entry (e, slot, hash_list)
		if (chunk >= e->old_chunk &&
		    chunk <= e->old_chunk + dm_consecutive_chunk_count(e))
			return e;

	return NULL;
}

static struct dm_exception *alloc_completed_exception(gfp_t gfp)
{
	struct dm_exception *e;

	e = kmem_cache_alloc(exception_cache, gfp);
	if (!e && gfp == GFP_NOIO)
		e = kmem_cache_alloc(exception_cache, GFP_ATOMIC);

	return e;
}

static void free_completed_exception(struct dm_exception *e)
{
	kmem_cache_free(exception_cache, e);
}

static struct dm_snap_pending_exception *alloc_pending_exception(struct dm_snapshot *s)
{
	struct dm_snap_pending_exception *pe = mempool_alloc(s->pending_pool,
							     GFP_NOIO);

	atomic_inc(&s->pending_exceptions_count);
	pe->snap = s;

	return pe;
}

static void free_pending_exception(struct dm_snap_pending_exception *pe)
{
	struct dm_snapshot *s = pe->snap;

	mempool_free(pe, s->pending_pool);
	smp_mb__before_atomic();
	atomic_dec(&s->pending_exceptions_count);
}

static void dm_insert_exception(struct dm_exception_table *eh,
				struct dm_exception *new_e)
{
	struct list_head *l;
	struct dm_exception *e = NULL;

	l = &eh->table[exception_hash(eh, new_e->old_chunk)];

	/* Add immediately if this table doesn't support consecutive chunks */
	if (!eh->hash_shift)
		goto out;

	/* List is ordered by old_chunk */
	list_for_each_entry_reverse(e, l, hash_list) {
		/* Insert after an existing chunk? */
		if (new_e->old_chunk == (e->old_chunk +
					 dm_consecutive_chunk_count(e) + 1) &&
		    new_e->new_chunk == (dm_chunk_number(e->new_chunk) +
					 dm_consecutive_chunk_count(e) + 1)) {
			dm_consecutive_chunk_count_inc(e);
			free_completed_exception(new_e);
			return;
		}

		/* Insert before an existing chunk? */
		if (new_e->old_chunk == (e->old_chunk - 1) &&
		    new_e->new_chunk == (dm_chunk_number(e->new_chunk) - 1)) {
			dm_consecutive_chunk_count_inc(e);
			e->old_chunk--;
			e->new_chunk--;
			free_completed_exception(new_e);
			return;
		}

		if (new_e->old_chunk > e->old_chunk)
			break;
	}

out:
	list_add(&new_e->hash_list, e ? &e->hash_list : l);
}
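
/*
 * For example (illustrative): an exception with old_chunk == 10,
 * new_chunk == 50 and a consecutive count of 2 represents the mappings
 * 10->50, 11->51 and 12->52. Inserting 13->53 above just bumps the
 * count to 3 instead of adding a new entry.
 */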

/*
 * Callback used by the exception stores to load exceptions when
 * initialising.
 */
static int dm_add_exception(void *context, chunk_t old, chunk_t new)
{
	struct dm_snapshot *s = context;
	struct dm_exception *e;

	e = alloc_completed_exception(GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	e->old_chunk = old;

	/* Consecutive_count is implicitly initialised to zero */
	e->new_chunk = new;

	dm_insert_exception(&s->complete, e);

	return 0;
}

/*
 * Return a minimum chunk size of all snapshots that have the specified origin.
 * Return zero if the origin has no snapshots.
 */
static uint32_t __minimum_chunk_size(struct origin *o)
{
	struct dm_snapshot *snap;
	unsigned chunk_size = rounddown_pow_of_two(UINT_MAX);

	if (o)
		list_for_each_entry(snap, &o->snapshots, list)
			chunk_size = min_not_zero(chunk_size,
						  snap->store->chunk_size);

	return (uint32_t) chunk_size;
}

/*
 * Hard coded magic.
 */
static int calc_max_buckets(void)
{
	/* use a fixed size of 2MB */
	unsigned long mem = 2 * 1024 * 1024;
	mem /= sizeof(struct list_head);

	return mem;
}
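
/*
 * Illustrative arithmetic: on a 64-bit build sizeof(struct list_head) is
 * 16 bytes, so the 2MB budget above caps the table at
 * 2097152 / 16 = 131072 bucket heads.
 */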

/*
 * Allocate room for a suitable hash table.
 */
static int init_hash_tables(struct dm_snapshot *s)
{
	sector_t hash_size, cow_dev_size, max_buckets;

	/*
	 * Calculate based on the size of the original volume or
	 * the COW volume...
	 */
	cow_dev_size = get_dev_size(s->cow->bdev);
	max_buckets = calc_max_buckets();

	hash_size = cow_dev_size >> s->store->chunk_shift;
	hash_size = min(hash_size, max_buckets);

	if (hash_size < 64)
		hash_size = 64;
	hash_size = rounddown_pow_of_two(hash_size);
	if (dm_exception_table_init(&s->complete, hash_size,
				    DM_CHUNK_CONSECUTIVE_BITS))
		return -ENOMEM;

	/*
	 * Allocate hash table for in-flight exceptions
	 * Make this smaller than the real hash table
	 */
	hash_size >>= 3;
	if (hash_size < 64)
		hash_size = 64;

	if (dm_exception_table_init(&s->pending, hash_size, 0)) {
		dm_exception_table_exit(&s->complete, exception_cache);
		return -ENOMEM;
	}

	return 0;
}
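
/*
 * Sizing example (illustrative): a 1GiB COW device is 2097152 sectors;
 * with 8-sector (4KiB) chunks (chunk_shift == 3) that is 262144 chunks,
 * which the logic above clamps to max_buckets (131072 on 64-bit) for the
 * completed table, and 131072 >> 3 = 16384 buckets for the pending table.
 */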

static void merge_shutdown(struct dm_snapshot *s)
{
	clear_bit_unlock(RUNNING_MERGE, &s->state_bits);
	smp_mb__after_atomic();
	wake_up_bit(&s->state_bits, RUNNING_MERGE);
}

static struct bio *__release_queued_bios_after_merge(struct dm_snapshot *s)
{
	s->first_merging_chunk = 0;
	s->num_merging_chunks = 0;

	return bio_list_get(&s->bios_queued_during_merge);
}

/*
 * Remove one chunk from the index of completed exceptions.
 */
static int __remove_single_exception_chunk(struct dm_snapshot *s,
					   chunk_t old_chunk)
{
	struct dm_exception *e;

	e = dm_lookup_exception(&s->complete, old_chunk);
	if (!e) {
		DMERR("Corruption detected: exception for block %llu is "
		      "on disk but not in memory",
		      (unsigned long long)old_chunk);
		return -EINVAL;
	}

	/*
	 * If this is the only chunk using this exception, remove exception.
	 */
	if (!dm_consecutive_chunk_count(e)) {
		dm_remove_exception(e);
		free_completed_exception(e);
		return 0;
	}

	/*
	 * The chunk may be either at the beginning or the end of a
	 * group of consecutive chunks - never in the middle. We are
	 * removing chunks in the opposite order to that in which they
	 * were added, so this should always be true.
	 * Decrement the consecutive chunk counter and adjust the
	 * starting point if necessary.
	 */
	if (old_chunk == e->old_chunk) {
		e->old_chunk++;
		e->new_chunk++;
	} else if (old_chunk != e->old_chunk +
		   dm_consecutive_chunk_count(e)) {
		DMERR("Attempt to merge block %llu from the "
		      "middle of a chunk range [%llu - %llu]",
		      (unsigned long long)old_chunk,
		      (unsigned long long)e->old_chunk,
		      (unsigned long long)
		      e->old_chunk + dm_consecutive_chunk_count(e));
		return -EINVAL;
	}

	dm_consecutive_chunk_count_dec(e);

	return 0;
}
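
/*
 * For example (illustrative): if e covers chunks 10..12, removing chunk
 * 12 just decrements the consecutive count, while removing chunk 10
 * advances old_chunk/new_chunk to 11 before decrementing; removing chunk
 * 11 while both ends remain would hit the rejected "middle" case above.
 */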

static void flush_bios(struct bio *bio);

static int remove_single_exception_chunk(struct dm_snapshot *s)
{
	struct bio *b = NULL;
	int r;
	chunk_t old_chunk = s->first_merging_chunk + s->num_merging_chunks - 1;

	mutex_lock(&s->lock);

	/*
	 * Process chunks (and associated exceptions) in reverse order
	 * so that dm_consecutive_chunk_count_dec() accounting works.
	 */
	do {
		r = __remove_single_exception_chunk(s, old_chunk);
		if (r)
			goto out;
	} while (old_chunk-- > s->first_merging_chunk);

	b = __release_queued_bios_after_merge(s);

out:
	mutex_unlock(&s->lock);
	if (b)
		flush_bios(b);

	return r;
}

static int origin_write_extent(struct dm_snapshot *merging_snap,
			       sector_t sector, unsigned chunk_size);

static void merge_callback(int read_err, unsigned long write_err,
			   void *context);

static uint64_t read_pending_exceptions_done_count(void)
{
	uint64_t pending_exceptions_done;

	spin_lock(&_pending_exceptions_done_spinlock);
	pending_exceptions_done = _pending_exceptions_done_count;
	spin_unlock(&_pending_exceptions_done_spinlock);

	return pending_exceptions_done;
}

static void increment_pending_exceptions_done_count(void)
{
	spin_lock(&_pending_exceptions_done_spinlock);
	_pending_exceptions_done_count++;
	spin_unlock(&_pending_exceptions_done_spinlock);

	wake_up_all(&_pending_exceptions_done);
}

static void snapshot_merge_next_chunks(struct dm_snapshot *s)
{
	int i, linear_chunks;
	chunk_t old_chunk, new_chunk;
	struct dm_io_region src, dest;
	sector_t io_size;
	uint64_t previous_count;

	BUG_ON(!test_bit(RUNNING_MERGE, &s->state_bits));
	if (unlikely(test_bit(SHUTDOWN_MERGE, &s->state_bits)))
		goto shut;

	/*
	 * valid flag never changes during merge, so no lock required.
	 */
	if (!s->valid) {
		DMERR("Snapshot is invalid: can't merge");
		goto shut;
	}

	linear_chunks = s->store->type->prepare_merge(s->store, &old_chunk,
						      &new_chunk);
	if (linear_chunks <= 0) {
		if (linear_chunks < 0) {
			DMERR("Read error in exception store: "
			      "shutting down merge");
			mutex_lock(&s->lock);
			s->merge_failed = 1;
			mutex_unlock(&s->lock);
		}
		goto shut;
	}

	/* Adjust old_chunk and new_chunk to reflect start of linear region */
	old_chunk = old_chunk + 1 - linear_chunks;
	new_chunk = new_chunk + 1 - linear_chunks;

	/*
	 * Use one (potentially large) I/O to copy all 'linear_chunks'
	 * from the exception store to the origin
	 */
	io_size = linear_chunks * s->store->chunk_size;

	dest.bdev = s->origin->bdev;
	dest.sector = chunk_to_sector(s->store, old_chunk);
	dest.count = min(io_size, get_dev_size(dest.bdev) - dest.sector);

	src.bdev = s->cow->bdev;
	src.sector = chunk_to_sector(s->store, new_chunk);
	src.count = dest.count;

	/*
	 * Reallocate any exceptions needed in other snapshots then
	 * wait for the pending exceptions to complete.
	 * Each time any pending exception (globally on the system)
	 * completes we are woken and repeat the process to find out
	 * if we can proceed. While this may not seem a particularly
	 * efficient algorithm, it is not expected to have any
	 * significant impact on performance.
	 */
	previous_count = read_pending_exceptions_done_count();
	while (origin_write_extent(s, dest.sector, io_size)) {
		wait_event(_pending_exceptions_done,
			   (read_pending_exceptions_done_count() !=
			    previous_count));
		/* Retry after the wait, until all exceptions are done. */
		previous_count = read_pending_exceptions_done_count();
	}

	mutex_lock(&s->lock);
	s->first_merging_chunk = old_chunk;
	s->num_merging_chunks = linear_chunks;
	mutex_unlock(&s->lock);

	/* Wait until writes to all 'linear_chunks' drain */
	for (i = 0; i < linear_chunks; i++)
		__check_for_conflicting_io(s, old_chunk + i);

	dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, merge_callback, s);
	return;

shut:
	merge_shutdown(s);
}
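
/*
 * Merge cycle (illustrative summary): snapshot_merge_next_chunks() picks
 * the next linear run of chunks via prepare_merge(), copies it back to
 * the origin with kcopyd, and merge_callback() below then commits the
 * merge in the exception store, removes the in-core exceptions and kicks
 * off the next run, until prepare_merge() returns 0 or an error occurs.
 */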

static void error_bios(struct bio *bio);

static void merge_callback(int read_err, unsigned long write_err, void *context)
{
	struct dm_snapshot *s = context;
	struct bio *b = NULL;

	if (read_err || write_err) {
		if (read_err)
			DMERR("Read error: shutting down merge.");
		else
			DMERR("Write error: shutting down merge.");
		goto shut;
	}

	if (s->store->type->commit_merge(s->store,
					 s->num_merging_chunks) < 0) {
		DMERR("Write error in exception store: shutting down merge");
		goto shut;
	}

	if (remove_single_exception_chunk(s) < 0)
		goto shut;

	snapshot_merge_next_chunks(s);

	return;

shut:
	mutex_lock(&s->lock);
	s->merge_failed = 1;
	b = __release_queued_bios_after_merge(s);
	mutex_unlock(&s->lock);
	error_bios(b);

	merge_shutdown(s);
}

static void start_merge(struct dm_snapshot *s)
{
	if (!test_and_set_bit(RUNNING_MERGE, &s->state_bits))
		snapshot_merge_next_chunks(s);
}

/*
 * Stop the merging process and wait until it finishes.
 */
static void stop_merge(struct dm_snapshot *s)
{
	set_bit(SHUTDOWN_MERGE, &s->state_bits);
	wait_on_bit(&s->state_bits, RUNNING_MERGE, TASK_UNINTERRUPTIBLE);
	clear_bit(SHUTDOWN_MERGE, &s->state_bits);
}

/*
 * Construct a snapshot mapping: <origin_dev> <COW-dev> <p|po|n> <chunk-size>
 */
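/*
 * Illustrative usage (assuming a 1GiB origin and 16-sector chunks):
 *   dmsetup create snap --table \
 *     "0 2097152 snapshot /dev/vg/base /dev/vg/cow P 16"
 * where p selects a persistent exception store, n a transient one, and
 * po a persistent store with overflow support.
 */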
1118static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1119{
1120 struct dm_snapshot *s;
Mikulas Patockacd45daf2008-07-21 12:00:32 +01001121 int i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001122 int r = -EINVAL;
Mike Snitzerfc56f6f2009-12-10 23:52:12 +00001123 char *origin_path, *cow_path;
DingXiang4df2bf42016-02-02 12:29:18 +08001124 dev_t origin_dev, cow_dev;
Alasdair G Kergon55a62ee2013-03-01 22:45:47 +00001125 unsigned args_used, num_flush_bios = 1;
Mike Snitzer10b81062009-12-10 23:52:31 +00001126 fmode_t origin_mode = FMODE_READ;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001127
Mark McLoughlin4c7e3bf2006-10-03 01:15:25 -07001128 if (argc != 4) {
Alasdair G Kergon72d94862006-06-26 00:27:35 -07001129 ti->error = "requires exactly 4 arguments";
Linus Torvalds1da177e2005-04-16 15:20:36 -07001130 r = -EINVAL;
Mike Snitzerfc56f6f2009-12-10 23:52:12 +00001131 goto bad;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001132 }
1133
Mike Snitzer10b81062009-12-10 23:52:31 +00001134 if (dm_target_is_snapshot_merge(ti)) {
Alasdair G Kergon55a62ee2013-03-01 22:45:47 +00001135 num_flush_bios = 2;
Mike Snitzer10b81062009-12-10 23:52:31 +00001136 origin_mode = FMODE_WRITE;
1137 }
1138
Kent Overstreet7ff8f212018-06-05 05:26:33 -04001139 s = kzalloc(sizeof(*s), GFP_KERNEL);
Jonathan Brassowfee19982009-04-02 19:55:34 +01001140 if (!s) {
Jonathan Brassowa2d2b032011-08-02 12:32:03 +01001141 ti->error = "Cannot allocate private snapshot structure";
Linus Torvalds1da177e2005-04-16 15:20:36 -07001142 r = -ENOMEM;
Mike Snitzerfc56f6f2009-12-10 23:52:12 +00001143 goto bad;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001144 }
1145
Mikulas Patockac2411042010-08-12 04:13:51 +01001146 origin_path = argv[0];
1147 argv++;
1148 argc--;
1149
1150 r = dm_get_device(ti, origin_path, origin_mode, &s->origin);
1151 if (r) {
1152 ti->error = "Cannot get origin device";
1153 goto bad_origin;
1154 }
DingXiang4df2bf42016-02-02 12:29:18 +08001155 origin_dev = s->origin->bdev->bd_dev;
Mikulas Patockac2411042010-08-12 04:13:51 +01001156
Mike Snitzerfc56f6f2009-12-10 23:52:12 +00001157 cow_path = argv[0];
1158 argv++;
1159 argc--;
1160
DingXiang4df2bf42016-02-02 12:29:18 +08001161 cow_dev = dm_get_dev_t(cow_path);
1162 if (cow_dev && cow_dev == origin_dev) {
1163 ti->error = "COW device cannot be the same as origin device";
1164 r = -EINVAL;
1165 goto bad_cow;
1166 }
1167
Milan Broz024d37e2011-03-24 13:52:14 +00001168 r = dm_get_device(ti, cow_path, dm_table_get_mode(ti->table), &s->cow);
Mike Snitzerfc56f6f2009-12-10 23:52:12 +00001169 if (r) {
1170 ti->error = "Cannot get COW device";
1171 goto bad_cow;
1172 }
1173
1174 r = dm_exception_store_create(ti, argc, argv, s, &args_used, &s->store);
1175 if (r) {
1176 ti->error = "Couldn't create exception store";
1177 r = -EINVAL;
1178 goto bad_store;
1179 }
1180
1181 argv += args_used;
1182 argc -= args_used;
1183
Mike Snitzerfc56f6f2009-12-10 23:52:12 +00001184 s->ti = ti;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001185 s->valid = 1;
Mikulas Patocka76c44f62015-06-21 16:31:33 -04001186 s->snapshot_overflowed = 0;
Alasdair G Kergonaa14ede2006-02-01 03:04:50 -08001187 s->active = 0;
Mikulas Patocka879129d22008-10-30 13:33:16 +00001188 atomic_set(&s->pending_exceptions_count, 0);
Mikulas Patocka230c83a2013-11-29 18:13:37 -05001189 s->exception_start_sequence = 0;
1190 s->exception_complete_sequence = 0;
1191 INIT_LIST_HEAD(&s->out_of_order_list);
Mikulas Patocka0685a252017-11-23 16:15:43 -05001192 mutex_init(&s->lock);
Mike Snitzerc1f0c182009-12-10 23:52:24 +00001193 INIT_LIST_HEAD(&s->list);
Alasdair G Kergonca3a9312006-10-03 01:15:30 -07001194 spin_lock_init(&s->pe_lock);
Mikulas Patocka1e03f972009-12-10 23:52:32 +00001195 s->state_bits = 0;
Mike Snitzerd8ddb1c2009-12-10 23:52:35 +00001196 s->merge_failed = 0;
Mikulas Patocka9fe862542009-12-10 23:52:33 +00001197 s->first_merging_chunk = 0;
1198 s->num_merging_chunks = 0;
1199 bio_list_init(&s->bios_queued_during_merge);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001200
1201 /* Allocate hash table for COW data */
Jonathan Brassowfee19982009-04-02 19:55:34 +01001202 if (init_hash_tables(s)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001203 ti->error = "Unable to allocate hash table space";
1204 r = -ENOMEM;
Jonathan Brassowfee19982009-04-02 19:55:34 +01001205 goto bad_hash_tables;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001206 }
1207
Mikulas Patocka37524332019-10-02 06:15:53 -04001208 init_waitqueue_head(&s->in_progress_wait);
Nikos Tsironis69855b52018-10-31 17:53:08 -04001209
Mikulas Patockadf5d2e92013-03-01 22:45:49 +00001210 s->kcopyd_client = dm_kcopyd_client_create(&dm_kcopyd_throttle);
Mikulas Patockafa34ce72011-05-29 13:03:13 +01001211 if (IS_ERR(s->kcopyd_client)) {
1212 r = PTR_ERR(s->kcopyd_client);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001213 ti->error = "Could not create kcopyd client";
Jonathan Brassowfee19982009-04-02 19:55:34 +01001214 goto bad_kcopyd;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001215 }
1216
Mikulas Patocka92e86812008-07-21 12:00:35 +01001217 s->pending_pool = mempool_create_slab_pool(MIN_IOS, pending_cache);
1218 if (!s->pending_pool) {
1219 ti->error = "Could not allocate mempool for pending exceptions";
Wei Yongjun09e8b812013-05-10 14:37:15 +01001220 r = -ENOMEM;
Jonathan Brassowfee19982009-04-02 19:55:34 +01001221 goto bad_pending_pool;
Mikulas Patocka92e86812008-07-21 12:00:35 +01001222 }
1223
Mikulas Patockacd45daf2008-07-21 12:00:32 +01001224 for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
1225 INIT_HLIST_HEAD(&s->tracked_chunk_hash[i]);
1226
1227 spin_lock_init(&s->tracked_chunk_lock);
1228
Mike Snitzerc1f0c182009-12-10 23:52:24 +00001229 ti->private = s;
Alasdair G Kergon55a62ee2013-03-01 22:45:47 +00001230 ti->num_flush_bios = num_flush_bios;
Mike Snitzer30187e12016-01-31 13:28:26 -05001231 ti->per_io_data_size = sizeof(struct dm_snap_tracked_chunk);
Mike Snitzerc1f0c182009-12-10 23:52:24 +00001232
1233 /* Add snapshot to the list of snapshots for this origin */
1234 /* Exceptions aren't triggered till snapshot_resume() is called */
1235 r = register_snapshot(s);
1236 if (r == -ENOMEM) {
1237 ti->error = "Snapshot origin struct allocation failed";
1238 goto bad_load_and_register;
1239 } else if (r < 0) {
1240 /* invalid handover, register_snapshot has set ti->error */
1241 goto bad_load_and_register;
1242 }
1243
1244 /*
1245 * Metadata must only be loaded into one table at once, so skip this
1246 * if metadata will be handed over during resume.
1247 * Chunk size will be set during the handover - set it to zero to
1248 * ensure it's ignored.
1249 */
1250 if (r > 0) {
1251 s->store->chunk_size = 0;
1252 return 0;
1253 }
1254
Jonathan Brassow493df712009-04-02 19:55:31 +01001255 r = s->store->type->read_metadata(s->store, dm_add_exception,
1256 (void *)s);
Milan Broz07641472007-07-12 17:28:13 +01001257 if (r < 0) {
Mark McLoughlinf9cea4f2006-10-03 01:15:25 -07001258 ti->error = "Failed to read snapshot metadata";
Mike Snitzerc1f0c182009-12-10 23:52:24 +00001259 goto bad_read_metadata;
Milan Broz07641472007-07-12 17:28:13 +01001260 } else if (r > 0) {
1261 s->valid = 0;
1262 DMWARN("Snapshot is marked invalid.");
Mark McLoughlinf9cea4f2006-10-03 01:15:25 -07001263 }
Alasdair G Kergonaa14ede2006-02-01 03:04:50 -08001264
Mikulas Patocka3f2412d2009-10-16 23:18:16 +01001265 if (!s->store->chunk_size) {
1266 ti->error = "Chunk size not set";
Mikulas Patocka5d8ec842021-05-10 14:49:05 -04001267 r = -EINVAL;
Mike Snitzerc1f0c182009-12-10 23:52:24 +00001268 goto bad_read_metadata;
Mikulas Patocka3f2412d2009-10-16 23:18:16 +01001269 }
Mike Snitzer542f9032012-07-27 15:08:00 +01001270
1271 r = dm_set_target_max_io_len(ti, s->store->chunk_size);
1272 if (r)
1273 goto bad_read_metadata;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001274
1275 return 0;
1276
Mike Snitzerc1f0c182009-12-10 23:52:24 +00001277bad_read_metadata:
1278 unregister_snapshot(s);
1279
Jonathan Brassowfee19982009-04-02 19:55:34 +01001280bad_load_and_register:
Mikulas Patocka92e86812008-07-21 12:00:35 +01001281 mempool_destroy(s->pending_pool);
1282
Jonathan Brassowfee19982009-04-02 19:55:34 +01001283bad_pending_pool:
Heinz Mauelshageneb69aca2008-04-24 21:43:19 +01001284 dm_kcopyd_client_destroy(s->kcopyd_client);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001285
Jonathan Brassowfee19982009-04-02 19:55:34 +01001286bad_kcopyd:
Jon Brassow3510cb92009-12-10 23:52:11 +00001287 dm_exception_table_exit(&s->pending, pending_cache);
1288 dm_exception_table_exit(&s->complete, exception_cache);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001289
Jonathan Brassowfee19982009-04-02 19:55:34 +01001290bad_hash_tables:
Mike Snitzerfc56f6f2009-12-10 23:52:12 +00001291 dm_exception_store_destroy(s->store);
1292
1293bad_store:
1294 dm_put_device(ti, s->cow);
1295
1296bad_cow:
Mikulas Patockac2411042010-08-12 04:13:51 +01001297 dm_put_device(ti, s->origin);
1298
1299bad_origin:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001300 kfree(s);
1301
Mike Snitzerfc56f6f2009-12-10 23:52:12 +00001302bad:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001303 return r;
1304}
1305
Milan Broz31c93a02006-12-08 02:41:11 -08001306static void __free_exceptions(struct dm_snapshot *s)
1307{
Heinz Mauelshageneb69aca2008-04-24 21:43:19 +01001308 dm_kcopyd_client_destroy(s->kcopyd_client);
Milan Broz31c93a02006-12-08 02:41:11 -08001309 s->kcopyd_client = NULL;
1310
Jon Brassow3510cb92009-12-10 23:52:11 +00001311 dm_exception_table_exit(&s->pending, pending_cache);
1312 dm_exception_table_exit(&s->complete, exception_cache);
Milan Broz31c93a02006-12-08 02:41:11 -08001313}
1314
Mike Snitzerc1f0c182009-12-10 23:52:24 +00001315static void __handover_exceptions(struct dm_snapshot *snap_src,
1316 struct dm_snapshot *snap_dest)
1317{
1318 union {
1319 struct dm_exception_table table_swap;
1320 struct dm_exception_store *store_swap;
1321 } u;
1322
1323 /*
1324 * Swap all snapshot context information between the two instances.
1325 */
1326 u.table_swap = snap_dest->complete;
1327 snap_dest->complete = snap_src->complete;
1328 snap_src->complete = u.table_swap;
1329
1330 u.store_swap = snap_dest->store;
1331 snap_dest->store = snap_src->store;
Mike Snitzerb0d3cc02015-10-08 18:05:41 -04001332 snap_dest->store->userspace_supports_overflow = u.store_swap->userspace_supports_overflow;
Mike Snitzerc1f0c182009-12-10 23:52:24 +00001333 snap_src->store = u.store_swap;
1334
1335 snap_dest->store->snap = snap_dest;
1336 snap_src->store->snap = snap_src;
1337
Mike Snitzer542f9032012-07-27 15:08:00 +01001338 snap_dest->ti->max_io_len = snap_dest->store->chunk_size;
Mike Snitzerc1f0c182009-12-10 23:52:24 +00001339 snap_dest->valid = snap_src->valid;
Mikulas Patocka76c44f62015-06-21 16:31:33 -04001340 snap_dest->snapshot_overflowed = snap_src->snapshot_overflowed;
Mike Snitzerc1f0c182009-12-10 23:52:24 +00001341
1342 /*
1343 * Set source invalid to ensure it receives no further I/O.
1344 */
1345 snap_src->valid = 0;
1346}
1347
Linus Torvalds1da177e2005-04-16 15:20:36 -07001348static void snapshot_dtr(struct dm_target *ti)
1349{
Mikulas Patockacd45daf2008-07-21 12:00:32 +01001350#ifdef CONFIG_DM_DEBUG
1351 int i;
1352#endif
Alasdair G Kergon028867a2007-07-12 17:26:32 +01001353 struct dm_snapshot *s = ti->private;
Mike Snitzerc1f0c182009-12-10 23:52:24 +00001354 struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001355
Mike Snitzerc1f0c182009-12-10 23:52:24 +00001356 down_read(&_origins_lock);
1357 /* Check whether exception handover must be cancelled */
Mikulas Patocka9d3b15c2009-12-10 23:52:32 +00001358 (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
Mike Snitzerc1f0c182009-12-10 23:52:24 +00001359 if (snap_src && snap_dest && (s == snap_src)) {
Mikulas Patocka0685a252017-11-23 16:15:43 -05001360 mutex_lock(&snap_dest->lock);
Mike Snitzerc1f0c182009-12-10 23:52:24 +00001361 snap_dest->valid = 0;
Mikulas Patocka0685a252017-11-23 16:15:43 -05001362 mutex_unlock(&snap_dest->lock);
Mike Snitzerc1f0c182009-12-10 23:52:24 +00001363 DMERR("Cancelling snapshot handover.");
1364 }
1365 up_read(&_origins_lock);
1366
Mikulas Patocka1e03f972009-12-10 23:52:32 +00001367 if (dm_target_is_snapshot_merge(ti))
1368 stop_merge(s);
1369
Alasdair G Kergon138728d2006-03-27 01:17:50 -08001370 /* Prevent further origin writes from using this snapshot. */
1371 /* After this returns there can be no new kcopyd jobs. */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001372 unregister_snapshot(s);
1373
Mikulas Patocka879129d22008-10-30 13:33:16 +00001374 while (atomic_read(&s->pending_exceptions_count))
Mikulas Patocka90fa1522009-01-06 03:04:54 +00001375 msleep(1);
Mikulas Patocka879129d22008-10-30 13:33:16 +00001376 /*
1377 * Ensure instructions in mempool_destroy aren't reordered
1378 * before atomic_read.
1379 */
1380 smp_mb();
1381
Mikulas Patockacd45daf2008-07-21 12:00:32 +01001382#ifdef CONFIG_DM_DEBUG
1383 for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
1384 BUG_ON(!hlist_empty(&s->tracked_chunk_hash[i]));
1385#endif
1386
Milan Broz31c93a02006-12-08 02:41:11 -08001387 __free_exceptions(s);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001388
Mikulas Patocka92e86812008-07-21 12:00:35 +01001389 mempool_destroy(s->pending_pool);
1390
Jonathan Brassowfee19982009-04-02 19:55:34 +01001391 dm_exception_store_destroy(s->store);
Alasdair G Kergon138728d2006-03-27 01:17:50 -08001392
Mikulas Patocka0685a252017-11-23 16:15:43 -05001393 mutex_destroy(&s->lock);
1394
Mike Snitzerfc56f6f2009-12-10 23:52:12 +00001395 dm_put_device(ti, s->cow);
1396
Mikulas Patockac2411042010-08-12 04:13:51 +01001397 dm_put_device(ti, s->origin);
1398
Mikulas Patocka37524332019-10-02 06:15:53 -04001399 WARN_ON(s->in_progress);
1400
Linus Torvalds1da177e2005-04-16 15:20:36 -07001401 kfree(s);
1402}
1403
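/*
 * Track the number of kcopyd copies in flight under
 * s->in_progress_wait.lock; throttled writers are woken once the count
 * drops back to cow_threshold.
 */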
Mikulas Patocka5de1e3a2019-10-02 06:14:17 -04001404static void account_start_copy(struct dm_snapshot *s)
1405{
Mikulas Patocka37524332019-10-02 06:15:53 -04001406 spin_lock(&s->in_progress_wait.lock);
1407 s->in_progress++;
1408 spin_unlock(&s->in_progress_wait.lock);
Mikulas Patocka5de1e3a2019-10-02 06:14:17 -04001409}
1410
1411static void account_end_copy(struct dm_snapshot *s)
1412{
Mikulas Patocka37524332019-10-02 06:15:53 -04001413 spin_lock(&s->in_progress_wait.lock);
1414 BUG_ON(!s->in_progress);
1415 s->in_progress--;
1416 if (likely(s->in_progress <= cow_threshold) &&
1417 unlikely(waitqueue_active(&s->in_progress_wait)))
1418 wake_up_locked(&s->in_progress_wait);
1419 spin_unlock(&s->in_progress_wait.lock);
1420}
1421
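/*
 * Throttle the creation of new kcopyd jobs against cow_threshold.
 * Returns true if the caller may proceed; returns false after having
 * slept, in which case the caller must retry (re-taking _origins_lock
 * first if unlock_origins was set, since it is dropped before sleeping).
 */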
1422static bool wait_for_in_progress(struct dm_snapshot *s, bool unlock_origins)
1423{
1424 if (unlikely(s->in_progress > cow_threshold)) {
1425 spin_lock(&s->in_progress_wait.lock);
1426 if (likely(s->in_progress > cow_threshold)) {
1427 /*
1428 * NOTE: this throttle doesn't account for whether
1429 * the caller is servicing an IO that will trigger a COW
1430 * so excess throttling may result for chunks not required
1431 * to be COW'd. But if cow_threshold was reached, extra
1432 * throttling is unlikely to negatively impact performance.
1433 */
1434 DECLARE_WAITQUEUE(wait, current);
1435 __add_wait_queue(&s->in_progress_wait, &wait);
1436 __set_current_state(TASK_UNINTERRUPTIBLE);
1437 spin_unlock(&s->in_progress_wait.lock);
1438 if (unlock_origins)
1439 up_read(&_origins_lock);
1440 io_schedule();
1441 remove_wait_queue(&s->in_progress_wait, &wait);
1442 return false;
1443 }
1444 spin_unlock(&s->in_progress_wait.lock);
1445 }
1446 return true;
Mikulas Patocka5de1e3a2019-10-02 06:14:17 -04001447}
1448
Linus Torvalds1da177e2005-04-16 15:20:36 -07001449/*
1450 * Flush a list of buffers.
1451 */
1452static void flush_bios(struct bio *bio)
1453{
1454 struct bio *n;
1455
1456 while (bio) {
1457 n = bio->bi_next;
1458 bio->bi_next = NULL;
1459 generic_make_request(bio);
1460 bio = n;
1461 }
1462}
1463
Mikulas Patocka37524332019-10-02 06:15:53 -04001464static int do_origin(struct dm_dev *origin, struct bio *bio, bool limit);
Mikulas Patocka515ad662009-12-10 23:52:30 +00001465
 1466/*
 1467 * Retry a list of origin bios via do_origin().
 1468 */
1469static void retry_origin_bios(struct dm_snapshot *s, struct bio *bio)
1470{
1471 struct bio *n;
1472 int r;
1473
1474 while (bio) {
1475 n = bio->bi_next;
1476 bio->bi_next = NULL;
Mikulas Patocka37524332019-10-02 06:15:53 -04001477 r = do_origin(s->origin, bio, false);
Mikulas Patocka515ad662009-12-10 23:52:30 +00001478 if (r == DM_MAPIO_REMAPPED)
1479 generic_make_request(bio);
1480 bio = n;
1481 }
1482}
1483
Linus Torvalds1da177e2005-04-16 15:20:36 -07001484/*
1485 * Error a list of buffers.
1486 */
1487static void error_bios(struct bio *bio)
1488{
1489 struct bio *n;
1490
1491 while (bio) {
1492 n = bio->bi_next;
1493 bio->bi_next = NULL;
NeilBrown6712ecf2007-09-27 12:47:43 +02001494 bio_io_error(bio);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001495 bio = n;
1496 }
1497}
1498
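/*
 * Mark a snapshot unusable after an unrecoverable error: drop the
 * exception store if the store type supports it, clear s->valid and
 * raise a table event so userspace notices. Called with s->lock held.
 */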
Alasdair G Kergon695368a2006-10-03 01:15:31 -07001499static void __invalidate_snapshot(struct dm_snapshot *s, int err)
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001500{
1501 if (!s->valid)
1502 return;
1503
1504 if (err == -EIO)
1505 DMERR("Invalidating snapshot: Error reading/writing.");
1506 else if (err == -ENOMEM)
1507 DMERR("Invalidating snapshot: Unable to allocate exception.");
1508
Jonathan Brassow493df712009-04-02 19:55:31 +01001509 if (s->store->type->drop_snapshot)
1510 s->store->type->drop_snapshot(s->store);
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001511
1512 s->valid = 0;
1513
Mike Snitzerfc56f6f2009-12-10 23:52:12 +00001514 dm_table_event(s->ti->table);
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001515}
1516
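/*
 * Commit callback, invoked by the exception store once an exception has
 * been committed: move the exception from the pending table to the
 * complete table and release the bios that were waiting on it.
 */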
Mikulas Patocka385277b2016-01-08 19:07:55 -05001517static void pending_complete(void *context, int success)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001518{
Mikulas Patocka385277b2016-01-08 19:07:55 -05001519 struct dm_snap_pending_exception *pe = context;
Jon Brassow1d4989c2009-12-10 23:52:10 +00001520 struct dm_exception *e;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001521 struct dm_snapshot *s = pe->snap;
Alasdair G Kergon9d493fa2006-10-03 01:15:29 -07001522 struct bio *origin_bios = NULL;
1523 struct bio *snapshot_bios = NULL;
Mikulas Patockaa6e50b42011-08-02 12:32:04 +01001524 struct bio *full_bio = NULL;
Alasdair G Kergon9d493fa2006-10-03 01:15:29 -07001525 int error = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001526
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001527 if (!success) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001528 /* Read/write error - snapshot is unusable */
Mikulas Patocka0685a252017-11-23 16:15:43 -05001529 mutex_lock(&s->lock);
Alasdair G Kergon695368a2006-10-03 01:15:31 -07001530 __invalidate_snapshot(s, -EIO);
Alasdair G Kergon9d493fa2006-10-03 01:15:29 -07001531 error = 1;
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001532 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001533 }
1534
Mikulas Patocka119bc542014-01-13 19:13:36 -05001535 e = alloc_completed_exception(GFP_NOIO);
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001536 if (!e) {
Mikulas Patocka0685a252017-11-23 16:15:43 -05001537 mutex_lock(&s->lock);
Alasdair G Kergon695368a2006-10-03 01:15:31 -07001538 __invalidate_snapshot(s, -ENOMEM);
Alasdair G Kergon9d493fa2006-10-03 01:15:29 -07001539 error = 1;
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001540 goto out;
1541 }
1542 *e = pe->e;
1543
Mikulas Patocka0685a252017-11-23 16:15:43 -05001544 mutex_lock(&s->lock);
Alasdair G Kergon9d493fa2006-10-03 01:15:29 -07001545 if (!s->valid) {
Jon Brassow3510cb92009-12-10 23:52:11 +00001546 free_completed_exception(e);
Alasdair G Kergon9d493fa2006-10-03 01:15:29 -07001547 error = 1;
1548 goto out;
1549 }
1550
Mike Snitzer615d1eb2009-12-10 23:52:29 +00001551 /* Check for conflicting reads */
1552 __check_for_conflicting_io(s, pe->e.old_chunk);
Mikulas Patockaa8d41b52008-07-21 12:00:34 +01001553
1554 /*
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001555 * Add a proper exception, and remove the
1556 * in-flight exception from the list.
1557 */
Jon Brassow3510cb92009-12-10 23:52:11 +00001558 dm_insert_exception(&s->complete, e);
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001559
Jonathan Brassowa2d2b032011-08-02 12:32:03 +01001560out:
Jon Brassow3510cb92009-12-10 23:52:11 +00001561 dm_remove_exception(&pe->e);
Alasdair G Kergon9d493fa2006-10-03 01:15:29 -07001562 snapshot_bios = bio_list_get(&pe->snapshot_bios);
Mikulas Patocka515ad662009-12-10 23:52:30 +00001563 origin_bios = bio_list_get(&pe->origin_bios);
Mikulas Patockaa6e50b42011-08-02 12:32:04 +01001564 full_bio = pe->full_bio;
Mikulas Patockafe3265b2015-11-25 16:03:31 -05001565 if (full_bio)
Mikulas Patockaa6e50b42011-08-02 12:32:04 +01001566 full_bio->bi_end_io = pe->full_bio_end_io;
Mikulas Patocka73dfd072009-12-10 23:52:34 +00001567 increment_pending_exceptions_done_count();
1568
Mikulas Patocka0685a252017-11-23 16:15:43 -05001569 mutex_unlock(&s->lock);
Alasdair G Kergon9d493fa2006-10-03 01:15:29 -07001570
1571 /* Submit any pending write bios */
Mikulas Patockaa6e50b42011-08-02 12:32:04 +01001572 if (error) {
1573 if (full_bio)
1574 bio_io_error(full_bio);
Alasdair G Kergon9d493fa2006-10-03 01:15:29 -07001575 error_bios(snapshot_bios);
Mikulas Patockaa6e50b42011-08-02 12:32:04 +01001576 } else {
1577 if (full_bio)
Christoph Hellwig4246a0b2015-07-20 15:29:37 +02001578 bio_endio(full_bio);
Alasdair G Kergon9d493fa2006-10-03 01:15:29 -07001579 flush_bios(snapshot_bios);
Mikulas Patockaa6e50b42011-08-02 12:32:04 +01001580 }
Alasdair G Kergon9d493fa2006-10-03 01:15:29 -07001581
Mikulas Patocka515ad662009-12-10 23:52:30 +00001582 retry_origin_bios(s, origin_bios);
Mikulas Patocka22aa66a2015-02-17 14:34:00 -05001583
1584 free_pending_exception(pe);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001585}
1586
Mikulas Patocka230c83a2013-11-29 18:13:37 -05001587static void complete_exception(struct dm_snap_pending_exception *pe)
1588{
1589 struct dm_snapshot *s = pe->snap;
1590
Mikulas Patocka385277b2016-01-08 19:07:55 -05001591 /* Update the metadata if we are persistent */
1592 s->store->type->commit_exception(s->store, &pe->e, !pe->copy_error,
1593 pending_complete, pe);
Mikulas Patocka230c83a2013-11-29 18:13:37 -05001594}
1595
Linus Torvalds1da177e2005-04-16 15:20:36 -07001596/*
1597 * Called when the copy I/O has finished. kcopyd actually runs
1598 * this code so don't block.
1599 */
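/*
 * Exceptions must be completed in the order their sequence numbers were
 * allocated: completions that arrive early are parked, in sequence
 * order, on s->out_of_order_list and drained once the missing
 * completion arrives.
 */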
Alasdair G Kergon4cdc1d12008-03-28 14:16:10 -07001600static void copy_callback(int read_err, unsigned long write_err, void *context)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001601{
Alasdair G Kergon028867a2007-07-12 17:26:32 +01001602 struct dm_snap_pending_exception *pe = context;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001603 struct dm_snapshot *s = pe->snap;
1604
Mikulas Patocka230c83a2013-11-29 18:13:37 -05001605 pe->copy_error = read_err || write_err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001606
Mikulas Patocka230c83a2013-11-29 18:13:37 -05001607 if (pe->exception_sequence == s->exception_complete_sequence) {
1608 s->exception_complete_sequence++;
1609 complete_exception(pe);
1610
1611 while (!list_empty(&s->out_of_order_list)) {
1612 pe = list_entry(s->out_of_order_list.next,
1613 struct dm_snap_pending_exception, out_of_order_entry);
1614 if (pe->exception_sequence != s->exception_complete_sequence)
1615 break;
1616 s->exception_complete_sequence++;
1617 list_del(&pe->out_of_order_entry);
1618 complete_exception(pe);
1619 }
1620 } else {
1621 struct list_head *lh;
1622 struct dm_snap_pending_exception *pe2;
1623
1624 list_for_each_prev(lh, &s->out_of_order_list) {
1625 pe2 = list_entry(lh, struct dm_snap_pending_exception, out_of_order_entry);
1626 if (pe2->exception_sequence < pe->exception_sequence)
1627 break;
1628 }
1629 list_add(&pe->out_of_order_entry, lh);
1630 }
Mikulas Patocka5de1e3a2019-10-02 06:14:17 -04001631 account_end_copy(s);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001632}
1633
1634/*
1635 * Dispatches the copy operation to kcopyd.
1636 */
Alasdair G Kergon028867a2007-07-12 17:26:32 +01001637static void start_copy(struct dm_snap_pending_exception *pe)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001638{
1639 struct dm_snapshot *s = pe->snap;
Heinz Mauelshagen22a1ceb2008-04-24 21:43:17 +01001640 struct dm_io_region src, dest;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001641 struct block_device *bdev = s->origin->bdev;
1642 sector_t dev_size;
1643
1644 dev_size = get_dev_size(bdev);
1645
1646 src.bdev = bdev;
Jonathan Brassow71fab002009-04-02 19:55:33 +01001647 src.sector = chunk_to_sector(s->store, pe->e.old_chunk);
Mikulas Patockadf96eee2009-10-16 23:18:17 +01001648 src.count = min((sector_t)s->store->chunk_size, dev_size - src.sector);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001649
Mike Snitzerfc56f6f2009-12-10 23:52:12 +00001650 dest.bdev = s->cow->bdev;
Jonathan Brassow71fab002009-04-02 19:55:33 +01001651 dest.sector = chunk_to_sector(s->store, pe->e.new_chunk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001652 dest.count = src.count;
1653
1654 /* Hand over to kcopyd */
Mikulas Patocka5de1e3a2019-10-02 06:14:17 -04001655 account_start_copy(s);
Jonathan Brassowa2d2b032011-08-02 12:32:03 +01001656 dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, copy_callback, pe);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001657}
1658
Christoph Hellwig4246a0b2015-07-20 15:29:37 +02001659static void full_bio_end_io(struct bio *bio)
Mikulas Patockaa6e50b42011-08-02 12:32:04 +01001660{
1661 void *callback_data = bio->bi_private;
1662
Christoph Hellwig4246a0b2015-07-20 15:29:37 +02001663 dm_kcopyd_do_callback(callback_data, 0, bio->bi_error ? 1 : 0);
Mikulas Patockaa6e50b42011-08-02 12:32:04 +01001664}
1665
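/*
 * Optimisation for writes covering an entire chunk: the bio itself
 * (already remapped to the COW device by the caller) supplies the data,
 * so no kcopyd copy from the origin is needed; the bio's completion is
 * fed into kcopyd's callback machinery via dm_kcopyd_do_callback().
 */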
1666static void start_full_bio(struct dm_snap_pending_exception *pe,
1667 struct bio *bio)
1668{
1669 struct dm_snapshot *s = pe->snap;
1670 void *callback_data;
1671
1672 pe->full_bio = bio;
1673 pe->full_bio_end_io = bio->bi_end_io;
Mikulas Patockaa6e50b42011-08-02 12:32:04 +01001674
Mikulas Patocka5de1e3a2019-10-02 06:14:17 -04001675 account_start_copy(s);
Mikulas Patockaa6e50b42011-08-02 12:32:04 +01001676 callback_data = dm_kcopyd_prepare_callback(s->kcopyd_client,
1677 copy_callback, pe);
1678
1679 bio->bi_end_io = full_bio_end_io;
1680 bio->bi_private = callback_data;
1681
1682 generic_make_request(bio);
1683}
1684
Mikulas Patocka29138082009-04-02 19:55:25 +01001685static struct dm_snap_pending_exception *
1686__lookup_pending_exception(struct dm_snapshot *s, chunk_t chunk)
1687{
Jon Brassow3510cb92009-12-10 23:52:11 +00001688 struct dm_exception *e = dm_lookup_exception(&s->pending, chunk);
Mikulas Patocka29138082009-04-02 19:55:25 +01001689
1690 if (!e)
1691 return NULL;
1692
1693 return container_of(e, struct dm_snap_pending_exception, e);
1694}
1695
Linus Torvalds1da177e2005-04-16 15:20:36 -07001696/*
 1697 * Looks to see if this snapshot already has a pending exception
 1698 * for this chunk; if not, it allocates a new one and inserts
 1699 * it into the pending table.
 1700 *
 1701 * NOTE: s->lock must be held before calling
 1702 * this function.
 1703 */
Alasdair G Kergon028867a2007-07-12 17:26:32 +01001704static struct dm_snap_pending_exception *
Mikulas Patockac6621392009-04-02 19:55:25 +01001705__find_pending_exception(struct dm_snapshot *s,
1706 struct dm_snap_pending_exception *pe, chunk_t chunk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001707{
Mikulas Patockac6621392009-04-02 19:55:25 +01001708 struct dm_snap_pending_exception *pe2;
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001709
Mikulas Patocka29138082009-04-02 19:55:25 +01001710 pe2 = __lookup_pending_exception(s, chunk);
1711 if (pe2) {
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001712 free_pending_exception(pe);
Mikulas Patocka29138082009-04-02 19:55:25 +01001713 return pe2;
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001714 }
1715
1716 pe->e.old_chunk = chunk;
1717 bio_list_init(&pe->origin_bios);
1718 bio_list_init(&pe->snapshot_bios);
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001719 pe->started = 0;
Mikulas Patockaa6e50b42011-08-02 12:32:04 +01001720 pe->full_bio = NULL;
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001721
Jonathan Brassow493df712009-04-02 19:55:31 +01001722 if (s->store->type->prepare_exception(s->store, &pe->e)) {
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001723 free_pending_exception(pe);
1724 return NULL;
1725 }
1726
Mikulas Patocka230c83a2013-11-29 18:13:37 -05001727 pe->exception_sequence = s->exception_start_sequence++;
1728
Jon Brassow3510cb92009-12-10 23:52:11 +00001729 dm_insert_exception(&s->pending, &pe->e);
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001730
Linus Torvalds1da177e2005-04-16 15:20:36 -07001731 return pe;
1732}
1733
Jon Brassow1d4989c2009-12-10 23:52:10 +00001734static void remap_exception(struct dm_snapshot *s, struct dm_exception *e,
Milan Brozd74f81f2008-02-08 02:11:27 +00001735 struct bio *bio, chunk_t chunk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001736{
Mike Snitzerfc56f6f2009-12-10 23:52:12 +00001737 bio->bi_bdev = s->cow->bdev;
Kent Overstreet4f024f32013-10-11 15:44:27 -07001738 bio->bi_iter.bi_sector =
1739 chunk_to_sector(s->store, dm_chunk_number(e->new_chunk) +
1740 (chunk - e->old_chunk)) +
1741 (bio->bi_iter.bi_sector & s->store->chunk_mask);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001742}
1743
Mikulas Patocka7de3ee52012-12-21 20:23:41 +00001744static int snapshot_map(struct dm_target *ti, struct bio *bio)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001745{
Jon Brassow1d4989c2009-12-10 23:52:10 +00001746 struct dm_exception *e;
Alasdair G Kergon028867a2007-07-12 17:26:32 +01001747 struct dm_snapshot *s = ti->private;
Kiyoshi Uedad2a7ad22006-12-08 02:41:06 -08001748 int r = DM_MAPIO_REMAPPED;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001749 chunk_t chunk;
Alasdair G Kergon028867a2007-07-12 17:26:32 +01001750 struct dm_snap_pending_exception *pe = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001751
Mikulas Patockaee180262012-12-21 20:23:41 +00001752 init_tracked_chunk(bio);
1753
Jens Axboe1eff9d32016-08-05 15:35:16 -06001754 if (bio->bi_opf & REQ_PREFLUSH) {
Mike Snitzerfc56f6f2009-12-10 23:52:12 +00001755 bio->bi_bdev = s->cow->bdev;
Mikulas Patocka494b3ee2009-06-22 10:12:25 +01001756 return DM_MAPIO_REMAPPED;
1757 }
1758
Kent Overstreet4f024f32013-10-11 15:44:27 -07001759 chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001760
1761 /* Full snapshots are not usable */
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001762 /* To get here the table must be live so s->active is always set. */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001763 if (!s->valid)
Alasdair G Kergonf6a80ea2005-07-12 15:53:01 -07001764 return -EIO;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001765
Mikulas Patocka37524332019-10-02 06:15:53 -04001766 if (bio_data_dir(bio) == WRITE) {
1767 while (unlikely(!wait_for_in_progress(s, false)))
1768 ; /* wait_for_in_progress() has slept */
1769 }
1770
Mikulas Patocka0685a252017-11-23 16:15:43 -05001771 mutex_lock(&s->lock);
Alasdair G Kergonba40a2a2006-10-03 01:15:28 -07001772
Christoph Hellwig70246282016-07-19 11:28:41 +02001773 if (!s->valid || (unlikely(s->snapshot_overflowed) &&
1774 bio_data_dir(bio) == WRITE)) {
Alasdair G Kergonba40a2a2006-10-03 01:15:28 -07001775 r = -EIO;
1776 goto out_unlock;
1777 }
1778
1779 /* If the block is already remapped - use that, else remap it */
Jon Brassow3510cb92009-12-10 23:52:11 +00001780 e = dm_lookup_exception(&s->complete, chunk);
Alasdair G Kergonba40a2a2006-10-03 01:15:28 -07001781 if (e) {
Milan Brozd74f81f2008-02-08 02:11:27 +00001782 remap_exception(s, e, bio, chunk);
Alasdair G Kergonba40a2a2006-10-03 01:15:28 -07001783 goto out_unlock;
1784 }
1785
Linus Torvalds1da177e2005-04-16 15:20:36 -07001786 /*
1787 * Write to snapshot - higher level takes care of RW/RO
1788 * flags so we should only get this if we are
1789 * writeable.
1790 */
Christoph Hellwig70246282016-07-19 11:28:41 +02001791 if (bio_data_dir(bio) == WRITE) {
Mikulas Patocka29138082009-04-02 19:55:25 +01001792 pe = __lookup_pending_exception(s, chunk);
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001793 if (!pe) {
Mikulas Patocka0685a252017-11-23 16:15:43 -05001794 mutex_unlock(&s->lock);
Mikulas Patockac6621392009-04-02 19:55:25 +01001795 pe = alloc_pending_exception(s);
Mikulas Patocka0685a252017-11-23 16:15:43 -05001796 mutex_lock(&s->lock);
Mikulas Patockac6621392009-04-02 19:55:25 +01001797
Mikulas Patocka76c44f62015-06-21 16:31:33 -04001798 if (!s->valid || s->snapshot_overflowed) {
Mikulas Patockac6621392009-04-02 19:55:25 +01001799 free_pending_exception(pe);
1800 r = -EIO;
1801 goto out_unlock;
1802 }
1803
Jon Brassow3510cb92009-12-10 23:52:11 +00001804 e = dm_lookup_exception(&s->complete, chunk);
Mikulas Patocka35bf6592009-04-02 19:55:26 +01001805 if (e) {
1806 free_pending_exception(pe);
1807 remap_exception(s, e, bio, chunk);
1808 goto out_unlock;
1809 }
1810
Mikulas Patockac6621392009-04-02 19:55:25 +01001811 pe = __find_pending_exception(s, pe, chunk);
Mikulas Patocka29138082009-04-02 19:55:25 +01001812 if (!pe) {
Mike Snitzerb0d3cc02015-10-08 18:05:41 -04001813 if (s->store->userspace_supports_overflow) {
1814 s->snapshot_overflowed = 1;
1815 DMERR("Snapshot overflowed: Unable to allocate exception.");
1816 } else
1817 __invalidate_snapshot(s, -ENOMEM);
Mikulas Patocka29138082009-04-02 19:55:25 +01001818 r = -EIO;
1819 goto out_unlock;
1820 }
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001821 }
1822
Milan Brozd74f81f2008-02-08 02:11:27 +00001823 remap_exception(s, &pe->e, bio, chunk);
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001824
Kiyoshi Uedad2a7ad22006-12-08 02:41:06 -08001825 r = DM_MAPIO_SUBMITTED;
Alasdair G Kergonba40a2a2006-10-03 01:15:28 -07001826
Mikulas Patockaa6e50b42011-08-02 12:32:04 +01001827 if (!pe->started &&
Kent Overstreet4f024f32013-10-11 15:44:27 -07001828 bio->bi_iter.bi_size ==
1829 (s->store->chunk_size << SECTOR_SHIFT)) {
Mikulas Patockaa6e50b42011-08-02 12:32:04 +01001830 pe->started = 1;
Mikulas Patocka0685a252017-11-23 16:15:43 -05001831 mutex_unlock(&s->lock);
Mikulas Patockaa6e50b42011-08-02 12:32:04 +01001832 start_full_bio(pe, bio);
1833 goto out;
1834 }
1835
1836 bio_list_add(&pe->snapshot_bios, bio);
1837
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001838 if (!pe->started) {
 1839 /* this is protected by s->lock */
1840 pe->started = 1;
Mikulas Patocka0685a252017-11-23 16:15:43 -05001841 mutex_unlock(&s->lock);
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001842 start_copy(pe);
Alasdair G Kergonba40a2a2006-10-03 01:15:28 -07001843 goto out;
1844 }
Mikulas Patockacd45daf2008-07-21 12:00:32 +01001845 } else {
Alasdair G Kergonba40a2a2006-10-03 01:15:28 -07001846 bio->bi_bdev = s->origin->bdev;
Mikulas Patockaee180262012-12-21 20:23:41 +00001847 track_chunk(s, bio, chunk);
Mikulas Patockacd45daf2008-07-21 12:00:32 +01001848 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001849
Jonathan Brassowa2d2b032011-08-02 12:32:03 +01001850out_unlock:
Mikulas Patocka0685a252017-11-23 16:15:43 -05001851 mutex_unlock(&s->lock);
Jonathan Brassowa2d2b032011-08-02 12:32:03 +01001852out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001853 return r;
1854}
1855
Mikulas Patocka3452c2a2009-12-10 23:52:31 +00001856/*
1857 * A snapshot-merge target behaves like a combination of a snapshot
1858 * target and a snapshot-origin target. It only generates new
1859 * exceptions in other snapshots and not in the one that is being
1860 * merged.
1861 *
1862 * For each chunk, if there is an existing exception, it is used to
1863 * redirect I/O to the cow device. Otherwise I/O is sent to the origin,
1864 * which in turn might generate exceptions in other snapshots.
Mikulas Patocka9fe862542009-12-10 23:52:33 +00001865 * If merging is currently taking place on the chunk in question, the
1866 * I/O is deferred by adding it to s->bios_queued_during_merge.
Mikulas Patocka3452c2a2009-12-10 23:52:31 +00001867 */
Mikulas Patocka7de3ee52012-12-21 20:23:41 +00001868static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
Mikulas Patocka3452c2a2009-12-10 23:52:31 +00001869{
1870 struct dm_exception *e;
1871 struct dm_snapshot *s = ti->private;
1872 int r = DM_MAPIO_REMAPPED;
1873 chunk_t chunk;
1874
Mikulas Patockaee180262012-12-21 20:23:41 +00001875 init_tracked_chunk(bio);
1876
Jens Axboe1eff9d32016-08-05 15:35:16 -06001877 if (bio->bi_opf & REQ_PREFLUSH) {
Alasdair G Kergon55a62ee2013-03-01 22:45:47 +00001878 if (!dm_bio_get_target_bio_nr(bio))
Mike Snitzer10b81062009-12-10 23:52:31 +00001879 bio->bi_bdev = s->origin->bdev;
1880 else
1881 bio->bi_bdev = s->cow->bdev;
Mike Snitzer10b81062009-12-10 23:52:31 +00001882 return DM_MAPIO_REMAPPED;
1883 }
1884
Kent Overstreet4f024f32013-10-11 15:44:27 -07001885 chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector);
Mikulas Patocka3452c2a2009-12-10 23:52:31 +00001886
Mikulas Patocka0685a252017-11-23 16:15:43 -05001887 mutex_lock(&s->lock);
Mikulas Patocka3452c2a2009-12-10 23:52:31 +00001888
Mikulas Patockad2fdb772009-12-10 23:52:36 +00001889 /* Full merging snapshots are redirected to the origin */
1890 if (!s->valid)
1891 goto redirect_to_origin;
Mikulas Patocka3452c2a2009-12-10 23:52:31 +00001892
1893 /* If the block is already remapped - use that */
1894 e = dm_lookup_exception(&s->complete, chunk);
1895 if (e) {
Mikulas Patocka9fe862542009-12-10 23:52:33 +00001896 /* Queue writes overlapping with chunks being merged */
Christoph Hellwig70246282016-07-19 11:28:41 +02001897 if (bio_data_dir(bio) == WRITE &&
Mikulas Patocka9fe862542009-12-10 23:52:33 +00001898 chunk >= s->first_merging_chunk &&
1899 chunk < (s->first_merging_chunk +
1900 s->num_merging_chunks)) {
1901 bio->bi_bdev = s->origin->bdev;
1902 bio_list_add(&s->bios_queued_during_merge, bio);
1903 r = DM_MAPIO_SUBMITTED;
1904 goto out_unlock;
1905 }
Mikulas Patocka17aa0332009-12-10 23:52:33 +00001906
Mikulas Patocka3452c2a2009-12-10 23:52:31 +00001907 remap_exception(s, e, bio, chunk);
Mikulas Patocka17aa0332009-12-10 23:52:33 +00001908
Christoph Hellwig70246282016-07-19 11:28:41 +02001909 if (bio_data_dir(bio) == WRITE)
Mikulas Patockaee180262012-12-21 20:23:41 +00001910 track_chunk(s, bio, chunk);
Mikulas Patocka3452c2a2009-12-10 23:52:31 +00001911 goto out_unlock;
1912 }
1913
Mikulas Patockad2fdb772009-12-10 23:52:36 +00001914redirect_to_origin:
Mikulas Patocka3452c2a2009-12-10 23:52:31 +00001915 bio->bi_bdev = s->origin->bdev;
1916
Christoph Hellwig70246282016-07-19 11:28:41 +02001917 if (bio_data_dir(bio) == WRITE) {
Mikulas Patocka0685a252017-11-23 16:15:43 -05001918 mutex_unlock(&s->lock);
Mikulas Patocka37524332019-10-02 06:15:53 -04001919 return do_origin(s->origin, bio, false);
Mikulas Patocka3452c2a2009-12-10 23:52:31 +00001920 }
1921
1922out_unlock:
Mikulas Patocka0685a252017-11-23 16:15:43 -05001923 mutex_unlock(&s->lock);
Mikulas Patocka3452c2a2009-12-10 23:52:31 +00001924
1925 return r;
1926}
1927
Mikulas Patocka7de3ee52012-12-21 20:23:41 +00001928static int snapshot_end_io(struct dm_target *ti, struct bio *bio, int error)
Mikulas Patockacd45daf2008-07-21 12:00:32 +01001929{
1930 struct dm_snapshot *s = ti->private;
Mikulas Patockacd45daf2008-07-21 12:00:32 +01001931
Mikulas Patockaee180262012-12-21 20:23:41 +00001932 if (is_bio_tracked(bio))
1933 stop_tracking_chunk(s, bio);
Mikulas Patockacd45daf2008-07-21 12:00:32 +01001934
1935 return 0;
1936}
1937
Mikulas Patocka1e03f972009-12-10 23:52:32 +00001938static void snapshot_merge_presuspend(struct dm_target *ti)
1939{
1940 struct dm_snapshot *s = ti->private;
1941
1942 stop_merge(s);
1943}
1944
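/*
 * Refuse to resume while an exception handover cannot proceed: the
 * handover source itself must never resume, and the destination may
 * only resume once the source has been suspended.
 */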
Mike Snitzerc1f0c182009-12-10 23:52:24 +00001945static int snapshot_preresume(struct dm_target *ti)
1946{
1947 int r = 0;
1948 struct dm_snapshot *s = ti->private;
1949 struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
1950
1951 down_read(&_origins_lock);
Mikulas Patocka9d3b15c2009-12-10 23:52:32 +00001952 (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
Mike Snitzerc1f0c182009-12-10 23:52:24 +00001953 if (snap_src && snap_dest) {
Mikulas Patocka0685a252017-11-23 16:15:43 -05001954 mutex_lock(&snap_src->lock);
Mike Snitzerc1f0c182009-12-10 23:52:24 +00001955 if (s == snap_src) {
1956 DMERR("Unable to resume snapshot source until "
1957 "handover completes.");
1958 r = -EINVAL;
Mike Snitzerb83b2f22011-01-13 19:59:59 +00001959 } else if (!dm_suspended(snap_src->ti)) {
Mike Snitzerc1f0c182009-12-10 23:52:24 +00001960 DMERR("Unable to perform snapshot handover until "
1961 "source is suspended.");
1962 r = -EINVAL;
1963 }
Mikulas Patocka0685a252017-11-23 16:15:43 -05001964 mutex_unlock(&snap_src->lock);
Mike Snitzerc1f0c182009-12-10 23:52:24 +00001965 }
1966 up_read(&_origins_lock);
1967
1968 return r;
1969}
1970
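/*
 * Exception handover happens here: with the origin (or the merging
 * snapshot using it) internally suspended and any running merge
 * stopped, the exception store is passed from the old snapshot target
 * to the new one sharing its COW device; everything is then resumed
 * and the snapshot is reregistered with its correct chunk size.
 */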
Linus Torvalds1da177e2005-04-16 15:20:36 -07001971static void snapshot_resume(struct dm_target *ti)
1972{
Alasdair G Kergon028867a2007-07-12 17:26:32 +01001973 struct dm_snapshot *s = ti->private;
Mikulas Patocka09ee96b2015-02-26 11:41:28 -05001974 struct dm_snapshot *snap_src = NULL, *snap_dest = NULL, *snap_merging = NULL;
Mikulas Patockab735fed2015-02-26 11:40:35 -05001975 struct dm_origin *o;
1976 struct mapped_device *origin_md = NULL;
Mikulas Patocka09ee96b2015-02-26 11:41:28 -05001977 bool must_restart_merging = false;
Mike Snitzerc1f0c182009-12-10 23:52:24 +00001978
1979 down_read(&_origins_lock);
Mikulas Patockab735fed2015-02-26 11:40:35 -05001980
1981 o = __lookup_dm_origin(s->origin->bdev);
1982 if (o)
1983 origin_md = dm_table_get_md(o->ti->table);
Mikulas Patocka09ee96b2015-02-26 11:41:28 -05001984 if (!origin_md) {
1985 (void) __find_snapshots_sharing_cow(s, NULL, NULL, &snap_merging);
1986 if (snap_merging)
1987 origin_md = dm_table_get_md(snap_merging->ti->table);
1988 }
Mikulas Patockab735fed2015-02-26 11:40:35 -05001989 if (origin_md == dm_table_get_md(ti->table))
1990 origin_md = NULL;
Mikulas Patocka09ee96b2015-02-26 11:41:28 -05001991 if (origin_md) {
1992 if (dm_hold(origin_md))
1993 origin_md = NULL;
1994 }
Mikulas Patockab735fed2015-02-26 11:40:35 -05001995
Mikulas Patocka09ee96b2015-02-26 11:41:28 -05001996 up_read(&_origins_lock);
1997
1998 if (origin_md) {
Mikulas Patockab735fed2015-02-26 11:40:35 -05001999 dm_internal_suspend_fast(origin_md);
Mikulas Patocka09ee96b2015-02-26 11:41:28 -05002000 if (snap_merging && test_bit(RUNNING_MERGE, &snap_merging->state_bits)) {
2001 must_restart_merging = true;
2002 stop_merge(snap_merging);
2003 }
2004 }
2005
2006 down_read(&_origins_lock);
Mikulas Patockab735fed2015-02-26 11:40:35 -05002007
Mikulas Patocka9d3b15c2009-12-10 23:52:32 +00002008 (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
Mike Snitzerc1f0c182009-12-10 23:52:24 +00002009 if (snap_src && snap_dest) {
Mikulas Patocka0685a252017-11-23 16:15:43 -05002010 mutex_lock(&snap_src->lock);
2011 mutex_lock_nested(&snap_dest->lock, SINGLE_DEPTH_NESTING);
Mike Snitzerc1f0c182009-12-10 23:52:24 +00002012 __handover_exceptions(snap_src, snap_dest);
Mikulas Patocka0685a252017-11-23 16:15:43 -05002013 mutex_unlock(&snap_dest->lock);
2014 mutex_unlock(&snap_src->lock);
Mike Snitzerc1f0c182009-12-10 23:52:24 +00002015 }
Mikulas Patockab735fed2015-02-26 11:40:35 -05002016
Mike Snitzerc1f0c182009-12-10 23:52:24 +00002017 up_read(&_origins_lock);
2018
Mikulas Patocka09ee96b2015-02-26 11:41:28 -05002019 if (origin_md) {
2020 if (must_restart_merging)
2021 start_merge(snap_merging);
2022 dm_internal_resume_fast(origin_md);
2023 dm_put(origin_md);
2024 }
2025
Mike Snitzerc1f0c182009-12-10 23:52:24 +00002026 /* Now we have correct chunk size, reregister */
2027 reregister_snapshot(s);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002028
Mikulas Patocka0685a252017-11-23 16:15:43 -05002029 mutex_lock(&s->lock);
Alasdair G Kergonaa14ede2006-02-01 03:04:50 -08002030 s->active = 1;
Mikulas Patocka0685a252017-11-23 16:15:43 -05002031 mutex_unlock(&s->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002032}
2033
Mike Snitzer542f9032012-07-27 15:08:00 +01002034static uint32_t get_origin_minimum_chunksize(struct block_device *bdev)
Mikulas Patocka1e03f972009-12-10 23:52:32 +00002035{
Mike Snitzer542f9032012-07-27 15:08:00 +01002036 uint32_t min_chunksize;
Mikulas Patocka1e03f972009-12-10 23:52:32 +00002037
2038 down_read(&_origins_lock);
2039 min_chunksize = __minimum_chunk_size(__lookup_origin(bdev));
2040 up_read(&_origins_lock);
2041
2042 return min_chunksize;
2043}
2044
2045static void snapshot_merge_resume(struct dm_target *ti)
2046{
2047 struct dm_snapshot *s = ti->private;
2048
2049 /*
2050 * Handover exceptions from existing snapshot.
2051 */
2052 snapshot_resume(ti);
2053
2054 /*
Mike Snitzer542f9032012-07-27 15:08:00 +01002055 * snapshot-merge acts as an origin, so set ti->max_io_len
Mikulas Patocka1e03f972009-12-10 23:52:32 +00002056 */
Mike Snitzer542f9032012-07-27 15:08:00 +01002057 ti->max_io_len = get_origin_minimum_chunksize(s->origin->bdev);
Mikulas Patocka1e03f972009-12-10 23:52:32 +00002058
2059 start_merge(s);
2060}
2061
Mikulas Patockafd7c0922013-03-01 22:45:44 +00002062static void snapshot_status(struct dm_target *ti, status_type_t type,
2063 unsigned status_flags, char *result, unsigned maxlen)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002064{
Jonathan Brassow2e4a31d2009-04-02 19:55:34 +01002065 unsigned sz = 0;
Alasdair G Kergon028867a2007-07-12 17:26:32 +01002066 struct dm_snapshot *snap = ti->private;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002067
2068 switch (type) {
2069 case STATUSTYPE_INFO:
Mikulas Patocka94e76572009-12-10 23:51:53 +00002070
Mikulas Patocka0685a252017-11-23 16:15:43 -05002071 mutex_lock(&snap->lock);
Mikulas Patocka94e76572009-12-10 23:51:53 +00002072
Linus Torvalds1da177e2005-04-16 15:20:36 -07002073 if (!snap->valid)
Jonathan Brassow2e4a31d2009-04-02 19:55:34 +01002074 DMEMIT("Invalid");
Mike Snitzerd8ddb1c2009-12-10 23:52:35 +00002075 else if (snap->merge_failed)
2076 DMEMIT("Merge failed");
Mikulas Patocka76c44f62015-06-21 16:31:33 -04002077 else if (snap->snapshot_overflowed)
2078 DMEMIT("Overflow");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002079 else {
Mike Snitzer985903b2009-12-10 23:52:11 +00002080 if (snap->store->type->usage) {
2081 sector_t total_sectors, sectors_allocated,
2082 metadata_sectors;
2083 snap->store->type->usage(snap->store,
2084 &total_sectors,
2085 &sectors_allocated,
2086 &metadata_sectors);
2087 DMEMIT("%llu/%llu %llu",
2088 (unsigned long long)sectors_allocated,
2089 (unsigned long long)total_sectors,
2090 (unsigned long long)metadata_sectors);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002091 }
2092 else
Jonathan Brassow2e4a31d2009-04-02 19:55:34 +01002093 DMEMIT("Unknown");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002094 }
Mikulas Patocka94e76572009-12-10 23:51:53 +00002095
Mikulas Patocka0685a252017-11-23 16:15:43 -05002096 mutex_unlock(&snap->lock);
Mikulas Patocka94e76572009-12-10 23:51:53 +00002097
Linus Torvalds1da177e2005-04-16 15:20:36 -07002098 break;
2099
2100 case STATUSTYPE_TABLE:
 2101 /*
 2102 * Emit the origin and COW device names, then let the
 2103 * exception store's status method append its own
 2104 * table arguments.
 2105 */
Mike Snitzerfc56f6f2009-12-10 23:52:12 +00002106 DMEMIT("%s %s", snap->origin->name, snap->cow->name);
Jonathan Brassow1e302a92009-04-02 19:55:35 +01002107 snap->store->type->status(snap->store, type, result + sz,
2108 maxlen - sz);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002109 break;
2110 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002111}
2112
Mike Snitzer8811f462009-09-04 20:40:19 +01002113static int snapshot_iterate_devices(struct dm_target *ti,
2114 iterate_devices_callout_fn fn, void *data)
2115{
2116 struct dm_snapshot *snap = ti->private;
Mikulas Patocka1e5554c2010-08-12 04:13:50 +01002117 int r;
Mike Snitzer8811f462009-09-04 20:40:19 +01002118
Mikulas Patocka1e5554c2010-08-12 04:13:50 +01002119 r = fn(ti, snap->origin, 0, ti->len, data);
2120
2121 if (!r)
2122 r = fn(ti, snap->cow, 0, get_dev_size(snap->cow->bdev), data);
2123
2124 return r;
Mike Snitzer8811f462009-09-04 20:40:19 +01002125}
2126
2127
Linus Torvalds1da177e2005-04-16 15:20:36 -07002128/*-----------------------------------------------------------------
2129 * Origin methods
2130 *---------------------------------------------------------------*/
Mikulas Patocka9eaae8f2009-12-10 23:52:28 +00002131
2132/*
2133 * If no exceptions need creating, DM_MAPIO_REMAPPED is returned and any
 2134 * supplied bio is ignored. The caller may submit it immediately.
2135 * (No remapping actually occurs as the origin is always a direct linear
2136 * map.)
2137 *
2138 * If further exceptions are required, DM_MAPIO_SUBMITTED is returned
2139 * and any supplied bio is added to a list to be submitted once all
2140 * the necessary exceptions exist.
2141 */
2142static int __origin_write(struct list_head *snapshots, sector_t sector,
2143 struct bio *bio)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002144{
Mikulas Patocka515ad662009-12-10 23:52:30 +00002145 int r = DM_MAPIO_REMAPPED;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002146 struct dm_snapshot *snap;
Jon Brassow1d4989c2009-12-10 23:52:10 +00002147 struct dm_exception *e;
Mikulas Patocka515ad662009-12-10 23:52:30 +00002148 struct dm_snap_pending_exception *pe;
2149 struct dm_snap_pending_exception *pe_to_start_now = NULL;
2150 struct dm_snap_pending_exception *pe_to_start_last = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002151 chunk_t chunk;
2152
2153 /* Do all the snapshots on this origin */
2154 list_for_each_entry (snap, snapshots, list) {
Mikulas Patocka3452c2a2009-12-10 23:52:31 +00002155 /*
2156 * Don't make new exceptions in a merging snapshot
2157 * because it has effectively been deleted
2158 */
2159 if (dm_target_is_snapshot_merge(snap->ti))
2160 continue;
2161
Mikulas Patocka0685a252017-11-23 16:15:43 -05002162 mutex_lock(&snap->lock);
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08002163
Alasdair G Kergonaa14ede2006-02-01 03:04:50 -08002164 /* Only deal with valid and active snapshots */
2165 if (!snap->valid || !snap->active)
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08002166 goto next_snapshot;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002167
Alasdair G Kergond5e404c2005-07-12 15:53:05 -07002168 /* Nothing to do if writing beyond end of snapshot */
Mikulas Patocka9eaae8f2009-12-10 23:52:28 +00002169 if (sector >= dm_table_get_size(snap->ti->table))
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08002170 goto next_snapshot;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002171
2172 /*
2173 * Remember, different snapshots can have
2174 * different chunk sizes.
2175 */
Mikulas Patocka9eaae8f2009-12-10 23:52:28 +00002176 chunk = sector_to_chunk(snap->store, sector);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002177
2178 /*
2179 * Check exception table to see if block
2180 * is already remapped in this snapshot
2181 * and trigger an exception if not.
2182 */
Jon Brassow3510cb92009-12-10 23:52:11 +00002183 e = dm_lookup_exception(&snap->complete, chunk);
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08002184 if (e)
2185 goto next_snapshot;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002186
Mikulas Patocka29138082009-04-02 19:55:25 +01002187 pe = __lookup_pending_exception(snap, chunk);
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08002188 if (!pe) {
Mikulas Patocka0685a252017-11-23 16:15:43 -05002189 mutex_unlock(&snap->lock);
Mikulas Patockac6621392009-04-02 19:55:25 +01002190 pe = alloc_pending_exception(snap);
Mikulas Patocka0685a252017-11-23 16:15:43 -05002191 mutex_lock(&snap->lock);
Mikulas Patockac6621392009-04-02 19:55:25 +01002192
2193 if (!snap->valid) {
2194 free_pending_exception(pe);
2195 goto next_snapshot;
2196 }
2197
Jon Brassow3510cb92009-12-10 23:52:11 +00002198 e = dm_lookup_exception(&snap->complete, chunk);
Mikulas Patocka35bf6592009-04-02 19:55:26 +01002199 if (e) {
2200 free_pending_exception(pe);
2201 goto next_snapshot;
2202 }
2203
Mikulas Patockac6621392009-04-02 19:55:25 +01002204 pe = __find_pending_exception(snap, pe, chunk);
Mikulas Patocka29138082009-04-02 19:55:25 +01002205 if (!pe) {
2206 __invalidate_snapshot(snap, -ENOMEM);
2207 goto next_snapshot;
2208 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002209 }
2210
Mikulas Patocka515ad662009-12-10 23:52:30 +00002211 r = DM_MAPIO_SUBMITTED;
2212
2213 /*
2214 * If an origin bio was supplied, queue it to wait for the
2215 * completion of this exception, and start this one last,
2216 * at the end of the function.
2217 */
2218 if (bio) {
2219 bio_list_add(&pe->origin_bios, bio);
2220 bio = NULL;
2221
2222 if (!pe->started) {
2223 pe->started = 1;
2224 pe_to_start_last = pe;
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08002225 }
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08002226 }
2227
2228 if (!pe->started) {
2229 pe->started = 1;
Mikulas Patocka515ad662009-12-10 23:52:30 +00002230 pe_to_start_now = pe;
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08002231 }
2232
Jonathan Brassowa2d2b032011-08-02 12:32:03 +01002233next_snapshot:
Mikulas Patocka0685a252017-11-23 16:15:43 -05002234 mutex_unlock(&snap->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002235
Mikulas Patocka515ad662009-12-10 23:52:30 +00002236 if (pe_to_start_now) {
2237 start_copy(pe_to_start_now);
2238 pe_to_start_now = NULL;
2239 }
Alasdair G Kergonb4b610f2006-03-27 01:17:44 -08002240 }
2241
Linus Torvalds1da177e2005-04-16 15:20:36 -07002242 /*
Mikulas Patocka515ad662009-12-10 23:52:30 +00002243 * Submit the exception against which the bio is queued last,
2244 * to give the other exceptions a head start.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002245 */
Mikulas Patocka515ad662009-12-10 23:52:30 +00002246 if (pe_to_start_last)
2247 start_copy(pe_to_start_last);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002248
2249 return r;
2250}
2251
2252/*
2253 * Called on a write from the origin driver.
2254 */
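/*
 * If 'limit' is set, throttle against each snapshot's kcopyd backlog;
 * wait_for_in_progress() drops _origins_lock before sleeping, so the
 * origin lookup is restarted from 'again' after any sleep.
 */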
Mikulas Patocka37524332019-10-02 06:15:53 -04002255static int do_origin(struct dm_dev *origin, struct bio *bio, bool limit)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002256{
2257 struct origin *o;
Kiyoshi Uedad2a7ad22006-12-08 02:41:06 -08002258 int r = DM_MAPIO_REMAPPED;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002259
Mikulas Patocka37524332019-10-02 06:15:53 -04002260again:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002261 down_read(&_origins_lock);
2262 o = __lookup_origin(origin->bdev);
Mikulas Patocka37524332019-10-02 06:15:53 -04002263 if (o) {
2264 if (limit) {
2265 struct dm_snapshot *s;
2266 list_for_each_entry(s, &o->snapshots, list)
2267 if (unlikely(!wait_for_in_progress(s, true)))
2268 goto again;
2269 }
2270
Kent Overstreet4f024f32013-10-11 15:44:27 -07002271 r = __origin_write(&o->snapshots, bio->bi_iter.bi_sector, bio);
Mikulas Patocka37524332019-10-02 06:15:53 -04002272 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002273 up_read(&_origins_lock);
2274
2275 return r;
2276}
2277
2278/*
Mikulas Patocka73dfd072009-12-10 23:52:34 +00002279 * Trigger exceptions in all non-merging snapshots.
2280 *
2281 * The chunk size of the merging snapshot may be larger than the chunk
2282 * size of some other snapshot so we may need to reallocate multiple
2283 * chunks in other snapshots.
2284 *
2285 * We scan all the overlapping exceptions in the other snapshots.
2286 * Returns 1 if anything was reallocated and must be waited for,
2287 * otherwise returns 0.
2288 *
2289 * size must be a multiple of merging_snap's chunk_size.
2290 */
2291static int origin_write_extent(struct dm_snapshot *merging_snap,
2292 sector_t sector, unsigned size)
2293{
2294 int must_wait = 0;
2295 sector_t n;
2296 struct origin *o;
2297
2298 /*
Mike Snitzer542f9032012-07-27 15:08:00 +01002299 * The origin's __minimum_chunk_size() got stored in max_io_len
Mikulas Patocka73dfd072009-12-10 23:52:34 +00002300 * by snapshot_merge_resume().
2301 */
2302 down_read(&_origins_lock);
2303 o = __lookup_origin(merging_snap->origin->bdev);
Mike Snitzer542f9032012-07-27 15:08:00 +01002304 for (n = 0; n < size; n += merging_snap->ti->max_io_len)
Mikulas Patocka73dfd072009-12-10 23:52:34 +00002305 if (__origin_write(&o->snapshots, sector + n, NULL) ==
2306 DM_MAPIO_SUBMITTED)
2307 must_wait = 1;
2308 up_read(&_origins_lock);
2309
2310 return must_wait;
2311}
2312
2313/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002314 * Origin: maps a linear range of a device, with hooks for snapshotting.
2315 */
2316
2317/*
2318 * Construct an origin mapping: <dev_path>
2319 * The context for an origin is merely a 'struct dm_dev *'
2320 * pointing to the real device.
2321 */
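/*
 * Illustrative table line (start/length and the device name are
 * placeholders):
 *   0 <num_sectors> snapshot-origin /dev/<vg>/<base_device>
 */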
2322static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
2323{
2324 int r;
Mikulas Patocka599cdf32014-03-14 18:42:12 -04002325 struct dm_origin *o;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002326
2327 if (argc != 1) {
Alasdair G Kergon72d94862006-06-26 00:27:35 -07002328 ti->error = "origin: incorrect number of arguments";
Linus Torvalds1da177e2005-04-16 15:20:36 -07002329 return -EINVAL;
2330 }
2331
Mikulas Patocka599cdf32014-03-14 18:42:12 -04002332 o = kmalloc(sizeof(struct dm_origin), GFP_KERNEL);
2333 if (!o) {
2334 ti->error = "Cannot allocate private origin structure";
2335 r = -ENOMEM;
2336 goto bad_alloc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002337 }
2338
Mikulas Patocka599cdf32014-03-14 18:42:12 -04002339 r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &o->dev);
2340 if (r) {
2341 ti->error = "Cannot get target device";
2342 goto bad_open;
2343 }
2344
Mikulas Patockab735fed2015-02-26 11:40:35 -05002345 o->ti = ti;
Mikulas Patocka599cdf32014-03-14 18:42:12 -04002346 ti->private = o;
Alasdair G Kergon55a62ee2013-03-01 22:45:47 +00002347 ti->num_flush_bios = 1;
Mikulas Patocka494b3ee2009-06-22 10:12:25 +01002348
Linus Torvalds1da177e2005-04-16 15:20:36 -07002349 return 0;
Mikulas Patocka599cdf32014-03-14 18:42:12 -04002350
2351bad_open:
2352 kfree(o);
2353bad_alloc:
2354 return r;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002355}
2356
2357static void origin_dtr(struct dm_target *ti)
2358{
Mikulas Patocka599cdf32014-03-14 18:42:12 -04002359 struct dm_origin *o = ti->private;
Mikulas Patockab735fed2015-02-26 11:40:35 -05002360
Mikulas Patocka599cdf32014-03-14 18:42:12 -04002361 dm_put_device(ti, o->dev);
2362 kfree(o);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002363}
2364
Mikulas Patocka7de3ee52012-12-21 20:23:41 +00002365static int origin_map(struct dm_target *ti, struct bio *bio)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002366{
Mikulas Patocka599cdf32014-03-14 18:42:12 -04002367 struct dm_origin *o = ti->private;
Mikulas Patocka298eaa82014-03-14 18:43:07 -04002368 unsigned available_sectors;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002369
Mikulas Patocka599cdf32014-03-14 18:42:12 -04002370 bio->bi_bdev = o->dev->bdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002371
Jens Axboe1eff9d32016-08-05 15:35:16 -06002372 if (unlikely(bio->bi_opf & REQ_PREFLUSH))
Mikulas Patocka494b3ee2009-06-22 10:12:25 +01002373 return DM_MAPIO_REMAPPED;
2374
Christoph Hellwig70246282016-07-19 11:28:41 +02002375 if (bio_data_dir(bio) != WRITE)
Mikulas Patocka298eaa82014-03-14 18:43:07 -04002376 return DM_MAPIO_REMAPPED;
2377
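	/*
	 * Writes must not cross a chunk boundary of the snapshot with the
	 * smallest chunk size (o->split_boundary, a power of two): trim
	 * the bio to the remainder of the current chunk and let the dm
	 * core resubmit the rest.
	 */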
2378 available_sectors = o->split_boundary -
2379 ((unsigned)bio->bi_iter.bi_sector & (o->split_boundary - 1));
2380
2381 if (bio_sectors(bio) > available_sectors)
2382 dm_accept_partial_bio(bio, available_sectors);
2383
Linus Torvalds1da177e2005-04-16 15:20:36 -07002384 /* Only tell snapshots if this is a write */
Mikulas Patocka37524332019-10-02 06:15:53 -04002385 return do_origin(o->dev, bio, true);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002386}
2387
Toshi Kanif6e629b2016-06-28 13:37:16 -06002388static long origin_direct_access(struct dm_target *ti, sector_t sector,
Linus Torvaldsf0c98eb2016-07-28 17:22:07 -07002389 void **kaddr, pfn_t *pfn, long size)
Toshi Kanif6e629b2016-06-28 13:37:16 -06002390{
2391 DMWARN("device does not support dax.");
2392 return -EIO;
2393}
2394
Linus Torvalds1da177e2005-04-16 15:20:36 -07002395/*
Mike Snitzer542f9032012-07-27 15:08:00 +01002396 * Set the target "max_io_len" field to the minimum of all the snapshots'
Linus Torvalds1da177e2005-04-16 15:20:36 -07002397 * chunk sizes.
2398 */
2399static void origin_resume(struct dm_target *ti)
2400{
Mikulas Patocka599cdf32014-03-14 18:42:12 -04002401 struct dm_origin *o = ti->private;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002402
Mikulas Patocka298eaa82014-03-14 18:43:07 -04002403 o->split_boundary = get_origin_minimum_chunksize(o->dev->bdev);
Mikulas Patockab735fed2015-02-26 11:40:35 -05002404
2405 down_write(&_origins_lock);
2406 __insert_dm_origin(o);
2407 up_write(&_origins_lock);
2408}
2409
2410static void origin_postsuspend(struct dm_target *ti)
2411{
2412 struct dm_origin *o = ti->private;
2413
2414 down_write(&_origins_lock);
2415 __remove_dm_origin(o);
2416 up_write(&_origins_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002417}
2418
Mikulas Patockafd7c0922013-03-01 22:45:44 +00002419static void origin_status(struct dm_target *ti, status_type_t type,
2420 unsigned status_flags, char *result, unsigned maxlen)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002421{
Mikulas Patocka599cdf32014-03-14 18:42:12 -04002422 struct dm_origin *o = ti->private;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002423
2424 switch (type) {
2425 case STATUSTYPE_INFO:
2426 result[0] = '\0';
2427 break;
2428
2429 case STATUSTYPE_TABLE:
Mikulas Patocka599cdf32014-03-14 18:42:12 -04002430 snprintf(result, maxlen, "%s", o->dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002431 break;
2432 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002433}
2434
Mike Snitzer8811f462009-09-04 20:40:19 +01002435static int origin_iterate_devices(struct dm_target *ti,
2436 iterate_devices_callout_fn fn, void *data)
2437{
Mikulas Patocka599cdf32014-03-14 18:42:12 -04002438 struct dm_origin *o = ti->private;
Mike Snitzer8811f462009-09-04 20:40:19 +01002439
Mikulas Patocka599cdf32014-03-14 18:42:12 -04002440 return fn(ti, o->dev, 0, ti->len, data);
Mike Snitzer8811f462009-09-04 20:40:19 +01002441}
2442
Linus Torvalds1da177e2005-04-16 15:20:36 -07002443static struct target_type origin_target = {
2444 .name = "snapshot-origin",
Mikulas Patockab735fed2015-02-26 11:40:35 -05002445 .version = {1, 9, 0},
Linus Torvalds1da177e2005-04-16 15:20:36 -07002446 .module = THIS_MODULE,
2447 .ctr = origin_ctr,
2448 .dtr = origin_dtr,
2449 .map = origin_map,
2450 .resume = origin_resume,
Mikulas Patockab735fed2015-02-26 11:40:35 -05002451 .postsuspend = origin_postsuspend,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002452 .status = origin_status,
Mike Snitzer8811f462009-09-04 20:40:19 +01002453 .iterate_devices = origin_iterate_devices,
Toshi Kanif6e629b2016-06-28 13:37:16 -06002454 .direct_access = origin_direct_access,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002455};
2456
2457static struct target_type snapshot_target = {
2458 .name = "snapshot",
Mike Snitzerb0d3cc02015-10-08 18:05:41 -04002459 .version = {1, 15, 0},
Linus Torvalds1da177e2005-04-16 15:20:36 -07002460 .module = THIS_MODULE,
2461 .ctr = snapshot_ctr,
2462 .dtr = snapshot_dtr,
2463 .map = snapshot_map,
Mikulas Patockacd45daf2008-07-21 12:00:32 +01002464 .end_io = snapshot_end_io,
Mike Snitzerc1f0c182009-12-10 23:52:24 +00002465 .preresume = snapshot_preresume,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002466 .resume = snapshot_resume,
2467 .status = snapshot_status,
Mike Snitzer8811f462009-09-04 20:40:19 +01002468 .iterate_devices = snapshot_iterate_devices,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002469};
2470
Mikulas Patockad698aa42009-12-10 23:52:30 +00002471static struct target_type merge_target = {
2472 .name = dm_snapshot_merge_target_name,
Mike Snitzerb0d3cc02015-10-08 18:05:41 -04002473 .version = {1, 4, 0},
Mikulas Patockad698aa42009-12-10 23:52:30 +00002474 .module = THIS_MODULE,
2475 .ctr = snapshot_ctr,
2476 .dtr = snapshot_dtr,
Mikulas Patocka3452c2a2009-12-10 23:52:31 +00002477 .map = snapshot_merge_map,
Mikulas Patockad698aa42009-12-10 23:52:30 +00002478 .end_io = snapshot_end_io,
Mikulas Patocka1e03f972009-12-10 23:52:32 +00002479 .presuspend = snapshot_merge_presuspend,
Mikulas Patockad698aa42009-12-10 23:52:30 +00002480 .preresume = snapshot_preresume,
Mikulas Patocka1e03f972009-12-10 23:52:32 +00002481 .resume = snapshot_merge_resume,
Mikulas Patockad698aa42009-12-10 23:52:30 +00002482 .status = snapshot_status,
2483 .iterate_devices = snapshot_iterate_devices,
2484};
2485
Linus Torvalds1da177e2005-04-16 15:20:36 -07002486static int __init dm_snapshot_init(void)
2487{
2488 int r;
2489
Alasdair G Kergon4db6bfe2009-01-06 03:05:17 +00002490 r = dm_exception_store_init();
2491 if (r) {
2492 DMERR("Failed to initialize exception stores");
2493 return r;
2494 }
2495
Linus Torvalds1da177e2005-04-16 15:20:36 -07002496 r = dm_register_target(&snapshot_target);
Mikulas Patockad698aa42009-12-10 23:52:30 +00002497 if (r < 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002498 DMERR("snapshot target register failed %d", r);
Jonathan Brassow034a1862009-10-16 23:18:14 +01002499 goto bad_register_snapshot_target;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002500 }
2501
2502 r = dm_register_target(&origin_target);
2503 if (r < 0) {
Alasdair G Kergon72d94862006-06-26 00:27:35 -07002504 DMERR("Origin target register failed %d", r);
Mikulas Patockad698aa42009-12-10 23:52:30 +00002505 goto bad_register_origin_target;
2506 }
2507
2508 r = dm_register_target(&merge_target);
2509 if (r < 0) {
2510 DMERR("Merge target register failed %d", r);
2511 goto bad_register_merge_target;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002512 }
2513
2514 r = init_origin_hash();
2515 if (r) {
2516 DMERR("init_origin_hash failed.");
Mikulas Patockad698aa42009-12-10 23:52:30 +00002517 goto bad_origin_hash;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002518 }
2519
Jon Brassow1d4989c2009-12-10 23:52:10 +00002520 exception_cache = KMEM_CACHE(dm_exception, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002521 if (!exception_cache) {
2522 DMERR("Couldn't create exception cache.");
2523 r = -ENOMEM;
Mikulas Patockad698aa42009-12-10 23:52:30 +00002524 goto bad_exception_cache;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002525 }
2526
Alasdair G Kergon028867a2007-07-12 17:26:32 +01002527 pending_cache = KMEM_CACHE(dm_snap_pending_exception, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002528 if (!pending_cache) {
2529 DMERR("Couldn't create pending cache.");
2530 r = -ENOMEM;
Mikulas Patockad698aa42009-12-10 23:52:30 +00002531 goto bad_pending_cache;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002532 }
2533
Linus Torvalds1da177e2005-04-16 15:20:36 -07002534 return 0;
2535
Mikulas Patockad698aa42009-12-10 23:52:30 +00002536bad_pending_cache:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002537 kmem_cache_destroy(exception_cache);
Mikulas Patockad698aa42009-12-10 23:52:30 +00002538bad_exception_cache:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002539 exit_origin_hash();
Mikulas Patockad698aa42009-12-10 23:52:30 +00002540bad_origin_hash:
2541 dm_unregister_target(&merge_target);
2542bad_register_merge_target:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002543 dm_unregister_target(&origin_target);
Mikulas Patockad698aa42009-12-10 23:52:30 +00002544bad_register_origin_target:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002545 dm_unregister_target(&snapshot_target);
Jonathan Brassow034a1862009-10-16 23:18:14 +01002546bad_register_snapshot_target:
2547 dm_exception_store_exit();
Mikulas Patockad698aa42009-12-10 23:52:30 +00002548
Linus Torvalds1da177e2005-04-16 15:20:36 -07002549 return r;
2550}
2551
2552static void __exit dm_snapshot_exit(void)
2553{
Mikulas Patocka10d3bd02009-01-06 03:04:58 +00002554 dm_unregister_target(&snapshot_target);
2555 dm_unregister_target(&origin_target);
Mikulas Patockad698aa42009-12-10 23:52:30 +00002556 dm_unregister_target(&merge_target);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002557
2558 exit_origin_hash();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002559 kmem_cache_destroy(pending_cache);
2560 kmem_cache_destroy(exception_cache);
Alasdair G Kergon4db6bfe2009-01-06 03:05:17 +00002561
2562 dm_exception_store_exit();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002563}
2564
2565/* Module hooks */
2566module_init(dm_snapshot_init);
2567module_exit(dm_snapshot_exit);
2568
2569MODULE_DESCRIPTION(DM_NAME " snapshot target");
2570MODULE_AUTHOR("Joe Thornber");
2571MODULE_LICENSE("GPL");
Mikulas Patocka23cb2102013-03-01 22:45:47 +00002572MODULE_ALIAS("dm-snapshot-origin");
2573MODULE_ALIAS("dm-snapshot-merge");