blob: 3766386080a48fbfb06226ae80646b5e2f0e653a
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * dm-snapshot.c
3 *
4 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
5 *
6 * This file is released under the GPL.
7 */
8
9#include <linux/blkdev.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070010#include <linux/device-mapper.h>
Mikulas Patocka90fa1522009-01-06 03:04:54 +000011#include <linux/delay.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070012#include <linux/fs.h>
13#include <linux/init.h>
14#include <linux/kdev_t.h>
15#include <linux/list.h>
16#include <linux/mempool.h>
17#include <linux/module.h>
18#include <linux/slab.h>
19#include <linux/vmalloc.h>
vignesh babu6f3c3f02007-10-19 22:38:44 +010020#include <linux/log2.h>
Alasdair G Kergona765e202008-04-24 22:02:01 +010021#include <linux/dm-kcopyd.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070022
Mikulas Patockab735fed2015-02-26 11:40:35 -050023#include "dm.h"
24
Jonathan Brassowaea53d92009-01-06 03:05:15 +000025#include "dm-exception-store.h"
Linus Torvalds1da177e2005-04-16 15:20:36 -070026
Alasdair G Kergon72d94862006-06-26 00:27:35 -070027#define DM_MSG_PREFIX "snapshots"
28
Mikulas Patockad698aa42009-12-10 23:52:30 +000029static const char dm_snapshot_merge_target_name[] = "snapshot-merge";
30
31#define dm_target_is_snapshot_merge(ti) \
32 ((ti)->type->name == dm_snapshot_merge_target_name)
33
Linus Torvalds1da177e2005-04-16 15:20:36 -070034/*
Mikulas Patockacd45daf2008-07-21 12:00:32 +010035 * The size of the mempool used to track chunks in use.
36 */
37#define MIN_IOS 256
38
Jonathan Brassowccc45ea2009-04-02 19:55:34 +010039#define DM_TRACKED_CHUNK_HASH_SIZE 16
40#define DM_TRACKED_CHUNK_HASH(x) ((unsigned long)(x) & \
41 (DM_TRACKED_CHUNK_HASH_SIZE - 1))
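/*
 * Illustrative example (not part of the driver): with
 * DM_TRACKED_CHUNK_HASH_SIZE = 16 the hash simply keeps the low four bits
 * of the chunk number, so chunk 0x1234 maps to bucket 0x4 and chunks that
 * differ only in their upper bits share a bucket. A small table suffices
 * because only chunks with reads currently in flight are tracked here.
 */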
42
Jon Brassow191437a2009-12-10 23:52:10 +000043struct dm_exception_table {
Jonathan Brassowccc45ea2009-04-02 19:55:34 +010044 uint32_t hash_mask;
45 unsigned hash_shift;
46 struct list_head *table;
47};
48
49struct dm_snapshot {
50 struct rw_semaphore lock;
51
52 struct dm_dev *origin;
Mike Snitzerfc56f6f2009-12-10 23:52:12 +000053 struct dm_dev *cow;
54
55 struct dm_target *ti;
Jonathan Brassowccc45ea2009-04-02 19:55:34 +010056
57 /* List of snapshots per Origin */
58 struct list_head list;
59
Mike Snitzerd8ddb1c2009-12-10 23:52:35 +000060 /*
61 * You can't use a snapshot if this is 0 (e.g. if full).
62 * A snapshot-merge target never clears this.
63 */
Jonathan Brassowccc45ea2009-04-02 19:55:34 +010064 int valid;
65
Mikulas Patocka76c44f62015-06-21 16:31:33 -040066 /*
67 * The snapshot overflowed because of a write to the snapshot device.
68 * We don't have to invalidate the snapshot in this case, but we need
69 * to prevent further writes.
70 */
71 int snapshot_overflowed;
72
Jonathan Brassowccc45ea2009-04-02 19:55:34 +010073 /* Origin writes don't trigger exceptions until this is set */
74 int active;
75
Jonathan Brassowccc45ea2009-04-02 19:55:34 +010076 atomic_t pending_exceptions_count;
77
Mikulas Patocka230c83a2013-11-29 18:13:37 -050078 /* Protected by "lock" */
79 sector_t exception_start_sequence;
80
81 /* Protected by kcopyd single-threaded callback */
82 sector_t exception_complete_sequence;
83
84 /*
85 * A list of pending exceptions that completed out of order.
86 * Protected by kcopyd single-threaded callback.
87 */
88 struct list_head out_of_order_list;
89
Mike Snitzer924e6002010-03-06 02:32:33 +000090 mempool_t *pending_pool;
91
Jon Brassow191437a2009-12-10 23:52:10 +000092 struct dm_exception_table pending;
93 struct dm_exception_table complete;
Jonathan Brassowccc45ea2009-04-02 19:55:34 +010094
95 /*
96 * pe_lock protects all pending_exception operations and access
97 * as well as the snapshot_bios list.
98 */
99 spinlock_t pe_lock;
100
Mike Snitzer924e6002010-03-06 02:32:33 +0000101 /* Chunks with outstanding reads */
102 spinlock_t tracked_chunk_lock;
Mike Snitzer924e6002010-03-06 02:32:33 +0000103 struct hlist_head tracked_chunk_hash[DM_TRACKED_CHUNK_HASH_SIZE];
104
Jonathan Brassowccc45ea2009-04-02 19:55:34 +0100105 /* The on disk metadata handler */
106 struct dm_exception_store *store;
107
108 struct dm_kcopyd_client *kcopyd_client;
109
Mike Snitzer924e6002010-03-06 02:32:33 +0000110 /* Wait for events based on state_bits */
111 unsigned long state_bits;
112
113 /* Range of chunks currently being merged. */
114 chunk_t first_merging_chunk;
115 int num_merging_chunks;
Mikulas Patocka1e03f972009-12-10 23:52:32 +0000116
Mike Snitzerd8ddb1c2009-12-10 23:52:35 +0000117 /*
118 * The merge operation failed if this flag is set.
119 * Failure modes are handled as follows:
120 * - I/O error reading the header
121 * => don't load the target; abort.
122 * - Header does not have "valid" flag set
123 * => use the origin; forget about the snapshot.
124 * - I/O error when reading exceptions
125 * => don't load the target; abort.
126 * (We can't use the intermediate origin state.)
127 * - I/O error while merging
128 * => stop merging; set merge_failed; process I/O normally.
129 */
130 int merge_failed;
131
Mikulas Patocka9fe862542009-12-10 23:52:33 +0000132 /*
133 * Incoming bios that overlap with chunks being merged must wait
134 * for them to be committed.
135 */
136 struct bio_list bios_queued_during_merge;
Jonathan Brassowccc45ea2009-04-02 19:55:34 +0100137};
138
Mikulas Patocka1e03f972009-12-10 23:52:32 +0000139/*
140 * state_bits:
141 * RUNNING_MERGE - Merge operation is in progress.
142 * SHUTDOWN_MERGE - Set to signal that merge needs to be stopped;
143 * cleared afterwards.
144 */
145#define RUNNING_MERGE 0
146#define SHUTDOWN_MERGE 1
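/*
 * Example of the handshake (see start_merge(), stop_merge() and
 * merge_shutdown() below): start_merge() uses test_and_set_bit(RUNNING_MERGE)
 * so only one merge runs at a time; stop_merge() sets SHUTDOWN_MERGE and then
 * wait_on_bit(RUNNING_MERGE) until merge_shutdown() clears the bit with
 * clear_bit_unlock() and wakes the waiter.
 */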
147
Mikulas Patockadf5d2e92013-03-01 22:45:49 +0000148DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
149 "A percentage of time allocated for copy on write");
150
Mikulas Patockac2411042010-08-12 04:13:51 +0100151struct dm_dev *dm_snap_origin(struct dm_snapshot *s)
152{
153 return s->origin;
154}
155EXPORT_SYMBOL(dm_snap_origin);
156
Mike Snitzerfc56f6f2009-12-10 23:52:12 +0000157struct dm_dev *dm_snap_cow(struct dm_snapshot *s)
158{
159 return s->cow;
160}
161EXPORT_SYMBOL(dm_snap_cow);
162
Jonathan Brassowccc45ea2009-04-02 19:55:34 +0100163static sector_t chunk_to_sector(struct dm_exception_store *store,
164 chunk_t chunk)
165{
166 return chunk << store->chunk_shift;
167}
168
169static int bdev_equal(struct block_device *lhs, struct block_device *rhs)
170{
171 /*
172 * There is only ever one instance of a particular block
173 * device so we can compare pointers safely.
174 */
175 return lhs == rhs;
176}
177
Alasdair G Kergon028867a2007-07-12 17:26:32 +0100178struct dm_snap_pending_exception {
Jon Brassow1d4989c2009-12-10 23:52:10 +0000179 struct dm_exception e;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700180
181 /*
182 * Origin buffers waiting for this to complete are held
183 * in a bio list
184 */
185 struct bio_list origin_bios;
186 struct bio_list snapshot_bios;
187
Linus Torvalds1da177e2005-04-16 15:20:36 -0700188 /* Pointer back to snapshot context */
189 struct dm_snapshot *snap;
190
191 /*
192 * 1 indicates the exception has already been sent to
193 * kcopyd.
194 */
195 int started;
Mikulas Patockaa6e50b42011-08-02 12:32:04 +0100196
Mikulas Patocka230c83a2013-11-29 18:13:37 -0500197 /* There was a copying error. */
198 int copy_error;
199
200 /* A sequence number, it is used for in-order completion. */
201 sector_t exception_sequence;
202
203 struct list_head out_of_order_entry;
204
Mikulas Patockaa6e50b42011-08-02 12:32:04 +0100205 /*
206 * For writing a complete chunk, bypassing the copy.
207 */
208 struct bio *full_bio;
209 bio_end_io_t *full_bio_end_io;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700210};
211
212/*
213 * Hash table mapping origin volumes to lists of snapshots and
214 * a lock to protect it
215 */
Christoph Lametere18b8902006-12-06 20:33:20 -0800216static struct kmem_cache *exception_cache;
217static struct kmem_cache *pending_cache;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700218
Mikulas Patockacd45daf2008-07-21 12:00:32 +0100219struct dm_snap_tracked_chunk {
220 struct hlist_node node;
221 chunk_t chunk;
222};
223
Mikulas Patockaee180262012-12-21 20:23:41 +0000224static void init_tracked_chunk(struct bio *bio)
225{
226 struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
227 INIT_HLIST_NODE(&c->node);
228}
229
230static bool is_bio_tracked(struct bio *bio)
231{
232 struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
233 return !hlist_unhashed(&c->node);
234}
235
236static void track_chunk(struct dm_snapshot *s, struct bio *bio, chunk_t chunk)
Mikulas Patockacd45daf2008-07-21 12:00:32 +0100237{
Mikulas Patocka42bc9542012-12-21 20:23:38 +0000238 struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
Mikulas Patockacd45daf2008-07-21 12:00:32 +0100239
240 c->chunk = chunk;
241
Mikulas Patocka9aa0c0e2012-12-21 20:23:33 +0000242 spin_lock_irq(&s->tracked_chunk_lock);
Mikulas Patockacd45daf2008-07-21 12:00:32 +0100243 hlist_add_head(&c->node,
244 &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)]);
Mikulas Patocka9aa0c0e2012-12-21 20:23:33 +0000245 spin_unlock_irq(&s->tracked_chunk_lock);
Mikulas Patockacd45daf2008-07-21 12:00:32 +0100246}
247
Mikulas Patockaee180262012-12-21 20:23:41 +0000248static void stop_tracking_chunk(struct dm_snapshot *s, struct bio *bio)
Mikulas Patockacd45daf2008-07-21 12:00:32 +0100249{
Mikulas Patockaee180262012-12-21 20:23:41 +0000250 struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
Mikulas Patockacd45daf2008-07-21 12:00:32 +0100251 unsigned long flags;
252
253 spin_lock_irqsave(&s->tracked_chunk_lock, flags);
254 hlist_del(&c->node);
255 spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);
Mikulas Patockacd45daf2008-07-21 12:00:32 +0100256}
257
Mikulas Patockaa8d41b52008-07-21 12:00:34 +0100258static int __chunk_is_tracked(struct dm_snapshot *s, chunk_t chunk)
259{
260 struct dm_snap_tracked_chunk *c;
Mikulas Patockaa8d41b52008-07-21 12:00:34 +0100261 int found = 0;
262
263 spin_lock_irq(&s->tracked_chunk_lock);
264
Sasha Levinb67bfe02013-02-27 17:06:00 -0800265 hlist_for_each_entry(c,
Mikulas Patockaa8d41b52008-07-21 12:00:34 +0100266 &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)], node) {
267 if (c->chunk == chunk) {
268 found = 1;
269 break;
270 }
271 }
272
273 spin_unlock_irq(&s->tracked_chunk_lock);
274
275 return found;
276}
277
Linus Torvalds1da177e2005-04-16 15:20:36 -0700278/*
Mike Snitzer615d1eb2009-12-10 23:52:29 +0000279 * This conflicting I/O is extremely improbable in the caller,
280 * so msleep(1) is sufficient and there is no need for a wait queue.
281 */
282static void __check_for_conflicting_io(struct dm_snapshot *s, chunk_t chunk)
283{
284 while (__chunk_is_tracked(s, chunk))
285 msleep(1);
286}
287
288/*
Linus Torvalds1da177e2005-04-16 15:20:36 -0700289 * One of these per registered origin, held in the snapshot_origins hash
290 */
291struct origin {
292 /* The origin device */
293 struct block_device *bdev;
294
295 struct list_head hash_list;
296
297 /* List of snapshots for this origin */
298 struct list_head snapshots;
299};
300
301/*
Mikulas Patockab735fed2015-02-26 11:40:35 -0500302 * This structure is allocated for each origin target
303 */
304struct dm_origin {
305 struct dm_dev *dev;
306 struct dm_target *ti;
307 unsigned split_boundary;
308 struct list_head hash_list;
309};
310
311/*
Linus Torvalds1da177e2005-04-16 15:20:36 -0700312 * Size of the hash table for origin volumes. If we make this
313 * the size of the minors list then it should be nearly perfect
314 */
315#define ORIGIN_HASH_SIZE 256
316#define ORIGIN_MASK 0xFF
317static struct list_head *_origins;
Mikulas Patockab735fed2015-02-26 11:40:35 -0500318static struct list_head *_dm_origins;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700319static struct rw_semaphore _origins_lock;
320
Mikulas Patocka73dfd072009-12-10 23:52:34 +0000321static DECLARE_WAIT_QUEUE_HEAD(_pending_exceptions_done);
322static DEFINE_SPINLOCK(_pending_exceptions_done_spinlock);
323static uint64_t _pending_exceptions_done_count;
324
Linus Torvalds1da177e2005-04-16 15:20:36 -0700325static int init_origin_hash(void)
326{
327 int i;
328
329 _origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head),
330 GFP_KERNEL);
331 if (!_origins) {
Mikulas Patockab735fed2015-02-26 11:40:35 -0500332 DMERR("unable to allocate memory for _origins");
Linus Torvalds1da177e2005-04-16 15:20:36 -0700333 return -ENOMEM;
334 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700335 for (i = 0; i < ORIGIN_HASH_SIZE; i++)
336 INIT_LIST_HEAD(_origins + i);
Mikulas Patockab735fed2015-02-26 11:40:35 -0500337
338 _dm_origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head),
339 GFP_KERNEL);
340 if (!_dm_origins) {
341 DMERR("unable to allocate memory for _dm_origins");
342 kfree(_origins);
343 return -ENOMEM;
344 }
345 for (i = 0; i < ORIGIN_HASH_SIZE; i++)
346 INIT_LIST_HEAD(_dm_origins + i);
347
Linus Torvalds1da177e2005-04-16 15:20:36 -0700348 init_rwsem(&_origins_lock);
349
350 return 0;
351}
352
353static void exit_origin_hash(void)
354{
355 kfree(_origins);
Mikulas Patockab735fed2015-02-26 11:40:35 -0500356 kfree(_dm_origins);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700357}
358
Alasdair G Kergon028867a2007-07-12 17:26:32 +0100359static unsigned origin_hash(struct block_device *bdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700360{
361 return bdev->bd_dev & ORIGIN_MASK;
362}
363
364static struct origin *__lookup_origin(struct block_device *origin)
365{
366 struct list_head *ol;
367 struct origin *o;
368
369 ol = &_origins[origin_hash(origin)];
370 list_for_each_entry (o, ol, hash_list)
371 if (bdev_equal(o->bdev, origin))
372 return o;
373
374 return NULL;
375}
376
377static void __insert_origin(struct origin *o)
378{
379 struct list_head *sl = &_origins[origin_hash(o->bdev)];
380 list_add_tail(&o->hash_list, sl);
381}
382
Mikulas Patockab735fed2015-02-26 11:40:35 -0500383static struct dm_origin *__lookup_dm_origin(struct block_device *origin)
384{
385 struct list_head *ol;
386 struct dm_origin *o;
387
388 ol = &_dm_origins[origin_hash(origin)];
389 list_for_each_entry (o, ol, hash_list)
390 if (bdev_equal(o->dev->bdev, origin))
391 return o;
392
393 return NULL;
394}
395
396static void __insert_dm_origin(struct dm_origin *o)
397{
398 struct list_head *sl = &_dm_origins[origin_hash(o->dev->bdev)];
399 list_add_tail(&o->hash_list, sl);
400}
401
402static void __remove_dm_origin(struct dm_origin *o)
403{
404 list_del(&o->hash_list);
405}
406
Linus Torvalds1da177e2005-04-16 15:20:36 -0700407/*
Mike Snitzerc1f0c182009-12-10 23:52:24 +0000408 * _origins_lock must be held when calling this function.
409 * Returns number of snapshots registered using the supplied cow device, plus:
410 * snap_src - a snapshot suitable for use as a source of exception handover
411 * snap_dest - a snapshot capable of receiving exception handover.
Mikulas Patocka9d3b15c2009-12-10 23:52:32 +0000412 * snap_merge - an existing snapshot-merge target linked to the same origin.
413 * There can be at most one snapshot-merge target. The parameter is optional.
Mike Snitzerc1f0c182009-12-10 23:52:24 +0000414 *
Mikulas Patocka9d3b15c2009-12-10 23:52:32 +0000415 * Possible return values and states of snap_src and snap_dest.
Mike Snitzerc1f0c182009-12-10 23:52:24 +0000416 * 0: NULL, NULL - first new snapshot
417 * 1: snap_src, NULL - normal snapshot
418 * 2: snap_src, snap_dest - waiting for handover
419 * 2: snap_src, NULL - handed over, waiting for old to be deleted
420 * 1: NULL, snap_dest - source got destroyed without handover
421 */
422static int __find_snapshots_sharing_cow(struct dm_snapshot *snap,
423 struct dm_snapshot **snap_src,
Mikulas Patocka9d3b15c2009-12-10 23:52:32 +0000424 struct dm_snapshot **snap_dest,
425 struct dm_snapshot **snap_merge)
Mike Snitzerc1f0c182009-12-10 23:52:24 +0000426{
427 struct dm_snapshot *s;
428 struct origin *o;
429 int count = 0;
430 int active;
431
432 o = __lookup_origin(snap->origin->bdev);
433 if (!o)
434 goto out;
435
436 list_for_each_entry(s, &o->snapshots, list) {
Mikulas Patocka9d3b15c2009-12-10 23:52:32 +0000437 if (dm_target_is_snapshot_merge(s->ti) && snap_merge)
438 *snap_merge = s;
Mike Snitzerc1f0c182009-12-10 23:52:24 +0000439 if (!bdev_equal(s->cow->bdev, snap->cow->bdev))
440 continue;
441
442 down_read(&s->lock);
443 active = s->active;
444 up_read(&s->lock);
445
446 if (active) {
447 if (snap_src)
448 *snap_src = s;
449 } else if (snap_dest)
450 *snap_dest = s;
451
452 count++;
453 }
454
455out:
456 return count;
457}
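/*
 * Worked example of the states documented above: while a snapshot table
 * reload is pending, the old (active) instance is reported through snap_src,
 * the new (not yet active) instance through snap_dest, and the count
 * returned is 2 ("waiting for handover"). __validate_exception_handover()
 * below uses exactly this to reject any further snapshot that would become
 * a third user of the same COW device in that state.
 */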
458
459/*
460 * On success, returns 1 if this snapshot is a handover destination,
461 * otherwise returns 0.
462 */
463static int __validate_exception_handover(struct dm_snapshot *snap)
464{
465 struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
Mikulas Patocka9d3b15c2009-12-10 23:52:32 +0000466 struct dm_snapshot *snap_merge = NULL;
Mike Snitzerc1f0c182009-12-10 23:52:24 +0000467
468 /* Does snapshot need exceptions handed over to it? */
Mikulas Patocka9d3b15c2009-12-10 23:52:32 +0000469 if ((__find_snapshots_sharing_cow(snap, &snap_src, &snap_dest,
470 &snap_merge) == 2) ||
Mike Snitzerc1f0c182009-12-10 23:52:24 +0000471 snap_dest) {
472 snap->ti->error = "Snapshot cow pairing for exception "
473 "table handover failed";
474 return -EINVAL;
475 }
476
477 /*
478 * If no snap_src was found, snap cannot become a handover
479 * destination.
480 */
481 if (!snap_src)
482 return 0;
483
Mikulas Patocka9d3b15c2009-12-10 23:52:32 +0000484 /*
485 * Non-snapshot-merge handover?
486 */
487 if (!dm_target_is_snapshot_merge(snap->ti))
488 return 1;
489
490 /*
491 * Do not allow more than one merging snapshot.
492 */
493 if (snap_merge) {
494 snap->ti->error = "A snapshot is already merging.";
495 return -EINVAL;
496 }
497
Mikulas Patocka1e03f972009-12-10 23:52:32 +0000498 if (!snap_src->store->type->prepare_merge ||
499 !snap_src->store->type->commit_merge) {
500 snap->ti->error = "Snapshot exception store does not "
501 "support snapshot-merge.";
502 return -EINVAL;
503 }
504
Mike Snitzerc1f0c182009-12-10 23:52:24 +0000505 return 1;
506}
507
508static void __insert_snapshot(struct origin *o, struct dm_snapshot *s)
509{
510 struct dm_snapshot *l;
511
512 /* Sort the list according to chunk size, largest first, smallest last */
513 list_for_each_entry(l, &o->snapshots, list)
514 if (l->store->chunk_size < s->store->chunk_size)
515 break;
516 list_add_tail(&s->list, &l->list);
517}
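/*
 * Example: snapshots with chunk sizes of 64KiB, 16KiB and 8KiB sit in that
 * order on o->snapshots. reregister_snapshot() below re-inserts an existing
 * snapshot so that this ordering is preserved if its chunk size changes.
 */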
518
519/*
Linus Torvalds1da177e2005-04-16 15:20:36 -0700520 * Make a note of the snapshot and its origin so we can look it
521 * up when the origin has a write on it.
Mike Snitzerc1f0c182009-12-10 23:52:24 +0000522 *
523 * Also validate snapshot exception store handovers.
524 * On success, returns 1 if this registration is a handover destination,
525 * otherwise returns 0.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700526 */
527static int register_snapshot(struct dm_snapshot *snap)
528{
Mike Snitzerc1f0c182009-12-10 23:52:24 +0000529 struct origin *o, *new_o = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700530 struct block_device *bdev = snap->origin->bdev;
Mike Snitzerc1f0c182009-12-10 23:52:24 +0000531 int r = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700532
Mikulas Patocka60c856c82008-10-30 13:33:12 +0000533 new_o = kmalloc(sizeof(*new_o), GFP_KERNEL);
534 if (!new_o)
535 return -ENOMEM;
536
Linus Torvalds1da177e2005-04-16 15:20:36 -0700537 down_write(&_origins_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700538
Mike Snitzerc1f0c182009-12-10 23:52:24 +0000539 r = __validate_exception_handover(snap);
540 if (r < 0) {
541 kfree(new_o);
542 goto out;
543 }
544
545 o = __lookup_origin(bdev);
Mikulas Patocka60c856c82008-10-30 13:33:12 +0000546 if (o)
547 kfree(new_o);
548 else {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700549 /* New origin */
Mikulas Patocka60c856c82008-10-30 13:33:12 +0000550 o = new_o;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700551
552 /* Initialise the struct */
553 INIT_LIST_HEAD(&o->snapshots);
554 o->bdev = bdev;
555
556 __insert_origin(o);
557 }
558
Mike Snitzerc1f0c182009-12-10 23:52:24 +0000559 __insert_snapshot(o, snap);
560
561out:
562 up_write(&_origins_lock);
563
564 return r;
565}
566
567/*
568 * Move snapshot to correct place in list according to chunk size.
569 */
570static void reregister_snapshot(struct dm_snapshot *s)
571{
572 struct block_device *bdev = s->origin->bdev;
573
574 down_write(&_origins_lock);
575
576 list_del(&s->list);
577 __insert_snapshot(__lookup_origin(bdev), s);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700578
579 up_write(&_origins_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700580}
581
582static void unregister_snapshot(struct dm_snapshot *s)
583{
584 struct origin *o;
585
586 down_write(&_origins_lock);
587 o = __lookup_origin(s->origin->bdev);
588
589 list_del(&s->list);
Mike Snitzerc1f0c182009-12-10 23:52:24 +0000590 if (o && list_empty(&o->snapshots)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700591 list_del(&o->hash_list);
592 kfree(o);
593 }
594
595 up_write(&_origins_lock);
596}
597
598/*
599 * Implementation of the exception hash tables.
Milan Brozd74f81f2008-02-08 02:11:27 +0000600 * The lowest hash_shift bits of the chunk number are ignored, allowing
601 * some consecutive chunks to be grouped together.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700602 */
Jon Brassow3510cb92009-12-10 23:52:11 +0000603static int dm_exception_table_init(struct dm_exception_table *et,
604 uint32_t size, unsigned hash_shift)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700605{
606 unsigned int i;
607
Milan Brozd74f81f2008-02-08 02:11:27 +0000608 et->hash_shift = hash_shift;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700609 et->hash_mask = size - 1;
610 et->table = dm_vcalloc(size, sizeof(struct list_head));
611 if (!et->table)
612 return -ENOMEM;
613
614 for (i = 0; i < size; i++)
615 INIT_LIST_HEAD(et->table + i);
616
617 return 0;
618}
619
Jon Brassow3510cb92009-12-10 23:52:11 +0000620static void dm_exception_table_exit(struct dm_exception_table *et,
621 struct kmem_cache *mem)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700622{
623 struct list_head *slot;
Jon Brassow1d4989c2009-12-10 23:52:10 +0000624 struct dm_exception *ex, *next;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700625 int i, size;
626
627 size = et->hash_mask + 1;
628 for (i = 0; i < size; i++) {
629 slot = et->table + i;
630
631 list_for_each_entry_safe (ex, next, slot, hash_list)
632 kmem_cache_free(mem, ex);
633 }
634
635 vfree(et->table);
636}
637
Jon Brassow191437a2009-12-10 23:52:10 +0000638static uint32_t exception_hash(struct dm_exception_table *et, chunk_t chunk)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700639{
Milan Brozd74f81f2008-02-08 02:11:27 +0000640 return (chunk >> et->hash_shift) & et->hash_mask;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700641}
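/*
 * Worked example (illustrative numbers): with hash_shift = 8, chunks
 * 0x100-0x1ff all yield exception_hash() == 1, so exceptions for
 * consecutive chunks land in the same bucket and dm_insert_exception()
 * below can coalesce them into a single dm_exception when their new
 * chunks are consecutive too. The pending table is initialised with
 * hash_shift = 0 and never groups chunks.
 */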
642
Jon Brassow3510cb92009-12-10 23:52:11 +0000643static void dm_remove_exception(struct dm_exception *e)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700644{
645 list_del(&e->hash_list);
646}
647
648/*
649 * Return the exception data for a sector, or NULL if not
650 * remapped.
651 */
Jon Brassow3510cb92009-12-10 23:52:11 +0000652static struct dm_exception *dm_lookup_exception(struct dm_exception_table *et,
653 chunk_t chunk)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700654{
655 struct list_head *slot;
Jon Brassow1d4989c2009-12-10 23:52:10 +0000656 struct dm_exception *e;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700657
658 slot = &et->table[exception_hash(et, chunk)];
659 list_for_each_entry (e, slot, hash_list)
Milan Brozd74f81f2008-02-08 02:11:27 +0000660 if (chunk >= e->old_chunk &&
661 chunk <= e->old_chunk + dm_consecutive_chunk_count(e))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700662 return e;
663
664 return NULL;
665}
666
Mikulas Patocka119bc542014-01-13 19:13:36 -0500667static struct dm_exception *alloc_completed_exception(gfp_t gfp)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700668{
Jon Brassow1d4989c2009-12-10 23:52:10 +0000669 struct dm_exception *e;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700670
Mikulas Patocka119bc542014-01-13 19:13:36 -0500671 e = kmem_cache_alloc(exception_cache, gfp);
672 if (!e && gfp == GFP_NOIO)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700673 e = kmem_cache_alloc(exception_cache, GFP_ATOMIC);
674
675 return e;
676}
677
Jon Brassow3510cb92009-12-10 23:52:11 +0000678static void free_completed_exception(struct dm_exception *e)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700679{
680 kmem_cache_free(exception_cache, e);
681}
682
Mikulas Patocka92e86812008-07-21 12:00:35 +0100683static struct dm_snap_pending_exception *alloc_pending_exception(struct dm_snapshot *s)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700684{
Mikulas Patocka92e86812008-07-21 12:00:35 +0100685 struct dm_snap_pending_exception *pe = mempool_alloc(s->pending_pool,
686 GFP_NOIO);
687
Mikulas Patocka879129d22008-10-30 13:33:16 +0000688 atomic_inc(&s->pending_exceptions_count);
Mikulas Patocka92e86812008-07-21 12:00:35 +0100689 pe->snap = s;
690
691 return pe;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700692}
693
Alasdair G Kergon028867a2007-07-12 17:26:32 +0100694static void free_pending_exception(struct dm_snap_pending_exception *pe)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700695{
Mikulas Patocka879129d22008-10-30 13:33:16 +0000696 struct dm_snapshot *s = pe->snap;
697
698 mempool_free(pe, s->pending_pool);
Peter Zijlstra4e857c52014-03-17 18:06:10 +0100699 smp_mb__before_atomic();
Mikulas Patocka879129d22008-10-30 13:33:16 +0000700 atomic_dec(&s->pending_exceptions_count);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700701}
702
Jon Brassow3510cb92009-12-10 23:52:11 +0000703static void dm_insert_exception(struct dm_exception_table *eh,
704 struct dm_exception *new_e)
Milan Brozd74f81f2008-02-08 02:11:27 +0000705{
Milan Brozd74f81f2008-02-08 02:11:27 +0000706 struct list_head *l;
Jon Brassow1d4989c2009-12-10 23:52:10 +0000707 struct dm_exception *e = NULL;
Milan Brozd74f81f2008-02-08 02:11:27 +0000708
709 l = &eh->table[exception_hash(eh, new_e->old_chunk)];
710
711 /* Add immediately if this table doesn't support consecutive chunks */
712 if (!eh->hash_shift)
713 goto out;
714
715 /* List is ordered by old_chunk */
716 list_for_each_entry_reverse(e, l, hash_list) {
717 /* Insert after an existing chunk? */
718 if (new_e->old_chunk == (e->old_chunk +
719 dm_consecutive_chunk_count(e) + 1) &&
720 new_e->new_chunk == (dm_chunk_number(e->new_chunk) +
721 dm_consecutive_chunk_count(e) + 1)) {
722 dm_consecutive_chunk_count_inc(e);
Jon Brassow3510cb92009-12-10 23:52:11 +0000723 free_completed_exception(new_e);
Milan Brozd74f81f2008-02-08 02:11:27 +0000724 return;
725 }
726
727 /* Insert before an existing chunk? */
728 if (new_e->old_chunk == (e->old_chunk - 1) &&
729 new_e->new_chunk == (dm_chunk_number(e->new_chunk) - 1)) {
730 dm_consecutive_chunk_count_inc(e);
731 e->old_chunk--;
732 e->new_chunk--;
Jon Brassow3510cb92009-12-10 23:52:11 +0000733 free_completed_exception(new_e);
Milan Brozd74f81f2008-02-08 02:11:27 +0000734 return;
735 }
736
737 if (new_e->old_chunk > e->old_chunk)
738 break;
739 }
740
741out:
742 list_add(&new_e->hash_list, e ? &e->hash_list : l);
743}
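/*
 * Example of the coalescing above (hypothetical chunk numbers): if the
 * table already holds e = {old_chunk 100, new_chunk 200, consecutive 0}
 * and a new exception {old 101, new 201} arrives, the "insert after" test
 * matches, dm_consecutive_chunk_count_inc(e) makes e cover 100-101 mapped
 * to 200-201, and new_e is freed instead of being linked in.
 */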
744
Jonathan Brassowa159c1a2009-01-06 03:05:19 +0000745/*
746 * Callback used by the exception stores to load exceptions when
747 * initialising.
748 */
749static int dm_add_exception(void *context, chunk_t old, chunk_t new)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700750{
Jonathan Brassowa159c1a2009-01-06 03:05:19 +0000751 struct dm_snapshot *s = context;
Jon Brassow1d4989c2009-12-10 23:52:10 +0000752 struct dm_exception *e;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700753
Mikulas Patocka119bc542014-01-13 19:13:36 -0500754 e = alloc_completed_exception(GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700755 if (!e)
756 return -ENOMEM;
757
758 e->old_chunk = old;
Milan Brozd74f81f2008-02-08 02:11:27 +0000759
760 /* Consecutive_count is implicitly initialised to zero */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700761 e->new_chunk = new;
Milan Brozd74f81f2008-02-08 02:11:27 +0000762
Jon Brassow3510cb92009-12-10 23:52:11 +0000763 dm_insert_exception(&s->complete, e);
Milan Brozd74f81f2008-02-08 02:11:27 +0000764
Linus Torvalds1da177e2005-04-16 15:20:36 -0700765 return 0;
766}
767
Mikulas Patocka7e201b32009-12-10 23:52:08 +0000768/*
769 * Return a minimum chunk size of all snapshots that have the specified origin.
770 * Return zero if the origin has no snapshots.
771 */
Mike Snitzer542f9032012-07-27 15:08:00 +0100772static uint32_t __minimum_chunk_size(struct origin *o)
Mikulas Patocka7e201b32009-12-10 23:52:08 +0000773{
774 struct dm_snapshot *snap;
775 unsigned chunk_size = 0;
776
777 if (o)
778 list_for_each_entry(snap, &o->snapshots, list)
779 chunk_size = min_not_zero(chunk_size,
780 snap->store->chunk_size);
781
Mike Snitzer542f9032012-07-27 15:08:00 +0100782 return (uint32_t) chunk_size;
Mikulas Patocka7e201b32009-12-10 23:52:08 +0000783}
784
Linus Torvalds1da177e2005-04-16 15:20:36 -0700785/*
786 * Hard coded magic.
787 */
788static int calc_max_buckets(void)
789{
790 /* use a fixed size of 2MB */
791 unsigned long mem = 2 * 1024 * 1024;
792 mem /= sizeof(struct list_head);
793
794 return mem;
795}
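/*
 * For example, on a 64-bit kernel sizeof(struct list_head) is 16 bytes
 * (two pointers), so the 2MB budget above allows at most 131072 buckets.
 */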
796
797/*
Linus Torvalds1da177e2005-04-16 15:20:36 -0700798 * Allocate room for a suitable hash table.
799 */
Jonathan Brassowfee19982009-04-02 19:55:34 +0100800static int init_hash_tables(struct dm_snapshot *s)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700801{
Mikulas Patocka60e356f2013-09-18 19:40:42 -0400802 sector_t hash_size, cow_dev_size, max_buckets;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700803
804 /*
805 * Calculate based on the size of the original volume or
806 * the COW volume...
807 */
Mike Snitzerfc56f6f2009-12-10 23:52:12 +0000808 cow_dev_size = get_dev_size(s->cow->bdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700809 max_buckets = calc_max_buckets();
810
Mikulas Patocka60e356f2013-09-18 19:40:42 -0400811 hash_size = cow_dev_size >> s->store->chunk_shift;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700812 hash_size = min(hash_size, max_buckets);
813
Mikulas Patocka8e87b9b2009-12-10 23:51:54 +0000814 if (hash_size < 64)
815 hash_size = 64;
Robert P. J. Day8defd832008-02-08 02:10:06 +0000816 hash_size = rounddown_pow_of_two(hash_size);
Jon Brassow3510cb92009-12-10 23:52:11 +0000817 if (dm_exception_table_init(&s->complete, hash_size,
818 DM_CHUNK_CONSECUTIVE_BITS))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700819 return -ENOMEM;
820
821 /*
822 * Allocate hash table for in-flight exceptions
823 * Make this smaller than the real hash table
824 */
825 hash_size >>= 3;
826 if (hash_size < 64)
827 hash_size = 64;
828
Jon Brassow3510cb92009-12-10 23:52:11 +0000829 if (dm_exception_table_init(&s->pending, hash_size, 0)) {
830 dm_exception_table_exit(&s->complete, exception_cache);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700831 return -ENOMEM;
832 }
833
834 return 0;
835}
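/*
 * Sizing example (illustrative numbers): an 8GiB COW device is 16777216
 * 512-byte sectors; with 8KiB chunks (chunk_shift = 4) that gives
 * hash_size = 1048576, which is then capped at calc_max_buckets() = 131072
 * and is already a power of two. The pending table gets one eighth of
 * that, i.e. 16384 buckets.
 */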
836
Mikulas Patocka1e03f972009-12-10 23:52:32 +0000837static void merge_shutdown(struct dm_snapshot *s)
838{
839 clear_bit_unlock(RUNNING_MERGE, &s->state_bits);
Peter Zijlstra4e857c52014-03-17 18:06:10 +0100840 smp_mb__after_atomic();
Mikulas Patocka1e03f972009-12-10 23:52:32 +0000841 wake_up_bit(&s->state_bits, RUNNING_MERGE);
842}
843
Mikulas Patocka9fe862542009-12-10 23:52:33 +0000844static struct bio *__release_queued_bios_after_merge(struct dm_snapshot *s)
845{
846 s->first_merging_chunk = 0;
847 s->num_merging_chunks = 0;
848
849 return bio_list_get(&s->bios_queued_during_merge);
850}
851
Mikulas Patocka1e03f972009-12-10 23:52:32 +0000852/*
853 * Remove one chunk from the index of completed exceptions.
854 */
855static int __remove_single_exception_chunk(struct dm_snapshot *s,
856 chunk_t old_chunk)
857{
858 struct dm_exception *e;
859
Mikulas Patocka1e03f972009-12-10 23:52:32 +0000860 e = dm_lookup_exception(&s->complete, old_chunk);
861 if (!e) {
862 DMERR("Corruption detected: exception for block %llu is "
863 "on disk but not in memory",
864 (unsigned long long)old_chunk);
865 return -EINVAL;
866 }
867
868 /*
869 * If this is the only chunk using this exception, remove exception.
870 */
871 if (!dm_consecutive_chunk_count(e)) {
872 dm_remove_exception(e);
873 free_completed_exception(e);
874 return 0;
875 }
876
877 /*
878 * The chunk may be either at the beginning or the end of a
879 * group of consecutive chunks - never in the middle. We are
880 * removing chunks in the opposite order to that in which they
881 * were added, so this should always be true.
882 * Decrement the consecutive chunk counter and adjust the
883 * starting point if necessary.
884 */
885 if (old_chunk == e->old_chunk) {
886 e->old_chunk++;
887 e->new_chunk++;
888 } else if (old_chunk != e->old_chunk +
889 dm_consecutive_chunk_count(e)) {
890 DMERR("Attempt to merge block %llu from the "
891 "middle of a chunk range [%llu - %llu]",
892 (unsigned long long)old_chunk,
893 (unsigned long long)e->old_chunk,
894 (unsigned long long)
895 e->old_chunk + dm_consecutive_chunk_count(e));
896 return -EINVAL;
897 }
898
899 dm_consecutive_chunk_count_dec(e);
900
901 return 0;
902}
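/*
 * Example of the reverse-order requirement above: for an exception covering
 * old chunks 100-103 (consecutive count 3), merging removes 103, then 102,
 * then 101 by decrementing the count, and finally chunk 100 by freeing the
 * exception; removing 101 first would hit the "middle of a chunk range"
 * error path.
 */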
903
Mikulas Patocka9fe862542009-12-10 23:52:33 +0000904static void flush_bios(struct bio *bio);
905
906static int remove_single_exception_chunk(struct dm_snapshot *s)
Mikulas Patocka1e03f972009-12-10 23:52:32 +0000907{
Mikulas Patocka9fe862542009-12-10 23:52:33 +0000908 struct bio *b = NULL;
909 int r;
910 chunk_t old_chunk = s->first_merging_chunk + s->num_merging_chunks - 1;
Mikulas Patocka1e03f972009-12-10 23:52:32 +0000911
912 down_write(&s->lock);
Mikulas Patocka9fe862542009-12-10 23:52:33 +0000913
914 /*
915 * Process chunks (and associated exceptions) in reverse order
916 * so that dm_consecutive_chunk_count_dec() accounting works.
917 */
918 do {
919 r = __remove_single_exception_chunk(s, old_chunk);
920 if (r)
921 goto out;
922 } while (old_chunk-- > s->first_merging_chunk);
923
924 b = __release_queued_bios_after_merge(s);
925
926out:
Mikulas Patocka1e03f972009-12-10 23:52:32 +0000927 up_write(&s->lock);
Mikulas Patocka9fe862542009-12-10 23:52:33 +0000928 if (b)
929 flush_bios(b);
Mikulas Patocka1e03f972009-12-10 23:52:32 +0000930
931 return r;
932}
933
Mikulas Patocka73dfd072009-12-10 23:52:34 +0000934static int origin_write_extent(struct dm_snapshot *merging_snap,
935 sector_t sector, unsigned chunk_size);
936
Mikulas Patocka1e03f972009-12-10 23:52:32 +0000937static void merge_callback(int read_err, unsigned long write_err,
938 void *context);
939
Mikulas Patocka73dfd072009-12-10 23:52:34 +0000940static uint64_t read_pending_exceptions_done_count(void)
941{
942 uint64_t pending_exceptions_done;
943
944 spin_lock(&_pending_exceptions_done_spinlock);
945 pending_exceptions_done = _pending_exceptions_done_count;
946 spin_unlock(&_pending_exceptions_done_spinlock);
947
948 return pending_exceptions_done;
949}
950
951static void increment_pending_exceptions_done_count(void)
952{
953 spin_lock(&_pending_exceptions_done_spinlock);
954 _pending_exceptions_done_count++;
955 spin_unlock(&_pending_exceptions_done_spinlock);
956
957 wake_up_all(&_pending_exceptions_done);
958}
959
Mikulas Patocka1e03f972009-12-10 23:52:32 +0000960static void snapshot_merge_next_chunks(struct dm_snapshot *s)
961{
Mike Snitzer8a2d5282009-12-10 23:52:34 +0000962 int i, linear_chunks;
Mikulas Patocka1e03f972009-12-10 23:52:32 +0000963 chunk_t old_chunk, new_chunk;
964 struct dm_io_region src, dest;
Mike Snitzer8a2d5282009-12-10 23:52:34 +0000965 sector_t io_size;
Mikulas Patocka73dfd072009-12-10 23:52:34 +0000966 uint64_t previous_count;
Mikulas Patocka1e03f972009-12-10 23:52:32 +0000967
968 BUG_ON(!test_bit(RUNNING_MERGE, &s->state_bits));
969 if (unlikely(test_bit(SHUTDOWN_MERGE, &s->state_bits)))
970 goto shut;
971
972 /*
973 * valid flag never changes during merge, so no lock required.
974 */
975 if (!s->valid) {
976 DMERR("Snapshot is invalid: can't merge");
977 goto shut;
978 }
979
Mike Snitzer8a2d5282009-12-10 23:52:34 +0000980 linear_chunks = s->store->type->prepare_merge(s->store, &old_chunk,
981 &new_chunk);
982 if (linear_chunks <= 0) {
Mike Snitzerd8ddb1c2009-12-10 23:52:35 +0000983 if (linear_chunks < 0) {
Mikulas Patocka1e03f972009-12-10 23:52:32 +0000984 DMERR("Read error in exception store: "
985 "shutting down merge");
Mike Snitzerd8ddb1c2009-12-10 23:52:35 +0000986 down_write(&s->lock);
987 s->merge_failed = 1;
988 up_write(&s->lock);
989 }
Mikulas Patocka1e03f972009-12-10 23:52:32 +0000990 goto shut;
991 }
992
Mike Snitzer8a2d5282009-12-10 23:52:34 +0000993 /* Adjust old_chunk and new_chunk to reflect start of linear region */
994 old_chunk = old_chunk + 1 - linear_chunks;
995 new_chunk = new_chunk + 1 - linear_chunks;
996
997 /*
998 * Use one (potentially large) I/O to copy all 'linear_chunks'
999 * from the exception store to the origin
1000 */
1001 io_size = linear_chunks * s->store->chunk_size;
Mikulas Patocka1e03f972009-12-10 23:52:32 +00001002
Mikulas Patocka1e03f972009-12-10 23:52:32 +00001003 dest.bdev = s->origin->bdev;
1004 dest.sector = chunk_to_sector(s->store, old_chunk);
Mike Snitzer8a2d5282009-12-10 23:52:34 +00001005 dest.count = min(io_size, get_dev_size(dest.bdev) - dest.sector);
Mikulas Patocka1e03f972009-12-10 23:52:32 +00001006
1007 src.bdev = s->cow->bdev;
1008 src.sector = chunk_to_sector(s->store, new_chunk);
1009 src.count = dest.count;
1010
Mikulas Patocka73dfd072009-12-10 23:52:34 +00001011 /*
1012 * Reallocate any exceptions needed in other snapshots then
1013 * wait for the pending exceptions to complete.
1014 * Each time any pending exception (globally on the system)
1015 * completes we are woken and repeat the process to find out
1016 * if we can proceed. While this may not seem a particularly
1017 * efficient algorithm, it is not expected to have any
1018 * significant impact on performance.
1019 */
1020 previous_count = read_pending_exceptions_done_count();
Mike Snitzer8a2d5282009-12-10 23:52:34 +00001021 while (origin_write_extent(s, dest.sector, io_size)) {
Mikulas Patocka73dfd072009-12-10 23:52:34 +00001022 wait_event(_pending_exceptions_done,
1023 (read_pending_exceptions_done_count() !=
1024 previous_count));
1025 /* Retry after the wait, until all exceptions are done. */
1026 previous_count = read_pending_exceptions_done_count();
1027 }
1028
Mikulas Patocka9fe862542009-12-10 23:52:33 +00001029 down_write(&s->lock);
1030 s->first_merging_chunk = old_chunk;
Mike Snitzer8a2d5282009-12-10 23:52:34 +00001031 s->num_merging_chunks = linear_chunks;
Mikulas Patocka9fe862542009-12-10 23:52:33 +00001032 up_write(&s->lock);
1033
Mike Snitzer8a2d5282009-12-10 23:52:34 +00001034 /* Wait until writes to all 'linear_chunks' drain */
1035 for (i = 0; i < linear_chunks; i++)
1036 __check_for_conflicting_io(s, old_chunk + i);
Mikulas Patocka9fe862542009-12-10 23:52:33 +00001037
Mikulas Patocka1e03f972009-12-10 23:52:32 +00001038 dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, merge_callback, s);
1039 return;
1040
1041shut:
1042 merge_shutdown(s);
1043}
1044
Mikulas Patocka9fe862542009-12-10 23:52:33 +00001045static void error_bios(struct bio *bio);
1046
Mikulas Patocka1e03f972009-12-10 23:52:32 +00001047static void merge_callback(int read_err, unsigned long write_err, void *context)
1048{
1049 struct dm_snapshot *s = context;
Mikulas Patocka9fe862542009-12-10 23:52:33 +00001050 struct bio *b = NULL;
Mikulas Patocka1e03f972009-12-10 23:52:32 +00001051
1052 if (read_err || write_err) {
1053 if (read_err)
1054 DMERR("Read error: shutting down merge.");
1055 else
1056 DMERR("Write error: shutting down merge.");
1057 goto shut;
1058 }
1059
Mikulas Patocka9fe862542009-12-10 23:52:33 +00001060 if (s->store->type->commit_merge(s->store,
1061 s->num_merging_chunks) < 0) {
Mikulas Patocka1e03f972009-12-10 23:52:32 +00001062 DMERR("Write error in exception store: shutting down merge");
1063 goto shut;
1064 }
1065
Mikulas Patocka9fe862542009-12-10 23:52:33 +00001066 if (remove_single_exception_chunk(s) < 0)
1067 goto shut;
1068
Mikulas Patocka1e03f972009-12-10 23:52:32 +00001069 snapshot_merge_next_chunks(s);
1070
1071 return;
1072
1073shut:
Mikulas Patocka9fe862542009-12-10 23:52:33 +00001074 down_write(&s->lock);
Mike Snitzerd8ddb1c2009-12-10 23:52:35 +00001075 s->merge_failed = 1;
Mikulas Patocka9fe862542009-12-10 23:52:33 +00001076 b = __release_queued_bios_after_merge(s);
1077 up_write(&s->lock);
1078 error_bios(b);
1079
Mikulas Patocka1e03f972009-12-10 23:52:32 +00001080 merge_shutdown(s);
1081}
1082
1083static void start_merge(struct dm_snapshot *s)
1084{
1085 if (!test_and_set_bit(RUNNING_MERGE, &s->state_bits))
1086 snapshot_merge_next_chunks(s);
1087}
1088
Mikulas Patocka1e03f972009-12-10 23:52:32 +00001089/*
1090 * Stop the merging process and wait until it finishes.
1091 */
1092static void stop_merge(struct dm_snapshot *s)
1093{
1094 set_bit(SHUTDOWN_MERGE, &s->state_bits);
NeilBrown74316202014-07-07 15:16:04 +10001095 wait_on_bit(&s->state_bits, RUNNING_MERGE, TASK_UNINTERRUPTIBLE);
Mikulas Patocka1e03f972009-12-10 23:52:32 +00001096 clear_bit(SHUTDOWN_MERGE, &s->state_bits);
1097}
1098
Linus Torvalds1da177e2005-04-16 15:20:36 -07001099/*
Mike Snitzerb0d3cc02015-10-08 18:05:41 -04001100 * Construct a snapshot mapping: <origin_dev> <COW-dev> <p|po|n> <chunk-size>
Linus Torvalds1da177e2005-04-16 15:20:36 -07001101 */
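/*
 * Example table line (illustrative device names):
 *   0 2097152 snapshot /dev/vg0/base /dev/vg0/base-cow P 16
 * i.e. a 1GiB mapping whose exceptions are stored persistently on the COW
 * device in 16-sector (8KiB) chunks. "PO" additionally marks the snapshot
 * as overflowed rather than invalid when it runs out of exception space;
 * "N" selects the transient (non-persistent) store.
 */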
1102static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1103{
1104 struct dm_snapshot *s;
Mikulas Patockacd45daf2008-07-21 12:00:32 +01001105 int i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001106 int r = -EINVAL;
Mike Snitzerfc56f6f2009-12-10 23:52:12 +00001107 char *origin_path, *cow_path;
Alasdair G Kergon55a62ee2013-03-01 22:45:47 +00001108 unsigned args_used, num_flush_bios = 1;
Mike Snitzer10b81062009-12-10 23:52:31 +00001109 fmode_t origin_mode = FMODE_READ;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001110
Mark McLoughlin4c7e3bf2006-10-03 01:15:25 -07001111 if (argc != 4) {
Alasdair G Kergon72d94862006-06-26 00:27:35 -07001112 ti->error = "requires exactly 4 arguments";
Linus Torvalds1da177e2005-04-16 15:20:36 -07001113 r = -EINVAL;
Mike Snitzerfc56f6f2009-12-10 23:52:12 +00001114 goto bad;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001115 }
1116
Mike Snitzer10b81062009-12-10 23:52:31 +00001117 if (dm_target_is_snapshot_merge(ti)) {
Alasdair G Kergon55a62ee2013-03-01 22:45:47 +00001118 num_flush_bios = 2;
Mike Snitzer10b81062009-12-10 23:52:31 +00001119 origin_mode = FMODE_WRITE;
1120 }
1121
Linus Torvalds1da177e2005-04-16 15:20:36 -07001122 s = kmalloc(sizeof(*s), GFP_KERNEL);
Jonathan Brassowfee19982009-04-02 19:55:34 +01001123 if (!s) {
Jonathan Brassowa2d2b032011-08-02 12:32:03 +01001124 ti->error = "Cannot allocate private snapshot structure";
Linus Torvalds1da177e2005-04-16 15:20:36 -07001125 r = -ENOMEM;
Mike Snitzerfc56f6f2009-12-10 23:52:12 +00001126 goto bad;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001127 }
1128
Mikulas Patockac2411042010-08-12 04:13:51 +01001129 origin_path = argv[0];
1130 argv++;
1131 argc--;
1132
1133 r = dm_get_device(ti, origin_path, origin_mode, &s->origin);
1134 if (r) {
1135 ti->error = "Cannot get origin device";
1136 goto bad_origin;
1137 }
1138
Mike Snitzerfc56f6f2009-12-10 23:52:12 +00001139 cow_path = argv[0];
1140 argv++;
1141 argc--;
1142
Milan Broz024d37e2011-03-24 13:52:14 +00001143 r = dm_get_device(ti, cow_path, dm_table_get_mode(ti->table), &s->cow);
Mike Snitzerfc56f6f2009-12-10 23:52:12 +00001144 if (r) {
1145 ti->error = "Cannot get COW device";
1146 goto bad_cow;
1147 }
1148
1149 r = dm_exception_store_create(ti, argc, argv, s, &args_used, &s->store);
1150 if (r) {
1151 ti->error = "Couldn't create exception store";
1152 r = -EINVAL;
1153 goto bad_store;
1154 }
1155
1156 argv += args_used;
1157 argc -= args_used;
1158
Mike Snitzerfc56f6f2009-12-10 23:52:12 +00001159 s->ti = ti;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001160 s->valid = 1;
Mikulas Patocka76c44f62015-06-21 16:31:33 -04001161 s->snapshot_overflowed = 0;
Alasdair G Kergonaa14ede2006-02-01 03:04:50 -08001162 s->active = 0;
Mikulas Patocka879129d22008-10-30 13:33:16 +00001163 atomic_set(&s->pending_exceptions_count, 0);
Mikulas Patocka230c83a2013-11-29 18:13:37 -05001164 s->exception_start_sequence = 0;
1165 s->exception_complete_sequence = 0;
1166 INIT_LIST_HEAD(&s->out_of_order_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001167 init_rwsem(&s->lock);
Mike Snitzerc1f0c182009-12-10 23:52:24 +00001168 INIT_LIST_HEAD(&s->list);
Alasdair G Kergonca3a9312006-10-03 01:15:30 -07001169 spin_lock_init(&s->pe_lock);
Mikulas Patocka1e03f972009-12-10 23:52:32 +00001170 s->state_bits = 0;
Mike Snitzerd8ddb1c2009-12-10 23:52:35 +00001171 s->merge_failed = 0;
Mikulas Patocka9fe862542009-12-10 23:52:33 +00001172 s->first_merging_chunk = 0;
1173 s->num_merging_chunks = 0;
1174 bio_list_init(&s->bios_queued_during_merge);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001175
1176 /* Allocate hash table for COW data */
Jonathan Brassowfee19982009-04-02 19:55:34 +01001177 if (init_hash_tables(s)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001178 ti->error = "Unable to allocate hash table space";
1179 r = -ENOMEM;
Jonathan Brassowfee19982009-04-02 19:55:34 +01001180 goto bad_hash_tables;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001181 }
1182
Mikulas Patockadf5d2e92013-03-01 22:45:49 +00001183 s->kcopyd_client = dm_kcopyd_client_create(&dm_kcopyd_throttle);
Mikulas Patockafa34ce72011-05-29 13:03:13 +01001184 if (IS_ERR(s->kcopyd_client)) {
1185 r = PTR_ERR(s->kcopyd_client);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001186 ti->error = "Could not create kcopyd client";
Jonathan Brassowfee19982009-04-02 19:55:34 +01001187 goto bad_kcopyd;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001188 }
1189
Mikulas Patocka92e86812008-07-21 12:00:35 +01001190 s->pending_pool = mempool_create_slab_pool(MIN_IOS, pending_cache);
1191 if (!s->pending_pool) {
1192 ti->error = "Could not allocate mempool for pending exceptions";
Wei Yongjun09e8b812013-05-10 14:37:15 +01001193 r = -ENOMEM;
Jonathan Brassowfee19982009-04-02 19:55:34 +01001194 goto bad_pending_pool;
Mikulas Patocka92e86812008-07-21 12:00:35 +01001195 }
1196
Mikulas Patockacd45daf2008-07-21 12:00:32 +01001197 for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
1198 INIT_HLIST_HEAD(&s->tracked_chunk_hash[i]);
1199
1200 spin_lock_init(&s->tracked_chunk_lock);
1201
Mike Snitzerc1f0c182009-12-10 23:52:24 +00001202 ti->private = s;
Alasdair G Kergon55a62ee2013-03-01 22:45:47 +00001203 ti->num_flush_bios = num_flush_bios;
Mikulas Patocka42bc9542012-12-21 20:23:38 +00001204 ti->per_bio_data_size = sizeof(struct dm_snap_tracked_chunk);
Mike Snitzerc1f0c182009-12-10 23:52:24 +00001205
1206 /* Add snapshot to the list of snapshots for this origin */
1207 /* Exceptions aren't triggered till snapshot_resume() is called */
1208 r = register_snapshot(s);
1209 if (r == -ENOMEM) {
1210 ti->error = "Snapshot origin struct allocation failed";
1211 goto bad_load_and_register;
1212 } else if (r < 0) {
1213 /* invalid handover, register_snapshot has set ti->error */
1214 goto bad_load_and_register;
1215 }
1216
1217 /*
1218 * Metadata must only be loaded into one table at once, so skip this
1219 * if metadata will be handed over during resume.
1220 * Chunk size will be set during the handover - set it to zero to
1221 * ensure it's ignored.
1222 */
1223 if (r > 0) {
1224 s->store->chunk_size = 0;
1225 return 0;
1226 }
1227
Jonathan Brassow493df712009-04-02 19:55:31 +01001228 r = s->store->type->read_metadata(s->store, dm_add_exception,
1229 (void *)s);
Milan Broz07641472007-07-12 17:28:13 +01001230 if (r < 0) {
Mark McLoughlinf9cea4f2006-10-03 01:15:25 -07001231 ti->error = "Failed to read snapshot metadata";
Mike Snitzerc1f0c182009-12-10 23:52:24 +00001232 goto bad_read_metadata;
Milan Broz07641472007-07-12 17:28:13 +01001233 } else if (r > 0) {
1234 s->valid = 0;
1235 DMWARN("Snapshot is marked invalid.");
Mark McLoughlinf9cea4f2006-10-03 01:15:25 -07001236 }
Alasdair G Kergonaa14ede2006-02-01 03:04:50 -08001237
Mikulas Patocka3f2412d2009-10-16 23:18:16 +01001238 if (!s->store->chunk_size) {
1239 ti->error = "Chunk size not set";
Mike Snitzerc1f0c182009-12-10 23:52:24 +00001240 goto bad_read_metadata;
Mikulas Patocka3f2412d2009-10-16 23:18:16 +01001241 }
Mike Snitzer542f9032012-07-27 15:08:00 +01001242
1243 r = dm_set_target_max_io_len(ti, s->store->chunk_size);
1244 if (r)
1245 goto bad_read_metadata;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001246
1247 return 0;
1248
Mike Snitzerc1f0c182009-12-10 23:52:24 +00001249bad_read_metadata:
1250 unregister_snapshot(s);
1251
Jonathan Brassowfee19982009-04-02 19:55:34 +01001252bad_load_and_register:
Mikulas Patocka92e86812008-07-21 12:00:35 +01001253 mempool_destroy(s->pending_pool);
1254
Jonathan Brassowfee19982009-04-02 19:55:34 +01001255bad_pending_pool:
Heinz Mauelshageneb69aca2008-04-24 21:43:19 +01001256 dm_kcopyd_client_destroy(s->kcopyd_client);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001257
Jonathan Brassowfee19982009-04-02 19:55:34 +01001258bad_kcopyd:
Jon Brassow3510cb92009-12-10 23:52:11 +00001259 dm_exception_table_exit(&s->pending, pending_cache);
1260 dm_exception_table_exit(&s->complete, exception_cache);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001261
Jonathan Brassowfee19982009-04-02 19:55:34 +01001262bad_hash_tables:
Mike Snitzerfc56f6f2009-12-10 23:52:12 +00001263 dm_exception_store_destroy(s->store);
1264
1265bad_store:
1266 dm_put_device(ti, s->cow);
1267
1268bad_cow:
Mikulas Patockac2411042010-08-12 04:13:51 +01001269 dm_put_device(ti, s->origin);
1270
1271bad_origin:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001272 kfree(s);
1273
Mike Snitzerfc56f6f2009-12-10 23:52:12 +00001274bad:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001275 return r;
1276}
1277
Milan Broz31c93a0c2006-12-08 02:41:11 -08001278static void __free_exceptions(struct dm_snapshot *s)
1279{
Heinz Mauelshageneb69aca2008-04-24 21:43:19 +01001280 dm_kcopyd_client_destroy(s->kcopyd_client);
Milan Broz31c93a0c2006-12-08 02:41:11 -08001281 s->kcopyd_client = NULL;
1282
Jon Brassow3510cb92009-12-10 23:52:11 +00001283 dm_exception_table_exit(&s->pending, pending_cache);
1284 dm_exception_table_exit(&s->complete, exception_cache);
Milan Broz31c93a0c2006-12-08 02:41:11 -08001285}
1286
Mike Snitzerc1f0c182009-12-10 23:52:24 +00001287static void __handover_exceptions(struct dm_snapshot *snap_src,
1288 struct dm_snapshot *snap_dest)
1289{
1290 union {
1291 struct dm_exception_table table_swap;
1292 struct dm_exception_store *store_swap;
1293 } u;
1294
1295 /*
1296 * Swap all snapshot context information between the two instances.
1297 */
1298 u.table_swap = snap_dest->complete;
1299 snap_dest->complete = snap_src->complete;
1300 snap_src->complete = u.table_swap;
1301
1302 u.store_swap = snap_dest->store;
1303 snap_dest->store = snap_src->store;
Mike Snitzerb0d3cc02015-10-08 18:05:41 -04001304 snap_dest->store->userspace_supports_overflow = u.store_swap->userspace_supports_overflow;
Mike Snitzerc1f0c182009-12-10 23:52:24 +00001305 snap_src->store = u.store_swap;
1306
1307 snap_dest->store->snap = snap_dest;
1308 snap_src->store->snap = snap_src;
1309
Mike Snitzer542f9032012-07-27 15:08:00 +01001310 snap_dest->ti->max_io_len = snap_dest->store->chunk_size;
Mike Snitzerc1f0c182009-12-10 23:52:24 +00001311 snap_dest->valid = snap_src->valid;
Mikulas Patocka76c44f62015-06-21 16:31:33 -04001312 snap_dest->snapshot_overflowed = snap_src->snapshot_overflowed;
Mike Snitzerc1f0c182009-12-10 23:52:24 +00001313
1314 /*
1315 * Set source invalid to ensure it receives no further I/O.
1316 */
1317 snap_src->valid = 0;
1318}
1319
Linus Torvalds1da177e2005-04-16 15:20:36 -07001320static void snapshot_dtr(struct dm_target *ti)
1321{
Mikulas Patockacd45daf2008-07-21 12:00:32 +01001322#ifdef CONFIG_DM_DEBUG
1323 int i;
1324#endif
Alasdair G Kergon028867a2007-07-12 17:26:32 +01001325 struct dm_snapshot *s = ti->private;
Mike Snitzerc1f0c182009-12-10 23:52:24 +00001326 struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001327
Mike Snitzerc1f0c182009-12-10 23:52:24 +00001328 down_read(&_origins_lock);
1329 /* Check whether exception handover must be cancelled */
Mikulas Patocka9d3b15c2009-12-10 23:52:32 +00001330 (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
Mike Snitzerc1f0c182009-12-10 23:52:24 +00001331 if (snap_src && snap_dest && (s == snap_src)) {
1332 down_write(&snap_dest->lock);
1333 snap_dest->valid = 0;
1334 up_write(&snap_dest->lock);
1335 DMERR("Cancelling snapshot handover.");
1336 }
1337 up_read(&_origins_lock);
1338
Mikulas Patocka1e03f972009-12-10 23:52:32 +00001339 if (dm_target_is_snapshot_merge(ti))
1340 stop_merge(s);
1341
Alasdair G Kergon138728dc2006-03-27 01:17:50 -08001342 /* Prevent further origin writes from using this snapshot. */
1343 /* After this returns there can be no new kcopyd jobs. */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001344 unregister_snapshot(s);
1345
Mikulas Patocka879129d22008-10-30 13:33:16 +00001346 while (atomic_read(&s->pending_exceptions_count))
Mikulas Patocka90fa1522009-01-06 03:04:54 +00001347 msleep(1);
Mikulas Patocka879129d22008-10-30 13:33:16 +00001348 /*
1349 * Ensure instructions in mempool_destroy aren't reordered
1350 * before atomic_read.
1351 */
1352 smp_mb();
1353
Mikulas Patockacd45daf2008-07-21 12:00:32 +01001354#ifdef CONFIG_DM_DEBUG
1355 for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
1356 BUG_ON(!hlist_empty(&s->tracked_chunk_hash[i]));
1357#endif
1358
Milan Broz31c93a0c2006-12-08 02:41:11 -08001359 __free_exceptions(s);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001360
Mikulas Patocka92e86812008-07-21 12:00:35 +01001361 mempool_destroy(s->pending_pool);
1362
Jonathan Brassowfee19982009-04-02 19:55:34 +01001363 dm_exception_store_destroy(s->store);
Alasdair G Kergon138728dc2006-03-27 01:17:50 -08001364
Mike Snitzerfc56f6f2009-12-10 23:52:12 +00001365 dm_put_device(ti, s->cow);
1366
Mikulas Patockac2411042010-08-12 04:13:51 +01001367 dm_put_device(ti, s->origin);
1368
Linus Torvalds1da177e2005-04-16 15:20:36 -07001369 kfree(s);
1370}
1371
1372/*
1373 * Flush a list of buffers.
1374 */
1375static void flush_bios(struct bio *bio)
1376{
1377 struct bio *n;
1378
1379 while (bio) {
1380 n = bio->bi_next;
1381 bio->bi_next = NULL;
1382 generic_make_request(bio);
1383 bio = n;
1384 }
1385}
1386
Mikulas Patocka515ad662009-12-10 23:52:30 +00001387static int do_origin(struct dm_dev *origin, struct bio *bio);
1388
1389/*
1390 * Flush a list of buffers.
1391 */
1392static void retry_origin_bios(struct dm_snapshot *s, struct bio *bio)
1393{
1394 struct bio *n;
1395 int r;
1396
1397 while (bio) {
1398 n = bio->bi_next;
1399 bio->bi_next = NULL;
1400 r = do_origin(s->origin, bio);
1401 if (r == DM_MAPIO_REMAPPED)
1402 generic_make_request(bio);
1403 bio = n;
1404 }
1405}
1406
Linus Torvalds1da177e2005-04-16 15:20:36 -07001407/*
1408 * Error a list of buffers.
1409 */
1410static void error_bios(struct bio *bio)
1411{
1412 struct bio *n;
1413
1414 while (bio) {
1415 n = bio->bi_next;
1416 bio->bi_next = NULL;
NeilBrown6712ecf2007-09-27 12:47:43 +02001417 bio_io_error(bio);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001418 bio = n;
1419 }
1420}
1421
Alasdair G Kergon695368a2006-10-03 01:15:31 -07001422static void __invalidate_snapshot(struct dm_snapshot *s, int err)
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001423{
1424 if (!s->valid)
1425 return;
1426
1427 if (err == -EIO)
1428 DMERR("Invalidating snapshot: Error reading/writing.");
1429 else if (err == -ENOMEM)
1430 DMERR("Invalidating snapshot: Unable to allocate exception.");
1431
Jonathan Brassow493df712009-04-02 19:55:31 +01001432 if (s->store->type->drop_snapshot)
1433 s->store->type->drop_snapshot(s->store);
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001434
1435 s->valid = 0;
1436
Mike Snitzerfc56f6f2009-12-10 23:52:12 +00001437 dm_table_event(s->ti->table);
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001438}
1439
Mikulas Patocka385277b2016-01-08 19:07:55 -05001440static void pending_complete(void *context, int success)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001441{
Mikulas Patocka385277b2016-01-08 19:07:55 -05001442 struct dm_snap_pending_exception *pe = context;
Jon Brassow1d4989c2009-12-10 23:52:10 +00001443 struct dm_exception *e;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001444 struct dm_snapshot *s = pe->snap;
Alasdair G Kergon9d493fa2006-10-03 01:15:29 -07001445 struct bio *origin_bios = NULL;
1446 struct bio *snapshot_bios = NULL;
Mikulas Patockaa6e50b42011-08-02 12:32:04 +01001447 struct bio *full_bio = NULL;
Alasdair G Kergon9d493fa2006-10-03 01:15:29 -07001448 int error = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001449
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001450 if (!success) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001451 /* Read/write error - snapshot is unusable */
1452 down_write(&s->lock);
Alasdair G Kergon695368a2006-10-03 01:15:31 -07001453 __invalidate_snapshot(s, -EIO);
Alasdair G Kergon9d493fa2006-10-03 01:15:29 -07001454 error = 1;
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001455 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001456 }
1457
Mikulas Patocka119bc542014-01-13 19:13:36 -05001458 e = alloc_completed_exception(GFP_NOIO);
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001459 if (!e) {
1460 down_write(&s->lock);
Alasdair G Kergon695368a2006-10-03 01:15:31 -07001461 __invalidate_snapshot(s, -ENOMEM);
Alasdair G Kergon9d493fa2006-10-03 01:15:29 -07001462 error = 1;
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001463 goto out;
1464 }
1465 *e = pe->e;
1466
Alasdair G Kergon9d493fa2006-10-03 01:15:29 -07001467 down_write(&s->lock);
1468 if (!s->valid) {
Jon Brassow3510cb92009-12-10 23:52:11 +00001469 free_completed_exception(e);
Alasdair G Kergon9d493fa2006-10-03 01:15:29 -07001470 error = 1;
1471 goto out;
1472 }
1473
Mike Snitzer615d1eb2009-12-10 23:52:29 +00001474 /* Check for conflicting reads */
1475 __check_for_conflicting_io(s, pe->e.old_chunk);
Mikulas Patockaa8d41b52008-07-21 12:00:34 +01001476
1477 /*
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001478 * Add a proper exception, and remove the
1479 * in-flight exception from the list.
1480 */
Jon Brassow3510cb92009-12-10 23:52:11 +00001481 dm_insert_exception(&s->complete, e);
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001482
Jonathan Brassowa2d2b032011-08-02 12:32:03 +01001483out:
Jon Brassow3510cb92009-12-10 23:52:11 +00001484 dm_remove_exception(&pe->e);
Alasdair G Kergon9d493fa2006-10-03 01:15:29 -07001485 snapshot_bios = bio_list_get(&pe->snapshot_bios);
Mikulas Patocka515ad662009-12-10 23:52:30 +00001486 origin_bios = bio_list_get(&pe->origin_bios);
Mikulas Patockaa6e50b42011-08-02 12:32:04 +01001487 full_bio = pe->full_bio;
Mikulas Patockafe3265b2015-11-25 16:03:31 -05001488 if (full_bio)
Mikulas Patockaa6e50b42011-08-02 12:32:04 +01001489 full_bio->bi_end_io = pe->full_bio_end_io;
Mikulas Patocka73dfd072009-12-10 23:52:34 +00001490 increment_pending_exceptions_done_count();
1491
Alasdair G Kergon9d493fa2006-10-03 01:15:29 -07001492 up_write(&s->lock);
1493
1494 /* Submit any pending write bios */
Mikulas Patockaa6e50b42011-08-02 12:32:04 +01001495 if (error) {
1496 if (full_bio)
1497 bio_io_error(full_bio);
Alasdair G Kergon9d493fa2006-10-03 01:15:29 -07001498 error_bios(snapshot_bios);
Mikulas Patockaa6e50b42011-08-02 12:32:04 +01001499 } else {
1500 if (full_bio)
Christoph Hellwig4246a0b2015-07-20 15:29:37 +02001501 bio_endio(full_bio);
Alasdair G Kergon9d493fa2006-10-03 01:15:29 -07001502 flush_bios(snapshot_bios);
Mikulas Patockaa6e50b42011-08-02 12:32:04 +01001503 }
Alasdair G Kergon9d493fa2006-10-03 01:15:29 -07001504
Mikulas Patocka515ad662009-12-10 23:52:30 +00001505 retry_origin_bios(s, origin_bios);
Mikulas Patocka22aa66a2015-02-17 14:34:00 -05001506
1507 free_pending_exception(pe);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001508}
1509
Mikulas Patocka230c83a2013-11-29 18:13:37 -05001510static void complete_exception(struct dm_snap_pending_exception *pe)
1511{
1512 struct dm_snapshot *s = pe->snap;
1513
Mikulas Patocka385277b2016-01-08 19:07:55 -05001514 /* Update the metadata if we are persistent */
1515 s->store->type->commit_exception(s->store, &pe->e, !pe->copy_error,
1516 pending_complete, pe);
Mikulas Patocka230c83a2013-11-29 18:13:37 -05001517}
1518
Linus Torvalds1da177e2005-04-16 15:20:36 -07001519/*
1520 * Called when the copy I/O has finished. kcopyd actually runs
1521 * this code so don't block.
1522 */
Alasdair G Kergon4cdc1d12008-03-28 14:16:10 -07001523static void copy_callback(int read_err, unsigned long write_err, void *context)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001524{
Alasdair G Kergon028867a2007-07-12 17:26:32 +01001525 struct dm_snap_pending_exception *pe = context;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001526 struct dm_snapshot *s = pe->snap;
1527
Mikulas Patocka230c83a2013-11-29 18:13:37 -05001528 pe->copy_error = read_err || write_err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001529
Mikulas Patocka230c83a2013-11-29 18:13:37 -05001530 if (pe->exception_sequence == s->exception_complete_sequence) {
1531 s->exception_complete_sequence++;
1532 complete_exception(pe);
1533
1534 while (!list_empty(&s->out_of_order_list)) {
1535 pe = list_entry(s->out_of_order_list.next,
1536 struct dm_snap_pending_exception, out_of_order_entry);
1537 if (pe->exception_sequence != s->exception_complete_sequence)
1538 break;
1539 s->exception_complete_sequence++;
1540 list_del(&pe->out_of_order_entry);
1541 complete_exception(pe);
1542 }
1543 } else {
1544 struct list_head *lh;
1545 struct dm_snap_pending_exception *pe2;
1546
1547 list_for_each_prev(lh, &s->out_of_order_list) {
1548 pe2 = list_entry(lh, struct dm_snap_pending_exception, out_of_order_entry);
1549 if (pe2->exception_sequence < pe->exception_sequence)
1550 break;
1551 }
1552 list_add(&pe->out_of_order_entry, lh);
1553 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001554}
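
/*
 * Worked example of the ordering above (illustrative sequence numbers):
 * if exceptions 1, 2 and 3 are dispatched but kcopyd completes them as
 * 2, 3, 1, then 2 and 3 are parked on out_of_order_list; when 1 finally
 * completes it is handled at once and 2 and 3 are drained straight after
 * it, so complete_exception() is always called in dispatch order.
 */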
1555
1556/*
1557 * Dispatches the copy operation to kcopyd.
1558 */
Alasdair G Kergon028867a2007-07-12 17:26:32 +01001559static void start_copy(struct dm_snap_pending_exception *pe)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001560{
1561 struct dm_snapshot *s = pe->snap;
Heinz Mauelshagen22a1ceb2008-04-24 21:43:17 +01001562 struct dm_io_region src, dest;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001563 struct block_device *bdev = s->origin->bdev;
1564 sector_t dev_size;
1565
1566 dev_size = get_dev_size(bdev);
1567
1568 src.bdev = bdev;
Jonathan Brassow71fab002009-04-02 19:55:33 +01001569 src.sector = chunk_to_sector(s->store, pe->e.old_chunk);
Mikulas Patockadf96eee2009-10-16 23:18:17 +01001570 src.count = min((sector_t)s->store->chunk_size, dev_size - src.sector);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001571
Mike Snitzerfc56f6f2009-12-10 23:52:12 +00001572 dest.bdev = s->cow->bdev;
Jonathan Brassow71fab002009-04-02 19:55:33 +01001573 dest.sector = chunk_to_sector(s->store, pe->e.new_chunk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001574 dest.count = src.count;
1575
1576 /* Hand over to kcopyd */
Jonathan Brassowa2d2b032011-08-02 12:32:03 +01001577 dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, copy_callback, pe);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001578}
1579
Christoph Hellwig4246a0b2015-07-20 15:29:37 +02001580static void full_bio_end_io(struct bio *bio)
Mikulas Patockaa6e50b42011-08-02 12:32:04 +01001581{
1582 void *callback_data = bio->bi_private;
1583
Christoph Hellwig4246a0b2015-07-20 15:29:37 +02001584 dm_kcopyd_do_callback(callback_data, 0, bio->bi_error ? 1 : 0);
Mikulas Patockaa6e50b42011-08-02 12:32:04 +01001585}
1586
1587static void start_full_bio(struct dm_snap_pending_exception *pe,
1588 struct bio *bio)
1589{
1590 struct dm_snapshot *s = pe->snap;
1591 void *callback_data;
1592
1593 pe->full_bio = bio;
1594 pe->full_bio_end_io = bio->bi_end_io;
Mikulas Patockaa6e50b42011-08-02 12:32:04 +01001595
1596 callback_data = dm_kcopyd_prepare_callback(s->kcopyd_client,
1597 copy_callback, pe);
1598
1599 bio->bi_end_io = full_bio_end_io;
1600 bio->bi_private = callback_data;
1601
1602 generic_make_request(bio);
1603}
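
/*
 * Example of when this full-bio path applies (assuming 8-sector chunks):
 * a 4KiB write aligned to a chunk boundary rewrites the whole chunk, so
 * snapshot_map() can hand the bio itself to the COW device via
 * start_full_bio() and wire its completion into copy_callback() with
 * dm_kcopyd_prepare_callback(); no read of the origin is needed.
 */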
1604
Mikulas Patocka29138082009-04-02 19:55:25 +01001605static struct dm_snap_pending_exception *
1606__lookup_pending_exception(struct dm_snapshot *s, chunk_t chunk)
1607{
Jon Brassow3510cb92009-12-10 23:52:11 +00001608 struct dm_exception *e = dm_lookup_exception(&s->pending, chunk);
Mikulas Patocka29138082009-04-02 19:55:25 +01001609
1610 if (!e)
1611 return NULL;
1612
1613 return container_of(e, struct dm_snap_pending_exception, e);
1614}
1615
Linus Torvalds1da177e2005-04-16 15:20:36 -07001616/*
1617 * Looks to see if this snapshot already has a pending exception
1618 * for this chunk, otherwise it allocates a new one and inserts
1619 * it into the pending table.
1620 *
1621 * NOTE: a write lock must be held on snap->lock before calling
1622 * this.
1623 */
Alasdair G Kergon028867a2007-07-12 17:26:32 +01001624static struct dm_snap_pending_exception *
Mikulas Patockac6621392009-04-02 19:55:25 +01001625__find_pending_exception(struct dm_snapshot *s,
1626 struct dm_snap_pending_exception *pe, chunk_t chunk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001627{
Mikulas Patockac6621392009-04-02 19:55:25 +01001628 struct dm_snap_pending_exception *pe2;
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001629
Mikulas Patocka29138082009-04-02 19:55:25 +01001630 pe2 = __lookup_pending_exception(s, chunk);
1631 if (pe2) {
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001632 free_pending_exception(pe);
Mikulas Patocka29138082009-04-02 19:55:25 +01001633 return pe2;
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001634 }
1635
1636 pe->e.old_chunk = chunk;
1637 bio_list_init(&pe->origin_bios);
1638 bio_list_init(&pe->snapshot_bios);
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001639 pe->started = 0;
Mikulas Patockaa6e50b42011-08-02 12:32:04 +01001640 pe->full_bio = NULL;
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001641
Jonathan Brassow493df712009-04-02 19:55:31 +01001642 if (s->store->type->prepare_exception(s->store, &pe->e)) {
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001643 free_pending_exception(pe);
1644 return NULL;
1645 }
1646
Mikulas Patocka230c83a2013-11-29 18:13:37 -05001647 pe->exception_sequence = s->exception_start_sequence++;
1648
Jon Brassow3510cb92009-12-10 23:52:11 +00001649 dm_insert_exception(&s->pending, &pe->e);
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001650
Linus Torvalds1da177e2005-04-16 15:20:36 -07001651 return pe;
1652}
1653
Jon Brassow1d4989c2009-12-10 23:52:10 +00001654static void remap_exception(struct dm_snapshot *s, struct dm_exception *e,
Milan Brozd74f81f2008-02-08 02:11:27 +00001655 struct bio *bio, chunk_t chunk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001656{
Mike Snitzerfc56f6f2009-12-10 23:52:12 +00001657 bio->bi_bdev = s->cow->bdev;
Kent Overstreet4f024f32013-10-11 15:44:27 -07001658 bio->bi_iter.bi_sector =
1659 chunk_to_sector(s->store, dm_chunk_number(e->new_chunk) +
1660 (chunk - e->old_chunk)) +
1661 (bio->bi_iter.bi_sector & s->store->chunk_mask);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001662}
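
/*
 * Worked example of the arithmetic above (illustrative numbers): with an
 * 8-sector chunk size (chunk_mask == 7), a bio at sector 21 lies in
 * chunk 2.  If chunk 2 is remapped to new_chunk 5, the bio is redirected
 * to sector 5 * 8 + (21 & 7) = 45 of the COW device.  The
 * (chunk - old_chunk) term only matters when an exception covers several
 * consecutive chunks.
 */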
1663
Mikulas Patocka7de3ee52012-12-21 20:23:41 +00001664static int snapshot_map(struct dm_target *ti, struct bio *bio)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001665{
Jon Brassow1d4989c2009-12-10 23:52:10 +00001666 struct dm_exception *e;
Alasdair G Kergon028867a2007-07-12 17:26:32 +01001667 struct dm_snapshot *s = ti->private;
Kiyoshi Uedad2a7ad22006-12-08 02:41:06 -08001668 int r = DM_MAPIO_REMAPPED;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001669 chunk_t chunk;
Alasdair G Kergon028867a2007-07-12 17:26:32 +01001670 struct dm_snap_pending_exception *pe = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001671
Mikulas Patockaee180262012-12-21 20:23:41 +00001672 init_tracked_chunk(bio);
1673
Tejun Heod87f4c12010-09-03 11:56:19 +02001674 if (bio->bi_rw & REQ_FLUSH) {
Mike Snitzerfc56f6f2009-12-10 23:52:12 +00001675 bio->bi_bdev = s->cow->bdev;
Mikulas Patocka494b3ee2009-06-22 10:12:25 +01001676 return DM_MAPIO_REMAPPED;
1677 }
1678
Kent Overstreet4f024f32013-10-11 15:44:27 -07001679 chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001680
1681 /* Full snapshots are not usable */
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001682 /* To get here the table must be live so s->active is always set. */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001683 if (!s->valid)
Alasdair G Kergonf6a80ea2005-07-12 15:53:01 -07001684 return -EIO;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001685
Alasdair G Kergonba40a2a2006-10-03 01:15:28 -07001686 /* FIXME: should only take write lock if we need
1687 * to copy an exception */
1688 down_write(&s->lock);
1689
Mikulas Patocka76c44f62015-06-21 16:31:33 -04001690 if (!s->valid || (unlikely(s->snapshot_overflowed) && bio_rw(bio) == WRITE)) {
Alasdair G Kergonba40a2a2006-10-03 01:15:28 -07001691 r = -EIO;
1692 goto out_unlock;
1693 }
1694
1695 /* If the block is already remapped - use that, else remap it */
Jon Brassow3510cb92009-12-10 23:52:11 +00001696 e = dm_lookup_exception(&s->complete, chunk);
Alasdair G Kergonba40a2a2006-10-03 01:15:28 -07001697 if (e) {
Milan Brozd74f81f2008-02-08 02:11:27 +00001698 remap_exception(s, e, bio, chunk);
Alasdair G Kergonba40a2a2006-10-03 01:15:28 -07001699 goto out_unlock;
1700 }
1701
Linus Torvalds1da177e2005-04-16 15:20:36 -07001702 /*
1703 * Write to snapshot - higher level takes care of RW/RO
1704 * flags so we should only get this if we are
1705 * writeable.
1706 */
1707 if (bio_rw(bio) == WRITE) {
Mikulas Patocka29138082009-04-02 19:55:25 +01001708 pe = __lookup_pending_exception(s, chunk);
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001709 if (!pe) {
Mikulas Patockac6621392009-04-02 19:55:25 +01001710 up_write(&s->lock);
1711 pe = alloc_pending_exception(s);
1712 down_write(&s->lock);
1713
Mikulas Patocka76c44f62015-06-21 16:31:33 -04001714 if (!s->valid || s->snapshot_overflowed) {
Mikulas Patockac6621392009-04-02 19:55:25 +01001715 free_pending_exception(pe);
1716 r = -EIO;
1717 goto out_unlock;
1718 }
1719
Jon Brassow3510cb92009-12-10 23:52:11 +00001720 e = dm_lookup_exception(&s->complete, chunk);
Mikulas Patocka35bf6592009-04-02 19:55:26 +01001721 if (e) {
1722 free_pending_exception(pe);
1723 remap_exception(s, e, bio, chunk);
1724 goto out_unlock;
1725 }
1726
Mikulas Patockac6621392009-04-02 19:55:25 +01001727 pe = __find_pending_exception(s, pe, chunk);
Mikulas Patocka29138082009-04-02 19:55:25 +01001728 if (!pe) {
Mike Snitzerb0d3cc02015-10-08 18:05:41 -04001729 if (s->store->userspace_supports_overflow) {
1730 s->snapshot_overflowed = 1;
1731 DMERR("Snapshot overflowed: Unable to allocate exception.");
1732 } else
1733 __invalidate_snapshot(s, -ENOMEM);
Mikulas Patocka29138082009-04-02 19:55:25 +01001734 r = -EIO;
1735 goto out_unlock;
1736 }
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001737 }
1738
Milan Brozd74f81f2008-02-08 02:11:27 +00001739 remap_exception(s, &pe->e, bio, chunk);
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001740
Kiyoshi Uedad2a7ad22006-12-08 02:41:06 -08001741 r = DM_MAPIO_SUBMITTED;
Alasdair G Kergonba40a2a2006-10-03 01:15:28 -07001742
Mikulas Patockaa6e50b42011-08-02 12:32:04 +01001743 if (!pe->started &&
Kent Overstreet4f024f32013-10-11 15:44:27 -07001744 bio->bi_iter.bi_size ==
1745 (s->store->chunk_size << SECTOR_SHIFT)) {
Mikulas Patockaa6e50b42011-08-02 12:32:04 +01001746 pe->started = 1;
1747 up_write(&s->lock);
1748 start_full_bio(pe, bio);
1749 goto out;
1750 }
1751
1752 bio_list_add(&pe->snapshot_bios, bio);
1753
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001754 if (!pe->started) {
1755 /* this is protected by snap->lock */
1756 pe->started = 1;
Alasdair G Kergonba40a2a2006-10-03 01:15:28 -07001757 up_write(&s->lock);
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08001758 start_copy(pe);
Alasdair G Kergonba40a2a2006-10-03 01:15:28 -07001759 goto out;
1760 }
Mikulas Patockacd45daf2008-07-21 12:00:32 +01001761 } else {
Alasdair G Kergonba40a2a2006-10-03 01:15:28 -07001762 bio->bi_bdev = s->origin->bdev;
Mikulas Patockaee180262012-12-21 20:23:41 +00001763 track_chunk(s, bio, chunk);
Mikulas Patockacd45daf2008-07-21 12:00:32 +01001764 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001765
Jonathan Brassowa2d2b032011-08-02 12:32:03 +01001766out_unlock:
Alasdair G Kergonba40a2a2006-10-03 01:15:28 -07001767 up_write(&s->lock);
Jonathan Brassowa2d2b032011-08-02 12:32:03 +01001768out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001769 return r;
1770}
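
/*
 * A condensed sketch of the mapping decisions above: a read of a chunk
 * with no exception goes straight to the origin and is tracked so a
 * racing COW copy will wait for it; the first write to a chunk allocates
 * a pending exception, queues the bio on it and returns
 * DM_MAPIO_SUBMITTED, and the bio (already remapped to the COW device)
 * is only submitted once pending_complete() has run.
 */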
1771
Mikulas Patocka3452c2a2009-12-10 23:52:31 +00001772/*
1773 * A snapshot-merge target behaves like a combination of a snapshot
1774 * target and a snapshot-origin target. It only generates new
1775 * exceptions in other snapshots and not in the one that is being
1776 * merged.
1777 *
1778 * For each chunk, if there is an existing exception, it is used to
1779 * redirect I/O to the cow device. Otherwise I/O is sent to the origin,
1780 * which in turn might generate exceptions in other snapshots.
Mikulas Patocka9fe862542009-12-10 23:52:33 +00001781 * If merging is currently taking place on the chunk in question, the
1782 * I/O is deferred by adding it to s->bios_queued_during_merge.
Mikulas Patocka3452c2a2009-12-10 23:52:31 +00001783 */
Mikulas Patocka7de3ee52012-12-21 20:23:41 +00001784static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
Mikulas Patocka3452c2a2009-12-10 23:52:31 +00001785{
1786 struct dm_exception *e;
1787 struct dm_snapshot *s = ti->private;
1788 int r = DM_MAPIO_REMAPPED;
1789 chunk_t chunk;
1790
Mikulas Patockaee180262012-12-21 20:23:41 +00001791 init_tracked_chunk(bio);
1792
Tejun Heod87f4c12010-09-03 11:56:19 +02001793 if (bio->bi_rw & REQ_FLUSH) {
Alasdair G Kergon55a62ee2013-03-01 22:45:47 +00001794 if (!dm_bio_get_target_bio_nr(bio))
Mike Snitzer10b81062009-12-10 23:52:31 +00001795 bio->bi_bdev = s->origin->bdev;
1796 else
1797 bio->bi_bdev = s->cow->bdev;
Mike Snitzer10b81062009-12-10 23:52:31 +00001798 return DM_MAPIO_REMAPPED;
1799 }
1800
Kent Overstreet4f024f32013-10-11 15:44:27 -07001801 chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector);
Mikulas Patocka3452c2a2009-12-10 23:52:31 +00001802
Mikulas Patocka9fe862542009-12-10 23:52:33 +00001803 down_write(&s->lock);
Mikulas Patocka3452c2a2009-12-10 23:52:31 +00001804
Mikulas Patockad2fdb772009-12-10 23:52:36 +00001805 /* Full merging snapshots are redirected to the origin */
1806 if (!s->valid)
1807 goto redirect_to_origin;
Mikulas Patocka3452c2a2009-12-10 23:52:31 +00001808
1809 /* If the block is already remapped - use that */
1810 e = dm_lookup_exception(&s->complete, chunk);
1811 if (e) {
Mikulas Patocka9fe862542009-12-10 23:52:33 +00001812 /* Queue writes overlapping with chunks being merged */
1813 if (bio_rw(bio) == WRITE &&
1814 chunk >= s->first_merging_chunk &&
1815 chunk < (s->first_merging_chunk +
1816 s->num_merging_chunks)) {
1817 bio->bi_bdev = s->origin->bdev;
1818 bio_list_add(&s->bios_queued_during_merge, bio);
1819 r = DM_MAPIO_SUBMITTED;
1820 goto out_unlock;
1821 }
Mikulas Patocka17aa0332009-12-10 23:52:33 +00001822
Mikulas Patocka3452c2a2009-12-10 23:52:31 +00001823 remap_exception(s, e, bio, chunk);
Mikulas Patocka17aa0332009-12-10 23:52:33 +00001824
1825 if (bio_rw(bio) == WRITE)
Mikulas Patockaee180262012-12-21 20:23:41 +00001826 track_chunk(s, bio, chunk);
Mikulas Patocka3452c2a2009-12-10 23:52:31 +00001827 goto out_unlock;
1828 }
1829
Mikulas Patockad2fdb772009-12-10 23:52:36 +00001830redirect_to_origin:
Mikulas Patocka3452c2a2009-12-10 23:52:31 +00001831 bio->bi_bdev = s->origin->bdev;
1832
1833 if (bio_rw(bio) == WRITE) {
Mikulas Patocka9fe862542009-12-10 23:52:33 +00001834 up_write(&s->lock);
Mikulas Patocka3452c2a2009-12-10 23:52:31 +00001835 return do_origin(s->origin, bio);
1836 }
1837
1838out_unlock:
Mikulas Patocka9fe862542009-12-10 23:52:33 +00001839 up_write(&s->lock);
Mikulas Patocka3452c2a2009-12-10 23:52:31 +00001840
1841 return r;
1842}
1843
Mikulas Patocka7de3ee52012-12-21 20:23:41 +00001844static int snapshot_end_io(struct dm_target *ti, struct bio *bio, int error)
Mikulas Patockacd45daf2008-07-21 12:00:32 +01001845{
1846 struct dm_snapshot *s = ti->private;
Mikulas Patockacd45daf2008-07-21 12:00:32 +01001847
Mikulas Patockaee180262012-12-21 20:23:41 +00001848 if (is_bio_tracked(bio))
1849 stop_tracking_chunk(s, bio);
Mikulas Patockacd45daf2008-07-21 12:00:32 +01001850
1851 return 0;
1852}
1853
Mikulas Patocka1e03f972009-12-10 23:52:32 +00001854static void snapshot_merge_presuspend(struct dm_target *ti)
1855{
1856 struct dm_snapshot *s = ti->private;
1857
1858 stop_merge(s);
1859}
1860
Mike Snitzerc1f0c182009-12-10 23:52:24 +00001861static int snapshot_preresume(struct dm_target *ti)
1862{
1863 int r = 0;
1864 struct dm_snapshot *s = ti->private;
1865 struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
1866
1867 down_read(&_origins_lock);
Mikulas Patocka9d3b15c2009-12-10 23:52:32 +00001868 (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
Mike Snitzerc1f0c182009-12-10 23:52:24 +00001869 if (snap_src && snap_dest) {
1870 down_read(&snap_src->lock);
1871 if (s == snap_src) {
1872 DMERR("Unable to resume snapshot source until "
1873 "handover completes.");
1874 r = -EINVAL;
Mike Snitzerb83b2f22011-01-13 19:59:59 +00001875 } else if (!dm_suspended(snap_src->ti)) {
Mike Snitzerc1f0c182009-12-10 23:52:24 +00001876 DMERR("Unable to perform snapshot handover until "
1877 "source is suspended.");
1878 r = -EINVAL;
1879 }
1880 up_read(&snap_src->lock);
1881 }
1882 up_read(&_origins_lock);
1883
1884 return r;
1885}
1886
Linus Torvalds1da177e2005-04-16 15:20:36 -07001887static void snapshot_resume(struct dm_target *ti)
1888{
Alasdair G Kergon028867a2007-07-12 17:26:32 +01001889 struct dm_snapshot *s = ti->private;
Mikulas Patocka09ee96b2015-02-26 11:41:28 -05001890 struct dm_snapshot *snap_src = NULL, *snap_dest = NULL, *snap_merging = NULL;
Mikulas Patockab735fed2015-02-26 11:40:35 -05001891 struct dm_origin *o;
1892 struct mapped_device *origin_md = NULL;
Mikulas Patocka09ee96b2015-02-26 11:41:28 -05001893 bool must_restart_merging = false;
Mike Snitzerc1f0c182009-12-10 23:52:24 +00001894
1895 down_read(&_origins_lock);
Mikulas Patockab735fed2015-02-26 11:40:35 -05001896
1897 o = __lookup_dm_origin(s->origin->bdev);
1898 if (o)
1899 origin_md = dm_table_get_md(o->ti->table);
Mikulas Patocka09ee96b2015-02-26 11:41:28 -05001900 if (!origin_md) {
1901 (void) __find_snapshots_sharing_cow(s, NULL, NULL, &snap_merging);
1902 if (snap_merging)
1903 origin_md = dm_table_get_md(snap_merging->ti->table);
1904 }
Mikulas Patockab735fed2015-02-26 11:40:35 -05001905 if (origin_md == dm_table_get_md(ti->table))
1906 origin_md = NULL;
Mikulas Patocka09ee96b2015-02-26 11:41:28 -05001907 if (origin_md) {
1908 if (dm_hold(origin_md))
1909 origin_md = NULL;
1910 }
Mikulas Patockab735fed2015-02-26 11:40:35 -05001911
Mikulas Patocka09ee96b2015-02-26 11:41:28 -05001912 up_read(&_origins_lock);
1913
1914 if (origin_md) {
Mikulas Patockab735fed2015-02-26 11:40:35 -05001915 dm_internal_suspend_fast(origin_md);
Mikulas Patocka09ee96b2015-02-26 11:41:28 -05001916 if (snap_merging && test_bit(RUNNING_MERGE, &snap_merging->state_bits)) {
1917 must_restart_merging = true;
1918 stop_merge(snap_merging);
1919 }
1920 }
1921
1922 down_read(&_origins_lock);
Mikulas Patockab735fed2015-02-26 11:40:35 -05001923
Mikulas Patocka9d3b15c2009-12-10 23:52:32 +00001924 (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
Mike Snitzerc1f0c182009-12-10 23:52:24 +00001925 if (snap_src && snap_dest) {
1926 down_write(&snap_src->lock);
1927 down_write_nested(&snap_dest->lock, SINGLE_DEPTH_NESTING);
1928 __handover_exceptions(snap_src, snap_dest);
1929 up_write(&snap_dest->lock);
1930 up_write(&snap_src->lock);
1931 }
Mikulas Patockab735fed2015-02-26 11:40:35 -05001932
Mike Snitzerc1f0c182009-12-10 23:52:24 +00001933 up_read(&_origins_lock);
1934
Mikulas Patocka09ee96b2015-02-26 11:41:28 -05001935 if (origin_md) {
1936 if (must_restart_merging)
1937 start_merge(snap_merging);
1938 dm_internal_resume_fast(origin_md);
1939 dm_put(origin_md);
1940 }
1941
Mike Snitzerc1f0c182009-12-10 23:52:24 +00001942 /* Now we have correct chunk size, reregister */
1943 reregister_snapshot(s);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001944
Alasdair G Kergonaa14ede2006-02-01 03:04:50 -08001945 down_write(&s->lock);
1946 s->active = 1;
1947 up_write(&s->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001948}
1949
Mike Snitzer542f9032012-07-27 15:08:00 +01001950static uint32_t get_origin_minimum_chunksize(struct block_device *bdev)
Mikulas Patocka1e03f972009-12-10 23:52:32 +00001951{
Mike Snitzer542f9032012-07-27 15:08:00 +01001952 uint32_t min_chunksize;
Mikulas Patocka1e03f972009-12-10 23:52:32 +00001953
1954 down_read(&_origins_lock);
1955 min_chunksize = __minimum_chunk_size(__lookup_origin(bdev));
1956 up_read(&_origins_lock);
1957
1958 return min_chunksize;
1959}
1960
1961static void snapshot_merge_resume(struct dm_target *ti)
1962{
1963 struct dm_snapshot *s = ti->private;
1964
1965 /*
1966 * Handover exceptions from existing snapshot.
1967 */
1968 snapshot_resume(ti);
1969
1970 /*
Mike Snitzer542f9032012-07-27 15:08:00 +01001971 * snapshot-merge acts as an origin, so set ti->max_io_len
Mikulas Patocka1e03f972009-12-10 23:52:32 +00001972 */
Mike Snitzer542f9032012-07-27 15:08:00 +01001973 ti->max_io_len = get_origin_minimum_chunksize(s->origin->bdev);
Mikulas Patocka1e03f972009-12-10 23:52:32 +00001974
1975 start_merge(s);
1976}
1977
Mikulas Patockafd7c0922013-03-01 22:45:44 +00001978static void snapshot_status(struct dm_target *ti, status_type_t type,
1979 unsigned status_flags, char *result, unsigned maxlen)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001980{
Jonathan Brassow2e4a31d2009-04-02 19:55:34 +01001981 unsigned sz = 0;
Alasdair G Kergon028867a2007-07-12 17:26:32 +01001982 struct dm_snapshot *snap = ti->private;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001983
1984 switch (type) {
1985 case STATUSTYPE_INFO:
Mikulas Patocka94e765722009-12-10 23:51:53 +00001986
1987 down_write(&snap->lock);
1988
Linus Torvalds1da177e2005-04-16 15:20:36 -07001989 if (!snap->valid)
Jonathan Brassow2e4a31d2009-04-02 19:55:34 +01001990 DMEMIT("Invalid");
Mike Snitzerd8ddb1c2009-12-10 23:52:35 +00001991 else if (snap->merge_failed)
1992 DMEMIT("Merge failed");
Mikulas Patocka76c44f62015-06-21 16:31:33 -04001993 else if (snap->snapshot_overflowed)
1994 DMEMIT("Overflow");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001995 else {
Mike Snitzer985903b2009-12-10 23:52:11 +00001996 if (snap->store->type->usage) {
1997 sector_t total_sectors, sectors_allocated,
1998 metadata_sectors;
1999 snap->store->type->usage(snap->store,
2000 &total_sectors,
2001 &sectors_allocated,
2002 &metadata_sectors);
2003 DMEMIT("%llu/%llu %llu",
2004 (unsigned long long)sectors_allocated,
2005 (unsigned long long)total_sectors,
2006 (unsigned long long)metadata_sectors);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002007 }
2008 else
Jonathan Brassow2e4a31d2009-04-02 19:55:34 +01002009 DMEMIT("Unknown");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002010 }
Mikulas Patocka94e765722009-12-10 23:51:53 +00002011
2012 up_write(&snap->lock);
2013
Linus Torvalds1da177e2005-04-16 15:20:36 -07002014 break;
2015
2016 case STATUSTYPE_TABLE:
2017 /*
 2018 * Emit the table line: the origin and COW device names,
 2019 * followed by the exception store's own constructor
 2020 * arguments.
2021 */
Mike Snitzerfc56f6f2009-12-10 23:52:12 +00002022 DMEMIT("%s %s", snap->origin->name, snap->cow->name);
Jonathan Brassow1e302a92009-04-02 19:55:35 +01002023 snap->store->type->status(snap->store, type, result + sz,
2024 maxlen - sz);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002025 break;
2026 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002027}
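
/*
 * Example STATUSTYPE_INFO output (illustrative values): "16/1024 8"
 * means 16 of 1024 COW sectors are allocated and 8 of those hold
 * metadata; "Invalid", "Merge failed" or "Overflow" replace the numbers
 * once the snapshot can no longer be used normally.
 */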
2028
Mike Snitzer8811f462009-09-04 20:40:19 +01002029static int snapshot_iterate_devices(struct dm_target *ti,
2030 iterate_devices_callout_fn fn, void *data)
2031{
2032 struct dm_snapshot *snap = ti->private;
Mikulas Patocka1e5554c2010-08-12 04:13:50 +01002033 int r;
Mike Snitzer8811f462009-09-04 20:40:19 +01002034
Mikulas Patocka1e5554c2010-08-12 04:13:50 +01002035 r = fn(ti, snap->origin, 0, ti->len, data);
2036
2037 if (!r)
2038 r = fn(ti, snap->cow, 0, get_dev_size(snap->cow->bdev), data);
2039
2040 return r;
Mike Snitzer8811f462009-09-04 20:40:19 +01002041}
2042
2043
Linus Torvalds1da177e2005-04-16 15:20:36 -07002044/*-----------------------------------------------------------------
2045 * Origin methods
2046 *---------------------------------------------------------------*/
Mikulas Patocka9eaae8f2009-12-10 23:52:28 +00002047
2048/*
2049 * If no exceptions need creating, DM_MAPIO_REMAPPED is returned and any
2050 * supplied bio was ignored. The caller may submit it immediately.
2051 * (No remapping actually occurs as the origin is always a direct linear
2052 * map.)
2053 *
2054 * If further exceptions are required, DM_MAPIO_SUBMITTED is returned
2055 * and any supplied bio is added to a list to be submitted once all
2056 * the necessary exceptions exist.
2057 */
2058static int __origin_write(struct list_head *snapshots, sector_t sector,
2059 struct bio *bio)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002060{
Mikulas Patocka515ad662009-12-10 23:52:30 +00002061 int r = DM_MAPIO_REMAPPED;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002062 struct dm_snapshot *snap;
Jon Brassow1d4989c2009-12-10 23:52:10 +00002063 struct dm_exception *e;
Mikulas Patocka515ad662009-12-10 23:52:30 +00002064 struct dm_snap_pending_exception *pe;
2065 struct dm_snap_pending_exception *pe_to_start_now = NULL;
2066 struct dm_snap_pending_exception *pe_to_start_last = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002067 chunk_t chunk;
2068
2069 /* Do all the snapshots on this origin */
2070 list_for_each_entry (snap, snapshots, list) {
Mikulas Patocka3452c2a2009-12-10 23:52:31 +00002071 /*
2072 * Don't make new exceptions in a merging snapshot
2073 * because it has effectively been deleted
2074 */
2075 if (dm_target_is_snapshot_merge(snap->ti))
2076 continue;
2077
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08002078 down_write(&snap->lock);
2079
Alasdair G Kergonaa14ede2006-02-01 03:04:50 -08002080 /* Only deal with valid and active snapshots */
2081 if (!snap->valid || !snap->active)
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08002082 goto next_snapshot;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002083
Alasdair G Kergond5e404c2005-07-12 15:53:05 -07002084 /* Nothing to do if writing beyond end of snapshot */
Mikulas Patocka9eaae8f2009-12-10 23:52:28 +00002085 if (sector >= dm_table_get_size(snap->ti->table))
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08002086 goto next_snapshot;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002087
2088 /*
2089 * Remember, different snapshots can have
2090 * different chunk sizes.
2091 */
Mikulas Patocka9eaae8f2009-12-10 23:52:28 +00002092 chunk = sector_to_chunk(snap->store, sector);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002093
2094 /*
2095 * Check exception table to see if block
2096 * is already remapped in this snapshot
2097 * and trigger an exception if not.
2098 */
Jon Brassow3510cb92009-12-10 23:52:11 +00002099 e = dm_lookup_exception(&snap->complete, chunk);
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08002100 if (e)
2101 goto next_snapshot;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002102
Mikulas Patocka29138082009-04-02 19:55:25 +01002103 pe = __lookup_pending_exception(snap, chunk);
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08002104 if (!pe) {
Mikulas Patockac6621392009-04-02 19:55:25 +01002105 up_write(&snap->lock);
2106 pe = alloc_pending_exception(snap);
2107 down_write(&snap->lock);
2108
2109 if (!snap->valid) {
2110 free_pending_exception(pe);
2111 goto next_snapshot;
2112 }
2113
Jon Brassow3510cb92009-12-10 23:52:11 +00002114 e = dm_lookup_exception(&snap->complete, chunk);
Mikulas Patocka35bf6592009-04-02 19:55:26 +01002115 if (e) {
2116 free_pending_exception(pe);
2117 goto next_snapshot;
2118 }
2119
Mikulas Patockac6621392009-04-02 19:55:25 +01002120 pe = __find_pending_exception(snap, pe, chunk);
Mikulas Patocka29138082009-04-02 19:55:25 +01002121 if (!pe) {
2122 __invalidate_snapshot(snap, -ENOMEM);
2123 goto next_snapshot;
2124 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002125 }
2126
Mikulas Patocka515ad662009-12-10 23:52:30 +00002127 r = DM_MAPIO_SUBMITTED;
2128
2129 /*
2130 * If an origin bio was supplied, queue it to wait for the
2131 * completion of this exception, and start this one last,
2132 * at the end of the function.
2133 */
2134 if (bio) {
2135 bio_list_add(&pe->origin_bios, bio);
2136 bio = NULL;
2137
2138 if (!pe->started) {
2139 pe->started = 1;
2140 pe_to_start_last = pe;
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08002141 }
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08002142 }
2143
2144 if (!pe->started) {
2145 pe->started = 1;
Mikulas Patocka515ad662009-12-10 23:52:30 +00002146 pe_to_start_now = pe;
Alasdair G Kergon76df1c62006-03-27 01:17:45 -08002147 }
2148
Jonathan Brassowa2d2b032011-08-02 12:32:03 +01002149next_snapshot:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002150 up_write(&snap->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002151
Mikulas Patocka515ad662009-12-10 23:52:30 +00002152 if (pe_to_start_now) {
2153 start_copy(pe_to_start_now);
2154 pe_to_start_now = NULL;
2155 }
Alasdair G Kergonb4b610f2006-03-27 01:17:44 -08002156 }
2157
Linus Torvalds1da177e2005-04-16 15:20:36 -07002158 /*
Mikulas Patocka515ad662009-12-10 23:52:30 +00002159 * Submit the exception against which the bio is queued last,
2160 * to give the other exceptions a head start.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002161 */
Mikulas Patocka515ad662009-12-10 23:52:30 +00002162 if (pe_to_start_last)
2163 start_copy(pe_to_start_last);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002164
2165 return r;
2166}
2167
2168/*
2169 * Called on a write from the origin driver.
2170 */
2171static int do_origin(struct dm_dev *origin, struct bio *bio)
2172{
2173 struct origin *o;
Kiyoshi Uedad2a7ad22006-12-08 02:41:06 -08002174 int r = DM_MAPIO_REMAPPED;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002175
2176 down_read(&_origins_lock);
2177 o = __lookup_origin(origin->bdev);
2178 if (o)
Kent Overstreet4f024f32013-10-11 15:44:27 -07002179 r = __origin_write(&o->snapshots, bio->bi_iter.bi_sector, bio);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002180 up_read(&_origins_lock);
2181
2182 return r;
2183}
2184
2185/*
Mikulas Patocka73dfd072009-12-10 23:52:34 +00002186 * Trigger exceptions in all non-merging snapshots.
2187 *
2188 * The chunk size of the merging snapshot may be larger than the chunk
2189 * size of some other snapshot so we may need to reallocate multiple
2190 * chunks in other snapshots.
2191 *
2192 * We scan all the overlapping exceptions in the other snapshots.
2193 * Returns 1 if anything was reallocated and must be waited for,
2194 * otherwise returns 0.
2195 *
2196 * size must be a multiple of merging_snap's chunk_size.
2197 */
2198static int origin_write_extent(struct dm_snapshot *merging_snap,
2199 sector_t sector, unsigned size)
2200{
2201 int must_wait = 0;
2202 sector_t n;
2203 struct origin *o;
2204
2205 /*
Mike Snitzer542f9032012-07-27 15:08:00 +01002206 * The origin's __minimum_chunk_size() got stored in max_io_len
Mikulas Patocka73dfd072009-12-10 23:52:34 +00002207 * by snapshot_merge_resume().
2208 */
2209 down_read(&_origins_lock);
2210 o = __lookup_origin(merging_snap->origin->bdev);
Mike Snitzer542f9032012-07-27 15:08:00 +01002211 for (n = 0; n < size; n += merging_snap->ti->max_io_len)
Mikulas Patocka73dfd072009-12-10 23:52:34 +00002212 if (__origin_write(&o->snapshots, sector + n, NULL) ==
2213 DM_MAPIO_SUBMITTED)
2214 must_wait = 1;
2215 up_read(&_origins_lock);
2216
2217 return must_wait;
2218}
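
/*
 * Example (illustrative chunk sizes): if the merging snapshot uses
 * 16-sector chunks but another snapshot of the same origin uses
 * 8-sector chunks, max_io_len ends up as 8, so merging one 16-sector
 * extent makes two __origin_write() calls and may need to allocate two
 * exceptions in the smaller-chunked snapshot.
 */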
2219
2220/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002221 * Origin: maps a linear range of a device, with hooks for snapshotting.
2222 */
2223
2224/*
2225 * Construct an origin mapping: <dev_path>
2226 * The context for an origin is merely a 'struct dm_dev *'
2227 * pointing to the real device.
2228 */
2229static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
2230{
2231 int r;
Mikulas Patocka599cdf32014-03-14 18:42:12 -04002232 struct dm_origin *o;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002233
2234 if (argc != 1) {
Alasdair G Kergon72d94862006-06-26 00:27:35 -07002235 ti->error = "origin: incorrect number of arguments";
Linus Torvalds1da177e2005-04-16 15:20:36 -07002236 return -EINVAL;
2237 }
2238
Mikulas Patocka599cdf32014-03-14 18:42:12 -04002239 o = kmalloc(sizeof(struct dm_origin), GFP_KERNEL);
2240 if (!o) {
2241 ti->error = "Cannot allocate private origin structure";
2242 r = -ENOMEM;
2243 goto bad_alloc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002244 }
2245
Mikulas Patocka599cdf32014-03-14 18:42:12 -04002246 r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &o->dev);
2247 if (r) {
2248 ti->error = "Cannot get target device";
2249 goto bad_open;
2250 }
2251
Mikulas Patockab735fed2015-02-26 11:40:35 -05002252 o->ti = ti;
Mikulas Patocka599cdf32014-03-14 18:42:12 -04002253 ti->private = o;
Alasdair G Kergon55a62ee2013-03-01 22:45:47 +00002254 ti->num_flush_bios = 1;
Mikulas Patocka494b3ee2009-06-22 10:12:25 +01002255
Linus Torvalds1da177e2005-04-16 15:20:36 -07002256 return 0;
Mikulas Patocka599cdf32014-03-14 18:42:12 -04002257
2258bad_open:
2259 kfree(o);
2260bad_alloc:
2261 return r;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002262}
2263
2264static void origin_dtr(struct dm_target *ti)
2265{
Mikulas Patocka599cdf32014-03-14 18:42:12 -04002266 struct dm_origin *o = ti->private;
Mikulas Patockab735fed2015-02-26 11:40:35 -05002267
Mikulas Patocka599cdf32014-03-14 18:42:12 -04002268 dm_put_device(ti, o->dev);
2269 kfree(o);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002270}
2271
Mikulas Patocka7de3ee52012-12-21 20:23:41 +00002272static int origin_map(struct dm_target *ti, struct bio *bio)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002273{
Mikulas Patocka599cdf32014-03-14 18:42:12 -04002274 struct dm_origin *o = ti->private;
Mikulas Patocka298eaa82014-03-14 18:43:07 -04002275 unsigned available_sectors;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002276
Mikulas Patocka599cdf32014-03-14 18:42:12 -04002277 bio->bi_bdev = o->dev->bdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002278
Mikulas Patocka298eaa82014-03-14 18:43:07 -04002279 if (unlikely(bio->bi_rw & REQ_FLUSH))
Mikulas Patocka494b3ee2009-06-22 10:12:25 +01002280 return DM_MAPIO_REMAPPED;
2281
Mikulas Patocka298eaa82014-03-14 18:43:07 -04002282 if (bio_rw(bio) != WRITE)
2283 return DM_MAPIO_REMAPPED;
2284
2285 available_sectors = o->split_boundary -
2286 ((unsigned)bio->bi_iter.bi_sector & (o->split_boundary - 1));
2287
2288 if (bio_sectors(bio) > available_sectors)
2289 dm_accept_partial_bio(bio, available_sectors);
2290
Linus Torvalds1da177e2005-04-16 15:20:36 -07002291 /* Only tell snapshots if this is a write */
Mikulas Patocka298eaa82014-03-14 18:43:07 -04002292 return do_origin(o->dev, bio);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002293}
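
/*
 * Example of the split above (illustrative numbers): with split_boundary
 * set to 8 sectors, a 12-sector write starting at sector 6 has only
 * 8 - (6 & 7) = 2 sectors left before the boundary, so
 * dm_accept_partial_bio() trims it to 2 sectors and the DM core
 * resubmits the remainder as a new bio.
 */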
2294
Linus Torvalds1da177e2005-04-16 15:20:36 -07002295/*
Mike Snitzer542f9032012-07-27 15:08:00 +01002296 * Set the target "max_io_len" field to the minimum of all the snapshots'
Linus Torvalds1da177e2005-04-16 15:20:36 -07002297 * chunk sizes.
2298 */
2299static void origin_resume(struct dm_target *ti)
2300{
Mikulas Patocka599cdf32014-03-14 18:42:12 -04002301 struct dm_origin *o = ti->private;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002302
Mikulas Patocka298eaa82014-03-14 18:43:07 -04002303 o->split_boundary = get_origin_minimum_chunksize(o->dev->bdev);
Mikulas Patockab735fed2015-02-26 11:40:35 -05002304
2305 down_write(&_origins_lock);
2306 __insert_dm_origin(o);
2307 up_write(&_origins_lock);
2308}
2309
2310static void origin_postsuspend(struct dm_target *ti)
2311{
2312 struct dm_origin *o = ti->private;
2313
2314 down_write(&_origins_lock);
2315 __remove_dm_origin(o);
2316 up_write(&_origins_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002317}
2318
Mikulas Patockafd7c0922013-03-01 22:45:44 +00002319static void origin_status(struct dm_target *ti, status_type_t type,
2320 unsigned status_flags, char *result, unsigned maxlen)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002321{
Mikulas Patocka599cdf32014-03-14 18:42:12 -04002322 struct dm_origin *o = ti->private;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002323
2324 switch (type) {
2325 case STATUSTYPE_INFO:
2326 result[0] = '\0';
2327 break;
2328
2329 case STATUSTYPE_TABLE:
Mikulas Patocka599cdf32014-03-14 18:42:12 -04002330 snprintf(result, maxlen, "%s", o->dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002331 break;
2332 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002333}
2334
Mike Snitzer8811f462009-09-04 20:40:19 +01002335static int origin_iterate_devices(struct dm_target *ti,
2336 iterate_devices_callout_fn fn, void *data)
2337{
Mikulas Patocka599cdf32014-03-14 18:42:12 -04002338 struct dm_origin *o = ti->private;
Mike Snitzer8811f462009-09-04 20:40:19 +01002339
Mikulas Patocka599cdf32014-03-14 18:42:12 -04002340 return fn(ti, o->dev, 0, ti->len, data);
Mike Snitzer8811f462009-09-04 20:40:19 +01002341}
2342
Linus Torvalds1da177e2005-04-16 15:20:36 -07002343static struct target_type origin_target = {
2344 .name = "snapshot-origin",
Mikulas Patockab735fed2015-02-26 11:40:35 -05002345 .version = {1, 9, 0},
Linus Torvalds1da177e2005-04-16 15:20:36 -07002346 .module = THIS_MODULE,
2347 .ctr = origin_ctr,
2348 .dtr = origin_dtr,
2349 .map = origin_map,
2350 .resume = origin_resume,
Mikulas Patockab735fed2015-02-26 11:40:35 -05002351 .postsuspend = origin_postsuspend,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002352 .status = origin_status,
Mike Snitzer8811f462009-09-04 20:40:19 +01002353 .iterate_devices = origin_iterate_devices,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002354};
2355
2356static struct target_type snapshot_target = {
2357 .name = "snapshot",
Mike Snitzerb0d3cc02015-10-08 18:05:41 -04002358 .version = {1, 15, 0},
Linus Torvalds1da177e2005-04-16 15:20:36 -07002359 .module = THIS_MODULE,
2360 .ctr = snapshot_ctr,
2361 .dtr = snapshot_dtr,
2362 .map = snapshot_map,
Mikulas Patockacd45daf2008-07-21 12:00:32 +01002363 .end_io = snapshot_end_io,
Mike Snitzerc1f0c182009-12-10 23:52:24 +00002364 .preresume = snapshot_preresume,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002365 .resume = snapshot_resume,
2366 .status = snapshot_status,
Mike Snitzer8811f462009-09-04 20:40:19 +01002367 .iterate_devices = snapshot_iterate_devices,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002368};
2369
Mikulas Patockad698aa42009-12-10 23:52:30 +00002370static struct target_type merge_target = {
2371 .name = dm_snapshot_merge_target_name,
Mike Snitzerb0d3cc02015-10-08 18:05:41 -04002372 .version = {1, 4, 0},
Mikulas Patockad698aa42009-12-10 23:52:30 +00002373 .module = THIS_MODULE,
2374 .ctr = snapshot_ctr,
2375 .dtr = snapshot_dtr,
Mikulas Patocka3452c2a2009-12-10 23:52:31 +00002376 .map = snapshot_merge_map,
Mikulas Patockad698aa42009-12-10 23:52:30 +00002377 .end_io = snapshot_end_io,
Mikulas Patocka1e03f972009-12-10 23:52:32 +00002378 .presuspend = snapshot_merge_presuspend,
Mikulas Patockad698aa42009-12-10 23:52:30 +00002379 .preresume = snapshot_preresume,
Mikulas Patocka1e03f972009-12-10 23:52:32 +00002380 .resume = snapshot_merge_resume,
Mikulas Patockad698aa42009-12-10 23:52:30 +00002381 .status = snapshot_status,
2382 .iterate_devices = snapshot_iterate_devices,
2383};
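
/*
 * A sketch of how the three targets registered above are typically wired
 * up from user space (device names and sizes are illustrative only):
 *
 *   # snapshot-origin: <origin>
 *   echo "0 $SIZE snapshot-origin /dev/vg/base" | dmsetup create base-org
 *   # snapshot: <origin> <COW device> <persistent?> <chunk sectors>
 *   echo "0 $SIZE snapshot /dev/vg/base /dev/vg/cow P 8" | dmsetup create snap
 *   # snapshot-merge takes the same arguments as snapshot and merges the
 *   # COW store back into the origin.
 */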
2384
Linus Torvalds1da177e2005-04-16 15:20:36 -07002385static int __init dm_snapshot_init(void)
2386{
2387 int r;
2388
Alasdair G Kergon4db6bfe2009-01-06 03:05:17 +00002389 r = dm_exception_store_init();
2390 if (r) {
2391 DMERR("Failed to initialize exception stores");
2392 return r;
2393 }
2394
Linus Torvalds1da177e2005-04-16 15:20:36 -07002395 r = dm_register_target(&snapshot_target);
Mikulas Patockad698aa42009-12-10 23:52:30 +00002396 if (r < 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002397 DMERR("snapshot target register failed %d", r);
Jonathan Brassow034a1862009-10-16 23:18:14 +01002398 goto bad_register_snapshot_target;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002399 }
2400
2401 r = dm_register_target(&origin_target);
2402 if (r < 0) {
Alasdair G Kergon72d94862006-06-26 00:27:35 -07002403 DMERR("Origin target register failed %d", r);
Mikulas Patockad698aa42009-12-10 23:52:30 +00002404 goto bad_register_origin_target;
2405 }
2406
2407 r = dm_register_target(&merge_target);
2408 if (r < 0) {
2409 DMERR("Merge target register failed %d", r);
2410 goto bad_register_merge_target;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002411 }
2412
2413 r = init_origin_hash();
2414 if (r) {
2415 DMERR("init_origin_hash failed.");
Mikulas Patockad698aa42009-12-10 23:52:30 +00002416 goto bad_origin_hash;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002417 }
2418
Jon Brassow1d4989c2009-12-10 23:52:10 +00002419 exception_cache = KMEM_CACHE(dm_exception, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002420 if (!exception_cache) {
2421 DMERR("Couldn't create exception cache.");
2422 r = -ENOMEM;
Mikulas Patockad698aa42009-12-10 23:52:30 +00002423 goto bad_exception_cache;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002424 }
2425
Alasdair G Kergon028867a2007-07-12 17:26:32 +01002426 pending_cache = KMEM_CACHE(dm_snap_pending_exception, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002427 if (!pending_cache) {
2428 DMERR("Couldn't create pending cache.");
2429 r = -ENOMEM;
Mikulas Patockad698aa42009-12-10 23:52:30 +00002430 goto bad_pending_cache;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002431 }
2432
Linus Torvalds1da177e2005-04-16 15:20:36 -07002433 return 0;
2434
Mikulas Patockad698aa42009-12-10 23:52:30 +00002435bad_pending_cache:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002436 kmem_cache_destroy(exception_cache);
Mikulas Patockad698aa42009-12-10 23:52:30 +00002437bad_exception_cache:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002438 exit_origin_hash();
Mikulas Patockad698aa42009-12-10 23:52:30 +00002439bad_origin_hash:
2440 dm_unregister_target(&merge_target);
2441bad_register_merge_target:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002442 dm_unregister_target(&origin_target);
Mikulas Patockad698aa42009-12-10 23:52:30 +00002443bad_register_origin_target:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002444 dm_unregister_target(&snapshot_target);
Jonathan Brassow034a1862009-10-16 23:18:14 +01002445bad_register_snapshot_target:
2446 dm_exception_store_exit();
Mikulas Patockad698aa42009-12-10 23:52:30 +00002447
Linus Torvalds1da177e2005-04-16 15:20:36 -07002448 return r;
2449}
2450
2451static void __exit dm_snapshot_exit(void)
2452{
Mikulas Patocka10d3bd02009-01-06 03:04:58 +00002453 dm_unregister_target(&snapshot_target);
2454 dm_unregister_target(&origin_target);
Mikulas Patockad698aa42009-12-10 23:52:30 +00002455 dm_unregister_target(&merge_target);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002456
2457 exit_origin_hash();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002458 kmem_cache_destroy(pending_cache);
2459 kmem_cache_destroy(exception_cache);
Alasdair G Kergon4db6bfe2009-01-06 03:05:17 +00002460
2461 dm_exception_store_exit();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002462}
2463
2464/* Module hooks */
2465module_init(dm_snapshot_init);
2466module_exit(dm_snapshot_exit);
2467
2468MODULE_DESCRIPTION(DM_NAME " snapshot target");
2469MODULE_AUTHOR("Joe Thornber");
2470MODULE_LICENSE("GPL");
Mikulas Patocka23cb2102013-03-01 22:45:47 +00002471MODULE_ALIAS("dm-snapshot-origin");
2472MODULE_ALIAS("dm-snapshot-merge");