/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-core.h"
#include "dm-rq.h"
#include "dm-uevent.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/sched/signal.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/mempool.h>
#include <linux/dax.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/uio.h>
#include <linux/hdreg.h>
#include <linux/delay.h>
#include <linux/wait.h>
#include <linux/pr.h>
#include <linux/refcount.h>
#include <linux/part_stat.h>

#define DM_MSG_PREFIX "core"

/*
 * Cookies are numeric values sent with CHANGE and REMOVE
 * uevents while resuming, removing or renaming the device.
 */
#define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
#define DM_COOKIE_LENGTH 24
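
/*
 * Illustrative note (based on the uevent code elsewhere in dm, not shown
 * here): the cookie is emitted into the uevent environment as a decimal
 * u32, e.g. "DM_COOKIE=4294967295", so DM_COOKIE_LENGTH of 24 bytes is
 * enough for "DM_COOKIE=", up to ten digits, and a terminating NUL.
 */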

static const char *_name = DM_NAME;

static unsigned int major = 0;
static unsigned int _major = 0;

static DEFINE_IDR(_minor_idr);

static DEFINE_SPINLOCK(_minor_lock);

static void do_deferred_remove(struct work_struct *w);

static DECLARE_WORK(deferred_remove_work, do_deferred_remove);

static struct workqueue_struct *deferred_remove_workqueue;

atomic_t dm_global_event_nr = ATOMIC_INIT(0);
DECLARE_WAIT_QUEUE_HEAD(dm_global_eventq);

void dm_issue_global_event(void)
{
	atomic_inc(&dm_global_event_nr);
	wake_up(&dm_global_eventq);
}

/*
 * One of these is allocated (on-stack) per original bio.
 */
struct clone_info {
	struct dm_table *map;
	struct bio *bio;
	struct dm_io *io;
	sector_t sector;
	unsigned sector_count;
};

/*
 * One of these is allocated per clone bio.
 */
#define DM_TIO_MAGIC 7282014
struct dm_target_io {
	unsigned magic;
	struct dm_io *io;
	struct dm_target *ti;
	unsigned target_bio_nr;
	unsigned *len_ptr;
	bool inside_dm_io;
	struct bio clone;
};

/*
 * One of these is allocated per original bio.
 * It contains the first clone used for that original.
 */
#define DM_IO_MAGIC 5191977
struct dm_io {
	unsigned magic;
	struct mapped_device *md;
	blk_status_t status;
	atomic_t io_count;
	struct bio *orig_bio;
	unsigned long start_time;
	spinlock_t endio_lock;
	struct dm_stats_aux stats_aux;
	/* last member of dm_target_io is 'struct bio' */
	struct dm_target_io tio;
};

void *dm_per_bio_data(struct bio *bio, size_t data_size)
{
	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
	if (!tio->inside_dm_io)
		return (char *)bio - offsetof(struct dm_target_io, clone) - data_size;
	return (char *)bio - offsetof(struct dm_target_io, clone) - offsetof(struct dm_io, tio) - data_size;
}
EXPORT_SYMBOL_GPL(dm_per_bio_data);
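
/*
 * Example usage (illustrative sketch, not dm core code): a target that set
 * ti->per_io_data_size = sizeof(struct my_per_bio_data) in its constructor
 * can recover its per-bio state from the clone bio it is handed:
 *
 *	struct my_per_bio_data *pb;
 *
 *	pb = dm_per_bio_data(bio, sizeof(struct my_per_bio_data));
 *
 * "struct my_per_bio_data" is a hypothetical name.  The same pointer
 * round-trips through dm_bio_from_per_bio_data() below, which undoes the
 * offset arithmetic above.
 */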

struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size)
{
	struct dm_io *io = (struct dm_io *)((char *)data + data_size);
	if (io->magic == DM_IO_MAGIC)
		return (struct bio *)((char *)io + offsetof(struct dm_io, tio) + offsetof(struct dm_target_io, clone));
	BUG_ON(io->magic != DM_TIO_MAGIC);
	return (struct bio *)((char *)io + offsetof(struct dm_target_io, clone));
}
EXPORT_SYMBOL_GPL(dm_bio_from_per_bio_data);

unsigned dm_bio_get_target_bio_nr(const struct bio *bio)
{
	return container_of(bio, struct dm_target_io, clone)->target_bio_nr;
}
EXPORT_SYMBOL_GPL(dm_bio_get_target_bio_nr);

#define MINOR_ALLOCED ((void *)-1)

/*
 * Bits for the md->flags field.
 */
#define DMF_BLOCK_IO_FOR_SUSPEND 0
#define DMF_SUSPENDED 1
#define DMF_FROZEN 2
#define DMF_FREEING 3
#define DMF_DELETING 4
#define DMF_NOFLUSH_SUSPENDING 5
#define DMF_DEFERRED_REMOVE 6
#define DMF_SUSPENDED_INTERNALLY 7

#define DM_NUMA_NODE NUMA_NO_NODE
static int dm_numa_node = DM_NUMA_NODE;

/*
 * For mempools pre-allocation at the table loading time.
 */
struct dm_md_mempools {
	struct bio_set bs;
	struct bio_set io_bs;
};

struct table_device {
	struct list_head list;
	refcount_t count;
	struct dm_dev dm_dev;
};

/*
 * Bio-based DM's mempools' reserved IOs set by the user.
 */
#define RESERVED_BIO_BASED_IOS		16
static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS;

static int __dm_get_module_param_int(int *module_param, int min, int max)
{
	int param = READ_ONCE(*module_param);
	int modified_param = 0;
	bool modified = true;

	if (param < min)
		modified_param = min;
	else if (param > max)
		modified_param = max;
	else
		modified = false;

	if (modified) {
		(void)cmpxchg(module_param, param, modified_param);
		param = modified_param;
	}

	return param;
}

unsigned __dm_get_module_param(unsigned *module_param,
			       unsigned def, unsigned max)
{
	unsigned param = READ_ONCE(*module_param);
	unsigned modified_param = 0;

	if (!param)
		modified_param = def;
	else if (param > max)
		modified_param = max;

	if (modified_param) {
		(void)cmpxchg(module_param, param, modified_param);
		param = modified_param;
	}

	return param;
}

unsigned dm_get_reserved_bio_based_ios(void)
{
	return __dm_get_module_param(&reserved_bio_based_ios,
				     RESERVED_BIO_BASED_IOS, DM_RESERVED_MAX_IOS);
}
EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios);

static unsigned dm_get_numa_node(void)
{
	return __dm_get_module_param_int(&dm_numa_node,
					 DM_NUMA_NODE, num_online_nodes() - 1);
}

static int __init local_init(void)
{
	int r;

	r = dm_uevent_init();
	if (r)
		return r;

	deferred_remove_workqueue = alloc_workqueue("kdmremove", WQ_UNBOUND, 1);
	if (!deferred_remove_workqueue) {
		r = -ENOMEM;
		goto out_uevent_exit;
	}

	_major = major;
	r = register_blkdev(_major, _name);
	if (r < 0)
		goto out_free_workqueue;

	if (!_major)
		_major = r;

	return 0;

out_free_workqueue:
	destroy_workqueue(deferred_remove_workqueue);
out_uevent_exit:
	dm_uevent_exit();

	return r;
}

static void local_exit(void)
{
	flush_scheduled_work();
	destroy_workqueue(deferred_remove_workqueue);

	unregister_blkdev(_major, _name);
	dm_uevent_exit();

	_major = 0;

	DMINFO("cleaned up");
}

static int (*_inits[])(void) __initdata = {
	local_init,
	dm_target_init,
	dm_linear_init,
	dm_stripe_init,
	dm_io_init,
	dm_kcopyd_init,
	dm_interface_init,
	dm_statistics_init,
};

static void (*_exits[])(void) = {
	local_exit,
	dm_target_exit,
	dm_linear_exit,
	dm_stripe_exit,
	dm_io_exit,
	dm_kcopyd_exit,
	dm_interface_exit,
	dm_statistics_exit,
};

static int __init dm_init(void)
{
	const int count = ARRAY_SIZE(_inits);

	int r, i;

	for (i = 0; i < count; i++) {
		r = _inits[i]();
		if (r)
			goto bad;
	}

	return 0;

      bad:
	while (i--)
		_exits[i]();

	return r;
}

static void __exit dm_exit(void)
{
	int i = ARRAY_SIZE(_exits);

	while (i--)
		_exits[i]();

	/*
	 * Should be empty by this point.
	 */
	idr_destroy(&_minor_idr);
}

/*
 * Block device functions
 */
int dm_deleting_md(struct mapped_device *md)
{
	return test_bit(DMF_DELETING, &md->flags);
}

static int dm_blk_open(struct block_device *bdev, fmode_t mode)
{
	struct mapped_device *md;

	spin_lock(&_minor_lock);

	md = bdev->bd_disk->private_data;
	if (!md)
		goto out;

	if (test_bit(DMF_FREEING, &md->flags) ||
	    dm_deleting_md(md)) {
		md = NULL;
		goto out;
	}

	dm_get(md);
	atomic_inc(&md->open_count);
out:
	spin_unlock(&_minor_lock);

	return md ? 0 : -ENXIO;
}

static void dm_blk_close(struct gendisk *disk, fmode_t mode)
{
	struct mapped_device *md;

	spin_lock(&_minor_lock);

	md = disk->private_data;
	if (WARN_ON(!md))
		goto out;

	if (atomic_dec_and_test(&md->open_count) &&
	    (test_bit(DMF_DEFERRED_REMOVE, &md->flags)))
		queue_work(deferred_remove_workqueue, &deferred_remove_work);

	dm_put(md);
out:
	spin_unlock(&_minor_lock);
}

int dm_open_count(struct mapped_device *md)
{
	return atomic_read(&md->open_count);
}

/*
 * Guarantees nothing is using the device before it's deleted.
 */
int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (dm_open_count(md)) {
		r = -EBUSY;
		if (mark_deferred)
			set_bit(DMF_DEFERRED_REMOVE, &md->flags);
	} else if (only_deferred && !test_bit(DMF_DEFERRED_REMOVE, &md->flags))
		r = -EEXIST;
	else
		set_bit(DMF_DELETING, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}

int dm_cancel_deferred_remove(struct mapped_device *md)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (test_bit(DMF_DELETING, &md->flags))
		r = -EBUSY;
	else
		clear_bit(DMF_DEFERRED_REMOVE, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}

static void do_deferred_remove(struct work_struct *w)
{
	dm_deferred_remove();
}

sector_t dm_get_size(struct mapped_device *md)
{
	return get_capacity(md->disk);
}

struct request_queue *dm_get_md_queue(struct mapped_device *md)
{
	return md->queue;
}

struct dm_stats *dm_get_stats(struct mapped_device *md)
{
	return &md->stats;
}

static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mapped_device *md = bdev->bd_disk->private_data;

	return dm_get_geometry(md, geo);
}

#ifdef CONFIG_BLK_DEV_ZONED
int dm_report_zones_cb(struct blk_zone *zone, unsigned int idx, void *data)
{
	struct dm_report_zones_args *args = data;
	sector_t sector_diff = args->tgt->begin - args->start;

	/*
	 * Ignore zones beyond the target range.
	 */
	if (zone->start >= args->start + args->tgt->len)
		return 0;

	/*
	 * Remap the start sector and write pointer position of the zone
	 * to match its position in the target range.
	 */
	zone->start += sector_diff;
	if (zone->type != BLK_ZONE_TYPE_CONVENTIONAL) {
		if (zone->cond == BLK_ZONE_COND_FULL)
			zone->wp = zone->start + zone->len;
		else if (zone->cond == BLK_ZONE_COND_EMPTY)
			zone->wp = zone->start;
		else
			zone->wp += sector_diff;
	}

	args->next_sector = zone->start + zone->len;
	return args->orig_cb(zone, args->zone_idx++, args->orig_data);
}
EXPORT_SYMBOL_GPL(dm_report_zones_cb);
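
/*
 * Worked example of the remapping above (illustrative numbers): if the
 * target begins at sector 524288 of the dm device and maps from sector 0
 * of the underlying zoned device (args->start == 0), then sector_diff is
 * 524288, and a zone the underlying device reports at start 4096 is
 * presented to the caller at start 528384, with its write pointer shifted
 * by the same amount.
 */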

static int dm_blk_report_zones(struct gendisk *disk, sector_t sector,
			       unsigned int nr_zones, report_zones_cb cb, void *data)
{
	struct mapped_device *md = disk->private_data;
	struct dm_table *map;
	int srcu_idx, ret;
	struct dm_report_zones_args args = {
		.next_sector = sector,
		.orig_data = data,
		.orig_cb = cb,
	};

	if (dm_suspended_md(md))
		return -EAGAIN;

	map = dm_get_live_table(md, &srcu_idx);
	if (!map)
		return -EIO;

	do {
		struct dm_target *tgt;

		tgt = dm_table_find_target(map, args.next_sector);
		if (WARN_ON_ONCE(!tgt->type->report_zones)) {
			ret = -EIO;
			goto out;
		}

		args.tgt = tgt;
		ret = tgt->type->report_zones(tgt, &args, nr_zones);
		if (ret < 0)
			goto out;
	} while (args.zone_idx < nr_zones &&
		 args.next_sector < get_capacity(disk));

	ret = args.zone_idx;
out:
	dm_put_live_table(md, srcu_idx);
	return ret;
}
#else
#define dm_blk_report_zones	NULL
#endif /* CONFIG_BLK_DEV_ZONED */

static int dm_prepare_ioctl(struct mapped_device *md, int *srcu_idx,
			    struct block_device **bdev)
	__acquires(md->io_barrier)
{
	struct dm_target *tgt;
	struct dm_table *map;
	int r;

retry:
	r = -ENOTTY;
	map = dm_get_live_table(md, srcu_idx);
	if (!map || !dm_table_get_size(map))
		return r;

	/* We only support devices that have a single target */
	if (dm_table_get_num_targets(map) != 1)
		return r;

	tgt = dm_table_get_target(map, 0);
	if (!tgt->type->prepare_ioctl)
		return r;

	if (dm_suspended_md(md))
		return -EAGAIN;

	r = tgt->type->prepare_ioctl(tgt, bdev);
	if (r == -ENOTCONN && !fatal_signal_pending(current)) {
		dm_put_live_table(md, *srcu_idx);
		msleep(10);
		goto retry;
	}

	return r;
}

static void dm_unprepare_ioctl(struct mapped_device *md, int srcu_idx)
	__releases(md->io_barrier)
{
	dm_put_live_table(md, srcu_idx);
}

static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	int r, srcu_idx;

	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
	if (r < 0)
		goto out;

	if (r > 0) {
		/*
		 * Target determined this ioctl is being issued against a
		 * subset of the parent bdev; require extra privileges.
		 */
		if (!capable(CAP_SYS_RAWIO)) {
			DMWARN_LIMIT(
	"%s: sending ioctl %x to DM device without required privilege.",
				current->comm, cmd);
			r = -ENOIOCTLCMD;
			goto out;
		}
	}

	r = __blkdev_driver_ioctl(bdev, mode, cmd, arg);
out:
	dm_unprepare_ioctl(md, srcu_idx);
	return r;
}

static void start_io_acct(struct dm_io *io);

static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio)
{
	struct dm_io *io;
	struct dm_target_io *tio;
	struct bio *clone;

	clone = bio_alloc_bioset(GFP_NOIO, 0, &md->io_bs);
	if (!clone)
		return NULL;

	tio = container_of(clone, struct dm_target_io, clone);
	tio->inside_dm_io = true;
	tio->io = NULL;

	io = container_of(tio, struct dm_io, tio);
	io->magic = DM_IO_MAGIC;
	io->status = 0;
	atomic_set(&io->io_count, 1);
	io->orig_bio = bio;
	io->md = md;
	spin_lock_init(&io->endio_lock);

	start_io_acct(io);

	return io;
}

static void free_io(struct mapped_device *md, struct dm_io *io)
{
	bio_put(&io->tio.clone);
}

static struct dm_target_io *alloc_tio(struct clone_info *ci, struct dm_target *ti,
				      unsigned target_bio_nr, gfp_t gfp_mask)
{
	struct dm_target_io *tio;

	if (!ci->io->tio.io) {
		/* the dm_target_io embedded in ci->io is available */
		tio = &ci->io->tio;
	} else {
		struct bio *clone = bio_alloc_bioset(gfp_mask, 0, &ci->io->md->bs);
		if (!clone)
			return NULL;

		tio = container_of(clone, struct dm_target_io, clone);
		tio->inside_dm_io = false;
	}

	tio->magic = DM_TIO_MAGIC;
	tio->io = ci->io;
	tio->ti = ti;
	tio->target_bio_nr = target_bio_nr;

	return tio;
}

static void free_tio(struct dm_target_io *tio)
{
	if (tio->inside_dm_io)
		return;
	bio_put(&tio->clone);
}

static bool md_in_flight_bios(struct mapped_device *md)
{
	int cpu;
	struct hd_struct *part = &dm_disk(md)->part0;
	long sum = 0;

	for_each_possible_cpu(cpu) {
		sum += part_stat_local_read_cpu(part, in_flight[0], cpu);
		sum += part_stat_local_read_cpu(part, in_flight[1], cpu);
	}

	return sum != 0;
}

static bool md_in_flight(struct mapped_device *md)
{
	if (queue_is_mq(md->queue))
		return blk_mq_queue_inflight(md->queue);
	else
		return md_in_flight_bios(md);
}

static void start_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	struct bio *bio = io->orig_bio;

	io->start_time = jiffies;

	generic_start_io_acct(md->queue, bio_op(bio), bio_sectors(bio),
			      &dm_disk(md)->part0);

	if (unlikely(dm_stats_used(&md->stats)))
		dm_stats_account_io(&md->stats, bio_data_dir(bio),
				    bio->bi_iter.bi_sector, bio_sectors(bio),
				    false, 0, &io->stats_aux);
}

static void end_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	struct bio *bio = io->orig_bio;
	unsigned long duration = jiffies - io->start_time;

	generic_end_io_acct(md->queue, bio_op(bio), &dm_disk(md)->part0,
			    io->start_time);

	if (unlikely(dm_stats_used(&md->stats)))
		dm_stats_account_io(&md->stats, bio_data_dir(bio),
				    bio->bi_iter.bi_sector, bio_sectors(bio),
				    true, duration, &io->stats_aux);

	/* nudge anyone waiting on suspend queue */
	if (unlikely(wq_has_sleeper(&md->wait)))
		wake_up(&md->wait);
}

/*
 * Add the bio to the list of deferred io.
 */
static void queue_io(struct mapped_device *md, struct bio *bio)
{
	unsigned long flags;

	spin_lock_irqsave(&md->deferred_lock, flags);
	bio_list_add(&md->deferred, bio);
	spin_unlock_irqrestore(&md->deferred_lock, flags);
	queue_work(md->wq, &md->work);
}

/*
 * Everyone (including functions in this file) should use this
 * function to access the md->map field, and make sure they call
 * dm_put_live_table() when finished.
 */
struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx) __acquires(md->io_barrier)
{
	*srcu_idx = srcu_read_lock(&md->io_barrier);

	return srcu_dereference(md->map, &md->io_barrier);
}

void dm_put_live_table(struct mapped_device *md, int srcu_idx) __releases(md->io_barrier)
{
	srcu_read_unlock(&md->io_barrier, srcu_idx);
}
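
/*
 * Typical usage (illustrative sketch): callers bracket any dereference of
 * the live table with the SRCU get/put pair, e.g.:
 *
 *	int srcu_idx;
 *	struct dm_table *map = dm_get_live_table(md, &srcu_idx);
 *
 *	if (map) {
 *		... look up targets, submit I/O, etc ...
 *	}
 *	dm_put_live_table(md, srcu_idx);
 *
 * dm_blk_report_zones() and dm_prepare_ioctl() above follow this pattern.
 */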

void dm_sync_table(struct mapped_device *md)
{
	synchronize_srcu(&md->io_barrier);
	synchronize_rcu_expedited();
}

/*
 * A fast alternative to dm_get_live_table/dm_put_live_table.
 * The caller must not block between these two functions.
 */
static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) __acquires(RCU)
{
	rcu_read_lock();
	return rcu_dereference(md->map);
}

static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU)
{
	rcu_read_unlock();
}

static char *_dm_claim_ptr = "I belong to device-mapper";

/*
 * Open a table device so we can use it as a map destination.
 */
static int open_table_device(struct table_device *td, dev_t dev,
			     struct mapped_device *md)
{
	struct block_device *bdev;

	int r;

	BUG_ON(td->dm_dev.bdev);

	bdev = blkdev_get_by_dev(dev, td->dm_dev.mode | FMODE_EXCL, _dm_claim_ptr);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	r = bd_link_disk_holder(bdev, dm_disk(md));
	if (r) {
		blkdev_put(bdev, td->dm_dev.mode | FMODE_EXCL);
		return r;
	}

	td->dm_dev.bdev = bdev;
	td->dm_dev.dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
	return 0;
}

/*
 * Close a table device that we've been using.
 */
static void close_table_device(struct table_device *td, struct mapped_device *md)
{
	if (!td->dm_dev.bdev)
		return;

	bd_unlink_disk_holder(td->dm_dev.bdev, dm_disk(md));
	blkdev_put(td->dm_dev.bdev, td->dm_dev.mode | FMODE_EXCL);
	put_dax(td->dm_dev.dax_dev);
	td->dm_dev.bdev = NULL;
	td->dm_dev.dax_dev = NULL;
}

static struct table_device *find_table_device(struct list_head *l, dev_t dev,
					      fmode_t mode)
{
	struct table_device *td;

	list_for_each_entry(td, l, list)
		if (td->dm_dev.bdev->bd_dev == dev && td->dm_dev.mode == mode)
			return td;

	return NULL;
}

int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode,
			struct dm_dev **result)
{
	int r;
	struct table_device *td;

	mutex_lock(&md->table_devices_lock);
	td = find_table_device(&md->table_devices, dev, mode);
	if (!td) {
		td = kmalloc_node(sizeof(*td), GFP_KERNEL, md->numa_node_id);
		if (!td) {
			mutex_unlock(&md->table_devices_lock);
			return -ENOMEM;
		}

		td->dm_dev.mode = mode;
		td->dm_dev.bdev = NULL;

		if ((r = open_table_device(td, dev, md))) {
			mutex_unlock(&md->table_devices_lock);
			kfree(td);
			return r;
		}

		format_dev_t(td->dm_dev.name, dev);

		refcount_set(&td->count, 1);
		list_add(&td->list, &md->table_devices);
	} else {
		refcount_inc(&td->count);
	}
	mutex_unlock(&md->table_devices_lock);

	*result = &td->dm_dev;
	return 0;
}
EXPORT_SYMBOL_GPL(dm_get_table_device);

void dm_put_table_device(struct mapped_device *md, struct dm_dev *d)
{
	struct table_device *td = container_of(d, struct table_device, dm_dev);

	mutex_lock(&md->table_devices_lock);
	if (refcount_dec_and_test(&td->count)) {
		close_table_device(td, md);
		list_del(&td->list);
		kfree(td);
	}
	mutex_unlock(&md->table_devices_lock);
}
EXPORT_SYMBOL(dm_put_table_device);

static void free_table_devices(struct list_head *devices)
{
	struct list_head *tmp, *next;

	list_for_each_safe(tmp, next, devices) {
		struct table_device *td = list_entry(tmp, struct table_device, list);

		DMWARN("dm_destroy: %s still exists with %d references",
		       td->dm_dev.name, refcount_read(&td->count));
		kfree(td);
	}
}

/*
 * Get the geometry associated with a dm device
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	*geo = md->geometry;

	return 0;
}

/*
 * Set the geometry of a device.
 */
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;

	if (geo->start > sz) {
		DMWARN("Start sector is beyond the geometry limits.");
		return -EINVAL;
	}

	md->geometry = *geo;

	return 0;
}

static int __noflush_suspending(struct mapped_device *md)
{
	return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
}

/*
 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necessary.
 */
static void dec_pending(struct dm_io *io, blk_status_t error)
{
	unsigned long flags;
	blk_status_t io_error;
	struct bio *bio;
	struct mapped_device *md = io->md;

	/* Push-back supersedes any I/O errors */
	if (unlikely(error)) {
		spin_lock_irqsave(&io->endio_lock, flags);
		if (!(io->status == BLK_STS_DM_REQUEUE && __noflush_suspending(md)))
			io->status = error;
		spin_unlock_irqrestore(&io->endio_lock, flags);
	}

	if (atomic_dec_and_test(&io->io_count)) {
		if (io->status == BLK_STS_DM_REQUEUE) {
			/*
			 * Target requested pushing back the I/O.
			 */
			spin_lock_irqsave(&md->deferred_lock, flags);
			if (__noflush_suspending(md))
				/* NOTE early return due to BLK_STS_DM_REQUEUE below */
				bio_list_add_head(&md->deferred, io->orig_bio);
			else
				/* noflush suspend was interrupted. */
				io->status = BLK_STS_IOERR;
			spin_unlock_irqrestore(&md->deferred_lock, flags);
		}

		io_error = io->status;
		bio = io->orig_bio;
		end_io_acct(io);
		free_io(md, io);

		if (io_error == BLK_STS_DM_REQUEUE)
			return;

		if ((bio->bi_opf & REQ_PREFLUSH) && bio->bi_iter.bi_size) {
			/*
			 * Preflush done for flush with data, reissue
			 * without REQ_PREFLUSH.
			 */
			bio->bi_opf &= ~REQ_PREFLUSH;
			queue_io(md, bio);
		} else {
			/* done with normal IO or empty flush */
			if (io_error)
				bio->bi_status = io_error;
			bio_endio(bio);
		}
	}
}

void disable_discard(struct mapped_device *md)
{
	struct queue_limits *limits = dm_get_queue_limits(md);

	/* device doesn't really support DISCARD, disable it */
	limits->max_discard_sectors = 0;
	blk_queue_flag_clear(QUEUE_FLAG_DISCARD, md->queue);
}

void disable_write_same(struct mapped_device *md)
{
	struct queue_limits *limits = dm_get_queue_limits(md);

	/* device doesn't really support WRITE SAME, disable it */
	limits->max_write_same_sectors = 0;
}

void disable_write_zeroes(struct mapped_device *md)
{
	struct queue_limits *limits = dm_get_queue_limits(md);

	/* device doesn't really support WRITE ZEROES, disable it */
	limits->max_write_zeroes_sectors = 0;
}

static void clone_endio(struct bio *bio)
{
	blk_status_t error = bio->bi_status;
	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
	struct dm_io *io = tio->io;
	struct mapped_device *md = tio->io->md;
	dm_endio_fn endio = tio->ti->type->end_io;

	if (unlikely(error == BLK_STS_TARGET) && md->type != DM_TYPE_NVME_BIO_BASED) {
		if (bio_op(bio) == REQ_OP_DISCARD &&
		    !bio->bi_disk->queue->limits.max_discard_sectors)
			disable_discard(md);
		else if (bio_op(bio) == REQ_OP_WRITE_SAME &&
			 !bio->bi_disk->queue->limits.max_write_same_sectors)
			disable_write_same(md);
		else if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
			 !bio->bi_disk->queue->limits.max_write_zeroes_sectors)
			disable_write_zeroes(md);
	}

	if (endio) {
		int r = endio(tio->ti, bio, &error);
		switch (r) {
		case DM_ENDIO_REQUEUE:
			error = BLK_STS_DM_REQUEUE;
			/*FALLTHRU*/
		case DM_ENDIO_DONE:
			break;
		case DM_ENDIO_INCOMPLETE:
			/* The target will handle the io */
			return;
		default:
			DMWARN("unimplemented target endio return value: %d", r);
			BUG();
		}
	}

	free_tio(tio);
	dec_pending(io, error);
}

/*
 * Return maximum size of I/O possible at the supplied sector up to the current
 * target boundary.
 */
static sector_t max_io_len_target_boundary(sector_t sector, struct dm_target *ti)
{
	sector_t target_offset = dm_target_offset(ti, sector);

	return ti->len - target_offset;
}

static sector_t max_io_len(sector_t sector, struct dm_target *ti)
{
	sector_t len = max_io_len_target_boundary(sector, ti);
	sector_t offset, max_len;

	/*
	 * Does the target need to split even further?
	 */
	if (ti->max_io_len) {
		offset = dm_target_offset(ti, sector);
		if (unlikely(ti->max_io_len & (ti->max_io_len - 1)))
			max_len = sector_div(offset, ti->max_io_len);
		else
			max_len = offset & (ti->max_io_len - 1);
		max_len = ti->max_io_len - max_len;

		if (len > max_len)
			len = max_len;
	}

	return len;
}
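
/*
 * Worked example (illustrative numbers): with ti->max_io_len == 1024 (a
 * power of two) and an I/O at target offset 1000, max_len becomes
 * 1024 - (1000 & 1023) = 24 sectors, so the I/O is clipped to at most 24
 * sectors and never straddles a max_io_len boundary.
 */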

int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
{
	if (len > UINT_MAX) {
		DMERR("Specified maximum size of target IO (%llu) exceeds limit (%u)",
		      (unsigned long long)len, UINT_MAX);
		ti->error = "Maximum size of target IO is too large";
		return -EINVAL;
	}

	ti->max_io_len = (uint32_t) len;

	return 0;
}
EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);

static struct dm_target *dm_dax_get_live_target(struct mapped_device *md,
						sector_t sector, int *srcu_idx)
	__acquires(md->io_barrier)
{
	struct dm_table *map;
	struct dm_target *ti;

	map = dm_get_live_table(md, srcu_idx);
	if (!map)
		return NULL;

	ti = dm_table_find_target(map, sector);
	if (!ti)
		return NULL;

	return ti;
}

static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
				 long nr_pages, void **kaddr, pfn_t *pfn)
{
	struct mapped_device *md = dax_get_private(dax_dev);
	sector_t sector = pgoff * PAGE_SECTORS;
	struct dm_target *ti;
	long len, ret = -EIO;
	int srcu_idx;

	ti = dm_dax_get_live_target(md, sector, &srcu_idx);

	if (!ti)
		goto out;
	if (!ti->type->direct_access)
		goto out;
	len = max_io_len(sector, ti) / PAGE_SECTORS;
	if (len < 1)
		goto out;
	nr_pages = min(len, nr_pages);
	ret = ti->type->direct_access(ti, pgoff, nr_pages, kaddr, pfn);

 out:
	dm_put_live_table(md, srcu_idx);

	return ret;
}

static bool dm_dax_supported(struct dax_device *dax_dev, struct block_device *bdev,
			     int blocksize, sector_t start, sector_t len)
{
	struct mapped_device *md = dax_get_private(dax_dev);
	struct dm_table *map;
	int srcu_idx;
	bool ret;

	map = dm_get_live_table(md, &srcu_idx);
	if (!map)
		return false;

	ret = dm_table_supports_dax(map, device_supports_dax, &blocksize);

	dm_put_live_table(md, srcu_idx);

	return ret;
}

static size_t dm_dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
				    void *addr, size_t bytes, struct iov_iter *i)
{
	struct mapped_device *md = dax_get_private(dax_dev);
	sector_t sector = pgoff * PAGE_SECTORS;
	struct dm_target *ti;
	long ret = 0;
	int srcu_idx;

	ti = dm_dax_get_live_target(md, sector, &srcu_idx);

	if (!ti)
		goto out;
	if (!ti->type->dax_copy_from_iter) {
		ret = copy_from_iter(addr, bytes, i);
		goto out;
	}
	ret = ti->type->dax_copy_from_iter(ti, pgoff, addr, bytes, i);
 out:
	dm_put_live_table(md, srcu_idx);

	return ret;
}

static size_t dm_dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i)
{
	struct mapped_device *md = dax_get_private(dax_dev);
	sector_t sector = pgoff * PAGE_SECTORS;
	struct dm_target *ti;
	long ret = 0;
	int srcu_idx;

	ti = dm_dax_get_live_target(md, sector, &srcu_idx);

	if (!ti)
		goto out;
	if (!ti->type->dax_copy_to_iter) {
		ret = copy_to_iter(addr, bytes, i);
		goto out;
	}
	ret = ti->type->dax_copy_to_iter(ti, pgoff, addr, bytes, i);
 out:
	dm_put_live_table(md, srcu_idx);

	return ret;
}

static int dm_dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
				  size_t nr_pages)
{
	struct mapped_device *md = dax_get_private(dax_dev);
	sector_t sector = pgoff * PAGE_SECTORS;
	struct dm_target *ti;
	int ret = -EIO;
	int srcu_idx;

	ti = dm_dax_get_live_target(md, sector, &srcu_idx);

	if (!ti)
		goto out;
	if (WARN_ON(!ti->type->dax_zero_page_range)) {
		/*
		 * ->zero_page_range() is a mandatory dax operation.  If we
		 * are here, something is wrong.  The common exit path below
		 * drops the live table reference, so don't unlock here too.
		 */
		goto out;
	}
	ret = ti->type->dax_zero_page_range(ti, pgoff, nr_pages);

 out:
	dm_put_live_table(md, srcu_idx);

	return ret;
}

/*
 * A target may call dm_accept_partial_bio only from the map routine.  It is
 * allowed for all bio types except REQ_PREFLUSH, REQ_OP_ZONE_RESET,
 * REQ_OP_ZONE_OPEN, REQ_OP_ZONE_CLOSE and REQ_OP_ZONE_FINISH.
 *
 * dm_accept_partial_bio informs the dm that the target only wants to process
 * additional n_sectors sectors of the bio and the rest of the data should be
 * sent in a subsequent bio.
 *
 * A diagram that explains the arithmetic:
 * +--------------------+---------------+-------+
 * |         1          |       2       |   3   |
 * +--------------------+---------------+-------+
 *
 * <-------------- *tio->len_ptr --------------->
 *                      <------- bi_size ------->
 *                      <-- n_sectors -->
 *
 * Region 1 was already iterated over with bio_advance or similar function.
 *	(it may be empty if the target doesn't use bio_advance)
 * Region 2 is the remaining bio size that the target wants to process.
 *	(it may be empty if region 1 is non-empty, although there is no reason
 *	 to make it empty)
 * The target requires that region 3 is to be sent in the next bio.
 *
 * If the target wants to receive multiple copies of the bio (via num_*bios, etc),
 * the partially processed part (the sum of regions 1+2) must be the same for all
 * copies of the bio.
 */
void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
{
	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
	unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT;
	BUG_ON(bio->bi_opf & REQ_PREFLUSH);
	BUG_ON(bi_size > *tio->len_ptr);
	BUG_ON(n_sectors > bi_size);
	*tio->len_ptr -= bi_size - n_sectors;
	bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT;
}
EXPORT_SYMBOL_GPL(dm_accept_partial_bio);
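
/*
 * Example (illustrative sketch, not dm core code): a target's ->map method
 * that can only process I/O up to some internal boundary might do:
 *
 *	static int my_map(struct dm_target *ti, struct bio *bio)
 *	{
 *		unsigned n_sectors = my_sectors_to_boundary(ti, bio);
 *
 *		if (bio_sectors(bio) > n_sectors)
 *			dm_accept_partial_bio(bio, n_sectors);
 *		... remap bio->bi_iter.bi_sector to the backing device ...
 *		return DM_MAPIO_REMAPPED;
 *	}
 *
 * dm core then resubmits the remainder (region 3 in the diagram above) as
 * a new bio.  my_map() and my_sectors_to_boundary() are hypothetical names.
 */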

static blk_qc_t __map_bio(struct dm_target_io *tio)
{
	int r;
	sector_t sector;
	struct bio *clone = &tio->clone;
	struct dm_io *io = tio->io;
	struct mapped_device *md = io->md;
	struct dm_target *ti = tio->ti;
	blk_qc_t ret = BLK_QC_T_NONE;

	clone->bi_end_io = clone_endio;

	/*
	 * Map the clone.  If r == 0 we don't need to do
	 * anything, the target has assumed ownership of
	 * this io.
	 */
	atomic_inc(&io->io_count);
	sector = clone->bi_iter.bi_sector;

	r = ti->type->map(ti, clone);
	switch (r) {
	case DM_MAPIO_SUBMITTED:
		break;
	case DM_MAPIO_REMAPPED:
		/* the bio has been remapped so dispatch it */
		trace_block_bio_remap(clone->bi_disk->queue, clone,
				      bio_dev(io->orig_bio), sector);
		if (md->type == DM_TYPE_NVME_BIO_BASED)
			ret = direct_make_request(clone);
		else
			ret = generic_make_request(clone);
		break;
	case DM_MAPIO_KILL:
		free_tio(tio);
		dec_pending(io, BLK_STS_IOERR);
		break;
	case DM_MAPIO_REQUEUE:
		free_tio(tio);
		dec_pending(io, BLK_STS_DM_REQUEUE);
		break;
	default:
		DMWARN("unimplemented target map return value: %d", r);
		BUG();
	}

	return ret;
}

static void bio_setup_sector(struct bio *bio, sector_t sector, unsigned len)
{
	bio->bi_iter.bi_sector = sector;
	bio->bi_iter.bi_size = to_bytes(len);
}

/*
 * Creates a bio that consists of a range of complete bvecs.
 */
Mike Snitzerc80914e2016-03-02 12:33:03 -05001330static int clone_bio(struct dm_target_io *tio, struct bio *bio,
1331 sector_t sector, unsigned len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001332{
Mikulas Patockadba14162012-10-12 21:02:15 +01001333 struct bio *clone = &tio->clone;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001334
Kent Overstreet1c3b13e2013-10-29 17:17:49 -07001335 __bio_clone_fast(clone, bio);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001336
Mike Snitzer57c36512019-01-16 18:53:26 -05001337 if (bio_integrity(bio)) {
Mikulas Patockae2460f22017-04-18 16:51:48 -04001338 int r;
1339
1340 if (unlikely(!dm_target_has_integrity(tio->ti->type) &&
1341 !dm_target_passes_integrity(tio->ti->type))) {
1342 DMWARN("%s: the target %s doesn't support integrity data.",
1343 dm_device_name(tio->io->md),
1344 tio->ti->type->name);
1345 return -EIO;
1346 }
1347
1348 r = bio_integrity_clone(clone, bio, GFP_NOIO);
Mike Snitzerc80914e2016-03-02 12:33:03 -05001349 if (r < 0)
1350 return r;
1351 }
Kent Overstreet1c3b13e2013-10-29 17:17:49 -07001352
Mike Snitzerfa8db492019-02-05 17:07:58 -05001353 bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector));
1354 clone->bi_iter.bi_size = to_bytes(len);
1355
1356 if (bio_integrity(bio))
1357 bio_integrity_trim(clone);
Mike Snitzerc80914e2016-03-02 12:33:03 -05001358
1359 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001360}
1361
Mike Snitzer318716d2017-11-22 14:56:12 -05001362static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci,
1363 struct dm_target *ti, unsigned num_bios)
Mikulas Patockaf9ab94c2009-06-22 10:12:20 +01001364{
Mikulas Patockadba14162012-10-12 21:02:15 +01001365 struct dm_target_io *tio;
Mike Snitzer318716d2017-11-22 14:56:12 -05001366 int try;
Mikulas Patockadba14162012-10-12 21:02:15 +01001367
Mike Snitzer318716d2017-11-22 14:56:12 -05001368 if (!num_bios)
1369 return;
Mikulas Patockaf9ab94c2009-06-22 10:12:20 +01001370
Mike Snitzer318716d2017-11-22 14:56:12 -05001371 if (num_bios == 1) {
1372 tio = alloc_tio(ci, ti, 0, GFP_NOIO);
1373 bio_list_add(blist, &tio->clone);
1374 return;
1375 }
Alasdair G Kergon9015df22009-06-22 10:12:21 +01001376
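	/*
	 * First try to allocate all the clones without sleeping; if that
	 * fails, serialize on table_devices_lock and retry with GFP_NOIO,
	 * so only one thread at a time sleeps on the shared bioset while
	 * holding bios it already allocated from it.
	 */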
	for (try = 0; try < 2; try++) {
		int bio_nr;
		struct bio *bio;

		if (try)
			mutex_lock(&ci->io->md->table_devices_lock);
		for (bio_nr = 0; bio_nr < num_bios; bio_nr++) {
			tio = alloc_tio(ci, ti, bio_nr, try ? GFP_NOIO : GFP_NOWAIT);
			if (!tio)
				break;

			bio_list_add(blist, &tio->clone);
		}
		if (try)
			mutex_unlock(&ci->io->md->table_devices_lock);
		if (bio_nr == num_bios)
			return;

		while ((bio = bio_list_pop(blist))) {
			tio = container_of(bio, struct dm_target_io, clone);
			free_tio(tio);
		}
	}
}

static blk_qc_t __clone_and_map_simple_bio(struct clone_info *ci,
					   struct dm_target_io *tio, unsigned *len)
{
	struct bio *clone = &tio->clone;

	tio->len_ptr = len;

	__bio_clone_fast(clone, ci->bio);
	if (len)
		bio_setup_sector(clone, ci->sector, *len);

	return __map_bio(tio);
}

static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
				  unsigned num_bios, unsigned *len)
{
	struct bio_list blist = BIO_EMPTY_LIST;
	struct bio *bio;
	struct dm_target_io *tio;

	alloc_multiple_bios(&blist, ci, ti, num_bios);

	while ((bio = bio_list_pop(&blist))) {
		tio = container_of(bio, struct dm_target_io, clone);
		(void) __clone_and_map_simple_bio(ci, tio, len);
	}
}

static int __send_empty_flush(struct clone_info *ci)
{
	unsigned target_nr = 0;
	struct dm_target *ti;

	/*
	 * Empty flush uses a statically initialized bio, as the base for
	 * cloning.  However, blkg association requires that a bdev is
	 * associated with a gendisk, which doesn't happen until the bdev is
	 * opened.  So, blkg association is done at issue time of the flush
	 * rather than when the device is created in alloc_dev().
	 */
	bio_set_dev(ci->bio, ci->io->md->bdev);

	BUG_ON(bio_has_data(ci->bio));
	while ((ti = dm_table_get_target(ci->map, target_nr++)))
		__send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL);

	bio_disassociate_blkg(ci->bio);

	return 0;
}

static int __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti,
				    sector_t sector, unsigned *len)
{
	struct bio *bio = ci->bio;
	struct dm_target_io *tio;
	int r;

	tio = alloc_tio(ci, ti, 0, GFP_NOIO);
	tio->len_ptr = len;
	r = clone_bio(tio, bio, sector, *len);
	if (r < 0) {
		free_tio(tio);
		return r;
	}
	(void) __map_bio(tio);

	return 0;
}

typedef unsigned (*get_num_bios_fn)(struct dm_target *ti);

static unsigned get_num_discard_bios(struct dm_target *ti)
{
	return ti->num_discard_bios;
}

static unsigned get_num_secure_erase_bios(struct dm_target *ti)
{
	return ti->num_secure_erase_bios;
}

static unsigned get_num_write_same_bios(struct dm_target *ti)
{
	return ti->num_write_same_bios;
}

static unsigned get_num_write_zeroes_bios(struct dm_target *ti)
{
	return ti->num_write_zeroes_bios;
}

static int __send_changing_extent_only(struct clone_info *ci, struct dm_target *ti,
				       unsigned num_bios)
{
	unsigned len;

	/*
	 * Even though the device advertised support for this type of
	 * request, that does not mean every target supports it, and
	 * reconfiguration might also have changed that since the
	 * check was performed.
	 */
	if (!num_bios)
		return -EOPNOTSUPP;

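	/*
	 * Clamp the length to the target boundary: a single abnormal IO
	 * must never span more than one target.
	 */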
	len = min((sector_t)ci->sector_count, max_io_len_target_boundary(ci->sector, ti));

	__send_duplicate_bios(ci, ti, num_bios, &len);

	ci->sector += len;
	ci->sector_count -= len;

	return 0;
}

static int __send_discard(struct clone_info *ci, struct dm_target *ti)
{
	return __send_changing_extent_only(ci, ti, get_num_discard_bios(ti));
}

static int __send_secure_erase(struct clone_info *ci, struct dm_target *ti)
{
	return __send_changing_extent_only(ci, ti, get_num_secure_erase_bios(ti));
}

static int __send_write_same(struct clone_info *ci, struct dm_target *ti)
{
	return __send_changing_extent_only(ci, ti, get_num_write_same_bios(ti));
}

static int __send_write_zeroes(struct clone_info *ci, struct dm_target *ti)
{
	return __send_changing_extent_only(ci, ti, get_num_write_zeroes_bios(ti));
}

static bool is_abnormal_io(struct bio *bio)
{
	bool r = false;

	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
	case REQ_OP_WRITE_SAME:
	case REQ_OP_WRITE_ZEROES:
		r = true;
		break;
	}

	return r;
}

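/*
 * Route an abnormal IO to the matching __send_* helper, storing its
 * return code in *result.  Returns false if the bio is a normal
 * read/write that must be cloned and mapped instead.
 */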
static bool __process_abnormal_io(struct clone_info *ci, struct dm_target *ti,
				  int *result)
{
	struct bio *bio = ci->bio;

	if (bio_op(bio) == REQ_OP_DISCARD)
		*result = __send_discard(ci, ti);
	else if (bio_op(bio) == REQ_OP_SECURE_ERASE)
		*result = __send_secure_erase(ci, ti);
	else if (bio_op(bio) == REQ_OP_WRITE_SAME)
		*result = __send_write_same(ci, ti);
	else if (bio_op(bio) == REQ_OP_WRITE_ZEROES)
		*result = __send_write_zeroes(ci, ti);
	else
		return false;

	return true;
}

/*
 * Select the correct strategy for processing a non-flush bio.
 */
static int __split_and_process_non_flush(struct clone_info *ci)
{
	struct dm_target *ti;
	unsigned len;
	int r;

	ti = dm_table_find_target(ci->map, ci->sector);
	if (!ti)
		return -EIO;

	if (__process_abnormal_io(ci, ti, &r))
		return r;

	len = min_t(sector_t, max_io_len(ci->sector, ti), ci->sector_count);

	r = __clone_and_map_data_bio(ci, ti, ci->sector, &len);
	if (r < 0)
		return r;

	ci->sector += len;
	ci->sector_count -= len;

	return 0;
}

static void init_clone_info(struct clone_info *ci, struct mapped_device *md,
			    struct dm_table *map, struct bio *bio)
{
	ci->map = map;
	ci->io = alloc_io(md, bio);
	ci->sector = bio->bi_iter.bi_sector;
}

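/*
 * Subtract @subnd from a per-cpu part_stat counter.  Used below to
 * un-account the tail of a split bio that is handed back to the block
 * layer for resubmission.
 */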
#define __dm_part_stat_sub(part, field, subnd) \
	(part_stat_get(part, field) -= (subnd))

/*
 * Entry point to split a bio into clones and submit them to the targets.
 */
static blk_qc_t __split_and_process_bio(struct mapped_device *md,
					struct dm_table *map, struct bio *bio)
{
	struct clone_info ci;
	blk_qc_t ret = BLK_QC_T_NONE;
	int error = 0;

	init_clone_info(&ci, md, map, bio);

	if (bio->bi_opf & REQ_PREFLUSH) {
		struct bio flush_bio;

		/*
		 * Use an on-stack bio for this, it's safe since we don't
		 * need to reference it after submit. It's just used as
		 * the basis for the clone(s).
		 */
		bio_init(&flush_bio, NULL, 0);
		flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;
		ci.bio = &flush_bio;
		ci.sector_count = 0;
		error = __send_empty_flush(&ci);
		/* dec_pending submits any data associated with flush */
	} else if (op_is_zone_mgmt(bio_op(bio))) {
		ci.bio = bio;
		ci.sector_count = 0;
		error = __split_and_process_non_flush(&ci);
	} else {
		ci.bio = bio;
		ci.sector_count = bio_sectors(bio);
		while (ci.sector_count && !error) {
			error = __split_and_process_non_flush(&ci);
			if (current->bio_list && ci.sector_count && !error) {
				/*
				 * The remainder must be passed to generic_make_request()
				 * so that it gets handled *after* bios already submitted
				 * have been completely processed.
				 * We take a clone of the original to store in
				 * ci.io->orig_bio, to be used by end_io_acct() and by
				 * dec_pending() for completion handling.
				 */
				struct bio *b = bio_split(bio, bio_sectors(bio) - ci.sector_count,
							  GFP_NOIO, &md->queue->bio_split);
				ci.io->orig_bio = b;

				/*
				 * Adjust IO stats for each split, otherwise upon queue
				 * reentry there will be redundant IO accounting.
				 * NOTE: this is a stop-gap fix, a proper fix involves
				 * significant refactoring of DM core's bio splitting
				 * (by eliminating DM's splitting and just using bio_split).
				 */
				part_stat_lock();
				__dm_part_stat_sub(&dm_disk(md)->part0,
						   sectors[op_stat_group(bio_op(bio))], ci.sector_count);
				part_stat_unlock();

				bio_chain(b, bio);
				trace_block_split(md->queue, b, bio->bi_iter.bi_sector);
				ret = generic_make_request(bio);
				break;
			}
		}
	}

	/* drop the extra reference count */
	dec_pending(ci.io, errno_to_blk_status(error));
	return ret;
}

/*
 * Optimized variant of __split_and_process_bio that leverages the
 * fact that targets that use it do _not_ have a need to split bios.
 */
static blk_qc_t __process_bio(struct mapped_device *md, struct dm_table *map,
			      struct bio *bio, struct dm_target *ti)
{
	struct clone_info ci;
	blk_qc_t ret = BLK_QC_T_NONE;
	int error = 0;

	init_clone_info(&ci, md, map, bio);

	if (bio->bi_opf & REQ_PREFLUSH) {
		struct bio flush_bio;

		/*
		 * Use an on-stack bio for this, it's safe since we don't
		 * need to reference it after submit. It's just used as
		 * the basis for the clone(s).
		 */
		bio_init(&flush_bio, NULL, 0);
		flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;
		ci.bio = &flush_bio;
		ci.sector_count = 0;
		error = __send_empty_flush(&ci);
		/* dec_pending submits any data associated with flush */
	} else {
		struct dm_target_io *tio;

		ci.bio = bio;
		ci.sector_count = bio_sectors(bio);
		if (__process_abnormal_io(&ci, ti, &error))
			goto out;

		tio = alloc_tio(&ci, ti, 0, GFP_NOIO);
		ret = __clone_and_map_simple_bio(&ci, tio, NULL);
	}
out:
	/* drop the extra reference count */
	dec_pending(ci.io, errno_to_blk_status(error));
	return ret;
}

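/*
 * Split off the head of *bio that fits within the target, chain the
 * remainder back onto it and resubmit that remainder with
 * generic_make_request() so it is processed later, then continue with
 * the head.
 */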
static void dm_queue_split(struct mapped_device *md, struct dm_target *ti, struct bio **bio)
{
	unsigned len, sector_count;

	sector_count = bio_sectors(*bio);
	len = min_t(sector_t, max_io_len((*bio)->bi_iter.bi_sector, ti), sector_count);

	if (sector_count > len) {
		struct bio *split = bio_split(*bio, len, GFP_NOIO, &md->queue->bio_split);

		bio_chain(split, *bio);
		trace_block_split(md->queue, split, (*bio)->bi_iter.bi_sector);
		generic_make_request(*bio);
		*bio = split;
	}
}

static blk_qc_t dm_process_bio(struct mapped_device *md,
			       struct dm_table *map, struct bio *bio)
{
	blk_qc_t ret = BLK_QC_T_NONE;
	struct dm_target *ti = md->immutable_target;

	if (unlikely(!map)) {
		bio_io_error(bio);
		return ret;
	}

	if (!ti) {
		ti = dm_table_find_target(map, bio->bi_iter.bi_sector);
		if (unlikely(!ti)) {
			bio_io_error(bio);
			return ret;
		}
	}

	/*
	 * If we are running in ->make_request_fn, we need to use
	 * blk_queue_split() for abnormal IO (e.g. discard, writesame, etc),
	 * otherwise the queue_limits for those requests won't be imposed.
	 */
	if (current->bio_list) {
		if (is_abnormal_io(bio))
			blk_queue_split(md->queue, &bio);
		else
			dm_queue_split(md, ti, &bio);
	}

	if (dm_get_md_type(md) == DM_TYPE_NVME_BIO_BASED)
		return __process_bio(md, map, bio, ti);
	else
		return __split_and_process_bio(md, map, bio);
}

static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio)
{
	struct mapped_device *md = q->queuedata;
	blk_qc_t ret = BLK_QC_T_NONE;
	int srcu_idx;
	struct dm_table *map;

	map = dm_get_live_table(md, &srcu_idx);

	/* if we're suspended, we have to queue this io for later */
	if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) {
		dm_put_live_table(md, srcu_idx);

		if (!(bio->bi_opf & REQ_RAHEAD))
			queue_io(md, bio);
		else
			bio_io_error(bio);
		return ret;
	}

	ret = dm_process_bio(md, map, bio);

	dm_put_live_table(md, srcu_idx);
	return ret;
}

static int dm_any_congested(void *congested_data, int bdi_bits)
{
	int r = bdi_bits;
	struct mapped_device *md = congested_data;
	struct dm_table *map;

	if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
		if (dm_request_based(md)) {
			/*
			 * With request-based DM we only need to check the
			 * top-level queue for congestion.
			 */
			struct backing_dev_info *bdi = md->queue->backing_dev_info;
			r = bdi->wb.congested->state & bdi_bits;
		} else {
			map = dm_get_live_table_fast(md);
			if (map)
				r = dm_table_any_congested(map, bdi_bits);
			dm_put_live_table_fast(md);
		}
	}

	return r;
}

/*-----------------------------------------------------------------
 * An IDR is used to keep track of allocated minor numbers.
 *---------------------------------------------------------------*/
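/*
 * A minor's slot holds the MINOR_ALLOCED sentinel from the moment it is
 * reserved until alloc_dev() publishes the real mapped_device with
 * idr_replace(), so lookups never see a half-constructed device.
 */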
static void free_minor(int minor)
{
	spin_lock(&_minor_lock);
	idr_remove(&_minor_idr, minor);
	spin_unlock(&_minor_lock);
}

/*
 * See if the device with a specific minor # is free.
 */
static int specific_minor(int minor)
{
	int r;

	if (minor >= (1 << MINORBITS))
		return -EINVAL;

	idr_preload(GFP_KERNEL);
	spin_lock(&_minor_lock);

	r = idr_alloc(&_minor_idr, MINOR_ALLOCED, minor, minor + 1, GFP_NOWAIT);

	spin_unlock(&_minor_lock);
	idr_preload_end();
	if (r < 0)
		return r == -ENOSPC ? -EBUSY : r;
	return 0;
}

static int next_free_minor(int *minor)
{
	int r;

	idr_preload(GFP_KERNEL);
	spin_lock(&_minor_lock);

	r = idr_alloc(&_minor_idr, MINOR_ALLOCED, 0, 1 << MINORBITS, GFP_NOWAIT);

	spin_unlock(&_minor_lock);
	idr_preload_end();
	if (r < 0)
		return r;
	*minor = r;
	return 0;
}

static const struct block_device_operations dm_blk_dops;
static const struct dax_operations dm_dax_ops;

static void dm_wq_work(struct work_struct *work);

static void cleanup_mapped_device(struct mapped_device *md)
{
	if (md->wq)
		destroy_workqueue(md->wq);
	bioset_exit(&md->bs);
	bioset_exit(&md->io_bs);

	if (md->dax_dev) {
		kill_dax(md->dax_dev);
		put_dax(md->dax_dev);
		md->dax_dev = NULL;
	}

	if (md->disk) {
		spin_lock(&_minor_lock);
		md->disk->private_data = NULL;
		spin_unlock(&_minor_lock);
		del_gendisk(md->disk);
		put_disk(md->disk);
	}

	if (md->queue)
		blk_cleanup_queue(md->queue);

	cleanup_srcu_struct(&md->io_barrier);

	if (md->bdev) {
		bdput(md->bdev);
		md->bdev = NULL;
	}

	mutex_destroy(&md->suspend_lock);
	mutex_destroy(&md->type_lock);
	mutex_destroy(&md->table_devices_lock);

	dm_mq_cleanup_mapped_device(md);
}

/*
 * Allocate and initialise a blank device with a given minor.
 */
static struct mapped_device *alloc_dev(int minor)
{
	int r, numa_node_id = dm_get_numa_node();
	struct mapped_device *md;
	void *old_md;

	md = kvzalloc_node(sizeof(*md), GFP_KERNEL, numa_node_id);
	if (!md) {
		DMWARN("unable to allocate device, out of memory.");
		return NULL;
	}

	if (!try_module_get(THIS_MODULE))
		goto bad_module_get;

	/* get a minor number for the dev */
	if (minor == DM_ANY_MINOR)
		r = next_free_minor(&minor);
	else
		r = specific_minor(minor);
	if (r < 0)
		goto bad_minor;

	r = init_srcu_struct(&md->io_barrier);
	if (r < 0)
		goto bad_io_barrier;

	md->numa_node_id = numa_node_id;
	md->init_tio_pdu = false;
	md->type = DM_TYPE_NONE;
	mutex_init(&md->suspend_lock);
	mutex_init(&md->type_lock);
	mutex_init(&md->table_devices_lock);
	spin_lock_init(&md->deferred_lock);
	atomic_set(&md->holders, 1);
	atomic_set(&md->open_count, 0);
	atomic_set(&md->event_nr, 0);
	atomic_set(&md->uevent_seq, 0);
	INIT_LIST_HEAD(&md->uevent_list);
	INIT_LIST_HEAD(&md->table_devices);
	spin_lock_init(&md->uevent_lock);

	/*
	 * Default to a bio-based ->make_request_fn until the DM table is
	 * loaded and md->type is established.  If a request-based table is
	 * then loaded, blk-mq will override this accordingly.
	 */
	md->queue = blk_alloc_queue(dm_make_request, numa_node_id);
	if (!md->queue)
		goto bad;
	md->queue->queuedata = md;

	md->disk = alloc_disk_node(1, md->numa_node_id);
	if (!md->disk)
		goto bad;

	init_waitqueue_head(&md->wait);
	INIT_WORK(&md->work, dm_wq_work);
	init_waitqueue_head(&md->eventq);
	init_completion(&md->kobj_holder.completion);

	md->disk->major = _major;
	md->disk->first_minor = minor;
	md->disk->fops = &dm_blk_dops;
	md->disk->queue = md->queue;
	md->disk->private_data = md;
	sprintf(md->disk->disk_name, "dm-%d", minor);

	if (IS_ENABLED(CONFIG_DAX_DRIVER)) {
		md->dax_dev = alloc_dax(md, md->disk->disk_name,
					&dm_dax_ops, 0);
		if (IS_ERR(md->dax_dev))
			goto bad;
	}

	add_disk_no_queue_reg(md->disk);
	format_dev_t(md->name, MKDEV(_major, minor));

	md->wq = alloc_workqueue("kdmflush", WQ_MEM_RECLAIM, 0);
	if (!md->wq)
		goto bad;

	md->bdev = bdget_disk(md->disk, 0);
	if (!md->bdev)
		goto bad;

	dm_stats_init(&md->stats);

	/* Populate the mapping, nobody knows we exist yet */
	spin_lock(&_minor_lock);
	old_md = idr_replace(&_minor_idr, md, minor);
	spin_unlock(&_minor_lock);

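	/*
	 * The slot must still have held the MINOR_ALLOCED sentinel placed
	 * there by specific_minor() or next_free_minor().
	 */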
	BUG_ON(old_md != MINOR_ALLOCED);

	return md;

bad:
	cleanup_mapped_device(md);
bad_io_barrier:
	free_minor(minor);
bad_minor:
	module_put(THIS_MODULE);
bad_module_get:
	kvfree(md);
	return NULL;
}

static void unlock_fs(struct mapped_device *md);

static void free_dev(struct mapped_device *md)
{
	int minor = MINOR(disk_devt(md->disk));

	unlock_fs(md);

	cleanup_mapped_device(md);

	free_table_devices(&md->table_devices);
	dm_stats_cleanup(&md->stats);
	free_minor(minor);

	module_put(THIS_MODULE);
	kvfree(md);
}

static int __bind_mempools(struct mapped_device *md, struct dm_table *t)
{
	struct dm_md_mempools *p = dm_table_get_md_mempools(t);
	int ret = 0;

	if (dm_table_bio_based(t)) {
		/*
		 * The md may already have mempools that need changing.
		 * If so, reload the bioset because front_pad may have
		 * changed when a different table was loaded.
		 */
		bioset_exit(&md->bs);
		bioset_exit(&md->io_bs);

	} else if (bioset_initialized(&md->bs)) {
		/*
		 * There's no need to reload with request-based dm
		 * because the size of front_pad doesn't change.
		 * Note for future: if you ever reload the bioset,
		 * prep-ed requests in the queue may refer to bios
		 * from the old bioset, so you must walk through the
		 * queue to unprep.
		 */
		goto out;
	}

	BUG_ON(!p ||
	       bioset_initialized(&md->bs) ||
	       bioset_initialized(&md->io_bs));

	ret = bioset_init_from_src(&md->bs, &p->bs);
	if (ret)
		goto out;
	ret = bioset_init_from_src(&md->io_bs, &p->io_bs);
	if (ret)
		bioset_exit(&md->bs);
out:
	/* mempool bind completed, no longer need any mempools in the table */
	dm_table_free_md_mempools(t);
	return ret;
}

/*
 * Bind a table to the device.
 */
static void event_callback(void *context)
{
	unsigned long flags;
	LIST_HEAD(uevents);
	struct mapped_device *md = (struct mapped_device *) context;

	spin_lock_irqsave(&md->uevent_lock, flags);
	list_splice_init(&md->uevent_list, &uevents);
	spin_unlock_irqrestore(&md->uevent_lock, flags);

	dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);

	atomic_inc(&md->event_nr);
	wake_up(&md->eventq);
	dm_issue_global_event();
}

/*
 * Protected by md->suspend_lock obtained by dm_swap_table().
 */
static void __set_size(struct mapped_device *md, sector_t size)
{
	lockdep_assert_held(&md->suspend_lock);

	set_capacity(md->disk, size);

	i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
}

/*
 * Returns old map, which caller must destroy.
 */
static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
			       struct queue_limits *limits)
{
	struct dm_table *old_map;
	struct request_queue *q = md->queue;
	bool request_based = dm_table_request_based(t);
	sector_t size;
	int ret;

	lockdep_assert_held(&md->suspend_lock);

	size = dm_table_get_size(t);

	/*
	 * Wipe any geometry if the size of the table changed.
	 */
	if (size != dm_get_size(md))
		memset(&md->geometry, 0, sizeof(md->geometry));

	__set_size(md, size);

	dm_table_event_callback(t, event_callback, md);

	/*
	 * The queue hasn't been stopped yet if the old table type wasn't
	 * request-based during suspension, so stop it now to prevent
	 * I/O mapping before resume.
	 * This must be done before setting the queue restrictions,
	 * because request-based dm may be run just after the setting.
	 */
	if (request_based)
		dm_stop_queue(q);

	if (request_based || md->type == DM_TYPE_NVME_BIO_BASED) {
		/*
		 * Leverage the fact that request-based DM targets and
		 * NVMe bio based targets are immutable singletons
		 * - used to optimize both dm_request_fn and dm_mq_queue_rq
		 *   and __process_bio.
		 */
		md->immutable_target = dm_table_get_immutable_target(t);
	}

	ret = __bind_mempools(md, t);
	if (ret) {
		old_map = ERR_PTR(ret);
		goto out;
	}

	old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
	rcu_assign_pointer(md->map, (void *)t);
	md->immutable_target_type = dm_table_get_immutable_target_type(t);

	dm_table_set_restrictions(t, q, limits);
	if (old_map)
		dm_sync_table(md);

out:
	return old_map;
}

/*
 * Returns unbound table for the caller to free.
 */
static struct dm_table *__unbind(struct mapped_device *md)
{
	struct dm_table *map = rcu_dereference_protected(md->map, 1);

	if (!map)
		return NULL;

	dm_table_event_callback(map, NULL, NULL);
	RCU_INIT_POINTER(md->map, NULL);
	dm_sync_table(md);

	return map;
}

/*
 * Constructor for a new device.
 */
int dm_create(int minor, struct mapped_device **result)
{
	int r;
	struct mapped_device *md;

	md = alloc_dev(minor);
	if (!md)
		return -ENXIO;

	r = dm_sysfs_init(md);
	if (r) {
		free_dev(md);
		return r;
	}

	*result = md;
	return 0;
}

/*
 * Functions to manage md->type.
 * All are required to hold md->type_lock.
 */
void dm_lock_md_type(struct mapped_device *md)
{
	mutex_lock(&md->type_lock);
}

void dm_unlock_md_type(struct mapped_device *md)
{
	mutex_unlock(&md->type_lock);
}

void dm_set_md_type(struct mapped_device *md, enum dm_queue_mode type)
{
	BUG_ON(!mutex_is_locked(&md->type_lock));
	md->type = type;
}

enum dm_queue_mode dm_get_md_type(struct mapped_device *md)
{
	return md->type;
}

struct target_type *dm_get_immutable_target_type(struct mapped_device *md)
{
	return md->immutable_target_type;
}

/*
 * The queue_limits are only valid as long as you have a reference
 * count on 'md'.
 */
struct queue_limits *dm_get_queue_limits(struct mapped_device *md)
{
	BUG_ON(!atomic_read(&md->holders));
	return &md->queue->limits;
}
EXPORT_SYMBOL_GPL(dm_get_queue_limits);

static void dm_init_congested_fn(struct mapped_device *md)
{
	md->queue->backing_dev_info->congested_data = md;
	md->queue->backing_dev_info->congested_fn = dm_any_congested;
}

/*
 * Setup the DM device's queue based on md's type
 */
int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
{
	int r;
	struct queue_limits limits;
	enum dm_queue_mode type = dm_get_md_type(md);

	switch (type) {
	case DM_TYPE_REQUEST_BASED:
		r = dm_mq_init_request_queue(md, t);
		if (r) {
			DMERR("Cannot initialize queue for request-based dm-mq mapped device");
			return r;
		}
		dm_init_congested_fn(md);
		break;
	case DM_TYPE_BIO_BASED:
	case DM_TYPE_DAX_BIO_BASED:
	case DM_TYPE_NVME_BIO_BASED:
		dm_init_congested_fn(md);
		break;
	case DM_TYPE_NONE:
		WARN_ON_ONCE(true);
		break;
	}

	r = dm_calculate_queue_limits(t, &limits);
	if (r) {
		DMERR("Cannot calculate initial queue limits");
		return r;
	}
	dm_table_set_restrictions(t, md->queue, &limits);
	blk_register_queue(md->disk);

	return 0;
}

struct mapped_device *dm_get_md(dev_t dev)
{
	struct mapped_device *md;
	unsigned minor = MINOR(dev);

	if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
		return NULL;

	spin_lock(&_minor_lock);

	md = idr_find(&_minor_idr, minor);
	if (!md || md == MINOR_ALLOCED || (MINOR(disk_devt(dm_disk(md))) != minor) ||
	    test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) {
		md = NULL;
		goto out;
	}
	dm_get(md);
out:
	spin_unlock(&_minor_lock);

	return md;
}
EXPORT_SYMBOL_GPL(dm_get_md);

void *dm_get_mdptr(struct mapped_device *md)
{
	return md->interface_ptr;
}

void dm_set_mdptr(struct mapped_device *md, void *ptr)
{
	md->interface_ptr = ptr;
}

void dm_get(struct mapped_device *md)
{
	atomic_inc(&md->holders);
	BUG_ON(test_bit(DMF_FREEING, &md->flags));
}

int dm_hold(struct mapped_device *md)
{
	spin_lock(&_minor_lock);
	if (test_bit(DMF_FREEING, &md->flags)) {
		spin_unlock(&_minor_lock);
		return -EBUSY;
	}
	dm_get(md);
	spin_unlock(&_minor_lock);
	return 0;
}
EXPORT_SYMBOL_GPL(dm_hold);

const char *dm_device_name(struct mapped_device *md)
{
	return md->name;
}
EXPORT_SYMBOL_GPL(dm_device_name);

static void __dm_destroy(struct mapped_device *md, bool wait)
{
	struct dm_table *map;
	int srcu_idx;

	might_sleep();

	spin_lock(&_minor_lock);
	idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md))));
	set_bit(DMF_FREEING, &md->flags);
	spin_unlock(&_minor_lock);

	blk_set_queue_dying(md->queue);

	/*
	 * Take suspend_lock so that presuspend and postsuspend methods
	 * do not race with internal suspend.
	 */
	mutex_lock(&md->suspend_lock);
	map = dm_get_live_table(md, &srcu_idx);
	if (!dm_suspended_md(md)) {
		dm_table_presuspend_targets(map);
		set_bit(DMF_SUSPENDED, &md->flags);
		dm_table_postsuspend_targets(map);
	}
	/* dm_put_live_table must be before msleep, otherwise deadlock is possible */
	dm_put_live_table(md, srcu_idx);
	mutex_unlock(&md->suspend_lock);

	/*
	 * Rare, but there may be I/O requests still going to complete,
	 * for example.  Wait for all references to disappear.
	 * No one should increment the reference count of the mapped_device
	 * after its state becomes DMF_FREEING.
	 */
	if (wait)
		while (atomic_read(&md->holders))
			msleep(1);
	else if (atomic_read(&md->holders))
		DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)",
		       dm_device_name(md), atomic_read(&md->holders));

	dm_sysfs_exit(md);
	dm_table_destroy(__unbind(md));
	free_dev(md);
}

void dm_destroy(struct mapped_device *md)
{
	__dm_destroy(md, true);
}

void dm_destroy_immediate(struct mapped_device *md)
{
	__dm_destroy(md, false);
}

void dm_put(struct mapped_device *md)
{
	atomic_dec(&md->holders);
}
EXPORT_SYMBOL_GPL(dm_put);

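/*
 * Wait until all in-flight IO against the device has drained
 * (md_in_flight() returns zero).  @task_state controls how the wait
 * sleeps, so callers can make it interruptible or uninterruptible.
 */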
static int dm_wait_for_completion(struct mapped_device *md, long task_state)
{
	int r = 0;
	DEFINE_WAIT(wait);

	while (1) {
		prepare_to_wait(&md->wait, &wait, task_state);

		if (!md_in_flight(md))
			break;

		if (signal_pending_state(task_state, current)) {
			r = -EINTR;
			break;
		}

		io_schedule();
	}
	finish_wait(&md->wait, &wait);

	return r;
}

/*
 * Process the deferred bios
 */
static void dm_wq_work(struct work_struct *work)
{
	struct mapped_device *md = container_of(work, struct mapped_device,
						work);
	struct bio *c;
	int srcu_idx;
	struct dm_table *map;

	map = dm_get_live_table(md, &srcu_idx);

	while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
		spin_lock_irq(&md->deferred_lock);
		c = bio_list_pop(&md->deferred);
		spin_unlock_irq(&md->deferred_lock);

		if (!c)
			break;

		if (dm_request_based(md))
			(void) generic_make_request(c);
		else
			(void) dm_process_bio(md, map, c);
	}

	dm_put_live_table(md, srcu_idx);
}

Mikulas Patocka9a1fb462009-04-02 19:55:36 +01002494static void dm_queue_flush(struct mapped_device *md)
Milan Broz304f3f62008-02-08 02:11:17 +00002495{
Mikulas Patocka3b00b202009-04-09 00:27:15 +01002496 clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
Peter Zijlstra4e857c52014-03-17 18:06:10 +01002497 smp_mb__after_atomic();
Mikulas Patocka53d59142009-04-02 19:55:37 +01002498 queue_work(md->wq, &md->work);
Milan Broz304f3f62008-02-08 02:11:17 +00002499}
2500
Linus Torvalds1da177e2005-04-16 15:20:36 -07002501/*
Alasdair G Kergon042d2a92009-12-10 23:52:24 +00002502 * Swap in a new table, returning the old one for the caller to destroy.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002503 */
Alasdair G Kergon042d2a92009-12-10 23:52:24 +00002504struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002505{
Mike Christie87eb5b22013-03-01 22:45:48 +00002506 struct dm_table *live_map = NULL, *map = ERR_PTR(-EINVAL);
Mike Snitzer754c5fc2009-06-22 10:12:34 +01002507 struct queue_limits limits;
Alasdair G Kergon042d2a92009-12-10 23:52:24 +00002508 int r;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002509
Daniel Walkere61290a2008-02-08 02:10:08 +00002510 mutex_lock(&md->suspend_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002511
2512 /* device must be suspended */
Kiyoshi Ueda4f186f82009-12-10 23:52:26 +00002513 if (!dm_suspended_md(md))
Alasdair G Kergon93c534a2005-07-12 15:53:05 -07002514 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002515
Mike Snitzer3ae70652012-09-26 23:45:45 +01002516 /*
2517 * If the new table has no data devices, retain the existing limits.
2518 * This helps multipath with queue_if_no_path if all paths disappear,
2519 * then new I/O is queued based on these limits, and then some paths
2520 * reappear.
2521 */
2522 if (dm_table_has_no_data_devices(table)) {
Mikulas Patocka83d5e5b2013-07-10 23:41:18 +01002523 live_map = dm_get_live_table_fast(md);
Mike Snitzer3ae70652012-09-26 23:45:45 +01002524 if (live_map)
2525 limits = md->queue->limits;
Mikulas Patocka83d5e5b2013-07-10 23:41:18 +01002526 dm_put_live_table_fast(md);
Mike Snitzer3ae70652012-09-26 23:45:45 +01002527 }
2528
Mike Christie87eb5b22013-03-01 22:45:48 +00002529 if (!live_map) {
2530 r = dm_calculate_queue_limits(table, &limits);
2531 if (r) {
2532 map = ERR_PTR(r);
2533 goto out;
2534 }
Alasdair G Kergon042d2a92009-12-10 23:52:24 +00002535 }
Mike Snitzer754c5fc2009-06-22 10:12:34 +01002536
Alasdair G Kergon042d2a92009-12-10 23:52:24 +00002537 map = __bind(md, table, &limits);
Mikulas Patocka62e08242017-09-20 07:29:49 -04002538 dm_issue_global_event();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002539
Alasdair G Kergon93c534a2005-07-12 15:53:05 -07002540out:
Daniel Walkere61290a2008-02-08 02:10:08 +00002541 mutex_unlock(&md->suspend_lock);
Alasdair G Kergon042d2a92009-12-10 23:52:24 +00002542 return map;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002543}
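
/*
 * Editor's addition -- an illustrative, compiled-out sketch of the
 * expected calling sequence around dm_swap_table(): suspend, swap,
 * destroy the returned old table, resume.  example_replace_table() is a
 * hypothetical caller and error handling is abbreviated.
 */
#if 0
static int example_replace_table(struct mapped_device *md,
				 struct dm_table *new_table)
{
	struct dm_table *old_map;
	int r = dm_suspend(md, DM_SUSPEND_LOCKFS_FLAG);

	if (r)
		return r;
	old_map = dm_swap_table(md, new_table);
	if (IS_ERR(old_map))
		return PTR_ERR(old_map);
	if (old_map)
		dm_table_destroy(old_map);	/* caller destroys the old table */
	return dm_resume(md);
}
#endif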

/*
 * Functions to lock and unlock any filesystem running on the
 * device.
 */
static int lock_fs(struct mapped_device *md)
{
	int r;

	WARN_ON(md->frozen_sb);

	md->frozen_sb = freeze_bdev(md->bdev);
	if (IS_ERR(md->frozen_sb)) {
		r = PTR_ERR(md->frozen_sb);
		md->frozen_sb = NULL;
		return r;
	}

	set_bit(DMF_FROZEN, &md->flags);

	return 0;
}

static void unlock_fs(struct mapped_device *md)
{
	if (!test_bit(DMF_FROZEN, &md->flags))
		return;

	thaw_bdev(md->bdev, md->frozen_sb);
	md->frozen_sb = NULL;
	clear_bit(DMF_FROZEN, &md->flags);
}

/*
 * @suspend_flags: DM_SUSPEND_LOCKFS_FLAG and/or DM_SUSPEND_NOFLUSH_FLAG
 * @task_state: e.g. TASK_INTERRUPTIBLE or TASK_UNINTERRUPTIBLE
 * @dmf_suspended_flag: DMF_SUSPENDED or DMF_SUSPENDED_INTERNALLY
 *
 * If __dm_suspend returns 0, the device is completely quiescent
 * now. There is no request-processing activity. All new requests
 * are being added to md->deferred list.
 */
static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
			unsigned suspend_flags, long task_state,
			int dmf_suspended_flag)
{
	bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG;
	bool noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG;
	int r;

	lockdep_assert_held(&md->suspend_lock);

	/*
	 * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
	 * This flag is cleared before dm_suspend returns.
	 */
	if (noflush)
		set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
	else
		pr_debug("%s: suspending with flush\n", dm_device_name(md));

	/*
	 * This gets reverted if there's an error later and the targets
	 * provide the .presuspend_undo hook.
	 */
	dm_table_presuspend_targets(map);

	/*
	 * Flush I/O to the device.
	 * Any I/O submitted after lock_fs() may not be flushed.
	 * noflush takes precedence over do_lockfs.
	 * (lock_fs() flushes I/Os and waits for them to complete.)
	 */
	if (!noflush && do_lockfs) {
		r = lock_fs(md);
		if (r) {
			dm_table_presuspend_undo_targets(map);
			return r;
		}
	}

	/*
	 * Here we must make sure that no processes are submitting requests
	 * to target drivers, i.e. no one may be executing
	 * __split_and_process_bio. This is called from dm_request and
	 * dm_wq_work.
	 *
	 * To get all processes out of __split_and_process_bio in dm_request,
	 * we take the write lock. To prevent any process from reentering
	 * __split_and_process_bio from dm_request and quiesce the thread
	 * (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND and call
	 * flush_workqueue(md->wq).
	 */
	set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
	if (map)
		synchronize_srcu(&md->io_barrier);

	/*
	 * Stop md->queue before flushing md->wq in case request-based
	 * dm defers requests to md->wq from md->queue.
	 */
	if (dm_request_based(md))
		dm_stop_queue(md->queue);

	flush_workqueue(md->wq);

	/*
	 * At this point no more requests are entering target request routines.
	 * We call dm_wait_for_completion to wait for all existing requests
	 * to finish.
	 */
	r = dm_wait_for_completion(md, task_state);
	if (!r)
		set_bit(dmf_suspended_flag, &md->flags);

	if (noflush)
		clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
	if (map)
		synchronize_srcu(&md->io_barrier);

	/* were we interrupted? */
	if (r < 0) {
		dm_queue_flush(md);

		if (dm_request_based(md))
			dm_start_queue(md->queue);

		unlock_fs(md);
		dm_table_presuspend_undo_targets(map);
		/* pushback list is already flushed, so skip flush */
	}

	return r;
}

/*
 * We need to be able to change a mapping table under a mounted
 * filesystem. For example we might want to move some data in
 * the background. Before the table can be swapped with
 * dm_swap_table, dm_suspend must be called to flush any
 * in-flight bios and ensure that any further I/O gets deferred.
 */
/*
 * Suspend mechanism in request-based dm.
 *
 * 1. Flush all I/Os by lock_fs() if needed.
 * 2. Stop dispatching any I/O by stopping the request_queue.
 * 3. Wait for all in-flight I/Os to be completed or requeued.
 *
 * To abort suspend, start the request_queue.
 */
int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
{
	struct dm_table *map = NULL;
	int r = 0;

retry:
	mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);

	if (dm_suspended_md(md)) {
		r = -EINVAL;
		goto out_unlock;
	}

	if (dm_suspended_internally_md(md)) {
		/* already internally suspended, wait for internal resume */
		mutex_unlock(&md->suspend_lock);
		r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE);
		if (r)
			return r;
		goto retry;
	}

	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));

	r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE, DMF_SUSPENDED);
	if (r)
		goto out_unlock;

	dm_table_postsuspend_targets(map);

out_unlock:
	mutex_unlock(&md->suspend_lock);
	return r;
}
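
/*
 * Editor's addition -- illustrative, compiled-out examples of the flag
 * combinations a caller might pass to dm_suspend().  noflush takes
 * precedence over lockfs, as noted in __dm_suspend() above.
 */
#if 0
	/* flush outstanding I/O, freezing any filesystem on the device first */
	r = dm_suspend(md, DM_SUSPEND_LOCKFS_FLAG);

	/* defer outstanding I/O on md->deferred instead of flushing it */
	r = dm_suspend(md, DM_SUSPEND_NOFLUSH_FLAG);
#endif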

static int __dm_resume(struct mapped_device *md, struct dm_table *map)
{
	if (map) {
		int r = dm_table_resume_targets(map);
		if (r)
			return r;
	}

	dm_queue_flush(md);

	/*
	 * Flushing deferred I/Os must be done after targets are resumed
	 * so that mapping of targets can work correctly.
	 * Request-based dm is queueing the deferred I/Os in its request_queue.
	 */
	if (dm_request_based(md))
		dm_start_queue(md->queue);

	unlock_fs(md);

	return 0;
}

int dm_resume(struct mapped_device *md)
{
	int r;
	struct dm_table *map = NULL;

retry:
	r = -EINVAL;
	mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);

	if (!dm_suspended_md(md))
		goto out;

	if (dm_suspended_internally_md(md)) {
		/* already internally suspended, wait for internal resume */
		mutex_unlock(&md->suspend_lock);
		r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE);
		if (r)
			return r;
		goto retry;
	}

	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
	if (!map || !dm_table_get_size(map))
		goto out;

	r = __dm_resume(md, map);
	if (r)
		goto out;

	clear_bit(DMF_SUSPENDED, &md->flags);
out:
	mutex_unlock(&md->suspend_lock);

	return r;
}

/*
 * Internal suspend/resume works like userspace-driven suspend. It waits
 * until all bios finish and prevents issuing new bios to the target drivers.
 * It may be used only from the kernel.
 */

static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_flags)
{
	struct dm_table *map = NULL;

	lockdep_assert_held(&md->suspend_lock);

	if (md->internal_suspend_count++)
		return; /* nested internal suspend */

	if (dm_suspended_md(md)) {
		set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
		return; /* nested suspend */
	}

	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));

	/*
	 * Using TASK_UNINTERRUPTIBLE because only NOFLUSH internal suspend is
	 * supported. Properly supporting a TASK_INTERRUPTIBLE internal suspend
	 * would require changing .presuspend to return an error -- avoid this
	 * until there is a need for more elaborate variants of internal suspend.
	 */
	(void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE,
			    DMF_SUSPENDED_INTERNALLY);

	dm_table_postsuspend_targets(map);
}

static void __dm_internal_resume(struct mapped_device *md)
{
	BUG_ON(!md->internal_suspend_count);

	if (--md->internal_suspend_count)
		return; /* resume from nested internal suspend */

	if (dm_suspended_md(md))
		goto done; /* resume from nested suspend */

	/*
	 * NOTE: existing callers don't need to call dm_table_resume_targets
	 * (which may fail -- so best to avoid it for now by passing NULL map)
	 */
	(void) __dm_resume(md, NULL);

done:
	clear_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
	smp_mb__after_atomic();
	wake_up_bit(&md->flags, DMF_SUSPENDED_INTERNALLY);
}

void dm_internal_suspend_noflush(struct mapped_device *md)
{
	mutex_lock(&md->suspend_lock);
	__dm_internal_suspend(md, DM_SUSPEND_NOFLUSH_FLAG);
	mutex_unlock(&md->suspend_lock);
}
EXPORT_SYMBOL_GPL(dm_internal_suspend_noflush);

void dm_internal_resume(struct mapped_device *md)
{
	mutex_lock(&md->suspend_lock);
	__dm_internal_resume(md);
	mutex_unlock(&md->suspend_lock);
}
EXPORT_SYMBOL_GPL(dm_internal_resume);
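
/*
 * Editor's addition -- an illustrative, compiled-out sketch: kernel-side
 * callers bracket their work with the noflush internal suspend and an
 * internal resume; the calls nest via md->internal_suspend_count.
 */
#if 0
	dm_internal_suspend_noflush(md);	/* quiesce from kernel context */
	/* ... operate on the quiesced device ... */
	dm_internal_resume(md);			/* unwind one nesting level */
#endif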

/*
 * Fast variants of internal suspend/resume hold md->suspend_lock,
 * which prevents interaction with userspace-driven suspend.
 */

void dm_internal_suspend_fast(struct mapped_device *md)
{
	mutex_lock(&md->suspend_lock);
	if (dm_suspended_md(md) || dm_suspended_internally_md(md))
		return;

	set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
	synchronize_srcu(&md->io_barrier);
	flush_workqueue(md->wq);
	dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL_GPL(dm_internal_suspend_fast);

void dm_internal_resume_fast(struct mapped_device *md)
{
	if (dm_suspended_md(md) || dm_suspended_internally_md(md))
		goto done;

	dm_queue_flush(md);

done:
	mutex_unlock(&md->suspend_lock);
}
EXPORT_SYMBOL_GPL(dm_internal_resume_fast);
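
/*
 * Editor's addition -- an illustrative, compiled-out sketch: the fast
 * variants are deliberately asymmetric.  dm_internal_suspend_fast()
 * returns with md->suspend_lock held (on both of its paths) and
 * dm_internal_resume_fast() releases it, so they must always bracket
 * the critical section as a pair.
 */
#if 0
	dm_internal_suspend_fast(md);	/* takes and keeps md->suspend_lock */
	/* ... critical section with all I/O quiesced ... */
	dm_internal_resume_fast(md);	/* drops md->suspend_lock */
#endif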

/*-----------------------------------------------------------------
 * Event notification.
 *---------------------------------------------------------------*/
int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
		      unsigned cookie)
{
	char udev_cookie[DM_COOKIE_LENGTH];
	char *envp[] = { udev_cookie, NULL };

	if (!cookie)
		return kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
	else {
		snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
			 DM_COOKIE_ENV_VAR_NAME, cookie);
		return kobject_uevent_env(&disk_to_dev(md->disk)->kobj,
					  action, envp);
	}
}
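
/*
 * Editor's addition -- an illustrative, compiled-out example: the cookie
 * is exported to udev as a DM_COOKIE=<value> environment variable so
 * userspace can match the uevent to the ioctl that triggered it.
 */
#if 0
	dm_kobject_uevent(md, KOBJ_CHANGE, cookie);	/* emits DM_COOKIE=<cookie> */
#endif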

uint32_t dm_next_uevent_seq(struct mapped_device *md)
{
	return atomic_add_return(1, &md->uevent_seq);
}

uint32_t dm_get_event_nr(struct mapped_device *md)
{
	return atomic_read(&md->event_nr);
}

int dm_wait_event(struct mapped_device *md, int event_nr)
{
	return wait_event_interruptible(md->eventq,
			(event_nr != atomic_read(&md->event_nr)));
}
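
/*
 * Editor's addition -- an illustrative, compiled-out sketch of the
 * sample-then-wait pattern dm_wait_event() supports: record the current
 * event number, inspect state, then block until the counter advances.
 */
#if 0
	int r, last = dm_get_event_nr(md);

	/* ... inspect device state ... */
	r = dm_wait_event(md, last);	/* negative if interrupted by a signal */
#endif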

void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
{
	unsigned long flags;

	spin_lock_irqsave(&md->uevent_lock, flags);
	list_add(elist, &md->uevent_list);
	spin_unlock_irqrestore(&md->uevent_lock, flags);
}

/*
 * The gendisk is only valid as long as you have a reference
 * count on 'md'.
 */
struct gendisk *dm_disk(struct mapped_device *md)
{
	return md->disk;
}
EXPORT_SYMBOL_GPL(dm_disk);

struct kobject *dm_kobject(struct mapped_device *md)
{
	return &md->kobj_holder.kobj;
}

struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
{
	struct mapped_device *md;

	md = container_of(kobj, struct mapped_device, kobj_holder.kobj);

	spin_lock(&_minor_lock);
	if (test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) {
		md = NULL;
		goto out;
	}
	dm_get(md);
out:
	spin_unlock(&_minor_lock);

	return md;
}

int dm_suspended_md(struct mapped_device *md)
{
	return test_bit(DMF_SUSPENDED, &md->flags);
}

int dm_suspended_internally_md(struct mapped_device *md)
{
	return test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
}

int dm_test_deferred_remove_flag(struct mapped_device *md)
{
	return test_bit(DMF_DEFERRED_REMOVE, &md->flags);
}

int dm_suspended(struct dm_target *ti)
{
	return dm_suspended_md(dm_table_get_md(ti->table));
}
EXPORT_SYMBOL_GPL(dm_suspended);

int dm_noflush_suspending(struct dm_target *ti)
{
	return __noflush_suspending(dm_table_get_md(ti->table));
}
EXPORT_SYMBOL_GPL(dm_noflush_suspending);

struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_queue_mode type,
					    unsigned integrity, unsigned per_io_data_size,
					    unsigned min_pool_size)
{
	struct dm_md_mempools *pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id);
	unsigned int pool_size = 0;
	unsigned int front_pad, io_front_pad;
	int ret;

	if (!pools)
		return NULL;

	switch (type) {
	case DM_TYPE_BIO_BASED:
	case DM_TYPE_DAX_BIO_BASED:
	case DM_TYPE_NVME_BIO_BASED:
		pool_size = max(dm_get_reserved_bio_based_ios(), min_pool_size);
		front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
		io_front_pad = roundup(front_pad, __alignof__(struct dm_io)) + offsetof(struct dm_io, tio);
		ret = bioset_init(&pools->io_bs, pool_size, io_front_pad, 0);
		if (ret)
			goto out;
		if (integrity && bioset_integrity_create(&pools->io_bs, pool_size))
			goto out;
		break;
	case DM_TYPE_REQUEST_BASED:
		pool_size = max(dm_get_reserved_rq_based_ios(), min_pool_size);
		front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
		/* per_io_data_size is used for blk-mq pdu at queue allocation */
		break;
	default:
		BUG();
	}

	ret = bioset_init(&pools->bs, pool_size, front_pad, 0);
	if (ret)
		goto out;

	if (integrity && bioset_integrity_create(&pools->bs, pool_size))
		goto out;

	return pools;

out:
	dm_free_md_mempools(pools);

	return NULL;
}

void dm_free_md_mempools(struct dm_md_mempools *pools)
{
	if (!pools)
		return;

	bioset_exit(&pools->bs);
	bioset_exit(&pools->io_bs);

	kfree(pools);
}
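
/*
 * Editor's addition -- an illustrative, compiled-out sketch of mempool
 * setup for a bio-based table; per_io_data_size here stands in for the
 * largest per-I/O target data size in the table.  Pools are released
 * with dm_free_md_mempools() when the table is destroyed.
 */
#if 0
	struct dm_md_mempools *pools;

	pools = dm_alloc_md_mempools(md, DM_TYPE_BIO_BASED,
				     0 /* no integrity */,
				     per_io_data_size, 0 /* min_pool_size */);
	if (!pools)
		return -ENOMEM;
	/* ... hand the pools to the table; freed later via dm_free_md_mempools() ... */
#endif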

struct dm_pr {
	u64	old_key;
	u64	new_key;
	u32	flags;
	bool	fail_early;
};

static int dm_call_pr(struct block_device *bdev, iterate_devices_callout_fn fn,
		      void *data)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	struct dm_table *table;
	struct dm_target *ti;
	int ret = -ENOTTY, srcu_idx;

	table = dm_get_live_table(md, &srcu_idx);
	if (!table || !dm_table_get_size(table))
		goto out;

	/* We only support devices that have a single target */
	if (dm_table_get_num_targets(table) != 1)
		goto out;
	ti = dm_table_get_target(table, 0);

	ret = -EINVAL;
	if (!ti->type->iterate_devices)
		goto out;

	ret = ti->type->iterate_devices(ti, fn, data);
out:
	dm_put_live_table(md, srcu_idx);
	return ret;
}

/*
 * For register / unregister we need to manually call out to every path.
 */
static int __dm_pr_register(struct dm_target *ti, struct dm_dev *dev,
			    sector_t start, sector_t len, void *data)
{
	struct dm_pr *pr = data;
	const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops;

	if (!ops || !ops->pr_register)
		return -EOPNOTSUPP;
	return ops->pr_register(dev->bdev, pr->old_key, pr->new_key, pr->flags);
}

static int dm_pr_register(struct block_device *bdev, u64 old_key, u64 new_key,
			  u32 flags)
{
	struct dm_pr pr = {
		.old_key	= old_key,
		.new_key	= new_key,
		.flags		= flags,
		.fail_early	= true,
	};
	int ret;

	ret = dm_call_pr(bdev, __dm_pr_register, &pr);
	if (ret && new_key) {
		/* unregister all paths if we failed to register any path */
		pr.old_key = new_key;
		pr.new_key = 0;
		pr.flags = 0;
		pr.fail_early = false;
		dm_call_pr(bdev, __dm_pr_register, &pr);
	}

	return ret;
}
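
/*
 * Editor's addition -- an illustrative, compiled-out example of how these
 * pr_ops are reached from userspace through the generic persistent
 * reservation ioctls of <linux/pr.h>; fd is assumed to be an open dm
 * device node and the key value is arbitrary.
 */
#if 0
	/* userspace side */
	struct pr_registration reg = {
		.old_key = 0,
		.new_key = 0x123abc,
		.flags	 = 0,
	};

	if (ioctl(fd, IOC_PR_REGISTER, &reg))
		perror("IOC_PR_REGISTER");
#endif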

static int dm_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type,
			 u32 flags)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	const struct pr_ops *ops;
	int r, srcu_idx;

	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
	if (r < 0)
		goto out;

	ops = bdev->bd_disk->fops->pr_ops;
	if (ops && ops->pr_reserve)
		r = ops->pr_reserve(bdev, key, type, flags);
	else
		r = -EOPNOTSUPP;
out:
	dm_unprepare_ioctl(md, srcu_idx);
	return r;
}

static int dm_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	const struct pr_ops *ops;
	int r, srcu_idx;

	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
	if (r < 0)
		goto out;

	ops = bdev->bd_disk->fops->pr_ops;
	if (ops && ops->pr_release)
		r = ops->pr_release(bdev, key, type);
	else
		r = -EOPNOTSUPP;
out:
	dm_unprepare_ioctl(md, srcu_idx);
	return r;
}

static int dm_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key,
			 enum pr_type type, bool abort)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	const struct pr_ops *ops;
	int r, srcu_idx;

	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
	if (r < 0)
		goto out;

	ops = bdev->bd_disk->fops->pr_ops;
	if (ops && ops->pr_preempt)
		r = ops->pr_preempt(bdev, old_key, new_key, type, abort);
	else
		r = -EOPNOTSUPP;
out:
	dm_unprepare_ioctl(md, srcu_idx);
	return r;
}

static int dm_pr_clear(struct block_device *bdev, u64 key)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	const struct pr_ops *ops;
	int r, srcu_idx;

	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
	if (r < 0)
		goto out;

	ops = bdev->bd_disk->fops->pr_ops;
	if (ops && ops->pr_clear)
		r = ops->pr_clear(bdev, key);
	else
		r = -EOPNOTSUPP;
out:
	dm_unprepare_ioctl(md, srcu_idx);
	return r;
}

static const struct pr_ops dm_pr_ops = {
	.pr_register	= dm_pr_register,
	.pr_reserve	= dm_pr_reserve,
	.pr_release	= dm_pr_release,
	.pr_preempt	= dm_pr_preempt,
	.pr_clear	= dm_pr_clear,
};

static const struct block_device_operations dm_blk_dops = {
	.open = dm_blk_open,
	.release = dm_blk_close,
	.ioctl = dm_blk_ioctl,
	.getgeo = dm_blk_getgeo,
	.report_zones = dm_blk_report_zones,
	.pr_ops = &dm_pr_ops,
	.owner = THIS_MODULE
};

static const struct dax_operations dm_dax_ops = {
	.direct_access = dm_dax_direct_access,
	.dax_supported = dm_dax_supported,
	.copy_from_iter = dm_dax_copy_from_iter,
	.copy_to_iter = dm_dax_copy_to_iter,
	.zero_page_range = dm_dax_zero_page_range,
};

/*
 * module hooks
 */
module_init(dm_init);
module_exit(dm_exit);

module_param(major, uint, 0);
MODULE_PARM_DESC(major, "The major number of the device mapper");

module_param(reserved_bio_based_ios, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools");

module_param(dm_numa_node, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dm_numa_node, "NUMA node for DM device memory allocations");

MODULE_DESCRIPTION(DM_NAME " driver");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");