/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include "dm-bio-list.h"
#include "dm-uevent.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/moduleparam.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/hdreg.h>
#include <linux/blktrace_api.h>
#include <trace/block.h>

#define DM_MSG_PREFIX "core"

static const char *_name = DM_NAME;

static unsigned int major = 0;
static unsigned int _major = 0;

static DEFINE_SPINLOCK(_minor_lock);
/*
 * For bio-based dm.
 * One of these is allocated per bio.
 */
struct dm_io {
	struct mapped_device *md;
	int error;
	atomic_t io_count;
	struct bio *bio;
	unsigned long start_time;
};

/*
 * For bio-based dm.
 * One of these is allocated per target within a bio. Hopefully
 * this will be simplified out one day.
 */
struct dm_target_io {
	struct dm_io *io;
	struct dm_target *ti;
	union map_info info;
};

DEFINE_TRACE(block_bio_complete);

/*
 * For request-based dm.
 * One of these is allocated per request.
 */
struct dm_rq_target_io {
	struct mapped_device *md;
	struct dm_target *ti;
	struct request *orig, clone;
	int error;
	union map_info info;
};

/*
 * For request-based dm.
 * One of these is allocated per bio.
 */
struct dm_rq_clone_bio_info {
	struct bio *orig;
	struct request *rq;
};

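/*
 * Return the map_info stashed in bio->bi_private by __map_bio(), or
 * NULL if the bio carries none.
 */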
union map_info *dm_get_mapinfo(struct bio *bio)
{
	if (bio && bio->bi_private)
		return &((struct dm_target_io *)bio->bi_private)->info;
	return NULL;
}

#define MINOR_ALLOCED ((void *)-1)

/*
 * Bits for the md->flags field.
 */
#define DMF_BLOCK_IO 0
#define DMF_SUSPENDED 1
#define DMF_FROZEN 2
#define DMF_FREEING 3
#define DMF_DELETING 4
#define DMF_NOFLUSH_SUSPENDING 5

/*
 * Work processed by per-device workqueue.
 */
struct dm_wq_req {
	enum {
		DM_WQ_FLUSH_DEFERRED,
	} type;
	struct work_struct work;
	struct mapped_device *md;
	void *context;
};

struct mapped_device {
	struct rw_semaphore io_lock;
	struct mutex suspend_lock;
	spinlock_t pushback_lock;
	rwlock_t map_lock;
	atomic_t holders;
	atomic_t open_count;

	unsigned long flags;

	struct request_queue *queue;
	struct gendisk *disk;
	char name[16];

	void *interface_ptr;

	/*
	 * A list of ios that arrived while we were suspended.
	 */
	atomic_t pending;
	wait_queue_head_t wait;
	struct bio_list deferred;
	struct bio_list pushback;

	/*
	 * Processing queue (flush/barriers)
	 */
	struct workqueue_struct *wq;

	/*
	 * The current mapping.
	 */
	struct dm_table *map;

	/*
	 * io objects are allocated from here.
	 */
	mempool_t *io_pool;
	mempool_t *tio_pool;

	struct bio_set *bs;

	/*
	 * Event handling.
	 */
	atomic_t event_nr;
	wait_queue_head_t eventq;
	atomic_t uevent_seq;
	struct list_head uevent_list;
	spinlock_t uevent_lock; /* Protect access to uevent_list */

	/*
	 * freeze/thaw support requires holding onto a super block
	 */
	struct super_block *frozen_sb;
	struct block_device *suspended_bdev;

	/* forced geometry settings */
	struct hd_geometry geometry;
};

#define MIN_IOS 256
static struct kmem_cache *_io_cache;
static struct kmem_cache *_tio_cache;
static struct kmem_cache *_rq_tio_cache;
static struct kmem_cache *_rq_bio_info_cache;

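/*
 * Set up the slab caches, uevent support and the block major that
 * device-mapper registers for itself.
 */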
static int __init local_init(void)
{
	int r = -ENOMEM;

	/* allocate a slab for the dm_ios */
	_io_cache = KMEM_CACHE(dm_io, 0);
	if (!_io_cache)
		return r;

	/* allocate a slab for the target ios */
	_tio_cache = KMEM_CACHE(dm_target_io, 0);
	if (!_tio_cache)
		goto out_free_io_cache;

	_rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0);
	if (!_rq_tio_cache)
		goto out_free_tio_cache;

	_rq_bio_info_cache = KMEM_CACHE(dm_rq_clone_bio_info, 0);
	if (!_rq_bio_info_cache)
		goto out_free_rq_tio_cache;

	r = dm_uevent_init();
	if (r)
		goto out_free_rq_bio_info_cache;

	_major = major;
	r = register_blkdev(_major, _name);
	if (r < 0)
		goto out_uevent_exit;

	if (!_major)
		_major = r;

	return 0;

out_uevent_exit:
	dm_uevent_exit();
out_free_rq_bio_info_cache:
	kmem_cache_destroy(_rq_bio_info_cache);
out_free_rq_tio_cache:
	kmem_cache_destroy(_rq_tio_cache);
out_free_tio_cache:
	kmem_cache_destroy(_tio_cache);
out_free_io_cache:
	kmem_cache_destroy(_io_cache);

	return r;
}

static void local_exit(void)
{
	kmem_cache_destroy(_rq_bio_info_cache);
	kmem_cache_destroy(_rq_tio_cache);
	kmem_cache_destroy(_tio_cache);
	kmem_cache_destroy(_io_cache);
	unregister_blkdev(_major, _name);
	dm_uevent_exit();

	_major = 0;

	DMINFO("cleaned up");
}

static int (*_inits[])(void) __initdata = {
	local_init,
	dm_target_init,
	dm_linear_init,
	dm_stripe_init,
	dm_kcopyd_init,
	dm_interface_init,
};

static void (*_exits[])(void) = {
	local_exit,
	dm_target_exit,
	dm_linear_exit,
	dm_stripe_exit,
	dm_kcopyd_exit,
	dm_interface_exit,
};

static int __init dm_init(void)
{
	const int count = ARRAY_SIZE(_inits);

	int r, i;

	for (i = 0; i < count; i++) {
		r = _inits[i]();
		if (r)
			goto bad;
	}

	return 0;

bad:
	while (i--)
		_exits[i]();

	return r;
}

static void __exit dm_exit(void)
{
	int i = ARRAY_SIZE(_exits);

	while (i--)
		_exits[i]();
}

/*
 * Block device functions
 */
static int dm_blk_open(struct block_device *bdev, fmode_t mode)
{
	struct mapped_device *md;

	spin_lock(&_minor_lock);

	md = bdev->bd_disk->private_data;
	if (!md)
		goto out;

	if (test_bit(DMF_FREEING, &md->flags) ||
	    test_bit(DMF_DELETING, &md->flags)) {
		md = NULL;
		goto out;
	}

	dm_get(md);
	atomic_inc(&md->open_count);

out:
	spin_unlock(&_minor_lock);

	return md ? 0 : -ENXIO;
}

static int dm_blk_close(struct gendisk *disk, fmode_t mode)
{
	struct mapped_device *md = disk->private_data;
	atomic_dec(&md->open_count);
	dm_put(md);
	return 0;
}

int dm_open_count(struct mapped_device *md)
{
	return atomic_read(&md->open_count);
}

/*
 * Guarantees nothing is using the device before it's deleted.
 */
int dm_lock_for_deletion(struct mapped_device *md)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (dm_open_count(md))
		r = -EBUSY;
	else
		set_bit(DMF_DELETING, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}

static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mapped_device *md = bdev->bd_disk->private_data;

	return dm_get_geometry(md, geo);
}

static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	struct dm_table *map = dm_get_table(md);
	struct dm_target *tgt;
	int r = -ENOTTY;

	if (!map || !dm_table_get_size(map))
		goto out;

	/* We only support devices that have a single target */
	if (dm_table_get_num_targets(map) != 1)
		goto out;

	tgt = dm_table_get_target(map, 0);

	if (dm_suspended(md)) {
		r = -EAGAIN;
		goto out;
	}

	if (tgt->type->ioctl)
		r = tgt->type->ioctl(tgt, cmd, arg);

out:
	dm_table_put(map);

	return r;
}

static struct dm_io *alloc_io(struct mapped_device *md)
{
	return mempool_alloc(md->io_pool, GFP_NOIO);
}

static void free_io(struct mapped_device *md, struct dm_io *io)
{
	mempool_free(io, md->io_pool);
}

static struct dm_target_io *alloc_tio(struct mapped_device *md)
{
	return mempool_alloc(md->tio_pool, GFP_NOIO);
}

static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
{
	mempool_free(tio, md->tio_pool);
}

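/*
 * Per-bio accounting: charge disk statistics to the mapped device and
 * track the number of ios in flight so that suspend can wait for them.
 */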
static void start_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	int cpu;

	io->start_time = jiffies;

	cpu = part_stat_lock();
	part_round_stats(cpu, &dm_disk(md)->part0);
	part_stat_unlock();
	dm_disk(md)->part0.in_flight = atomic_inc_return(&md->pending);
}

static void end_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	struct bio *bio = io->bio;
	unsigned long duration = jiffies - io->start_time;
	int pending, cpu;
	int rw = bio_data_dir(bio);

	cpu = part_stat_lock();
	part_round_stats(cpu, &dm_disk(md)->part0);
	part_stat_add(cpu, &dm_disk(md)->part0, ticks[rw], duration);
	part_stat_unlock();

	dm_disk(md)->part0.in_flight = pending =
		atomic_dec_return(&md->pending);

	/* nudge anyone waiting on suspend queue */
	if (!pending)
		wake_up(&md->wait);
}

/*
 * Add the bio to the list of deferred io.
 */
static int queue_io(struct mapped_device *md, struct bio *bio)
{
	down_write(&md->io_lock);

	if (!test_bit(DMF_BLOCK_IO, &md->flags)) {
		up_write(&md->io_lock);
		return 1;
	}

	bio_list_add(&md->deferred, bio);

	up_write(&md->io_lock);
	return 0; /* deferred successfully */
}

/*
 * Everyone (including functions in this file), should use this
 * function to access the md->map field, and make sure they call
 * dm_table_put() when finished.
 */
struct dm_table *dm_get_table(struct mapped_device *md)
{
	struct dm_table *t;

	read_lock(&md->map_lock);
	t = md->map;
	if (t)
		dm_table_get(t);
	read_unlock(&md->map_lock);

	return t;
}

/*
 * Get the geometry associated with a dm device
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	*geo = md->geometry;

	return 0;
}

/*
 * Set the geometry of a device.
 */
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;

	if (geo->start > sz) {
		DMWARN("Start sector is beyond the geometry limits.");
		return -EINVAL;
	}

	md->geometry = *geo;

	return 0;
}

/*-----------------------------------------------------------------
 * CRUD START:
 * A more elegant solution is in the works that uses the queue
 * merge fn; unfortunately there are a couple of changes to
 * the block layer that I want to make for this. So in the
 * interests of getting something for people to use I give
 * you this clearly demarcated crap.
 *---------------------------------------------------------------*/

static int __noflush_suspending(struct mapped_device *md)
{
	return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
}

/*
 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necessary.
 */
static void dec_pending(struct dm_io *io, int error)
{
	unsigned long flags;

	/* Push-back supersedes any I/O errors */
	if (error && !(io->error > 0 && __noflush_suspending(io->md)))
		io->error = error;

	if (atomic_dec_and_test(&io->io_count)) {
		if (io->error == DM_ENDIO_REQUEUE) {
			/*
			 * Target requested pushing back the I/O.
			 * This must be handled before the sleeper on
			 * suspend queue merges the pushback list.
			 */
			spin_lock_irqsave(&io->md->pushback_lock, flags);
			if (__noflush_suspending(io->md))
				bio_list_add(&io->md->pushback, io->bio);
			else
				/* noflush suspend was interrupted. */
				io->error = -EIO;
			spin_unlock_irqrestore(&io->md->pushback_lock, flags);
		}

		end_io_acct(io);

		if (io->error != DM_ENDIO_REQUEUE) {
			trace_block_bio_complete(io->md->queue, io->bio);

			bio_endio(io->bio, io->error);
		}

		free_io(io->md, io);
	}
}

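/*
 * Completion handler for cloned bios: let the target's end_io hook
 * amend the result before the original io's reference is dropped.
 */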
static void clone_endio(struct bio *bio, int error)
{
	int r = 0;
	struct dm_target_io *tio = bio->bi_private;
	struct mapped_device *md = tio->io->md;
	dm_endio_fn endio = tio->ti->type->end_io;

	if (!bio_flagged(bio, BIO_UPTODATE) && !error)
		error = -EIO;

	if (endio) {
		r = endio(tio->ti, bio, error, &tio->info);
		if (r < 0 || r == DM_ENDIO_REQUEUE)
			/*
			 * error and requeue request are handled
			 * in dec_pending().
			 */
			error = r;
		else if (r == DM_ENDIO_INCOMPLETE)
			/* The target will handle the io */
			return;
		else if (r) {
			DMWARN("unimplemented target endio return value: %d", r);
			BUG();
		}
	}

	dec_pending(tio->io, error);

	/*
	 * Store md for cleanup instead of tio which is about to get freed.
	 */
	bio->bi_private = md->bs;

	bio_put(bio);
	free_tio(md, tio);
}

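/*
 * Maximum number of sectors that may be submitted at this offset without
 * crossing the end of the target or one of its split_io boundaries.
 */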
static sector_t max_io_len(struct mapped_device *md,
			   sector_t sector, struct dm_target *ti)
{
	sector_t offset = sector - ti->begin;
	sector_t len = ti->len - offset;

	/*
	 * Does the target need to split even further ?
	 */
	if (ti->split_io) {
		sector_t boundary;
		boundary = ((offset + ti->split_io) & ~(ti->split_io - 1))
			   - offset;
		if (len > boundary)
			len = boundary;
	}

	return len;
}

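/*
 * Pass a clone to the target's map function and dispatch, drop or
 * requeue it according to the DM_MAPIO_* return code.
 */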
static void __map_bio(struct dm_target *ti, struct bio *clone,
		      struct dm_target_io *tio)
{
	int r;
	sector_t sector;
	struct mapped_device *md;

	/*
	 * Sanity checks.
	 */
	BUG_ON(!clone->bi_size);

	clone->bi_end_io = clone_endio;
	clone->bi_private = tio;

	/*
	 * Map the clone. If r == 0 we don't need to do
	 * anything, the target has assumed ownership of
	 * this io.
	 */
	atomic_inc(&tio->io->io_count);
	sector = clone->bi_sector;
	r = ti->type->map(ti, clone, &tio->info);
	if (r == DM_MAPIO_REMAPPED) {
		/* the bio has been remapped so dispatch it */

		trace_block_remap(bdev_get_queue(clone->bi_bdev), clone,
				  tio->io->bio->bi_bdev->bd_dev,
				  clone->bi_sector, sector);

		generic_make_request(clone);
	} else if (r < 0 || r == DM_MAPIO_REQUEUE) {
		/* error the io and bail out, or requeue it if needed */
		md = tio->io->md;
		dec_pending(tio->io, r);
		/*
		 * Store bio_set for cleanup.
		 */
		clone->bi_private = md->bs;
		bio_put(clone);
		free_tio(md, tio);
	} else if (r) {
		DMWARN("unimplemented target map return value: %d", r);
		BUG();
	}
}

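/* State carried between __split_bio() and __clone_and_map(). */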
struct clone_info {
	struct mapped_device *md;
	struct dm_table *map;
	struct bio *bio;
	struct dm_io *io;
	sector_t sector;
	sector_t sector_count;
	unsigned short idx;
};

static void dm_bio_destructor(struct bio *bio)
{
	struct bio_set *bs = bio->bi_private;

	bio_free(bio, bs);
}

/*
 * Creates a little bio that just does part of a bvec.
 */
static struct bio *split_bvec(struct bio *bio, sector_t sector,
			      unsigned short idx, unsigned int offset,
			      unsigned int len, struct bio_set *bs)
{
	struct bio *clone;
	struct bio_vec *bv = bio->bi_io_vec + idx;

	clone = bio_alloc_bioset(GFP_NOIO, 1, bs);
	clone->bi_destructor = dm_bio_destructor;
	*clone->bi_io_vec = *bv;

	clone->bi_sector = sector;
	clone->bi_bdev = bio->bi_bdev;
	clone->bi_rw = bio->bi_rw;
	clone->bi_vcnt = 1;
	clone->bi_size = to_bytes(len);
	clone->bi_io_vec->bv_offset = offset;
	clone->bi_io_vec->bv_len = clone->bi_size;
	clone->bi_flags |= 1 << BIO_CLONED;

	return clone;
}

/*
 * Creates a bio that consists of a range of complete bvecs.
 */
static struct bio *clone_bio(struct bio *bio, sector_t sector,
			     unsigned short idx, unsigned short bv_count,
			     unsigned int len, struct bio_set *bs)
{
	struct bio *clone;

	clone = bio_alloc_bioset(GFP_NOIO, bio->bi_max_vecs, bs);
	__bio_clone(clone, bio);
	clone->bi_destructor = dm_bio_destructor;
	clone->bi_sector = sector;
	clone->bi_idx = idx;
	clone->bi_vcnt = idx + bv_count;
	clone->bi_size = to_bytes(len);
	clone->bi_flags &= ~(1 << BIO_SEG_VALID);

	return clone;
}

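/*
 * Clone as much of ci->bio as the target at ci->sector accepts and map
 * it, splitting a single bvec if it straddles a target boundary.
 */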
static int __clone_and_map(struct clone_info *ci)
{
	struct bio *clone, *bio = ci->bio;
	struct dm_target *ti;
	sector_t len = 0, max;
	struct dm_target_io *tio;

	ti = dm_table_find_target(ci->map, ci->sector);
	if (!dm_target_is_valid(ti))
		return -EIO;

	max = max_io_len(ci->md, ci->sector, ti);

	/*
	 * Allocate a target io object.
	 */
	tio = alloc_tio(ci->md);
	tio->io = ci->io;
	tio->ti = ti;
	memset(&tio->info, 0, sizeof(tio->info));

	if (ci->sector_count <= max) {
		/*
		 * Optimise for the simple case where we can do all of
		 * the remaining io with a single clone.
		 */
		clone = clone_bio(bio, ci->sector, ci->idx,
				  bio->bi_vcnt - ci->idx, ci->sector_count,
				  ci->md->bs);
		__map_bio(ti, clone, tio);
		ci->sector_count = 0;

	} else if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) {
		/*
		 * There are some bvecs that don't span targets.
		 * Do as many of these as possible.
		 */
		int i;
		sector_t remaining = max;
		sector_t bv_len;

		for (i = ci->idx; remaining && (i < bio->bi_vcnt); i++) {
			bv_len = to_sector(bio->bi_io_vec[i].bv_len);

			if (bv_len > remaining)
				break;

			remaining -= bv_len;
			len += bv_len;
		}

		clone = clone_bio(bio, ci->sector, ci->idx, i - ci->idx, len,
				  ci->md->bs);
		__map_bio(ti, clone, tio);

		ci->sector += len;
		ci->sector_count -= len;
		ci->idx = i;

	} else {
		/*
		 * Handle a bvec that must be split between two or more targets.
		 */
		struct bio_vec *bv = bio->bi_io_vec + ci->idx;
		sector_t remaining = to_sector(bv->bv_len);
		unsigned int offset = 0;

		do {
			if (offset) {
				ti = dm_table_find_target(ci->map, ci->sector);
				if (!dm_target_is_valid(ti))
					return -EIO;

				max = max_io_len(ci->md, ci->sector, ti);

				tio = alloc_tio(ci->md);
				tio->io = ci->io;
				tio->ti = ti;
				memset(&tio->info, 0, sizeof(tio->info));
			}

			len = min(remaining, max);

			clone = split_bvec(bio, ci->sector, ci->idx,
					   bv->bv_offset + offset, len,
					   ci->md->bs);

			__map_bio(ti, clone, tio);

			ci->sector += len;
			ci->sector_count -= len;
			offset += to_bytes(len);
		} while (remaining -= len);

		ci->idx++;
	}

	return 0;
}

/*
 * Split the bio into several clones.
 */
static int __split_bio(struct mapped_device *md, struct bio *bio)
{
	struct clone_info ci;
	int error = 0;

	ci.map = dm_get_table(md);
	if (unlikely(!ci.map))
		return -EIO;

	ci.md = md;
	ci.bio = bio;
	ci.io = alloc_io(md);
	ci.io->error = 0;
	atomic_set(&ci.io->io_count, 1);
	ci.io->bio = bio;
	ci.io->md = md;
	ci.sector = bio->bi_sector;
	ci.sector_count = bio_sectors(bio);
	ci.idx = bio->bi_idx;

	start_io_acct(ci.io);
	while (ci.sector_count && !error)
		error = __clone_and_map(&ci);

	/* drop the extra reference count */
	dec_pending(ci.io, error);
	dm_table_put(ci.map);

	return 0;
}
/*-----------------------------------------------------------------
 * CRUD END
 *---------------------------------------------------------------*/

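/*
 * merge_bvec_fn for the mapped device: report how many bytes may be
 * added to a bio at this offset without forcing a later split.
 */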
static int dm_merge_bvec(struct request_queue *q,
			 struct bvec_merge_data *bvm,
			 struct bio_vec *biovec)
{
	struct mapped_device *md = q->queuedata;
	struct dm_table *map = dm_get_table(md);
	struct dm_target *ti;
	sector_t max_sectors;
	int max_size = 0;

	if (unlikely(!map))
		goto out;

	ti = dm_table_find_target(map, bvm->bi_sector);
	if (!dm_target_is_valid(ti))
		goto out_table;

	/*
	 * Find maximum amount of I/O that won't need splitting
	 */
	max_sectors = min(max_io_len(md, bvm->bi_sector, ti),
			  (sector_t) BIO_MAX_SECTORS);
	max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size;
	if (max_size < 0)
		max_size = 0;

	/*
	 * merge_bvec_fn() returns number of bytes
	 * it can accept at this offset
	 * max is precomputed maximal io size
	 */
	if (max_size && ti->type->merge)
		max_size = ti->type->merge(ti, bvm, biovec, max_size);

out_table:
	dm_table_put(map);

out:
	/*
	 * Always allow an entire first page
	 */
	if (max_size <= biovec->bv_len && !(bvm->bi_size >> SECTOR_SHIFT))
		max_size = biovec->bv_len;

	return max_size;
}

/*
 * The request function that just remaps the bio built up by
 * dm_merge_bvec.
 */
static int dm_request(struct request_queue *q, struct bio *bio)
{
	int r = -EIO;
	int rw = bio_data_dir(bio);
	struct mapped_device *md = q->queuedata;
	int cpu;

	/*
	 * There is no use in forwarding any barrier request since we can't
	 * guarantee it is (or can be) handled by the targets correctly.
	 */
	if (unlikely(bio_barrier(bio))) {
		bio_endio(bio, -EOPNOTSUPP);
		return 0;
	}

	down_read(&md->io_lock);

	cpu = part_stat_lock();
	part_stat_inc(cpu, &dm_disk(md)->part0, ios[rw]);
	part_stat_add(cpu, &dm_disk(md)->part0, sectors[rw], bio_sectors(bio));
	part_stat_unlock();

	/*
	 * If we're suspended we have to queue
	 * this io for later.
	 */
	while (test_bit(DMF_BLOCK_IO, &md->flags)) {
		up_read(&md->io_lock);

		if (bio_rw(bio) != READA)
			r = queue_io(md, bio);

		if (r <= 0)
			goto out_req;

		/*
		 * We're in a while loop, because someone could suspend
		 * before we get to the following read lock.
		 */
		down_read(&md->io_lock);
	}

	r = __split_bio(md, bio);
	up_read(&md->io_lock);

out_req:
	if (r < 0)
		bio_io_error(bio);

	return 0;
}

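/* Unplug every device the current table maps onto. */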
static void dm_unplug_all(struct request_queue *q)
{
	struct mapped_device *md = q->queuedata;
	struct dm_table *map = dm_get_table(md);

	if (map) {
		dm_table_unplug_all(map);
		dm_table_put(map);
	}
}

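/*
 * While io is blocked for a suspend the device reports itself congested;
 * otherwise congestion is delegated to the targets of the current table.
 */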
static int dm_any_congested(void *congested_data, int bdi_bits)
{
	int r = bdi_bits;
	struct mapped_device *md = congested_data;
	struct dm_table *map;

	atomic_inc(&md->pending);

	if (!test_bit(DMF_BLOCK_IO, &md->flags)) {
		map = dm_get_table(md);
		if (map) {
			r = dm_table_any_congested(map, bdi_bits);
			dm_table_put(map);
		}
	}

	if (!atomic_dec_return(&md->pending))
		/* nudge anyone waiting on suspend queue */
		wake_up(&md->wait);

	return r;
}

/*-----------------------------------------------------------------
 * An IDR is used to keep track of allocated minor numbers.
 *---------------------------------------------------------------*/
static DEFINE_IDR(_minor_idr);

static void free_minor(int minor)
{
	spin_lock(&_minor_lock);
	idr_remove(&_minor_idr, minor);
	spin_unlock(&_minor_lock);
}

/*
 * See if the device with a specific minor # is free.
 */
static int specific_minor(int minor)
{
	int r, m;

	if (minor >= (1 << MINORBITS))
		return -EINVAL;

	r = idr_pre_get(&_minor_idr, GFP_KERNEL);
	if (!r)
		return -ENOMEM;

	spin_lock(&_minor_lock);

	if (idr_find(&_minor_idr, minor)) {
		r = -EBUSY;
		goto out;
	}

	r = idr_get_new_above(&_minor_idr, MINOR_ALLOCED, minor, &m);
	if (r)
		goto out;

	if (m != minor) {
		idr_remove(&_minor_idr, m);
		r = -EBUSY;
		goto out;
	}

out:
	spin_unlock(&_minor_lock);
	return r;
}

static int next_free_minor(int *minor)
{
	int r, m;

	r = idr_pre_get(&_minor_idr, GFP_KERNEL);
	if (!r)
		return -ENOMEM;

	spin_lock(&_minor_lock);

	r = idr_get_new(&_minor_idr, MINOR_ALLOCED, &m);
	if (r)
		goto out;

	if (m >= (1 << MINORBITS)) {
		idr_remove(&_minor_idr, m);
		r = -ENOSPC;
		goto out;
	}

	*minor = m;

out:
	spin_unlock(&_minor_lock);
	return r;
}

static struct block_device_operations dm_blk_dops;

/*
 * Allocate and initialise a blank device with a given minor.
 */
static struct mapped_device *alloc_dev(int minor)
{
	int r;
	struct mapped_device *md = kzalloc(sizeof(*md), GFP_KERNEL);
	void *old_md;

	if (!md) {
		DMWARN("unable to allocate device, out of memory.");
		return NULL;
	}

	if (!try_module_get(THIS_MODULE))
		goto bad_module_get;

	/* get a minor number for the dev */
	if (minor == DM_ANY_MINOR)
		r = next_free_minor(&minor);
	else
		r = specific_minor(minor);
	if (r < 0)
		goto bad_minor;

	init_rwsem(&md->io_lock);
	mutex_init(&md->suspend_lock);
	spin_lock_init(&md->pushback_lock);
	rwlock_init(&md->map_lock);
	atomic_set(&md->holders, 1);
	atomic_set(&md->open_count, 0);
	atomic_set(&md->event_nr, 0);
	atomic_set(&md->uevent_seq, 0);
	INIT_LIST_HEAD(&md->uevent_list);
	spin_lock_init(&md->uevent_lock);

	md->queue = blk_alloc_queue(GFP_KERNEL);
	if (!md->queue)
		goto bad_queue;

	md->queue->queuedata = md;
	md->queue->backing_dev_info.congested_fn = dm_any_congested;
	md->queue->backing_dev_info.congested_data = md;
	blk_queue_make_request(md->queue, dm_request);
	blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
	md->queue->unplug_fn = dm_unplug_all;
	blk_queue_merge_bvec(md->queue, dm_merge_bvec);

	md->io_pool = mempool_create_slab_pool(MIN_IOS, _io_cache);
	if (!md->io_pool)
		goto bad_io_pool;

	md->tio_pool = mempool_create_slab_pool(MIN_IOS, _tio_cache);
	if (!md->tio_pool)
		goto bad_tio_pool;

	md->bs = bioset_create(16, 0);
Stefan Bader | 9faf400 | 2006-10-03 01:15:41 -0700 | [diff] [blame] | 1136 | if (!md->bs) |
| 1137 | goto bad_no_bioset; |
| 1138 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1139 | md->disk = alloc_disk(1); |
| 1140 | if (!md->disk) |
Milan Broz | 6ed7ade | 2008-02-08 02:10:19 +0000 | [diff] [blame] | 1141 | goto bad_disk; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1142 | |
Jeff Mahoney | f0b0411 | 2006-06-26 00:27:25 -0700 | [diff] [blame] | 1143 | atomic_set(&md->pending, 0); |
| 1144 | init_waitqueue_head(&md->wait); |
| 1145 | init_waitqueue_head(&md->eventq); |
| 1146 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1147 | md->disk->major = _major; |
| 1148 | md->disk->first_minor = minor; |
| 1149 | md->disk->fops = &dm_blk_dops; |
| 1150 | md->disk->queue = md->queue; |
| 1151 | md->disk->private_data = md; |
| 1152 | sprintf(md->disk->disk_name, "dm-%d", minor); |
| 1153 | add_disk(md->disk); |
Mike Anderson | 7e51f25 | 2006-03-27 01:17:52 -0800 | [diff] [blame] | 1154 | format_dev_t(md->name, MKDEV(_major, minor)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1155 | |
Milan Broz | 304f3f6 | 2008-02-08 02:11:17 +0000 | [diff] [blame] | 1156 | md->wq = create_singlethread_workqueue("kdmflush"); |
| 1157 | if (!md->wq) |
| 1158 | goto bad_thread; |
| 1159 | |
Jeff Mahoney | ba61fdd | 2006-06-26 00:27:21 -0700 | [diff] [blame] | 1160 | /* Populate the mapping, nobody knows we exist yet */ |
Jeff Mahoney | f32c10b | 2006-06-26 00:27:22 -0700 | [diff] [blame] | 1161 | spin_lock(&_minor_lock); |
Jeff Mahoney | ba61fdd | 2006-06-26 00:27:21 -0700 | [diff] [blame] | 1162 | old_md = idr_replace(&_minor_idr, md, minor); |
Jeff Mahoney | f32c10b | 2006-06-26 00:27:22 -0700 | [diff] [blame] | 1163 | spin_unlock(&_minor_lock); |
Jeff Mahoney | ba61fdd | 2006-06-26 00:27:21 -0700 | [diff] [blame] | 1164 | |
| 1165 | BUG_ON(old_md != MINOR_ALLOCED); |
| 1166 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1167 | return md; |
| 1168 | |
Milan Broz | 304f3f6 | 2008-02-08 02:11:17 +0000 | [diff] [blame] | 1169 | bad_thread: |
| 1170 | put_disk(md->disk); |
Milan Broz | 6ed7ade | 2008-02-08 02:10:19 +0000 | [diff] [blame] | 1171 | bad_disk: |
Stefan Bader | 9faf400 | 2006-10-03 01:15:41 -0700 | [diff] [blame] | 1172 | bioset_free(md->bs); |
Milan Broz | 6ed7ade | 2008-02-08 02:10:19 +0000 | [diff] [blame] | 1173 | bad_no_bioset: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1174 | mempool_destroy(md->tio_pool); |
Milan Broz | 6ed7ade | 2008-02-08 02:10:19 +0000 | [diff] [blame] | 1175 | bad_tio_pool: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1176 | mempool_destroy(md->io_pool); |
Milan Broz | 6ed7ade | 2008-02-08 02:10:19 +0000 | [diff] [blame] | 1177 | bad_io_pool: |
Al Viro | 1312f40 | 2006-03-12 11:02:03 -0500 | [diff] [blame] | 1178 | blk_cleanup_queue(md->queue); |
Milan Broz | 6ed7ade | 2008-02-08 02:10:19 +0000 | [diff] [blame] | 1179 | bad_queue: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1180 | free_minor(minor); |
Milan Broz | 6ed7ade | 2008-02-08 02:10:19 +0000 | [diff] [blame] | 1181 | bad_minor: |
Jeff Mahoney | 10da4f7 | 2006-06-26 00:27:25 -0700 | [diff] [blame] | 1182 | module_put(THIS_MODULE); |
Milan Broz | 6ed7ade | 2008-02-08 02:10:19 +0000 | [diff] [blame] | 1183 | bad_module_get: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1184 | kfree(md); |
| 1185 | return NULL; |
| 1186 | } |
| 1187 | |
Jun'ichi Nomura | ae9da83 | 2007-10-19 22:38:43 +0100 | [diff] [blame] | 1188 | static void unlock_fs(struct mapped_device *md); |
| 1189 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1190 | static void free_dev(struct mapped_device *md) |
| 1191 | { |
Tejun Heo | f331c02 | 2008-09-03 09:01:48 +0200 | [diff] [blame] | 1192 | int minor = MINOR(disk_devt(md->disk)); |
Jun'ichi Nomura | 63d94e4 | 2006-02-24 13:04:25 -0800 | [diff] [blame] | 1193 | |
Jun'ichi Nomura | d9dde59 | 2006-02-24 13:04:24 -0800 | [diff] [blame] | 1194 | if (md->suspended_bdev) { |
Jun'ichi Nomura | ae9da83 | 2007-10-19 22:38:43 +0100 | [diff] [blame] | 1195 | unlock_fs(md); |
Jun'ichi Nomura | d9dde59 | 2006-02-24 13:04:24 -0800 | [diff] [blame] | 1196 | bdput(md->suspended_bdev); |
| 1197 | } |
Milan Broz | 304f3f6 | 2008-02-08 02:11:17 +0000 | [diff] [blame] | 1198 | destroy_workqueue(md->wq); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1199 | mempool_destroy(md->tio_pool); |
| 1200 | mempool_destroy(md->io_pool); |
Stefan Bader | 9faf400 | 2006-10-03 01:15:41 -0700 | [diff] [blame] | 1201 | bioset_free(md->bs); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1202 | del_gendisk(md->disk); |
Jun'ichi Nomura | 63d94e4 | 2006-02-24 13:04:25 -0800 | [diff] [blame] | 1203 | free_minor(minor); |
Jeff Mahoney | fba9f90 | 2006-06-26 00:27:23 -0700 | [diff] [blame] | 1204 | |
| 1205 | spin_lock(&_minor_lock); |
| 1206 | md->disk->private_data = NULL; |
| 1207 | spin_unlock(&_minor_lock); |
| 1208 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1209 | put_disk(md->disk); |
Al Viro | 1312f40 | 2006-03-12 11:02:03 -0500 | [diff] [blame] | 1210 | blk_cleanup_queue(md->queue); |
Jeff Mahoney | 10da4f7 | 2006-06-26 00:27:25 -0700 | [diff] [blame] | 1211 | module_put(THIS_MODULE); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1212 | kfree(md); |
| 1213 | } |
| 1214 | |
| 1215 | /* |
| 1216 | * Bind a table to the device. |
| 1217 | */ |
| 1218 | static void event_callback(void *context) |
| 1219 | { |
Mike Anderson | 7a8c3d3 | 2007-10-19 22:48:01 +0100 | [diff] [blame] | 1220 | unsigned long flags; |
| 1221 | LIST_HEAD(uevents); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1222 | struct mapped_device *md = (struct mapped_device *) context; |
| 1223 | |
Mike Anderson | 7a8c3d3 | 2007-10-19 22:48:01 +0100 | [diff] [blame] | 1224 | spin_lock_irqsave(&md->uevent_lock, flags); |
| 1225 | list_splice_init(&md->uevent_list, &uevents); |
| 1226 | spin_unlock_irqrestore(&md->uevent_lock, flags); |
| 1227 | |
Tejun Heo | ed9e198 | 2008-08-25 19:56:05 +0900 | [diff] [blame] | 1228 | dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj); |
Mike Anderson | 7a8c3d3 | 2007-10-19 22:48:01 +0100 | [diff] [blame] | 1229 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1230 | atomic_inc(&md->event_nr); |
| 1231 | wake_up(&md->eventq); |
| 1232 | } |
| 1233 | |
Alasdair G Kergon | 4e90188be | 2005-07-28 21:15:59 -0700 | [diff] [blame] | 1234 | static void __set_size(struct mapped_device *md, sector_t size) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1235 | { |
Alasdair G Kergon | 4e90188be | 2005-07-28 21:15:59 -0700 | [diff] [blame] | 1236 | set_capacity(md->disk, size); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1237 | |
Jes Sorensen | 1b1dcc1 | 2006-01-09 15:59:24 -0800 | [diff] [blame] | 1238 | mutex_lock(&md->suspended_bdev->bd_inode->i_mutex); |
Alasdair G Kergon | e39e2e9 | 2006-01-06 00:20:05 -0800 | [diff] [blame] | 1239 | i_size_write(md->suspended_bdev->bd_inode, (loff_t)size << SECTOR_SHIFT); |
Jes Sorensen | 1b1dcc1 | 2006-01-09 15:59:24 -0800 | [diff] [blame] | 1240 | mutex_unlock(&md->suspended_bdev->bd_inode->i_mutex); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1241 | } |
| 1242 | |
| 1243 | static int __bind(struct mapped_device *md, struct dm_table *t) |
| 1244 | { |
Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 1245 | struct request_queue *q = md->queue; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1246 | sector_t size; |
| 1247 | |
| 1248 | size = dm_table_get_size(t); |
Darrick J. Wong | 3ac51e7 | 2006-03-27 01:17:54 -0800 | [diff] [blame] | 1249 | |
| 1250 | /* |
| 1251 | * Wipe any geometry if the size of the table changed. |
| 1252 | */ |
| 1253 | if (size != get_capacity(md->disk)) |
| 1254 | memset(&md->geometry, 0, sizeof(md->geometry)); |
| 1255 | |
Jun'ichi Nomura | bfa152f | 2007-01-26 00:57:07 -0800 | [diff] [blame] | 1256 | if (md->suspended_bdev) |
| 1257 | __set_size(md, size); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1258 | if (size == 0) |
| 1259 | return 0; |
| 1260 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1261 | dm_table_get(t); |
Alasdair G Kergon | cf222b3 | 2005-07-28 21:15:57 -0700 | [diff] [blame] | 1262 | dm_table_event_callback(t, event_callback, md); |
Alasdair G Kergon | 2ca3310 | 2005-07-28 21:16:00 -0700 | [diff] [blame] | 1263 | |
| 1264 | write_lock(&md->map_lock); |
| 1265 | md->map = t; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1266 | dm_table_set_restrictions(t, q); |
Alasdair G Kergon | 2ca3310 | 2005-07-28 21:16:00 -0700 | [diff] [blame] | 1267 | write_unlock(&md->map_lock); |
| 1268 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1269 | return 0; |
| 1270 | } |
| 1271 | |
| 1272 | static void __unbind(struct mapped_device *md) |
| 1273 | { |
| 1274 | struct dm_table *map = md->map; |
| 1275 | |
| 1276 | if (!map) |
| 1277 | return; |
| 1278 | |
| 1279 | dm_table_event_callback(map, NULL, NULL); |
| 1280 | write_lock(&md->map_lock); |
| 1281 | md->map = NULL; |
| 1282 | write_unlock(&md->map_lock); |
| 1283 | dm_table_put(map); |
| 1284 | } |
| 1285 | |
| 1286 | /* |
| 1287 | * Constructor for a new device. |
| 1288 | */ |
Alasdair G Kergon | 2b06cff | 2006-06-26 00:27:32 -0700 | [diff] [blame] | 1289 | int dm_create(int minor, struct mapped_device **result) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1290 | { |
| 1291 | struct mapped_device *md; |
| 1292 | |
Alasdair G Kergon | 2b06cff | 2006-06-26 00:27:32 -0700 | [diff] [blame] | 1293 | md = alloc_dev(minor); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1294 | if (!md) |
| 1295 | return -ENXIO; |
| 1296 | |
| 1297 | *result = md; |
| 1298 | return 0; |
| 1299 | } |
| 1300 | |
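A minimal, hypothetical caller-side sketch (names invented, not part of dm.c) of the constructor's contract shown above: dm_create() hands back a mapped_device whose holders count is already 1 (set in alloc_dev()), so whoever created it owns the matching dm_put().

/* example only: not part of dm.c, illustrative caller */
static int example_create_and_release(void)
{
	struct mapped_device *md;
	int r;

	r = dm_create(DM_ANY_MINOR, &md);	/* allocate the device, holders == 1 */
	if (r)
		return r;

	dm_set_mdptr(md, NULL);			/* stash caller-private data if needed */

	dm_put(md);				/* drop the creator's reference */
	return 0;
}
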
David Teigland | 637842c | 2006-01-06 00:20:00 -0800 | [diff] [blame] | 1301 | static struct mapped_device *dm_find_md(dev_t dev) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1302 | { |
| 1303 | struct mapped_device *md; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1304 | unsigned minor = MINOR(dev); |
| 1305 | |
| 1306 | if (MAJOR(dev) != _major || minor >= (1 << MINORBITS)) |
| 1307 | return NULL; |
| 1308 | |
Jeff Mahoney | f32c10b | 2006-06-26 00:27:22 -0700 | [diff] [blame] | 1309 | spin_lock(&_minor_lock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1310 | |
| 1311 | md = idr_find(&_minor_idr, minor); |
Jeff Mahoney | fba9f90 | 2006-06-26 00:27:23 -0700 | [diff] [blame] | 1312 | if (md && (md == MINOR_ALLOCED || |
Tejun Heo | f331c02 | 2008-09-03 09:01:48 +0200 | [diff] [blame] | 1313 | (MINOR(disk_devt(dm_disk(md))) != minor) || |
Alasdair G Kergon | 17b2f66 | 2006-06-26 00:27:33 -0700 | [diff] [blame] | 1314 | test_bit(DMF_FREEING, &md->flags))) { |
David Teigland | 637842c | 2006-01-06 00:20:00 -0800 | [diff] [blame] | 1315 | md = NULL; |
Jeff Mahoney | fba9f90 | 2006-06-26 00:27:23 -0700 | [diff] [blame] | 1316 | goto out; |
| 1317 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1318 | |
Jeff Mahoney | fba9f90 | 2006-06-26 00:27:23 -0700 | [diff] [blame] | 1319 | out: |
Jeff Mahoney | f32c10b | 2006-06-26 00:27:22 -0700 | [diff] [blame] | 1320 | spin_unlock(&_minor_lock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1321 | |
David Teigland | 637842c | 2006-01-06 00:20:00 -0800 | [diff] [blame] | 1322 | return md; |
| 1323 | } |
| 1324 | |
David Teigland | d229a95 | 2006-01-06 00:20:01 -0800 | [diff] [blame] | 1325 | struct mapped_device *dm_get_md(dev_t dev) |
| 1326 | { |
| 1327 | struct mapped_device *md = dm_find_md(dev); |
| 1328 | |
| 1329 | if (md) |
| 1330 | dm_get(md); |
| 1331 | |
| 1332 | return md; |
| 1333 | } |
| 1334 | |
Alasdair G Kergon | 9ade92a | 2006-03-27 01:17:53 -0800 | [diff] [blame] | 1335 | void *dm_get_mdptr(struct mapped_device *md) |
David Teigland | 637842c | 2006-01-06 00:20:00 -0800 | [diff] [blame] | 1336 | { |
Alasdair G Kergon | 9ade92a | 2006-03-27 01:17:53 -0800 | [diff] [blame] | 1337 | return md->interface_ptr; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1338 | } |
| 1339 | |
| 1340 | void dm_set_mdptr(struct mapped_device *md, void *ptr) |
| 1341 | { |
| 1342 | md->interface_ptr = ptr; |
| 1343 | } |
| 1344 | |
| 1345 | void dm_get(struct mapped_device *md) |
| 1346 | { |
| 1347 | atomic_inc(&md->holders); |
| 1348 | } |
| 1349 | |
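A similarly hypothetical sketch of the lookup path just above: dm_get_md() translates a dev_t into a mapped_device and takes a reference (via dm_get()) on success, so every successful lookup must be balanced by dm_put().

/* example only: not part of dm.c, illustrative caller */
static int example_has_mdptr(dev_t dev)
{
	struct mapped_device *md = dm_get_md(dev);	/* takes a reference on success */
	int set;

	if (!md)
		return 0;

	set = dm_get_mdptr(md) != NULL;	/* caller-private pointer from dm_set_mdptr() */
	dm_put(md);			/* balance the reference from dm_get_md() */

	return set;
}
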
Alasdair G Kergon | 72d9486 | 2006-06-26 00:27:35 -0700 | [diff] [blame] | 1350 | const char *dm_device_name(struct mapped_device *md) |
| 1351 | { |
| 1352 | return md->name; |
| 1353 | } |
| 1354 | EXPORT_SYMBOL_GPL(dm_device_name); |
| 1355 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1356 | void dm_put(struct mapped_device *md) |
| 1357 | { |
Mike Anderson | 1134e5a | 2006-03-27 01:17:54 -0800 | [diff] [blame] | 1358 | struct dm_table *map; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1359 | |
Jeff Mahoney | fba9f90 | 2006-06-26 00:27:23 -0700 | [diff] [blame] | 1360 | BUG_ON(test_bit(DMF_FREEING, &md->flags)); |
| 1361 | |
Jeff Mahoney | f32c10b | 2006-06-26 00:27:22 -0700 | [diff] [blame] | 1362 | if (atomic_dec_and_lock(&md->holders, &_minor_lock)) { |
Mike Anderson | 1134e5a | 2006-03-27 01:17:54 -0800 | [diff] [blame] | 1363 | map = dm_get_table(md); |
Tejun Heo | f331c02 | 2008-09-03 09:01:48 +0200 | [diff] [blame] | 1364 | idr_replace(&_minor_idr, MINOR_ALLOCED, |
| 1365 | MINOR(disk_devt(dm_disk(md)))); |
Jeff Mahoney | fba9f90 | 2006-06-26 00:27:23 -0700 | [diff] [blame] | 1366 | set_bit(DMF_FREEING, &md->flags); |
Jeff Mahoney | f32c10b | 2006-06-26 00:27:22 -0700 | [diff] [blame] | 1367 | spin_unlock(&_minor_lock); |
Alasdair G Kergon | cf222b3 | 2005-07-28 21:15:57 -0700 | [diff] [blame] | 1368 | if (!dm_suspended(md)) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1369 | dm_table_presuspend_targets(map); |
| 1370 | dm_table_postsuspend_targets(map); |
| 1371 | } |
Mike Anderson | 1134e5a | 2006-03-27 01:17:54 -0800 | [diff] [blame] | 1372 | dm_table_put(map); |
Mikulas Patocka | a1b51e9 | 2009-01-06 03:04:53 +0000 | [diff] [blame] | 1373 | __unbind(md); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1374 | free_dev(md); |
| 1375 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1376 | } |
Edward Goggin | 79eb885 | 2007-05-09 02:32:56 -0700 | [diff] [blame] | 1377 | EXPORT_SYMBOL_GPL(dm_put); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1378 | |
Milan Broz | 46125c1 | 2008-02-08 02:10:30 +0000 | [diff] [blame] | 1379 | static int dm_wait_for_completion(struct mapped_device *md) |
| 1380 | { |
| 1381 | int r = 0; |
| 1382 | |
| 1383 | while (1) { |
| 1384 | set_current_state(TASK_INTERRUPTIBLE); |
| 1385 | |
| 1386 | smp_mb(); |
| 1387 | if (!atomic_read(&md->pending)) |
| 1388 | break; |
| 1389 | |
| 1390 | if (signal_pending(current)) { |
| 1391 | r = -EINTR; |
| 1392 | break; |
| 1393 | } |
| 1394 | |
| 1395 | io_schedule(); |
| 1396 | } |
| 1397 | set_current_state(TASK_RUNNING); |
| 1398 | |
| 1399 | return r; |
| 1400 | } |
| 1401 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1402 | /* |
| 1403 | * Process the deferred bios |
| 1404 | */ |
Milan Broz | 6d6f10d | 2008-02-08 02:10:22 +0000 | [diff] [blame] | 1405 | static void __flush_deferred_io(struct mapped_device *md) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1406 | { |
Milan Broz | 6d6f10d | 2008-02-08 02:10:22 +0000 | [diff] [blame] | 1407 | struct bio *c; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1408 | |
Milan Broz | 6d6f10d | 2008-02-08 02:10:22 +0000 | [diff] [blame] | 1409 | while ((c = bio_list_pop(&md->deferred))) { |
Milan Broz | 9e4e5f8 | 2007-10-19 22:38:53 +0100 | [diff] [blame] | 1410 | if (__split_bio(md, c)) |
| 1411 | bio_io_error(c); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1412 | } |
Milan Broz | 73d410c | 2008-02-08 02:10:25 +0000 | [diff] [blame] | 1413 | |
| 1414 | clear_bit(DMF_BLOCK_IO, &md->flags); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1415 | } |
| 1416 | |
Milan Broz | 6d6f10d | 2008-02-08 02:10:22 +0000 | [diff] [blame] | 1417 | static void __merge_pushback_list(struct mapped_device *md) |
| 1418 | { |
| 1419 | unsigned long flags; |
| 1420 | |
| 1421 | spin_lock_irqsave(&md->pushback_lock, flags); |
| 1422 | clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); |
| 1423 | bio_list_merge_head(&md->deferred, &md->pushback); |
| 1424 | bio_list_init(&md->pushback); |
| 1425 | spin_unlock_irqrestore(&md->pushback_lock, flags); |
| 1426 | } |
| 1427 | |
Milan Broz | 304f3f6 | 2008-02-08 02:11:17 +0000 | [diff] [blame] | 1428 | static void dm_wq_work(struct work_struct *work) |
| 1429 | { |
| 1430 | struct dm_wq_req *req = container_of(work, struct dm_wq_req, work); |
| 1431 | struct mapped_device *md = req->md; |
| 1432 | |
| 1433 | down_write(&md->io_lock); |
| 1434 | switch (req->type) { |
Milan Broz | 304f3f6 | 2008-02-08 02:11:17 +0000 | [diff] [blame] | 1435 | case DM_WQ_FLUSH_DEFERRED: |
| 1436 | __flush_deferred_io(md); |
| 1437 | break; |
| 1438 | default: |
| 1439 | DMERR("dm_wq_work: unrecognised work type %d", req->type); |
| 1440 | BUG(); |
| 1441 | } |
| 1442 | up_write(&md->io_lock); |
| 1443 | } |
| 1444 | |
| 1445 | static void dm_wq_queue(struct mapped_device *md, int type, void *context, |
| 1446 | struct dm_wq_req *req) |
| 1447 | { |
| 1448 | req->type = type; |
| 1449 | req->md = md; |
| 1450 | req->context = context; |
| 1451 | INIT_WORK(&req->work, dm_wq_work); |
| 1452 | queue_work(md->wq, &req->work); |
| 1453 | } |
| 1454 | |
| 1455 | static void dm_queue_flush(struct mapped_device *md, int type, void *context) |
| 1456 | { |
| 1457 | struct dm_wq_req req; |
| 1458 | |
| 1459 | dm_wq_queue(md, type, context, &req); |
| 1460 | flush_workqueue(md->wq); |
| 1461 | } |
| 1462 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1463 | /* |
| 1464 | * Swap in a new table (destroying old one). |
| 1465 | */ |
| 1466 | int dm_swap_table(struct mapped_device *md, struct dm_table *table) |
| 1467 | { |
Alasdair G Kergon | 93c534a | 2005-07-12 15:53:05 -0700 | [diff] [blame] | 1468 | int r = -EINVAL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1469 | |
Daniel Walker | e61290a | 2008-02-08 02:10:08 +0000 | [diff] [blame] | 1470 | mutex_lock(&md->suspend_lock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1471 | |
| 1472 | /* device must be suspended */ |
Alasdair G Kergon | cf222b3 | 2005-07-28 21:15:57 -0700 | [diff] [blame] | 1473 | if (!dm_suspended(md)) |
Alasdair G Kergon | 93c534a | 2005-07-12 15:53:05 -0700 | [diff] [blame] | 1474 | goto out; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1475 | |
Jun'ichi Nomura | bfa152f | 2007-01-26 00:57:07 -0800 | [diff] [blame] | 1476 | /* without bdev, the device size cannot be changed */ |
| 1477 | if (!md->suspended_bdev) |
| 1478 | if (get_capacity(md->disk) != dm_table_get_size(table)) |
| 1479 | goto out; |
| 1480 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1481 | __unbind(md); |
| 1482 | r = __bind(md, table); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1483 | |
Alasdair G Kergon | 93c534a | 2005-07-12 15:53:05 -0700 | [diff] [blame] | 1484 | out: |
Daniel Walker | e61290a | 2008-02-08 02:10:08 +0000 | [diff] [blame] | 1485 | mutex_unlock(&md->suspend_lock); |
Alasdair G Kergon | 93c534a | 2005-07-12 15:53:05 -0700 | [diff] [blame] | 1486 | return r; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1487 | } |
| 1488 | |
| 1489 | /* |
| 1490 | * Functions to lock and unlock any filesystem running on the |
| 1491 | * device. |
| 1492 | */ |
Alasdair G Kergon | 2ca3310 | 2005-07-28 21:16:00 -0700 | [diff] [blame] | 1493 | static int lock_fs(struct mapped_device *md) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1494 | { |
Alasdair G Kergon | e39e2e9 | 2006-01-06 00:20:05 -0800 | [diff] [blame] | 1495 | int r; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1496 | |
| 1497 | WARN_ON(md->frozen_sb); |
Alasdair G Kergon | dfbe03f | 2005-05-05 16:16:04 -0700 | [diff] [blame] | 1498 | |
Alasdair G Kergon | e39e2e9 | 2006-01-06 00:20:05 -0800 | [diff] [blame] | 1499 | md->frozen_sb = freeze_bdev(md->suspended_bdev); |
Alasdair G Kergon | dfbe03f | 2005-05-05 16:16:04 -0700 | [diff] [blame] | 1500 | if (IS_ERR(md->frozen_sb)) { |
Alasdair G Kergon | cf222b3 | 2005-07-28 21:15:57 -0700 | [diff] [blame] | 1501 | r = PTR_ERR(md->frozen_sb); |
Alasdair G Kergon | e39e2e9 | 2006-01-06 00:20:05 -0800 | [diff] [blame] | 1502 | md->frozen_sb = NULL; |
| 1503 | return r; |
Alasdair G Kergon | dfbe03f | 2005-05-05 16:16:04 -0700 | [diff] [blame] | 1504 | } |
| 1505 | |
Alasdair G Kergon | aa8d7c2 | 2006-01-06 00:20:06 -0800 | [diff] [blame] | 1506 | set_bit(DMF_FROZEN, &md->flags); |
| 1507 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1508 | /* don't bdput right now, we don't want the bdev |
Alasdair G Kergon | e39e2e9 | 2006-01-06 00:20:05 -0800 | [diff] [blame] | 1509 | * to go away while it is locked. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1510 | */ |
| 1511 | return 0; |
| 1512 | } |
| 1513 | |
Alasdair G Kergon | 2ca3310 | 2005-07-28 21:16:00 -0700 | [diff] [blame] | 1514 | static void unlock_fs(struct mapped_device *md) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1515 | { |
Alasdair G Kergon | aa8d7c2 | 2006-01-06 00:20:06 -0800 | [diff] [blame] | 1516 | if (!test_bit(DMF_FROZEN, &md->flags)) |
| 1517 | return; |
| 1518 | |
Alasdair G Kergon | e39e2e9 | 2006-01-06 00:20:05 -0800 | [diff] [blame] | 1519 | thaw_bdev(md->suspended_bdev, md->frozen_sb); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1520 | md->frozen_sb = NULL; |
Alasdair G Kergon | aa8d7c2 | 2006-01-06 00:20:06 -0800 | [diff] [blame] | 1521 | clear_bit(DMF_FROZEN, &md->flags); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1522 | } |
| 1523 | |
| 1524 | /* |
| 1525 | * We need to be able to change a mapping table under a mounted |
| 1526 | * filesystem. For example we might want to move some data in |
| 1527 | * the background. Before the table can be swapped with |
| 1528 | * dm_bind_table, dm_suspend must be called to flush any in |
| 1529 | * flight bios and ensure that any further io gets deferred. |
| 1530 | */ |
Kiyoshi Ueda | a3d77d3 | 2006-12-08 02:41:04 -0800 | [diff] [blame] | 1531 | int dm_suspend(struct mapped_device *md, unsigned suspend_flags) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1532 | { |
Alasdair G Kergon | 2ca3310 | 2005-07-28 21:16:00 -0700 | [diff] [blame] | 1533 | struct dm_table *map = NULL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1534 | DECLARE_WAITQUEUE(wait, current); |
Milan Broz | 46125c1 | 2008-02-08 02:10:30 +0000 | [diff] [blame] | 1535 | int r = 0; |
Kiyoshi Ueda | a3d77d3 | 2006-12-08 02:41:04 -0800 | [diff] [blame] | 1536 | int do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG ? 1 : 0; |
Kiyoshi Ueda | 2e93ccc | 2006-12-08 02:41:09 -0800 | [diff] [blame] | 1537 | int noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG ? 1 : 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1538 | |
Daniel Walker | e61290a | 2008-02-08 02:10:08 +0000 | [diff] [blame] | 1539 | mutex_lock(&md->suspend_lock); |
Alasdair G Kergon | 2ca3310 | 2005-07-28 21:16:00 -0700 | [diff] [blame] | 1540 | |
Milan Broz | 73d410c | 2008-02-08 02:10:25 +0000 | [diff] [blame] | 1541 | if (dm_suspended(md)) { |
| 1542 | r = -EINVAL; |
Alasdair G Kergon | d287483 | 2006-11-08 17:44:43 -0800 | [diff] [blame] | 1543 | goto out_unlock; |
Milan Broz | 73d410c | 2008-02-08 02:10:25 +0000 | [diff] [blame] | 1544 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1545 | |
| 1546 | map = dm_get_table(md); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1547 | |
Kiyoshi Ueda | 2e93ccc | 2006-12-08 02:41:09 -0800 | [diff] [blame] | 1548 | /* |
| 1549 | * DMF_NOFLUSH_SUSPENDING must be set before presuspend. |
| 1550 | * This flag is cleared before dm_suspend returns. |
| 1551 | */ |
| 1552 | if (noflush) |
| 1553 | set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); |
| 1554 | |
Alasdair G Kergon | cf222b3 | 2005-07-28 21:15:57 -0700 | [diff] [blame] | 1555 | /* This does not get reverted if there's an error later. */ |
| 1556 | dm_table_presuspend_targets(map); |
| 1557 | |
Jun'ichi Nomura | bfa152f | 2007-01-26 00:57:07 -0800 | [diff] [blame] | 1558 | /* bdget() can stall if the pending I/Os are not flushed */ |
| 1559 | if (!noflush) { |
| 1560 | md->suspended_bdev = bdget_disk(md->disk, 0); |
| 1561 | if (!md->suspended_bdev) { |
| 1562 | DMWARN("bdget failed in dm_suspend"); |
| 1563 | r = -ENOMEM; |
Kiyoshi Ueda | f431d96 | 2008-10-21 17:45:07 +0100 | [diff] [blame] | 1564 | goto out; |
Jun'ichi Nomura | bfa152f | 2007-01-26 00:57:07 -0800 | [diff] [blame] | 1565 | } |
Alasdair G Kergon | e39e2e9 | 2006-01-06 00:20:05 -0800 | [diff] [blame] | 1566 | |
Milan Broz | 6d6f10d | 2008-02-08 02:10:22 +0000 | [diff] [blame] | 1567 | /* |
| 1568 | * Flush I/O to the device. noflush supersedes do_lockfs, |
| 1569 | * because lock_fs() needs to flush I/Os. |
| 1570 | */ |
| 1571 | if (do_lockfs) { |
| 1572 | r = lock_fs(md); |
| 1573 | if (r) |
| 1574 | goto out; |
| 1575 | } |
Alasdair G Kergon | aa8d7c2 | 2006-01-06 00:20:06 -0800 | [diff] [blame] | 1576 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1577 | |
| 1578 | /* |
Alasdair G Kergon | 354e007 | 2005-05-05 16:16:05 -0700 | [diff] [blame] | 1579 | * First we set the BLOCK_IO flag so no more ios will be mapped. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1580 | */ |
Alasdair G Kergon | 2ca3310 | 2005-07-28 21:16:00 -0700 | [diff] [blame] | 1581 | down_write(&md->io_lock); |
| 1582 | set_bit(DMF_BLOCK_IO, &md->flags); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1583 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1584 | add_wait_queue(&md->wait, &wait); |
Alasdair G Kergon | 2ca3310 | 2005-07-28 21:16:00 -0700 | [diff] [blame] | 1585 | up_write(&md->io_lock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1586 | |
| 1587 | /* unplug */ |
Alasdair G Kergon | 2ca3310 | 2005-07-28 21:16:00 -0700 | [diff] [blame] | 1588 | if (map) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1589 | dm_table_unplug_all(map); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1590 | |
| 1591 | /* |
Milan Broz | 46125c1 | 2008-02-08 02:10:30 +0000 | [diff] [blame] | 1592 | * Wait for the already-mapped ios to complete. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1593 | */ |
Milan Broz | 46125c1 | 2008-02-08 02:10:30 +0000 | [diff] [blame] | 1594 | r = dm_wait_for_completion(md); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1595 | |
Alasdair G Kergon | 2ca3310 | 2005-07-28 21:16:00 -0700 | [diff] [blame] | 1596 | down_write(&md->io_lock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1597 | remove_wait_queue(&md->wait, &wait); |
| 1598 | |
Milan Broz | 6d6f10d | 2008-02-08 02:10:22 +0000 | [diff] [blame] | 1599 | if (noflush) |
| 1600 | __merge_pushback_list(md); |
Milan Broz | 94d6351 | 2008-02-08 02:10:27 +0000 | [diff] [blame] | 1601 | up_write(&md->io_lock); |
Kiyoshi Ueda | 2e93ccc | 2006-12-08 02:41:09 -0800 | [diff] [blame] | 1602 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1603 | 	/* were we interrupted? */ 
Milan Broz | 46125c1 | 2008-02-08 02:10:30 +0000 | [diff] [blame] | 1604 | if (r < 0) { |
Milan Broz | 304f3f6 | 2008-02-08 02:11:17 +0000 | [diff] [blame] | 1605 | dm_queue_flush(md, DM_WQ_FLUSH_DEFERRED, NULL); |
Milan Broz | 73d410c | 2008-02-08 02:10:25 +0000 | [diff] [blame] | 1606 | |
Alasdair G Kergon | 2ca3310 | 2005-07-28 21:16:00 -0700 | [diff] [blame] | 1607 | unlock_fs(md); |
Kiyoshi Ueda | 2e93ccc | 2006-12-08 02:41:09 -0800 | [diff] [blame] | 1608 | goto out; /* pushback list is already flushed, so skip flush */ |
Alasdair G Kergon | 2ca3310 | 2005-07-28 21:16:00 -0700 | [diff] [blame] | 1609 | } |
Alasdair G Kergon | 2ca3310 | 2005-07-28 21:16:00 -0700 | [diff] [blame] | 1610 | |
| 1611 | dm_table_postsuspend_targets(map); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1612 | |
| 1613 | set_bit(DMF_SUSPENDED, &md->flags); |
| 1614 | |
Alasdair G Kergon | 2ca3310 | 2005-07-28 21:16:00 -0700 | [diff] [blame] | 1615 | out: |
Alasdair G Kergon | e39e2e9 | 2006-01-06 00:20:05 -0800 | [diff] [blame] | 1616 | if (r && md->suspended_bdev) { |
| 1617 | bdput(md->suspended_bdev); |
| 1618 | md->suspended_bdev = NULL; |
| 1619 | } |
| 1620 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1621 | dm_table_put(map); |
Alasdair G Kergon | d287483 | 2006-11-08 17:44:43 -0800 | [diff] [blame] | 1622 | |
| 1623 | out_unlock: |
Daniel Walker | e61290a | 2008-02-08 02:10:08 +0000 | [diff] [blame] | 1624 | mutex_unlock(&md->suspend_lock); |
Alasdair G Kergon | cf222b3 | 2005-07-28 21:15:57 -0700 | [diff] [blame] | 1625 | return r; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1626 | } |
| 1627 | |
| 1628 | int dm_resume(struct mapped_device *md) |
| 1629 | { |
Alasdair G Kergon | cf222b3 | 2005-07-28 21:15:57 -0700 | [diff] [blame] | 1630 | int r = -EINVAL; |
Alasdair G Kergon | cf222b3 | 2005-07-28 21:15:57 -0700 | [diff] [blame] | 1631 | struct dm_table *map = NULL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1632 | |
Daniel Walker | e61290a | 2008-02-08 02:10:08 +0000 | [diff] [blame] | 1633 | mutex_lock(&md->suspend_lock); |
Alasdair G Kergon | 2ca3310 | 2005-07-28 21:16:00 -0700 | [diff] [blame] | 1634 | if (!dm_suspended(md)) |
Alasdair G Kergon | cf222b3 | 2005-07-28 21:15:57 -0700 | [diff] [blame] | 1635 | goto out; |
Alasdair G Kergon | cf222b3 | 2005-07-28 21:15:57 -0700 | [diff] [blame] | 1636 | |
| 1637 | map = dm_get_table(md); |
Alasdair G Kergon | 2ca3310 | 2005-07-28 21:16:00 -0700 | [diff] [blame] | 1638 | if (!map || !dm_table_get_size(map)) |
Alasdair G Kergon | cf222b3 | 2005-07-28 21:15:57 -0700 | [diff] [blame] | 1639 | goto out; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1640 | |
Milan Broz | 8757b77 | 2006-10-03 01:15:36 -0700 | [diff] [blame] | 1641 | r = dm_table_resume_targets(map); |
| 1642 | if (r) |
| 1643 | goto out; |
Alasdair G Kergon | 2ca3310 | 2005-07-28 21:16:00 -0700 | [diff] [blame] | 1644 | |
Milan Broz | 304f3f6 | 2008-02-08 02:11:17 +0000 | [diff] [blame] | 1645 | dm_queue_flush(md, DM_WQ_FLUSH_DEFERRED, NULL); |
Alasdair G Kergon | 2ca3310 | 2005-07-28 21:16:00 -0700 | [diff] [blame] | 1646 | |
| 1647 | unlock_fs(md); |
| 1648 | |
Jun'ichi Nomura | bfa152f | 2007-01-26 00:57:07 -0800 | [diff] [blame] | 1649 | if (md->suspended_bdev) { |
| 1650 | bdput(md->suspended_bdev); |
| 1651 | md->suspended_bdev = NULL; |
| 1652 | } |
Alasdair G Kergon | e39e2e9 | 2006-01-06 00:20:05 -0800 | [diff] [blame] | 1653 | |
Alasdair G Kergon | 2ca3310 | 2005-07-28 21:16:00 -0700 | [diff] [blame] | 1654 | clear_bit(DMF_SUSPENDED, &md->flags); |
| 1655 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1656 | dm_table_unplug_all(map); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1657 | |
Alasdair G Kergon | 69267a3 | 2007-12-13 14:15:57 +0000 | [diff] [blame] | 1658 | dm_kobject_uevent(md); |
Hannes Reinecke | 8560ed6 | 2006-10-03 01:15:35 -0700 | [diff] [blame] | 1659 | |
Alasdair G Kergon | cf222b3 | 2005-07-28 21:15:57 -0700 | [diff] [blame] | 1660 | r = 0; |
Alasdair G Kergon | 2ca3310 | 2005-07-28 21:16:00 -0700 | [diff] [blame] | 1661 | |
Alasdair G Kergon | cf222b3 | 2005-07-28 21:15:57 -0700 | [diff] [blame] | 1662 | out: |
| 1663 | dm_table_put(map); |
Daniel Walker | e61290a | 2008-02-08 02:10:08 +0000 | [diff] [blame] | 1664 | mutex_unlock(&md->suspend_lock); |
Alasdair G Kergon | 2ca3310 | 2005-07-28 21:16:00 -0700 | [diff] [blame] | 1665 | |
Alasdair G Kergon | cf222b3 | 2005-07-28 21:15:57 -0700 | [diff] [blame] | 1666 | return r; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1667 | } |
| 1668 | |
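Pulling the pieces above together, a hypothetical sketch of the table-replacement protocol described in the comment before dm_suspend(): suspend to flush in-flight bios and defer new ones, swap the table while suspended, then resume. Error handling here is deliberately much thinner than in the real ioctl code.

/* example only: not part of dm.c, illustrative caller */
static int example_replace_table(struct mapped_device *md, struct dm_table *t)
{
	int r;

	r = dm_suspend(md, DM_SUSPEND_LOCKFS_FLAG);	/* flush in-flight I/O, defer new bios */
	if (r)
		return r;

	r = dm_swap_table(md, t);	/* __unbind() the old table, __bind() the new one */
	if (r) {
		dm_resume(md);		/* on failure, put the device back into service */
		return r;
	}

	return dm_resume(md);		/* requeue deferred bios against the new table */
}
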
| 1669 | /*----------------------------------------------------------------- |
| 1670 | * Event notification. |
| 1671 | *---------------------------------------------------------------*/ |
Alasdair G Kergon | 69267a3 | 2007-12-13 14:15:57 +0000 | [diff] [blame] | 1672 | void dm_kobject_uevent(struct mapped_device *md) |
| 1673 | { |
Tejun Heo | ed9e198 | 2008-08-25 19:56:05 +0900 | [diff] [blame] | 1674 | kobject_uevent(&disk_to_dev(md->disk)->kobj, KOBJ_CHANGE); |
Alasdair G Kergon | 69267a3 | 2007-12-13 14:15:57 +0000 | [diff] [blame] | 1675 | } |
| 1676 | |
Mike Anderson | 7a8c3d3 | 2007-10-19 22:48:01 +0100 | [diff] [blame] | 1677 | uint32_t dm_next_uevent_seq(struct mapped_device *md) |
| 1678 | { |
| 1679 | return atomic_add_return(1, &md->uevent_seq); |
| 1680 | } |
| 1681 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1682 | uint32_t dm_get_event_nr(struct mapped_device *md) |
| 1683 | { |
| 1684 | return atomic_read(&md->event_nr); |
| 1685 | } |
| 1686 | |
| 1687 | int dm_wait_event(struct mapped_device *md, int event_nr) |
| 1688 | { |
| 1689 | return wait_event_interruptible(md->eventq, |
| 1690 | (event_nr != atomic_read(&md->event_nr))); |
| 1691 | } |
| 1692 | |
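And a hypothetical sketch of how the event counter is meant to be consumed: sample dm_get_event_nr(), then dm_wait_event() sleeps until event_callback() has bumped the counter and woken eventq, or returns non-zero if a signal arrives first.

/* example only: not part of dm.c, illustrative caller */
static int example_wait_for_table_event(struct mapped_device *md)
{
	uint32_t event_nr = dm_get_event_nr(md);	/* snapshot the current counter */

	/* Blocks until event_nr differs from the snapshot (interruptible). */
	return dm_wait_event(md, event_nr);
}
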
Mike Anderson | 7a8c3d3 | 2007-10-19 22:48:01 +0100 | [diff] [blame] | 1693 | void dm_uevent_add(struct mapped_device *md, struct list_head *elist) |
| 1694 | { |
| 1695 | unsigned long flags; |
| 1696 | |
| 1697 | spin_lock_irqsave(&md->uevent_lock, flags); |
| 1698 | list_add(elist, &md->uevent_list); |
| 1699 | spin_unlock_irqrestore(&md->uevent_lock, flags); |
| 1700 | } |
| 1701 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1702 | /* |
| 1703 | * The gendisk is only valid as long as you have a reference |
| 1704 | * count on 'md'. |
| 1705 | */ |
| 1706 | struct gendisk *dm_disk(struct mapped_device *md) |
| 1707 | { |
| 1708 | return md->disk; |
| 1709 | } |
| 1710 | |
| 1711 | int dm_suspended(struct mapped_device *md) |
| 1712 | { |
| 1713 | return test_bit(DMF_SUSPENDED, &md->flags); |
| 1714 | } |
| 1715 | |
Kiyoshi Ueda | 2e93ccc | 2006-12-08 02:41:09 -0800 | [diff] [blame] | 1716 | int dm_noflush_suspending(struct dm_target *ti) |
| 1717 | { |
| 1718 | struct mapped_device *md = dm_table_get_md(ti->table); |
| 1719 | int r = __noflush_suspending(md); |
| 1720 | |
| 1721 | dm_put(md); |
| 1722 | |
| 1723 | return r; |
| 1724 | } |
| 1725 | EXPORT_SYMBOL_GPL(dm_noflush_suspending); |
| 1726 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1727 | static struct block_device_operations dm_blk_dops = { |
| 1728 | .open = dm_blk_open, |
| 1729 | .release = dm_blk_close, |
Milan Broz | aa129a2 | 2006-10-03 01:15:15 -0700 | [diff] [blame] | 1730 | .ioctl = dm_blk_ioctl, |
Darrick J. Wong | 3ac51e7 | 2006-03-27 01:17:54 -0800 | [diff] [blame] | 1731 | .getgeo = dm_blk_getgeo, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1732 | .owner = THIS_MODULE |
| 1733 | }; |
| 1734 | |
| 1735 | EXPORT_SYMBOL(dm_get_mapinfo); |
| 1736 | |
| 1737 | /* |
| 1738 | * module hooks |
| 1739 | */ |
| 1740 | module_init(dm_init); |
| 1741 | module_exit(dm_exit); |
| 1742 | |
| 1743 | module_param(major, uint, 0); |
| 1744 | MODULE_PARM_DESC(major, "The major number of the device mapper"); |
| 1745 | MODULE_DESCRIPTION(DM_NAME " driver"); |
| 1746 | MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>"); |
| 1747 | MODULE_LICENSE("GPL"); |