/*
 * Functions related to generic block device helpers.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"
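/*
 * Helper for tracking a batch of bios against a single completion.
 * @done starts at 1 so the batch cannot complete while the issuer is
 * still submitting: each submitted bio takes a reference, the end_io
 * callback drops one, and the issuer drops the initial reference
 * before waiting (see the callers below).
 */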
struct bio_batch {
        atomic_t                done;
        unsigned long           flags;
        struct completion       *wait;
};

static void bio_batch_end_io(struct bio *bio, int err)
{
        struct bio_batch *bb = bio->bi_private;

        if (err && (err != -EOPNOTSUPP))
                clear_bit(BIO_UPTODATE, &bb->flags);
        if (atomic_dec_and_test(&bb->done))
                complete(bb->wait);
        bio_put(bio);
}

/**
 * blkdev_issue_discard - queue a discard
 * @bdev:	blockdev to issue discard for
 * @sector:	start sector
 * @nr_sects:	number of sectors to discard
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	BLKDEV_DISCARD_* flags to control behaviour
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
                sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
        DECLARE_COMPLETION_ONSTACK(wait);
        struct request_queue *q = bdev_get_queue(bdev);
        int type = REQ_WRITE | REQ_DISCARD;
        unsigned int max_discard_sectors, granularity;
        int alignment;
        struct bio_batch bb;
        struct bio *bio;
        int ret = 0;
        struct blk_plug plug;

        if (!q)
                return -ENXIO;

        if (!blk_queue_discard(q))
                return -EOPNOTSUPP;

        /* Zero-sector (unknown) and one-sector granularities are the same. */
        granularity = max(q->limits.discard_granularity >> 9, 1U);
        alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;

        /*
         * Ensure that max_discard_sectors is of the proper
         * granularity, so that requests stay aligned after a split.
         */
        max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
        max_discard_sectors -= max_discard_sectors % granularity;
        if (unlikely(!max_discard_sectors)) {
                /* Avoid infinite loop below. Being cautious never hurts. */
                return -EOPNOTSUPP;
        }

        if (flags & BLKDEV_DISCARD_SECURE) {
                if (!blk_queue_secdiscard(q))
                        return -EOPNOTSUPP;
                type |= REQ_SECURE;
        }

        atomic_set(&bb.done, 1);
        bb.flags = 1 << BIO_UPTODATE;
        bb.wait = &wait;

        blk_start_plug(&plug);
        while (nr_sects) {
                unsigned int req_sects;
                sector_t end_sect, tmp;

                bio = bio_alloc(gfp_mask, 1);
                if (!bio) {
                        ret = -ENOMEM;
                        break;
                }

                req_sects = min_t(sector_t, nr_sects, max_discard_sectors);

                /*
                 * If splitting a request, and the next starting sector would be
                 * misaligned, stop the discard at the previous aligned sector.
                 */
                end_sect = sector + req_sects;
                tmp = end_sect;
                if (req_sects < nr_sects &&
                    sector_div(tmp, granularity) != alignment) {
                        end_sect = end_sect - alignment;
                        sector_div(end_sect, granularity);
                        end_sect = end_sect * granularity + alignment;
                        req_sects = end_sect - sector;
                }
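                /*
                 * Worked example for the adjustment above (illustrative
                 * numbers): with granularity = 8, alignment = 0, sector = 0,
                 * req_sects = 100 and plenty of nr_sects remaining, end_sect
                 * starts at 100, which is not a multiple of the granularity;
                 * rounding it down gives end_sect = 96, so req_sects becomes
                 * 96 and the next chunk starts aligned at sector 96.
                 */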

                bio->bi_iter.bi_sector = sector;
                bio->bi_end_io = bio_batch_end_io;
                bio->bi_bdev = bdev;
                bio->bi_private = &bb;

                bio->bi_iter.bi_size = req_sects << 9;
                nr_sects -= req_sects;
                sector = end_sect;

                atomic_inc(&bb.done);
                submit_bio(type, bio);

                /*
                 * We can loop for a long time in here, if someone does
                 * full device discards (like mkfs). Be nice and allow
                 * us to schedule out to avoid softlocking if preempt
                 * is disabled.
                 */
                cond_resched();
        }
        blk_finish_plug(&plug);

        /* Wait for bios in-flight */
        if (!atomic_dec_and_test(&bb.done))
                wait_for_completion_io(&wait);

        if (!test_bit(BIO_UPTODATE, &bb.flags))
                ret = -EIO;

        return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);
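
/*
 * Example (editor's sketch, not part of the original file): discard an
 * entire device from process context. BLKDEV_DISCARD_SECURE could be
 * passed instead of 0 when blk_queue_secdiscard() reports support.
 *
 *	int example_discard_all(struct block_device *bdev)
 *	{
 *		sector_t nr_sects = i_size_read(bdev->bd_inode) >> 9;
 *
 *		return blkdev_issue_discard(bdev, 0, nr_sects, GFP_KERNEL, 0);
 *	}
 */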

/**
 * blkdev_issue_write_same - queue a write same operation
 * @bdev:	target blockdev
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @page:	page containing data to write
 *
 * Description:
 *    Issue a write same request for the sectors in question.
 */
int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
                            sector_t nr_sects, gfp_t gfp_mask,
                            struct page *page)
{
        DECLARE_COMPLETION_ONSTACK(wait);
        struct request_queue *q = bdev_get_queue(bdev);
        unsigned int max_write_same_sectors;
        struct bio_batch bb;
        struct bio *bio;
        int ret = 0;

        if (!q)
                return -ENXIO;

        max_write_same_sectors = q->limits.max_write_same_sectors;

        if (max_write_same_sectors == 0)
                return -EOPNOTSUPP;

        atomic_set(&bb.done, 1);
        bb.flags = 1 << BIO_UPTODATE;
        bb.wait = &wait;

        while (nr_sects) {
                bio = bio_alloc(gfp_mask, 1);
                if (!bio) {
                        ret = -ENOMEM;
                        break;
                }

                bio->bi_iter.bi_sector = sector;
                bio->bi_end_io = bio_batch_end_io;
                bio->bi_bdev = bdev;
                bio->bi_private = &bb;
                bio->bi_vcnt = 1;
                bio->bi_io_vec->bv_page = page;
                bio->bi_io_vec->bv_offset = 0;
                bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);

                if (nr_sects > max_write_same_sectors) {
                        bio->bi_iter.bi_size = max_write_same_sectors << 9;
                        nr_sects -= max_write_same_sectors;
                        sector += max_write_same_sectors;
                } else {
                        bio->bi_iter.bi_size = nr_sects << 9;
                        nr_sects = 0;
                }

                atomic_inc(&bb.done);
                submit_bio(REQ_WRITE | REQ_WRITE_SAME, bio);
        }

        /* Wait for bios in-flight */
        if (!atomic_dec_and_test(&bb.done))
                wait_for_completion_io(&wait);

        if (!test_bit(BIO_UPTODATE, &bb.flags))
                ret = -ENOTSUPP;

        return ret;
}
EXPORT_SYMBOL(blkdev_issue_write_same);
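
/*
 * Example (editor's sketch, not part of the original file): zero a sector
 * range by replicating the kernel's shared zero page with WRITE SAME, as
 * blkdev_issue_zeroout() below does when the device supports it.
 *
 *	int example_write_same_zero(struct block_device *bdev,
 *				    sector_t sector, sector_t nr_sects)
 *	{
 *		if (!bdev_write_same(bdev))
 *			return -EOPNOTSUPP;
 *		return blkdev_issue_write_same(bdev, sector, nr_sects,
 *					       GFP_KERNEL, ZERO_PAGE(0));
 *	}
 */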

/**
 * __blkdev_issue_zeroout - generate a number of zero-filled write bios
 * @bdev:	blockdev to issue
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 *
 * Description:
 *    Generate and issue a number of bios with zero-filled pages.
 */

static int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
                                  sector_t nr_sects, gfp_t gfp_mask)
{
        int ret;
        struct bio *bio;
        struct bio_batch bb;
        unsigned int sz;
        DECLARE_COMPLETION_ONSTACK(wait);

        atomic_set(&bb.done, 1);
        bb.flags = 1 << BIO_UPTODATE;
        bb.wait = &wait;

        ret = 0;
        while (nr_sects != 0) {
                bio = bio_alloc(gfp_mask,
                                min(nr_sects, (sector_t)BIO_MAX_PAGES));
                if (!bio) {
                        ret = -ENOMEM;
                        break;
                }

                bio->bi_iter.bi_sector = sector;
                bio->bi_bdev = bdev;
                bio->bi_end_io = bio_batch_end_io;
                bio->bi_private = &bb;

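                /*
                 * Pack zero pages into this bio; bio_add_page() returns the
                 * number of bytes actually added, which can fall short of
                 * the request once the bio is full. A short add ends this
                 * bio so it can be submitted and a fresh one started.
                 */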
                while (nr_sects != 0) {
                        sz = min((sector_t) PAGE_SIZE >> 9, nr_sects);
                        ret = bio_add_page(bio, ZERO_PAGE(0), sz << 9, 0);
                        nr_sects -= ret >> 9;
                        sector += ret >> 9;
                        if (ret < (sz << 9))
                                break;
                }
                ret = 0;
                atomic_inc(&bb.done);
                submit_bio(WRITE, bio);
        }

        /* Wait for bios in-flight */
        if (!atomic_dec_and_test(&bb.done))
                wait_for_completion_io(&wait);

        if (!test_bit(BIO_UPTODATE, &bb.flags))
                /* One of the bios in the batch completed with an error. */
                ret = -EIO;

        return ret;
}

/**
 * blkdev_issue_zeroout - zero-fill a block range
 * @bdev:	blockdev to write
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @discard:	whether to discard the block range
 *
 * Description:
 *    Zero-fill a block range. If the discard flag is set and the block
 *    device guarantees that subsequent READ operations to the block range
 *    in question will return zeroes, the blocks will be discarded. If the
 *    discard request fails, if the discard flag is not set, or if
 *    discard_zeroes_data is not supported, this function will resort to
 *    zeroing the blocks manually, thus provisioning (allocating,
 *    anchoring) them. If the block device supports the WRITE SAME command,
 *    blkdev_issue_zeroout() will use it to optimize the process of
 *    clearing the block range. Otherwise the zeroing will be performed
 *    using regular WRITE calls.
 */

int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
                         sector_t nr_sects, gfp_t gfp_mask, bool discard)
{
        struct request_queue *q = bdev_get_queue(bdev);

        if (discard && blk_queue_discard(q) && q->limits.discard_zeroes_data &&
            blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, 0) == 0)
                return 0;

        if (bdev_write_same(bdev) &&
            blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask,
                                    ZERO_PAGE(0)) == 0)
                return 0;

        return __blkdev_issue_zeroout(bdev, sector, nr_sects, gfp_mask);
}
EXPORT_SYMBOL(blkdev_issue_zeroout);
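
/*
 * Example (editor's sketch, not part of the original file): zero a range
 * while letting the block layer pick the cheapest mechanism; passing
 * discard = true lets ranges on discard_zeroes_data devices be discarded
 * rather than written.
 *
 *	int example_zero_extent(struct block_device *bdev, sector_t sector,
 *				sector_t nr_sects)
 *	{
 *		return blkdev_issue_zeroout(bdev, sector, nr_sects,
 *					    GFP_KERNEL, true);
 *	}
 */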