// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to generic helper functions
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

struct bio *blk_next_bio(struct bio *bio, unsigned int nr_pages, gfp_t gfp)
{
	struct bio *new = bio_alloc(gfp, nr_pages);

	if (bio) {
		bio_chain(bio, new);
		submit_bio(bio);
	}

	return new;
}
EXPORT_SYMBOL_GPL(blk_next_bio);
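
/*
 * Chaining sketch (illustrative, not a real caller in this file): the
 * helpers below pass the previous bio back in, so blk_next_bio() submits
 * it and chains it to a freshly allocated bio; only the last bio in the
 * chain needs to be waited on:
 *
 *	struct bio *bio = NULL;
 *	int ret;
 *
 *	while (work_left()) {
 *		bio = blk_next_bio(bio, 0, GFP_KERNEL);
 *		... set bi_sector, bi_size and the op for this chunk ...
 *	}
 *	if (bio) {
 *		ret = submit_bio_wait(bio);
 *		bio_put(bio);
 *	}
 */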

int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, int flags,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	struct bio *bio = *biop;
	unsigned int op;
	sector_t bs_mask, part_offset = 0;

	if (!q)
		return -ENXIO;

	if (bdev_read_only(bdev))
		return -EPERM;

	if (flags & BLKDEV_DISCARD_SECURE) {
		if (!blk_queue_secure_erase(q))
			return -EOPNOTSUPP;
		op = REQ_OP_SECURE_ERASE;
	} else {
		if (!blk_queue_discard(q))
			return -EOPNOTSUPP;
		op = REQ_OP_DISCARD;
	}

	/* In case the discard granularity isn't set by a buggy device driver */
	if (WARN_ON_ONCE(!q->limits.discard_granularity)) {
		char dev_name[BDEVNAME_SIZE];

		bdevname(bdev, dev_name);
		pr_err_ratelimited("%s: Error: discard_granularity is 0.\n", dev_name);
		return -EOPNOTSUPP;
	}

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	if (!nr_sects)
		return -EINVAL;

	/* In case the discard request targets a partition */
	if (bdev_is_partition(bdev))
		part_offset = bdev->bd_start_sect;

	while (nr_sects) {
		sector_t granularity_aligned_lba, req_sects;
		sector_t sector_mapped = sector + part_offset;

		granularity_aligned_lba = round_up(sector_mapped,
			q->limits.discard_granularity >> SECTOR_SHIFT);

		/*
		 * Check whether the discard bio starts at a discard_granularity
		 * aligned LBA:
		 * - If not: set (granularity_aligned_lba - sector_mapped) as the
		 *   bi_size of the first split bio, so that the second bio will
		 *   start at a discard_granularity aligned LBA on the device.
		 * - If so: use bio_aligned_discard_max_sectors() as the maximum
		 *   possible bi_size of the first split bio. Then when this bio
		 *   is split in the device driver, the resulting bios are very
		 *   likely to be aligned to the discard_granularity of the
		 *   device's queue.
		 */
		if (granularity_aligned_lba == sector_mapped)
			req_sects = min_t(sector_t, nr_sects,
					  bio_aligned_discard_max_sectors(q));
		else
			req_sects = min_t(sector_t, nr_sects,
					  granularity_aligned_lba - sector_mapped);
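
		/*
		 * Worked example (illustrative numbers, not taken from any
		 * particular device): with discard_granularity = 1 MiB, i.e.
		 * 2048 sectors, and sector_mapped = 3000,
		 * granularity_aligned_lba = round_up(3000, 2048) = 4096, so
		 * the first bio covers 4096 - 3000 = 1096 sectors and every
		 * following bio starts on a 2048-sector boundary.
		 */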

		WARN_ON_ONCE((req_sects << 9) > UINT_MAX);

		bio = blk_next_bio(bio, 0, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio_set_op_attrs(bio, op, 0);

		bio->bi_iter.bi_size = req_sects << 9;
		sector += req_sects;
		nr_sects -= req_sects;

		/*
		 * We can loop for a long time in here, if someone does
		 * full device discards (like mkfs). Be nice and allow
		 * us to schedule out to avoid softlocking if preempt
		 * is disabled.
		 */
		cond_resched();
	}

	*biop = bio;
	return 0;
}
EXPORT_SYMBOL(__blkdev_issue_discard);

/**
 * blkdev_issue_discard - queue a discard
 * @bdev: blockdev to issue discard for
 * @sector: start sector
 * @nr_sects: number of sectors to discard
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @flags: BLKDEV_DISCARD_* flags to control behaviour
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, flags,
			&bio);
	if (!ret && bio) {
		ret = submit_bio_wait(bio);
		if (ret == -EOPNOTSUPP)
			ret = 0;
		bio_put(bio);
	}
	blk_finish_plug(&plug);

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);
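
/*
 * Usage sketch (illustrative only; "my_bdev" is a made-up variable, and the
 * example discards the whole device the way mkfs-style tools do):
 *
 *	int err = blkdev_issue_discard(my_bdev, 0,
 *				       get_capacity(my_bdev->bd_disk),
 *				       GFP_KERNEL, 0);
 *	if (err)
 *		pr_warn("discard failed: %d\n", err);
 *
 * The helper plugs the queue, chains one bio per chunk via
 * __blkdev_issue_discard(), and waits for the whole chain to complete.
 */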

/**
 * __blkdev_issue_write_same - generate a number of bios with the same page
 * @bdev: target blockdev
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @page: page containing data to write
 * @biop: pointer to anchor bio
 *
 * Description:
 *    Generate and issue a number of REQ_OP_WRITE_SAME bios that all point
 *    at the same page.
 */
static int __blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct page *page,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	unsigned int max_write_same_sectors;
	struct bio *bio = *biop;
	sector_t bs_mask;

	if (!q)
		return -ENXIO;

	if (bdev_read_only(bdev))
		return -EPERM;

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	if (!bdev_write_same(bdev))
		return -EOPNOTSUPP;

	/* Ensure that max_write_same_sectors doesn't overflow bi_size */
	max_write_same_sectors = bio_allowed_max_sectors(q);

	while (nr_sects) {
		bio = blk_next_bio(bio, 1, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio->bi_vcnt = 1;
		bio->bi_io_vec->bv_page = page;
		bio->bi_io_vec->bv_offset = 0;
		bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);
		bio_set_op_attrs(bio, REQ_OP_WRITE_SAME, 0);

		if (nr_sects > max_write_same_sectors) {
			bio->bi_iter.bi_size = max_write_same_sectors << 9;
			nr_sects -= max_write_same_sectors;
			sector += max_write_same_sectors;
		} else {
			bio->bi_iter.bi_size = nr_sects << 9;
			nr_sects = 0;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}

/**
 * blkdev_issue_write_same - queue a write same operation
 * @bdev: target blockdev
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @page: page containing data
 *
 * Description:
 *    Issue a write same request for the sectors in question.
 */
int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
				sector_t nr_sects, gfp_t gfp_mask,
				struct page *page)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = __blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask, page,
			&bio);
	if (ret == 0 && bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_write_same);
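
/*
 * Usage sketch (illustrative; "my_bdev" is a made-up variable): replicate
 * one logical block of data across the first 8 sectors, assuming the
 * device advertises WRITE SAME support:
 *
 *	int err = blkdev_issue_write_same(my_bdev, 0, 8, GFP_KERNEL,
 *					  ZERO_PAGE(0));
 *
 * If bdev_write_same() reports no support, this fails with -EOPNOTSUPP
 * before any bio is issued.
 */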

static int __blkdev_issue_write_zeroes(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
		struct bio **biop, unsigned flags)
{
	struct bio *bio = *biop;
	unsigned int max_write_zeroes_sectors;
	struct request_queue *q = bdev_get_queue(bdev);

	if (!q)
		return -ENXIO;

	if (bdev_read_only(bdev))
		return -EPERM;

	/* Ensure that max_write_zeroes_sectors doesn't overflow bi_size */
	max_write_zeroes_sectors = bdev_write_zeroes_sectors(bdev);

	if (max_write_zeroes_sectors == 0)
		return -EOPNOTSUPP;

	while (nr_sects) {
		bio = blk_next_bio(bio, 0, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio->bi_opf = REQ_OP_WRITE_ZEROES;
		if (flags & BLKDEV_ZERO_NOUNMAP)
			bio->bi_opf |= REQ_NOUNMAP;

		if (nr_sects > max_write_zeroes_sectors) {
			bio->bi_iter.bi_size = max_write_zeroes_sectors << 9;
			nr_sects -= max_write_zeroes_sectors;
			sector += max_write_zeroes_sectors;
		} else {
			bio->bi_iter.bi_size = nr_sects << 9;
			nr_sects = 0;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}

/*
 * Convert a number of 512B sectors to a number of pages.
 * The result is limited to a number of pages that can fit into a BIO.
 * Also make sure that the result is always at least 1 (page) for the cases
 * where nr_sects is lower than the number of sectors in a page.
 */
static unsigned int __blkdev_sectors_to_bio_pages(sector_t nr_sects)
{
	sector_t pages = DIV_ROUND_UP_SECTOR_T(nr_sects, PAGE_SIZE / 512);

	return min(pages, (sector_t)BIO_MAX_VECS);
}
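
/*
 * Worked example, assuming PAGE_SIZE == 4096 (8 sectors per page) and
 * BIO_MAX_VECS == 256:
 *	nr_sects = 1    -> DIV_ROUND_UP(1, 8)    = 1 page
 *	nr_sects = 4096 -> DIV_ROUND_UP(4096, 8) = 512, capped to 256 pages
 */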

static int __blkdev_issue_zero_pages(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	struct bio *bio = *biop;
	int bi_size = 0;
	unsigned int sz;

	if (!q)
		return -ENXIO;

	if (bdev_read_only(bdev))
		return -EPERM;

	while (nr_sects != 0) {
		bio = blk_next_bio(bio, __blkdev_sectors_to_bio_pages(nr_sects),
				   gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

		while (nr_sects != 0) {
			sz = min((sector_t) PAGE_SIZE, nr_sects << 9);
			bi_size = bio_add_page(bio, ZERO_PAGE(0), sz, 0);
			nr_sects -= bi_size >> 9;
			sector += bi_size >> 9;
			if (bi_size < sz)
				break;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}

/**
 * __blkdev_issue_zeroout - generate a number of zero-filled write bios
 * @bdev: blockdev to issue
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @biop: pointer to anchor bio
 * @flags: controls detailed behavior
 *
 * Description:
 *    Zero-fill a block range, either using hardware offload or by explicitly
 *    writing zeroes to the device.
 *
 *    If a device is using logical block provisioning, the underlying space
 *    will not be released if %flags contains BLKDEV_ZERO_NOUNMAP.
 *
 *    If %flags contains BLKDEV_ZERO_NOFALLBACK, the function will return
 *    -EOPNOTSUPP if no explicit hardware offload for zeroing is provided.
 */
int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
		unsigned flags)
{
	int ret;
	sector_t bs_mask;

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp_mask,
			biop, flags);
	if (ret != -EOPNOTSUPP || (flags & BLKDEV_ZERO_NOFALLBACK))
		return ret;

	return __blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp_mask,
					 biop);
}
EXPORT_SYMBOL(__blkdev_issue_zeroout);
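
/*
 * Sketch of the anchor-bio pattern expected by __blkdev_issue_zeroout()
 * (illustrative caller; "fs_bdev", "start" and "len" are made-up names).
 * It mirrors what blkdev_issue_zeroout() below does internally:
 *
 *	struct bio *bio = NULL;
 *	struct blk_plug plug;
 *	int ret;
 *
 *	blk_start_plug(&plug);
 *	ret = __blkdev_issue_zeroout(fs_bdev, start, len, GFP_NOFS, &bio, 0);
 *	if (!ret && bio) {
 *		ret = submit_bio_wait(bio);
 *		bio_put(bio);
 *	}
 *	blk_finish_plug(&plug);
 */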

/**
 * blkdev_issue_zeroout - zero-fill a block range
 * @bdev: blockdev to write
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @flags: controls detailed behavior
 *
 * Description:
 *    Zero-fill a block range, either using hardware offload or by explicitly
 *    writing zeroes to the device.  See __blkdev_issue_zeroout() for the
 *    valid values for %flags.
 */
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned flags)
{
	int ret = 0;
	sector_t bs_mask;
	struct bio *bio;
	struct blk_plug plug;
	bool try_write_zeroes = !!bdev_write_zeroes_sectors(bdev);

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

retry:
	bio = NULL;
	blk_start_plug(&plug);
	if (try_write_zeroes) {
		ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects,
						  gfp_mask, &bio, flags);
	} else if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
		ret = __blkdev_issue_zero_pages(bdev, sector, nr_sects,
						gfp_mask, &bio);
	} else {
		/* No zeroing offload support */
		ret = -EOPNOTSUPP;
	}
	if (ret == 0 && bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);
	if (ret && try_write_zeroes) {
		if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
			try_write_zeroes = false;
			goto retry;
		}
		if (!bdev_write_zeroes_sectors(bdev)) {
			/*
			 * Zeroing offload support was indicated, but the
			 * device reported ILLEGAL REQUEST (for some devices
			 * there is no non-destructive way to verify whether
			 * WRITE ZEROES is actually supported).
			 */
			ret = -EOPNOTSUPP;
		}
	}

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_zeroout);
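
/*
 * Usage sketch (illustrative; "sb_bdev", "sector" and "nr_sects" are
 * made-up names):
 *
 *	// Prefer the offload, fall back to writing zero pages:
 *	err = blkdev_issue_zeroout(sb_bdev, sector, nr_sects, GFP_NOFS, 0);
 *
 *	// Offload only; -EOPNOTSUPP rather than a slow zero-page fallback:
 *	err = blkdev_issue_zeroout(sb_bdev, sector, nr_sects, GFP_NOFS,
 *				   BLKDEV_ZERO_NOFALLBACK);
 */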