// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to generic helper functions
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

struct bio *blk_next_bio(struct bio *bio, unsigned int nr_pages, gfp_t gfp)
{
        struct bio *new = bio_alloc(gfp, nr_pages);

        if (bio) {
                bio_chain(bio, new);
                submit_bio(bio);
        }

        return new;
}

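/*
 * Illustrative sketch (not part of the original file): the usage pattern
 * blk_next_bio() is built for. On the first call it simply allocates a
 * bio; on later calls it chains the current bio to a fresh one and
 * submits it, so a caller only ever holds the newest bio of an
 * arbitrarily long chain. The helper below is hypothetical; it zeroes
 * nr_bios pages starting at @sector using one bio per page.
 */
static int __maybe_unused example_bio_chain(struct block_device *bdev,
                sector_t sector, unsigned int nr_bios, gfp_t gfp)
{
        struct bio *bio = NULL;
        unsigned int i;
        int ret;

        for (i = 0; i < nr_bios; i++) {
                bio = blk_next_bio(bio, 1, gfp);
                bio->bi_iter.bi_sector = sector;
                bio_set_dev(bio, bdev);
                bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
                bio_add_page(bio, ZERO_PAGE(0), PAGE_SIZE, 0);
                sector += PAGE_SIZE >> 9;
        }
        if (!bio)
                return 0;
        /* Waiting on the last bio also waits for every chained parent. */
        ret = submit_bio_wait(bio);
        bio_put(bio);
        return ret;
}
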
int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
                sector_t nr_sects, gfp_t gfp_mask, int flags,
                struct bio **biop)
{
        struct request_queue *q = bdev_get_queue(bdev);
        struct bio *bio = *biop;
        unsigned int op;
        sector_t bs_mask;

        if (!q)
                return -ENXIO;

        if (bdev_read_only(bdev))
                return -EPERM;

        if (flags & BLKDEV_DISCARD_SECURE) {
                if (!blk_queue_secure_erase(q))
                        return -EOPNOTSUPP;
                op = REQ_OP_SECURE_ERASE;
        } else {
                if (!blk_queue_discard(q))
                        return -EOPNOTSUPP;
                op = REQ_OP_DISCARD;
        }

        bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
        if ((sector | nr_sects) & bs_mask)
                return -EINVAL;

        if (!nr_sects)
                return -EINVAL;

        while (nr_sects) {
                sector_t req_sects = min_t(sector_t, nr_sects,
                                bio_allowed_max_sectors(q));

                WARN_ON_ONCE((req_sects << 9) > UINT_MAX);

                bio = blk_next_bio(bio, 0, gfp_mask);
                bio->bi_iter.bi_sector = sector;
                bio_set_dev(bio, bdev);
                bio_set_op_attrs(bio, op, 0);

                bio->bi_iter.bi_size = req_sects << 9;
                sector += req_sects;
                nr_sects -= req_sects;

                /*
                 * We can loop for a long time in here, if someone does
                 * full device discards (like mkfs). Be nice and allow
                 * us to schedule out to avoid softlocking if preempt
                 * is disabled.
                 */
                cond_resched();
        }

        *biop = bio;
        return 0;
}
EXPORT_SYMBOL(__blkdev_issue_discard);

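/*
 * Illustrative sketch (not part of the original file): how a caller might
 * batch several discard ranges into one chain with __blkdev_issue_discard()
 * and submit them together, mirroring what blkdev_issue_discard() below
 * does for a single range. "example_discard_ranges" and its parameters are
 * hypothetical.
 */
static int __maybe_unused example_discard_ranges(struct block_device *bdev,
                const sector_t *start, const sector_t *len, int count)
{
        struct bio *bio = NULL;
        struct blk_plug plug;
        int i, ret = 0;

        blk_start_plug(&plug);
        for (i = 0; i < count; i++) {
                ret = __blkdev_issue_discard(bdev, start[i], len[i],
                                GFP_KERNEL, 0, &bio);
                if (ret)
                        break;
        }
        if (!ret && bio) {
                /* One wait covers every bio chained above. */
                ret = submit_bio_wait(bio);
                bio_put(bio);
        }
        blk_finish_plug(&plug);
        return ret;
}
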
/**
 * blkdev_issue_discard - queue a discard
 * @bdev: blockdev to issue discard for
 * @sector: start sector
 * @nr_sects: number of sectors to discard
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @flags: BLKDEV_DISCARD_* flags to control behaviour
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
                sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
        struct bio *bio = NULL;
        struct blk_plug plug;
        int ret;

        blk_start_plug(&plug);
        ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, flags,
                        &bio);
        if (!ret && bio) {
                ret = submit_bio_wait(bio);
                if (ret == -EOPNOTSUPP)
                        ret = 0;
                bio_put(bio);
        }
        blk_finish_plug(&plug);

        return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);

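/*
 * Example (hypothetical caller; "bdev" is assumed to come from something
 * like blkdev_get_by_path()): discard the first 1 MiB of the device, i.e.
 * 2048 512-byte sectors:
 *
 *      int err = blkdev_issue_discard(bdev, 0, 2048, GFP_KERNEL, 0);
 *
 * Returns 0 on success and a negative errno on failure; an -EOPNOTSUPP
 * from the submitted bio itself is suppressed above.
 */
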
/**
 * __blkdev_issue_write_same - generate a number of bios with the same page
 * @bdev: target blockdev
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @page: page containing data to write
 * @biop: pointer to anchor bio
 *
 * Description:
 *    Generate and issue a number of REQ_OP_WRITE_SAME bios that all carry
 *    the same page as payload.
 */
static int __blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
                sector_t nr_sects, gfp_t gfp_mask, struct page *page,
                struct bio **biop)
{
        struct request_queue *q = bdev_get_queue(bdev);
        unsigned int max_write_same_sectors;
        struct bio *bio = *biop;
        sector_t bs_mask;

        if (!q)
                return -ENXIO;

        if (bdev_read_only(bdev))
                return -EPERM;

        bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
        if ((sector | nr_sects) & bs_mask)
                return -EINVAL;

        if (!bdev_write_same(bdev))
                return -EOPNOTSUPP;

        /* Ensure that max_write_same_sectors doesn't overflow bi_size */
        max_write_same_sectors = bio_allowed_max_sectors(q);

        while (nr_sects) {
                bio = blk_next_bio(bio, 1, gfp_mask);
                bio->bi_iter.bi_sector = sector;
                bio_set_dev(bio, bdev);
                bio->bi_vcnt = 1;
                bio->bi_io_vec->bv_page = page;
                bio->bi_io_vec->bv_offset = 0;
                bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);
                bio_set_op_attrs(bio, REQ_OP_WRITE_SAME, 0);

                if (nr_sects > max_write_same_sectors) {
                        bio->bi_iter.bi_size = max_write_same_sectors << 9;
                        nr_sects -= max_write_same_sectors;
                        sector += max_write_same_sectors;
                } else {
                        bio->bi_iter.bi_size = nr_sects << 9;
                        nr_sects = 0;
                }
                cond_resched();
        }

        *biop = bio;
        return 0;
}

/**
 * blkdev_issue_write_same - queue a write same operation
 * @bdev: target blockdev
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @page: page containing data
 *
 * Description:
 *    Issue a write same request for the sectors in question.
 */
int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
                sector_t nr_sects, gfp_t gfp_mask,
                struct page *page)
{
        struct bio *bio = NULL;
        struct blk_plug plug;
        int ret;

        blk_start_plug(&plug);
        ret = __blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask, page,
                        &bio);
        if (ret == 0 && bio) {
                ret = submit_bio_wait(bio);
                bio_put(bio);
        }
        blk_finish_plug(&plug);
        return ret;
}
EXPORT_SYMBOL(blkdev_issue_write_same);

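/*
 * Example (hypothetical caller): replicate one logical block of payload
 * held in "page" across 2048 sectors starting at sector 0, assuming the
 * device advertises WRITE SAME support:
 *
 *      int err = blkdev_issue_write_same(bdev, 0, 2048, GFP_KERNEL, page);
 *
 * Returns -EOPNOTSUPP when bdev_write_same() reports no support.
 */
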
static int __blkdev_issue_write_zeroes(struct block_device *bdev,
                sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
                struct bio **biop, unsigned flags)
{
        struct bio *bio = *biop;
        unsigned int max_write_zeroes_sectors;
        struct request_queue *q = bdev_get_queue(bdev);

        if (!q)
                return -ENXIO;

        if (bdev_read_only(bdev))
                return -EPERM;

        /* Ensure that max_write_zeroes_sectors doesn't overflow bi_size */
        max_write_zeroes_sectors = bdev_write_zeroes_sectors(bdev);

        if (max_write_zeroes_sectors == 0)
                return -EOPNOTSUPP;

        while (nr_sects) {
                bio = blk_next_bio(bio, 0, gfp_mask);
                bio->bi_iter.bi_sector = sector;
                bio_set_dev(bio, bdev);
                bio->bi_opf = REQ_OP_WRITE_ZEROES;
                if (flags & BLKDEV_ZERO_NOUNMAP)
                        bio->bi_opf |= REQ_NOUNMAP;

                if (nr_sects > max_write_zeroes_sectors) {
                        bio->bi_iter.bi_size = max_write_zeroes_sectors << 9;
                        nr_sects -= max_write_zeroes_sectors;
                        sector += max_write_zeroes_sectors;
                } else {
                        bio->bi_iter.bi_size = nr_sects << 9;
                        nr_sects = 0;
                }
                cond_resched();
        }

        *biop = bio;
        return 0;
}

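/*
 * Worked example (illustrative, not from the original file): with
 * max_write_zeroes_sectors == 65536 and nr_sects == 163840, the loop in
 * __blkdev_issue_write_zeroes() above emits two full bios of 65536
 * sectors (32 MiB each at 512 bytes per sector) followed by one bio of
 * 32768 sectors, all chained through blk_next_bio() so only the last
 * bio needs to be waited on.
 */
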
/*
 * Convert a number of 512B sectors to a number of pages.
 * The result is limited to a number of pages that can fit into a BIO.
 * Also make sure that the result is always at least 1 (page) for the cases
 * where nr_sects is lower than the number of sectors in a page.
 */
static unsigned int __blkdev_sectors_to_bio_pages(sector_t nr_sects)
{
        sector_t pages = DIV_ROUND_UP_SECTOR_T(nr_sects, PAGE_SIZE / 512);

        return min(pages, (sector_t)BIO_MAX_PAGES);
}

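/*
 * Worked example (illustrative): with 4 KiB pages, PAGE_SIZE / 512 == 8,
 * so nr_sects == 7 rounds up to 1 page and nr_sects == 24 needs 3 pages;
 * a huge range such as nr_sects == 1 << 30 would want 1 << 27 pages and
 * is clamped to BIO_MAX_PAGES (256), i.e. one fully packed bio.
 */
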
static int __blkdev_issue_zero_pages(struct block_device *bdev,
                sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
                struct bio **biop)
{
        struct request_queue *q = bdev_get_queue(bdev);
        struct bio *bio = *biop;
        int bi_size = 0;
        unsigned int sz;

        if (!q)
                return -ENXIO;

        if (bdev_read_only(bdev))
                return -EPERM;

        while (nr_sects != 0) {
                bio = blk_next_bio(bio, __blkdev_sectors_to_bio_pages(nr_sects),
                                gfp_mask);
                bio->bi_iter.bi_sector = sector;
                bio_set_dev(bio, bdev);
                bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

                while (nr_sects != 0) {
                        sz = min((sector_t) PAGE_SIZE, nr_sects << 9);
                        bi_size = bio_add_page(bio, ZERO_PAGE(0), sz, 0);
                        nr_sects -= bi_size >> 9;
                        sector += bi_size >> 9;
                        if (bi_size < sz)
                                break;
                }
                cond_resched();
        }

        *biop = bio;
        return 0;
}

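/*
 * Worked example (illustrative): zeroing nr_sects == 1024 with 4 KiB
 * pages asks blk_next_bio() for __blkdev_sectors_to_bio_pages(1024) == 128
 * pages; the inner loop then packs ZERO_PAGE(0) 128 times (512 KiB total)
 * and the outer loop finishes in a single pass. bio_add_page() returning
 * less than sz would instead mark the bio full and chain a new one.
 */
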
/**
 * __blkdev_issue_zeroout - generate a number of zero-filled write bios
 * @bdev: blockdev to issue the writes for
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @biop: pointer to anchor bio
 * @flags: controls detailed behavior
 *
 * Description:
 *    Zero-fill a block range, either using hardware offload or by explicitly
 *    writing zeroes to the device.
 *
 *    If a device is using logical block provisioning, the underlying space
 *    will not be released if %flags contains BLKDEV_ZERO_NOUNMAP.
 *
 *    If %flags contains BLKDEV_ZERO_NOFALLBACK, the function will return
 *    -EOPNOTSUPP if no explicit hardware offload for zeroing is provided.
 */
int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
                sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
                unsigned flags)
{
        int ret;
        sector_t bs_mask;

        bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
        if ((sector | nr_sects) & bs_mask)
                return -EINVAL;

        ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp_mask,
                        biop, flags);
        if (ret != -EOPNOTSUPP || (flags & BLKDEV_ZERO_NOFALLBACK))
                return ret;

        return __blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp_mask,
                        biop);
}
EXPORT_SYMBOL(__blkdev_issue_zeroout);

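/*
 * Example (hypothetical caller): zero a range only if the device can do
 * it via a WRITE ZEROES offload, refusing the ZERO_PAGE fallback:
 *
 *      struct bio *bio = NULL;
 *      int err = __blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_KERNEL,
 *                                       &bio, BLKDEV_ZERO_NOFALLBACK);
 *      if (!err && bio) {
 *              err = submit_bio_wait(bio);
 *              bio_put(bio);
 *      }
 *
 * err is -EOPNOTSUPP when no offload is available.
 */
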
/**
 * blkdev_issue_zeroout - zero-fill a block range
 * @bdev: blockdev to write
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @flags: controls detailed behavior
 *
 * Description:
 *    Zero-fill a block range, either using hardware offload or by explicitly
 *    writing zeroes to the device. See __blkdev_issue_zeroout() for the
 *    valid values for %flags.
 */
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
                sector_t nr_sects, gfp_t gfp_mask, unsigned flags)
{
        int ret = 0;
        sector_t bs_mask;
        struct bio *bio;
        struct blk_plug plug;
        bool try_write_zeroes = !!bdev_write_zeroes_sectors(bdev);

        bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
        if ((sector | nr_sects) & bs_mask)
                return -EINVAL;

retry:
        bio = NULL;
        blk_start_plug(&plug);
        if (try_write_zeroes) {
                ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects,
                                gfp_mask, &bio, flags);
        } else if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
                ret = __blkdev_issue_zero_pages(bdev, sector, nr_sects,
                                gfp_mask, &bio);
        } else {
                /* No zeroing offload support */
                ret = -EOPNOTSUPP;
        }
        if (ret == 0 && bio) {
                ret = submit_bio_wait(bio);
                bio_put(bio);
        }
        blk_finish_plug(&plug);
        if (ret && try_write_zeroes) {
                if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
                        try_write_zeroes = false;
                        goto retry;
                }
                if (!bdev_write_zeroes_sectors(bdev)) {
                        /*
                         * Zeroing offload support was indicated, but the
                         * device reported ILLEGAL REQUEST (for some devices
                         * there is no non-destructive way to verify whether
                         * WRITE ZEROES is actually supported).
                         */
                        ret = -EOPNOTSUPP;
                }
        }

        return ret;
}
EXPORT_SYMBOL(blkdev_issue_zeroout);
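
/*
 * Example (hypothetical caller): synchronously zero 2048 sectors starting
 * at sector 0, allowing both the WRITE ZEROES offload and the ZERO_PAGE
 * fallback, and permitting the device to unmap the range:
 *
 *      int err = blkdev_issue_zeroout(bdev, 0, 2048, GFP_KERNEL, 0);
 *
 * Passing BLKDEV_ZERO_NOUNMAP instead of 0 forbids unmapping, while
 * BLKDEV_ZERO_NOFALLBACK makes the call fail with -EOPNOTSUPP rather
 * than fall back to writing zero pages.
 */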