// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to generic block device helpers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

static struct bio *next_bio(struct bio *bio, unsigned int nr_pages,
		gfp_t gfp)
{
	struct bio *new = bio_alloc(gfp, nr_pages);

	if (bio) {
		bio_chain(bio, new);
		submit_bio(bio);
	}

	return new;
}

int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, int flags,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	struct bio *bio = *biop;
	unsigned int granularity;
	unsigned int op;
	int alignment;
	sector_t bs_mask;

	if (!q)
		return -ENXIO;

	if (flags & BLKDEV_DISCARD_SECURE) {
		if (!blk_queue_secure_erase(q))
			return -EOPNOTSUPP;
		op = REQ_OP_SECURE_ERASE;
	} else {
		if (!blk_queue_discard(q))
			return -EOPNOTSUPP;
		op = REQ_OP_DISCARD;
	}

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	/* Zero-sector (unknown) and one-sector granularities are the same. */
	granularity = max(q->limits.discard_granularity >> 9, 1U);
	alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;

	while (nr_sects) {
		unsigned int req_sects;
		sector_t end_sect, tmp;

		/* Make sure bi_size doesn't overflow */
		req_sects = min_t(sector_t, nr_sects, UINT_MAX >> 9);

		/*
		 * If splitting a request, and the next starting sector would be
		 * misaligned, stop the discard at the previous aligned sector.
		 */
		end_sect = sector + req_sects;
		tmp = end_sect;
		if (req_sects < nr_sects &&
		    sector_div(tmp, granularity) != alignment) {
			end_sect = end_sect - alignment;
			sector_div(end_sect, granularity);
			end_sect = end_sect * granularity + alignment;
			req_sects = end_sect - sector;
		}
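		/*
		 * Worked example (illustrative): with granularity == 8,
		 * alignment == 0 and sector == 0, req_sects clamped to
		 * 8388607 gives end_sect % 8 == 7, so end_sect is rounded
		 * down to 8388600 and the next chunk starts on a
		 * granularity boundary.
		 */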

		bio = next_bio(bio, 0, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio_set_op_attrs(bio, op, 0);

		bio->bi_iter.bi_size = req_sects << 9;
		nr_sects -= req_sects;
		sector = end_sect;

		/*
		 * We can loop for a long time in here, if someone does
		 * full device discards (like mkfs). Be nice and allow
		 * us to schedule out to avoid softlocking if preempt
		 * is disabled.
		 */
		cond_resched();
	}

	*biop = bio;
	return 0;
}
EXPORT_SYMBOL(__blkdev_issue_discard);

/**
 * blkdev_issue_discard - queue a discard
 * @bdev: blockdev to issue discard for
 * @sector: start sector
 * @nr_sects: number of sectors to discard
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @flags: BLKDEV_DISCARD_* flags to control behaviour
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, flags,
			&bio);
	if (!ret && bio) {
		ret = submit_bio_wait(bio);
		if (ret == -EOPNOTSUPP)
			ret = 0;
		bio_put(bio);
	}
	blk_finish_plug(&plug);

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);
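
/*
 * Example usage (editor's illustrative sketch, not part of the original
 * file): discard a whole device, as an mkfs-style tool might. Assumes
 * the caller holds a reference on @bdev:
 *
 *	sector_t nr_sects = i_size_read(bdev->bd_inode) >> 9;
 *	int err = blkdev_issue_discard(bdev, 0, nr_sects, GFP_KERNEL, 0);
 */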

/**
 * __blkdev_issue_write_same - generate a number of bios with the same page
 * @bdev: target blockdev
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @page: page containing data to write
 * @biop: pointer to anchor bio
 *
 * Description:
 *  Generate and issue a number of bios (REQ_OP_WRITE_SAME) carrying the
 *  same page.
 */
static int __blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct page *page,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	unsigned int max_write_same_sectors;
	struct bio *bio = *biop;
	sector_t bs_mask;

	if (!q)
		return -ENXIO;

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	if (!bdev_write_same(bdev))
		return -EOPNOTSUPP;

	/* Ensure that max_write_same_sectors doesn't overflow bi_size */
	max_write_same_sectors = UINT_MAX >> 9;

	while (nr_sects) {
		bio = next_bio(bio, 1, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio->bi_vcnt = 1;
		bio->bi_io_vec->bv_page = page;
		bio->bi_io_vec->bv_offset = 0;
		bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);
		bio_set_op_attrs(bio, REQ_OP_WRITE_SAME, 0);

		if (nr_sects > max_write_same_sectors) {
			bio->bi_iter.bi_size = max_write_same_sectors << 9;
			nr_sects -= max_write_same_sectors;
			sector += max_write_same_sectors;
		} else {
			bio->bi_iter.bi_size = nr_sects << 9;
			nr_sects = 0;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}
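
/*
 * Note (editor's addition): each REQ_OP_WRITE_SAME bio built above
 * carries a single bvec covering one logical block, while
 * bi_iter.bi_size spans up to max_write_same_sectors; the device
 * replicates that one block across every sector in the range.
 */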

/**
 * blkdev_issue_write_same - queue a write same operation
 * @bdev: target blockdev
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @page: page containing data
 *
 * Description:
 *    Issue a write same request for the sectors in question.
 */
int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
				sector_t nr_sects, gfp_t gfp_mask,
				struct page *page)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = __blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask, page,
			&bio);
	if (ret == 0 && bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_write_same);
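
/*
 * Example usage (editor's illustrative sketch, not part of the original
 * file): stamp the contents of @page over 1 MiB (2048 512B sectors)
 * starting at @sector. @page must hold at least one logical block of
 * data:
 *
 *	int err = blkdev_issue_write_same(bdev, sector, 2048, GFP_KERNEL,
 *					  page);
 */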
224
Chaitanya Kulkarnia6f07882016-11-30 12:28:59 -0800225static int __blkdev_issue_write_zeroes(struct block_device *bdev,
226 sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
Christoph Hellwigd928be92017-04-05 19:21:09 +0200227 struct bio **biop, unsigned flags)
Chaitanya Kulkarnia6f07882016-11-30 12:28:59 -0800228{
229 struct bio *bio = *biop;
230 unsigned int max_write_zeroes_sectors;
231 struct request_queue *q = bdev_get_queue(bdev);
232
233 if (!q)
234 return -ENXIO;
235
236 /* Ensure that max_write_zeroes_sectors doesn't overflow bi_size */
237 max_write_zeroes_sectors = bdev_write_zeroes_sectors(bdev);
238
239 if (max_write_zeroes_sectors == 0)
240 return -EOPNOTSUPP;
241
242 while (nr_sects) {
243 bio = next_bio(bio, 0, gfp_mask);
244 bio->bi_iter.bi_sector = sector;
Christoph Hellwig74d46992017-08-23 19:10:32 +0200245 bio_set_dev(bio, bdev);
Christoph Hellwigd928be92017-04-05 19:21:09 +0200246 bio->bi_opf = REQ_OP_WRITE_ZEROES;
247 if (flags & BLKDEV_ZERO_NOUNMAP)
248 bio->bi_opf |= REQ_NOUNMAP;
Chaitanya Kulkarnia6f07882016-11-30 12:28:59 -0800249
250 if (nr_sects > max_write_zeroes_sectors) {
251 bio->bi_iter.bi_size = max_write_zeroes_sectors << 9;
252 nr_sects -= max_write_zeroes_sectors;
253 sector += max_write_zeroes_sectors;
254 } else {
255 bio->bi_iter.bi_size = nr_sects << 9;
256 nr_sects = 0;
257 }
258 cond_resched();
259 }
260
261 *biop = bio;
262 return 0;
263}

/*
 * Convert a number of 512B sectors to a number of pages.
 * The result is limited to a number of pages that can fit into a BIO.
 * Also make sure that the result is always at least 1 (page) for the cases
 * where nr_sects is lower than the number of sectors in a page.
 */
static unsigned int __blkdev_sectors_to_bio_pages(sector_t nr_sects)
{
	sector_t pages = DIV_ROUND_UP_SECTOR_T(nr_sects, PAGE_SIZE / 512);

	return min(pages, (sector_t)BIO_MAX_PAGES);
}
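
/*
 * Worked example (illustrative): with 4 KiB pages, PAGE_SIZE / 512 == 8,
 * so 2097152 sectors (1 GiB) rounds up to 262144 pages and is clamped to
 * BIO_MAX_PAGES, while a single-sector request still yields one page.
 */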

/**
 * __blkdev_issue_zeroout - generate a number of zero-filled write bios
 * @bdev: blockdev to issue
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @biop: pointer to anchor bio
 * @flags: controls detailed behavior
 *
 * Description:
 *  Zero-fill a block range, either using hardware offload or by explicitly
 *  writing zeroes to the device.
 *
 *  Note that this function may fail with -EOPNOTSUPP if the driver signals
 *  zeroing offload support, but the device fails to process the command (for
 *  some devices there is no non-destructive way to verify whether this
 *  operation is actually supported). In this case the caller should retry
 *  the call to blkdev_issue_zeroout() and the fallback path will be used.
 *
 *  If a device is using logical block provisioning, the underlying space will
 *  not be released if %flags contains BLKDEV_ZERO_NOUNMAP.
 *
 *  If %flags contains BLKDEV_ZERO_NOFALLBACK, the function will return
 *  -EOPNOTSUPP if no explicit hardware offload for zeroing is provided.
 */
int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
		unsigned flags)
{
	int ret;
	int bi_size = 0;
	struct bio *bio = *biop;
	unsigned int sz;
	sector_t bs_mask;

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp_mask,
			biop, flags);
	if (ret != -EOPNOTSUPP || (flags & BLKDEV_ZERO_NOFALLBACK))
		goto out;

	ret = 0;
	while (nr_sects != 0) {
		bio = next_bio(bio, __blkdev_sectors_to_bio_pages(nr_sects),
			       gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

		while (nr_sects != 0) {
			sz = min((sector_t) PAGE_SIZE, nr_sects << 9);
			bi_size = bio_add_page(bio, ZERO_PAGE(0), sz, 0);
			nr_sects -= bi_size >> 9;
			sector += bi_size >> 9;
			if (bi_size < sz)
				break;
		}
		cond_resched();
	}

	*biop = bio;
out:
	return ret;
}
EXPORT_SYMBOL(__blkdev_issue_zeroout);
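
/*
 * Example usage (editor's illustrative sketch, not part of the original
 * file): zero two discontiguous ranges with one chain of bios and a
 * single wait, batching submission under a plug:
 *
 *	struct bio *bio = NULL;
 *	struct blk_plug plug;
 *	int ret;
 *
 *	blk_start_plug(&plug);
 *	ret = __blkdev_issue_zeroout(bdev, start0, len0, GFP_KERNEL,
 *				     &bio, 0);
 *	if (!ret)
 *		ret = __blkdev_issue_zeroout(bdev, start1, len1, GFP_KERNEL,
 *					     &bio, 0);
 *	if (!ret && bio) {
 *		ret = submit_bio_wait(bio);
 *		bio_put(bio);
 *	}
 *	blk_finish_plug(&plug);
 */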

/**
 * blkdev_issue_zeroout - zero-fill a block range
 * @bdev: blockdev to write
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @flags: controls detailed behavior
 *
 * Description:
 *  Zero-fill a block range, either using hardware offload or by explicitly
 *  writing zeroes to the device. See __blkdev_issue_zeroout() for the
 *  valid values for %flags.
 */
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned flags)
{
	int ret;
	struct bio *bio = NULL;
	struct blk_plug plug;

	blk_start_plug(&plug);
	ret = __blkdev_issue_zeroout(bdev, sector, nr_sects, gfp_mask,
			&bio, flags);
	if (ret == 0 && bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_zeroout);
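
/*
 * Example usage (editor's illustrative sketch, not part of the original
 * file): zero 1 MiB starting at @sector, insisting on the hardware
 * offload:
 *
 *	int err = blkdev_issue_zeroout(bdev, sector, 2048, GFP_KERNEL,
 *				       BLKDEV_ZERO_NOFALLBACK);
 *
 * An -EOPNOTSUPP return means no zeroing offload is available; the
 * caller may retry with %flags == 0 to use the zero-page fallback.
 */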