/*
 * Functions related to generic helper functions
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"
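
/*
 * Helper for the functions below: allocate a new bio and, if @bio is
 * non-NULL, chain @bio to it and submit @bio. The new bio becomes the
 * parent in the chain, so it does not complete until every previously
 * submitted bio has; waiting on the bio returned by the final call
 * therefore waits for the whole chain.
 */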
static struct bio *next_bio(struct bio *bio, int rw, unsigned int nr_pages,
		gfp_t gfp)
{
	struct bio *new = bio_alloc(gfp, nr_pages);

	if (bio) {
		bio_chain(bio, new);
		submit_bio(rw, bio);
	}

	return new;
}
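
/**
 * __blkdev_issue_discard - queue discard bios without waiting for completion
 * @bdev: blockdev to issue discard for
 * @sector: start sector
 * @nr_sects: number of sectors to discard
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @type: REQ_WRITE | REQ_DISCARD, optionally ORed with REQ_SECURE
 * @biop: in/out anchor for the bio chain; set to the last unsubmitted bio
 *
 * Description:
 *    Queue discard requests for the sectors in question. The caller is
 *    responsible for submitting and waiting on *biop, as
 *    blkdev_issue_discard() below does.
 */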
int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, int type, struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	struct bio *bio = *biop;
	unsigned int granularity;
	int alignment;

	if (!q)
		return -ENXIO;
	if (!blk_queue_discard(q))
		return -EOPNOTSUPP;
	if ((type & REQ_SECURE) && !blk_queue_secdiscard(q))
		return -EOPNOTSUPP;

	/* Zero-sector (unknown) and one-sector granularities are the same. */
	granularity = max(q->limits.discard_granularity >> 9, 1U);
	alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;

	while (nr_sects) {
		unsigned int req_sects;
		sector_t end_sect, tmp;

		/* Make sure bi_size doesn't overflow */
		req_sects = min_t(sector_t, nr_sects, UINT_MAX >> 9);

		/*
		 * If splitting a request, and the next starting sector would be
		 * misaligned, stop the discard at the previous aligned sector.
		 */
		end_sect = sector + req_sects;
		tmp = end_sect;
		if (req_sects < nr_sects &&
		    sector_div(tmp, granularity) != alignment) {
			end_sect = end_sect - alignment;
			sector_div(end_sect, granularity);
			end_sect = end_sect * granularity + alignment;
			req_sects = end_sect - sector;
		}

		bio = next_bio(bio, type, 1, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio->bi_bdev = bdev;

		bio->bi_iter.bi_size = req_sects << 9;
		nr_sects -= req_sects;
		sector = end_sect;

		/*
		 * We can loop for a long time in here, if someone does
		 * full device discards (like mkfs). Be nice and allow
		 * us to schedule out to avoid softlocking if preempt
		 * is disabled.
		 */
		cond_resched();
	}

	*biop = bio;
	return 0;
}
EXPORT_SYMBOL(__blkdev_issue_discard);

/**
 * blkdev_issue_discard - queue a discard
 * @bdev: blockdev to issue discard for
 * @sector: start sector
 * @nr_sects: number of sectors to discard
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @flags: BLKDEV_DISCARD_* flags to control behaviour
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
	int type = REQ_WRITE | REQ_DISCARD;
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	if (flags & BLKDEV_DISCARD_SECURE)
		type |= REQ_SECURE;

	blk_start_plug(&plug);
	ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, type,
			&bio);
	if (!ret && bio)
		ret = submit_bio_wait(type, bio);
	blk_finish_plug(&plug);

	return ret != -EOPNOTSUPP ? ret : 0;
}
EXPORT_SYMBOL(blkdev_issue_discard);
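
/*
 * Illustrative sketch, not part of the original file: a minimal caller of
 * blkdev_issue_discard(). The helper name and the fixed 1 MiB length are
 * assumptions made for the example only.
 */
static int __maybe_unused example_discard_first_mib(struct block_device *bdev)
{
	/* 1 MiB expressed in 512-byte sectors */
	sector_t nr_sects = (1024 * 1024) >> 9;

	/*
	 * Plain discard starting at sector 0; pass BLKDEV_DISCARD_SECURE
	 * in flags to request a secure discard instead.
	 */
	return blkdev_issue_discard(bdev, 0, nr_sects, GFP_KERNEL, 0);
}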

/**
 * blkdev_issue_write_same - queue a write same operation
 * @bdev: target blockdev
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @page: page containing data to write
 *
 * Description:
 *    Issue a write same request for the sectors in question.
 */
int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
			    sector_t nr_sects, gfp_t gfp_mask,
			    struct page *page)
{
	struct request_queue *q = bdev_get_queue(bdev);
	unsigned int max_write_same_sectors;
	struct bio *bio = NULL;
	int ret = 0;

	if (!q)
		return -ENXIO;

	/* Ensure that max_write_same_sectors doesn't overflow bi_size */
	max_write_same_sectors = UINT_MAX >> 9;

	while (nr_sects) {
		bio = next_bio(bio, REQ_WRITE | REQ_WRITE_SAME, 1, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio->bi_bdev = bdev;
		bio->bi_vcnt = 1;
		bio->bi_io_vec->bv_page = page;
		bio->bi_io_vec->bv_offset = 0;
		bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);

		if (nr_sects > max_write_same_sectors) {
			bio->bi_iter.bi_size = max_write_same_sectors << 9;
			nr_sects -= max_write_same_sectors;
			sector += max_write_same_sectors;
		} else {
			bio->bi_iter.bi_size = nr_sects << 9;
			nr_sects = 0;
		}
	}

	if (bio)
		ret = submit_bio_wait(REQ_WRITE | REQ_WRITE_SAME, bio);
	return ret != -EOPNOTSUPP ? ret : 0;
}
EXPORT_SYMBOL(blkdev_issue_write_same);
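
/*
 * Illustrative sketch, not part of the original file: using WRITE SAME to
 * replicate one page of data across a range. Passing ZERO_PAGE(0), as
 * blkdev_issue_zeroout() below does, turns this into a zeroing operation.
 * The helper name is an assumption made for the example only.
 */
static int __maybe_unused example_write_same_zeroes(struct block_device *bdev,
		sector_t sector, sector_t nr_sects)
{
	/* the device replicates the zero page across the whole range */
	return blkdev_issue_write_same(bdev, sector, nr_sects, GFP_KERNEL,
				       ZERO_PAGE(0));
}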

/**
 * __blkdev_issue_zeroout - generate a number of zero-filled write bios
 * @bdev: blockdev to write
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 *
 * Description:
 *    Generate and issue a number of bios with zero-filled pages.
 */
static int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
				  sector_t nr_sects, gfp_t gfp_mask)
{
	int ret;
	struct bio *bio = NULL;
	unsigned int sz;

	while (nr_sects != 0) {
		bio = next_bio(bio, WRITE,
			       min(nr_sects, (sector_t)BIO_MAX_PAGES),
			       gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio->bi_bdev = bdev;

		while (nr_sects != 0) {
			sz = min((sector_t)PAGE_SIZE >> 9, nr_sects);
			ret = bio_add_page(bio, ZERO_PAGE(0), sz << 9, 0);
			nr_sects -= ret >> 9;
			sector += ret >> 9;
			if (ret < (sz << 9))
				break;
		}
	}

	if (bio)
		return submit_bio_wait(WRITE, bio);
	return 0;
}

/**
 * blkdev_issue_zeroout - zero-fill a block range
 * @bdev: blockdev to write
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @discard: whether to discard the block range
 *
 * Description:
 *    Zero-fill a block range. If the discard flag is set and the block
 *    device guarantees that subsequent READ operations to the block range
 *    in question will return zeroes, the blocks will be discarded. If the
 *    discard request fails, the discard flag is not set, or
 *    discard_zeroes_data is not supported, this function will resort to
 *    zeroing the blocks manually, thus provisioning (allocating,
 *    anchoring) them. If the block device supports the WRITE SAME command,
 *    blkdev_issue_zeroout() will use it to optimize the process of
 *    clearing the block range. Otherwise the zeroing will be performed
 *    using regular WRITE calls.
 */
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
			 sector_t nr_sects, gfp_t gfp_mask, bool discard)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (discard && blk_queue_discard(q) && q->limits.discard_zeroes_data &&
	    blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, 0) == 0)
		return 0;

	if (bdev_write_same(bdev) &&
	    blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask,
				    ZERO_PAGE(0)) == 0)
		return 0;

	return __blkdev_issue_zeroout(bdev, sector, nr_sects, gfp_mask);
}
EXPORT_SYMBOL(blkdev_issue_zeroout);
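
/*
 * Illustrative sketch, not part of the original file: zeroing a range while
 * letting the block layer pick the cheapest mechanism. With @discard set to
 * true, blkdev_issue_zeroout() tries discard first (when the device reads
 * discarded blocks back as zeroes), then WRITE SAME, then plain writes. The
 * helper name is an assumption made for the example only.
 */
static int __maybe_unused example_zero_range(struct block_device *bdev,
		sector_t sector, sector_t nr_sects)
{
	return blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_KERNEL, true);
}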