// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to segment and merge handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include <trace/events/block.h>

#include "blk.h"

static inline bool bio_will_gap(struct request_queue *q,
		struct request *prev_rq, struct bio *prev, struct bio *next)
{
	struct bio_vec pb, nb;

	if (!bio_has_data(prev) || !queue_virt_boundary(q))
		return false;

	/*
	 * Don't merge if the first bio starts at a non-zero offset; otherwise
	 * it is quite difficult to respect the sg gap limit. We work hard to
	 * merge a huge number of small bios in the mkfs case.
	 */
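	/*
	 * Example (hypothetical numbers): with a virt boundary mask of
	 * 0xfff (4 KiB, as e.g. NVMe advertises), a first bvec starting at
	 * offset 0x200 makes the "pb.bv_offset & mask" check below nonzero,
	 * so a gap is reported and the merge is refused.
	 */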
	if (prev_rq)
		bio_get_first_bvec(prev_rq->bio, &pb);
	else
		bio_get_first_bvec(prev, &pb);
	if (pb.bv_offset & queue_virt_boundary(q))
		return true;

	/*
	 * We don't need to worry about the merged segment ending at an
	 * unaligned virt boundary:
	 *
	 * - if 'pb' ends aligned, the merged segment ends aligned
	 * - if 'pb' ends unaligned, the next bio must consist of a single
	 *   bvec ('nb'), otherwise 'nb' can't merge with 'pb'
	 */
	bio_get_last_bvec(prev, &pb);
	bio_get_first_bvec(next, &nb);
	if (biovec_phys_mergeable(q, &pb, &nb))
		return false;
	return __bvec_gap_to_prev(q, &pb, nb.bv_offset);
}

static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
{
	return bio_will_gap(req->q, req, req->biotail, bio);
}

static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
{
	return bio_will_gap(req->q, NULL, bio, req->bio);
}

static struct bio *blk_bio_discard_split(struct request_queue *q,
					 struct bio *bio,
					 struct bio_set *bs,
					 unsigned *nsegs)
{
	unsigned int max_discard_sectors, granularity;
	int alignment;
	sector_t tmp;
	unsigned split_sectors;

	*nsegs = 1;

	/* Zero-sector (unknown) and one-sector granularities are the same. */
	granularity = max(q->limits.discard_granularity >> 9, 1U);

	max_discard_sectors = min(q->limits.max_discard_sectors,
			bio_allowed_max_sectors(q));
	max_discard_sectors -= max_discard_sectors % granularity;

	if (unlikely(!max_discard_sectors)) {
		/* XXX: warn */
		return NULL;
	}

	if (bio_sectors(bio) <= max_discard_sectors)
		return NULL;

	split_sectors = max_discard_sectors;

	/*
	 * If the next starting sector would be misaligned, stop the discard at
	 * the previous aligned sector.
	 */
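	/*
	 * Worked example (hypothetical numbers): granularity = 8 sectors,
	 * alignment = 0, bi_sector = 3 and split_sectors = 16 give
	 * tmp = 19; sector_div() leaves the remainder 19 % 8 = 3, so
	 * split_sectors becomes 13 and the split discard ends at sector
	 * 3 + 13 = 16, a granularity-aligned boundary.
	 */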
94 alignment = (q->limits.discard_alignment >> 9) % granularity;
95
96 tmp = bio->bi_iter.bi_sector + split_sectors - alignment;
97 tmp = sector_div(tmp, granularity);
98
99 if (split_sectors > tmp)
100 split_sectors -= tmp;
101
102 return bio_split(bio, split_sectors, GFP_NOIO, bs);
103}
104
Christoph Hellwig885fa132017-04-05 19:21:01 +0200105static struct bio *blk_bio_write_zeroes_split(struct request_queue *q,
106 struct bio *bio, struct bio_set *bs, unsigned *nsegs)
107{
Christoph Hellwigd665e122019-07-03 05:24:35 -0700108 *nsegs = 0;
Christoph Hellwig885fa132017-04-05 19:21:01 +0200109
110 if (!q->limits.max_write_zeroes_sectors)
111 return NULL;
112
113 if (bio_sectors(bio) <= q->limits.max_write_zeroes_sectors)
114 return NULL;
115
116 return bio_split(bio, q->limits.max_write_zeroes_sectors, GFP_NOIO, bs);
117}
118
Kent Overstreet54efd502015-04-23 22:37:18 -0700119static struct bio *blk_bio_write_same_split(struct request_queue *q,
120 struct bio *bio,
Ming Leibdced432015-10-20 23:13:52 +0800121 struct bio_set *bs,
122 unsigned *nsegs)
Kent Overstreet54efd502015-04-23 22:37:18 -0700123{
Ming Leibdced432015-10-20 23:13:52 +0800124 *nsegs = 1;
125
Kent Overstreet54efd502015-04-23 22:37:18 -0700126 if (!q->limits.max_write_same_sectors)
127 return NULL;
128
129 if (bio_sectors(bio) <= q->limits.max_write_same_sectors)
130 return NULL;
131
132 return bio_split(bio, q->limits.max_write_same_sectors, GFP_NOIO, bs);
133}
134
Bart Van Assche9cc51692019-08-01 15:50:44 -0700135/*
136 * Return the maximum number of sectors from the start of a bio that may be
137 * submitted as a single request to a block device. If enough sectors remain,
138 * align the end to the physical block size. Otherwise align the end to the
139 * logical block size. This approach minimizes the number of non-aligned
140 * requests that are submitted to a block device if the start of a bio is not
141 * aligned to a physical block boundary.
142 */
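/*
 * Worked example (hypothetical numbers): with a 4 KiB physical block size
 * (pbs = 8 sectors), a 512 B logical block size (lbs = 1 sector) and a bio
 * starting at sector 3, sectors = 255 gives start_offset = 3 and
 * max_sectors = (255 + 3) & ~7 = 256, so 256 - 3 = 253 sectors are allowed
 * and the I/O ends on the physical block boundary at sector 256.
 */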
static inline unsigned get_max_io_size(struct request_queue *q,
				       struct bio *bio)
{
	unsigned sectors = blk_max_size_offset(q, bio->bi_iter.bi_sector);
	unsigned max_sectors = sectors;
	unsigned pbs = queue_physical_block_size(q) >> SECTOR_SHIFT;
	unsigned lbs = queue_logical_block_size(q) >> SECTOR_SHIFT;
	unsigned start_offset = bio->bi_iter.bi_sector & (pbs - 1);

	max_sectors += start_offset;
	max_sectors &= ~(pbs - 1);
	if (max_sectors > start_offset)
		return max_sectors - start_offset;

	return sectors & ~(lbs - 1);
}

static inline unsigned get_max_segment_size(const struct request_queue *q,
					    struct page *start_page,
					    unsigned long offset)
{
	unsigned long mask = queue_segment_boundary(q);

	offset = mask & (page_to_phys(start_page) + offset);

	/*
	 * An overflow may be triggered when the page's physical address is
	 * zero on a 32-bit arch; use the queue's max segment size when that
	 * happens.
	 */
	return min_not_zero(mask - offset + 1,
			(unsigned long)queue_max_segment_size(q));
}

/**
 * bvec_split_segs - verify whether or not a bvec should be split in the middle
 * @q:        [in] request queue associated with the bio associated with @bv
 * @bv:       [in] bvec to examine
 * @nsegs:    [in,out] Number of segments in the bio being built. Incremented
 *            by the number of segments from @bv that may be appended to that
 *            bio without exceeding @max_segs
 * @sectors:  [in,out] Number of sectors in the bio being built. Incremented
 *            by the number of sectors from @bv that may be appended to that
 *            bio without exceeding @max_sectors
 * @max_segs: [in] upper bound for *@nsegs
 * @max_sectors: [in] upper bound for *@sectors
 *
 * When splitting a bio, it can happen that a bvec is encountered that is too
 * big to fit in a single segment and hence that it has to be split in the
 * middle. This function verifies whether or not that should happen. The value
 * %true is returned if and only if appending the entire @bv to a bio with
 * *@nsegs segments and *@sectors sectors would make that bio unacceptable for
 * the block driver.
 */
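/*
 * Example (hypothetical numbers): for a 256 KiB bvec on a queue whose max
 * segment size is 64 KiB and whose boundary masks don't otherwise intervene,
 * the loop below accumulates four 64 KiB segments. %false is returned when
 * the whole bvec fits within @max_segs and @max_sectors; otherwise the
 * caller has to split.
 */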
static bool bvec_split_segs(const struct request_queue *q,
			    const struct bio_vec *bv, unsigned *nsegs,
			    unsigned *sectors, unsigned max_segs,
			    unsigned max_sectors)
{
	unsigned max_len = (min(max_sectors, UINT_MAX >> 9) - *sectors) << 9;
	unsigned len = min(bv->bv_len, max_len);
	unsigned total_len = 0;
	unsigned seg_size = 0;

	while (len && *nsegs < max_segs) {
		seg_size = get_max_segment_size(q, bv->bv_page,
						bv->bv_offset + total_len);
		seg_size = min(seg_size, len);

		(*nsegs)++;
		total_len += seg_size;
		len -= seg_size;

		if ((bv->bv_offset + total_len) & queue_virt_boundary(q))
			break;
	}

	*sectors += total_len >> 9;

	/* tell the caller to split the bvec if it is too big to fit */
	return len > 0 || bv->bv_len > max_len;
}

/**
 * blk_bio_segment_split - split a bio in two bios
 * @q:    [in] request queue pointer
 * @bio:  [in] bio to be split
 * @bs:   [in] bio set to allocate the clone from
 * @segs: [out] number of segments in the bio with the first half of the sectors
 *
 * Clone @bio, update the bi_iter of the clone to represent the first sectors
 * of @bio and update @bio->bi_iter to represent the remaining sectors. The
 * following is guaranteed for the cloned bio:
 * - That it has at most get_max_io_size(@q, @bio) sectors.
 * - That it has at most queue_max_segments(@q) segments.
 *
 * Except for discard requests the cloned bio will point at the bi_io_vec of
 * the original bio. It is the responsibility of the caller to ensure that the
 * original bio is not freed before the cloned bio. The caller is also
 * responsible for ensuring that @bs is only destroyed after processing of the
 * split bio has finished.
 */
static struct bio *blk_bio_segment_split(struct request_queue *q,
					 struct bio *bio,
					 struct bio_set *bs,
					 unsigned *segs)
{
	struct bio_vec bv, bvprv, *bvprvp = NULL;
	struct bvec_iter iter;
	unsigned nsegs = 0, sectors = 0;
	const unsigned max_sectors = get_max_io_size(q, bio);
	const unsigned max_segs = queue_max_segments(q);

	bio_for_each_bvec(bv, bio, iter) {
		/*
		 * If the queue doesn't support SG gaps and adding this
		 * offset would create a gap, disallow it.
		 */
		if (bvprvp && bvec_gap_to_prev(q, bvprvp, bv.bv_offset))
			goto split;

		if (nsegs < max_segs &&
		    sectors + (bv.bv_len >> 9) <= max_sectors &&
		    bv.bv_offset + bv.bv_len <= PAGE_SIZE) {
			nsegs++;
			sectors += bv.bv_len >> 9;
		} else if (bvec_split_segs(q, &bv, &nsegs, &sectors, max_segs,
					   max_sectors)) {
			goto split;
		}

		bvprv = bv;
		bvprvp = &bvprv;
	}

	*segs = nsegs;
	return NULL;
split:
	*segs = nsegs;
	return bio_split(bio, sectors, GFP_NOIO, bs);
}

/**
 * __blk_queue_split - split a bio and submit the second half
 * @q:       [in] request queue pointer
 * @bio:     [in, out] bio to be split
 * @nr_segs: [out] number of segments in the first bio
 *
 * Split a bio into two bios, chain the two bios, submit the second half and
 * store a pointer to the first half in *@bio. If the second bio is still too
 * big it will be split by a recursive call to this function. Since this
 * function may allocate a new bio from @q->bio_split, it is the responsibility
 * of the caller to ensure that @q is only released after processing of the
 * split bio has finished.
 */
void __blk_queue_split(struct request_queue *q, struct bio **bio,
		unsigned int *nr_segs)
{
	struct bio *split = NULL;

	switch (bio_op(*bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
		split = blk_bio_discard_split(q, *bio, &q->bio_split, nr_segs);
		break;
	case REQ_OP_WRITE_ZEROES:
		split = blk_bio_write_zeroes_split(q, *bio, &q->bio_split,
				nr_segs);
		break;
	case REQ_OP_WRITE_SAME:
		split = blk_bio_write_same_split(q, *bio, &q->bio_split,
				nr_segs);
		break;
	default:
		/*
		 * All drivers must accept single-segment bios that are <=
		 * PAGE_SIZE. This is a quick and dirty check that relies on
		 * the fact that bi_io_vec[0] is always valid if a bio has data.
		 * The check might lead to occasional false negatives when bios
		 * are cloned, but compared to the performance impact of cloned
		 * bios themselves the loop below doesn't matter anyway.
		 */
		if (!q->limits.chunk_sectors &&
		    (*bio)->bi_vcnt == 1 &&
		    ((*bio)->bi_io_vec[0].bv_len +
		     (*bio)->bi_io_vec[0].bv_offset) <= PAGE_SIZE) {
			*nr_segs = 1;
			break;
		}
		split = blk_bio_segment_split(q, *bio, &q->bio_split, nr_segs);
		break;
	}

	if (split) {
		/* there is no chance to merge the split bio */
		split->bi_opf |= REQ_NOMERGE;

		bio_chain(split, *bio);
		trace_block_split(q, split, (*bio)->bi_iter.bi_sector);
		generic_make_request(*bio);
		*bio = split;
	}
}

/**
 * blk_queue_split - split a bio and submit the second half
 * @q:   [in] request queue pointer
 * @bio: [in, out] bio to be split
 *
 * Split a bio into two bios, chain the two bios, submit the second half and
 * store a pointer to the first half in *@bio. Since this function may allocate
 * a new bio from @q->bio_split, it is the responsibility of the caller to
 * ensure that @q is only released after processing of the split bio has
 * finished.
 */
void blk_queue_split(struct request_queue *q, struct bio **bio)
{
	unsigned int nr_segs;

	__blk_queue_split(q, bio, &nr_segs);
}
EXPORT_SYMBOL(blk_queue_split);

unsigned int blk_recalc_rq_segments(struct request *rq)
{
	unsigned int nr_phys_segs = 0;
	unsigned int nr_sectors = 0;
	struct req_iterator iter;
	struct bio_vec bv;

	if (!rq->bio)
		return 0;

	switch (bio_op(rq->bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
	case REQ_OP_WRITE_ZEROES:
		return 0;
	case REQ_OP_WRITE_SAME:
		return 1;
	}

	rq_for_each_bvec(bv, rq, iter)
		bvec_split_segs(rq->q, &bv, &nr_phys_segs, &nr_sectors,
				UINT_MAX, UINT_MAX);
	return nr_phys_segs;
}

static inline struct scatterlist *blk_next_sg(struct scatterlist **sg,
		struct scatterlist *sglist)
{
	if (!*sg)
		return sglist;

	/*
	 * If the driver previously mapped a shorter list, we could see a
	 * termination bit prematurely unless it fully inits the sg table
	 * on each mapping. We KNOW that there must be more entries here
	 * or the driver would be buggy, so force clear the termination bit
	 * to avoid doing a full sg_init_table() in drivers for each command.
	 */
	sg_unmark_end(*sg);
	return sg_next(*sg);
}

static unsigned blk_bvec_map_sg(struct request_queue *q,
		struct bio_vec *bvec, struct scatterlist *sglist,
		struct scatterlist **sg)
{
	unsigned nbytes = bvec->bv_len;
	unsigned nsegs = 0, total = 0;

	while (nbytes > 0) {
		unsigned offset = bvec->bv_offset + total;
		unsigned len = min(get_max_segment_size(q, bvec->bv_page,
					offset), nbytes);
		struct page *page = bvec->bv_page;

		/*
		 * Unfortunately a fair number of drivers barf on scatterlists
		 * that have an offset larger than PAGE_SIZE, despite other
		 * subsystems dealing with that invariant just fine. For now
		 * stick to the legacy format where we never present those from
		 * the block layer, but the code below should be removed once
		 * these offenders (mostly MMC/SD drivers) are fixed.
		 */
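		/*
		 * Example (hypothetical numbers): with PAGE_SIZE = 4096 and
		 * offset = 5000, the two statements below advance page by
		 * one and reduce offset to 904, so no sg entry is presented
		 * with an offset beyond PAGE_SIZE.
		 */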
		page += (offset >> PAGE_SHIFT);
		offset &= ~PAGE_MASK;

		*sg = blk_next_sg(sg, sglist);
		sg_set_page(*sg, page, len, offset);

		total += len;
		nbytes -= len;
		nsegs++;
	}

	return nsegs;
}

static inline int __blk_bvec_map_sg(struct bio_vec bv,
		struct scatterlist *sglist, struct scatterlist **sg)
{
	*sg = blk_next_sg(sg, sglist);
	sg_set_page(*sg, bv.bv_page, bv.bv_len, bv.bv_offset);
	return 1;
}

/* only try to merge bvecs into one sg if they are from two bios */
static inline bool
__blk_segment_map_sg_merge(struct request_queue *q, struct bio_vec *bvec,
			   struct bio_vec *bvprv, struct scatterlist **sg)
{

	int nbytes = bvec->bv_len;

	if (!*sg)
		return false;

	if ((*sg)->length + nbytes > queue_max_segment_size(q))
		return false;

	if (!biovec_phys_mergeable(q, bvprv, bvec))
		return false;

	(*sg)->length += nbytes;

	return true;
}

static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
			     struct scatterlist *sglist,
			     struct scatterlist **sg)
{
	struct bio_vec uninitialized_var(bvec), bvprv = { NULL };
	struct bvec_iter iter;
	int nsegs = 0;
	bool new_bio = false;

	for_each_bio(bio) {
		bio_for_each_bvec(bvec, bio, iter) {
			/*
			 * Only try to merge bvecs from two different bios,
			 * since bio-internal merging has already been done
			 * when adding pages to the bio.
			 */
			if (new_bio &&
			    __blk_segment_map_sg_merge(q, &bvec, &bvprv, sg))
				goto next_bvec;

			if (bvec.bv_offset + bvec.bv_len <= PAGE_SIZE)
				nsegs += __blk_bvec_map_sg(bvec, sglist, sg);
			else
				nsegs += blk_bvec_map_sg(q, &bvec, sglist, sg);
 next_bvec:
			new_bio = false;
		}
		if (likely(bio->bi_iter.bi_size)) {
			bvprv = bvec;
			new_bio = true;
		}
	}

	return nsegs;
}

/*
 * map a request to a scatterlist; return the number of sg entries set up.
 * The caller must make sure sg can hold rq->nr_phys_segments entries.
 */
int __blk_rq_map_sg(struct request_queue *q, struct request *rq,
		struct scatterlist *sglist, struct scatterlist **last_sg)
{
	int nsegs = 0;

	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
		nsegs = __blk_bvec_map_sg(rq->special_vec, sglist, last_sg);
	else if (rq->bio && bio_op(rq->bio) == REQ_OP_WRITE_SAME)
		nsegs = __blk_bvec_map_sg(bio_iovec(rq->bio), sglist, last_sg);
	else if (rq->bio)
		nsegs = __blk_bios_map_sg(q, rq->bio, sglist, last_sg);

	if (*last_sg)
		sg_mark_end(*last_sg);

	/*
	 * Something must have gone wrong if the computed number of segments
	 * is bigger than the number of the request's physical segments.
	 */
	WARN_ON(nsegs > blk_rq_nr_phys_segments(rq));

	return nsegs;
}
EXPORT_SYMBOL(__blk_rq_map_sg);
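
/*
 * Minimal usage sketch (illustrative only, not taken from a real driver):
 *
 *	struct scatterlist *last_sg = NULL;
 *	int nsegs = __blk_rq_map_sg(rq->q, rq, sglist, &last_sg);
 *
 * where sglist provides at least blk_rq_nr_phys_segments(rq) entries; on
 * return the final entry has already been terminated with sg_mark_end().
 */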

static inline int ll_new_hw_segment(struct request *req, struct bio *bio,
		unsigned int nr_phys_segs)
{
	if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(req->q))
		goto no_merge;

	if (blk_integrity_merge_bio(req->q, req, bio) == false)
		goto no_merge;

	/*
	 * This will form the start of a new hw segment.  Bump the physical
	 * segment counter.
	 */
	req->nr_phys_segments += nr_phys_segs;
	return 1;

no_merge:
	req_set_nomerge(req->q, req);
	return 0;
}

int ll_back_merge_fn(struct request *req, struct bio *bio, unsigned int nr_segs)
{
	if (req_gap_back_merge(req, bio))
		return 0;
	if (blk_integrity_rq(req) &&
	    integrity_req_gap_back_merge(req, bio))
		return 0;
	if (!bio_crypt_ctx_back_mergeable(req, bio))
		return 0;
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req))) {
		req_set_nomerge(req->q, req);
		return 0;
	}

	return ll_new_hw_segment(req, bio, nr_segs);
}

int ll_front_merge_fn(struct request *req, struct bio *bio, unsigned int nr_segs)
{
	if (req_gap_front_merge(req, bio))
		return 0;
	if (blk_integrity_rq(req) &&
	    integrity_req_gap_front_merge(req, bio))
		return 0;
	if (!bio_crypt_ctx_front_mergeable(req, bio))
		return 0;
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req, bio->bi_iter.bi_sector)) {
		req_set_nomerge(req->q, req);
		return 0;
	}

	return ll_new_hw_segment(req, bio, nr_segs);
}

static bool req_attempt_discard_merge(struct request_queue *q, struct request *req,
		struct request *next)
{
	unsigned short segments = blk_rq_nr_discard_segments(req);

	if (segments >= queue_max_discard_segments(q))
		goto no_merge;
	if (blk_rq_sectors(req) + bio_sectors(next->bio) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
		goto no_merge;

	req->nr_phys_segments = segments + blk_rq_nr_discard_segments(next);
	return true;
no_merge:
	req_set_nomerge(q, req);
	return false;
}

static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
				struct request *next)
{
	int total_phys_segments;

	if (req_gap_back_merge(req, next->bio))
		return 0;

	/*
	 * Will it become too large?
	 */
	if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
		return 0;

	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
	if (total_phys_segments > queue_max_segments(q))
		return 0;

	if (blk_integrity_merge_rq(q, req, next) == false)
		return 0;

	if (!bio_crypt_ctx_merge_rq(req, next))
		return 0;

	/* Merge is OK... */
	req->nr_phys_segments = total_phys_segments;
	return 1;
}

/**
 * blk_rq_set_mixed_merge - mark a request as mixed merge
 * @rq: request to mark as mixed merge
 *
 * Description:
 *     @rq is about to be mixed merged.  Make sure the attributes
 *     which can be mixed are set in each bio and mark @rq as mixed
 *     merged.
 */
void blk_rq_set_mixed_merge(struct request *rq)
{
	unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
	struct bio *bio;

	if (rq->rq_flags & RQF_MIXED_MERGE)
		return;

	/*
	 * @rq will no longer represent mixable attributes for all the
	 * contained bios.  It will just track those of the first one.
	 * Distribute the attributes to each bio.
	 */
	for (bio = rq->bio; bio; bio = bio->bi_next) {
		WARN_ON_ONCE((bio->bi_opf & REQ_FAILFAST_MASK) &&
			     (bio->bi_opf & REQ_FAILFAST_MASK) != ff);
		bio->bi_opf |= ff;
	}
	rq->rq_flags |= RQF_MIXED_MERGE;
}

static void blk_account_io_merge_request(struct request *req)
{
	if (blk_do_io_stat(req)) {
		part_stat_lock();
		part_stat_inc(req->part, merges[op_stat_group(req_op(req))]);
		part_stat_unlock();

		hd_struct_put(req->part);
	}
}

/*
 * Two cases of handling DISCARD merge:
 * If max_discard_segments > 1, the driver takes every bio as a range
 * and sends them to the controller together. The ranges needn't be
 * contiguous.
 * Otherwise, the bios/requests will be handled the same as others,
 * which should be contiguous.
 */
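/*
 * For instance, a driver that advertises queue_max_discard_segments() > 1
 * (NVMe's DSM deallocate command is one such case) can accept several
 * non-contiguous discard ranges in a single request.
 */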
static inline bool blk_discard_mergable(struct request *req)
{
	if (req_op(req) == REQ_OP_DISCARD &&
	    queue_max_discard_segments(req->q) > 1)
		return true;
	return false;
}

static enum elv_merge blk_try_req_merge(struct request *req,
					struct request *next)
{
	if (blk_discard_mergable(req))
		return ELEVATOR_DISCARD_MERGE;
	else if (blk_rq_pos(req) + blk_rq_sectors(req) == blk_rq_pos(next))
		return ELEVATOR_BACK_MERGE;

	return ELEVATOR_NO_MERGE;
}

/*
 * For non-mq, this has to be called with the request spinlock acquired.
 * For mq with scheduling, the appropriate queue wide lock should be held.
 */
static struct request *attempt_merge(struct request_queue *q,
				     struct request *req, struct request *next)
{
	if (!rq_mergeable(req) || !rq_mergeable(next))
		return NULL;

	if (req_op(req) != req_op(next))
		return NULL;

	if (rq_data_dir(req) != rq_data_dir(next)
	    || req->rq_disk != next->rq_disk)
		return NULL;

	if (req_op(req) == REQ_OP_WRITE_SAME &&
	    !blk_write_same_mergeable(req->bio, next->bio))
		return NULL;

	/*
	 * Don't allow merge of different write hints, or for a hint with
	 * non-hint IO.
	 */
	if (req->write_hint != next->write_hint)
		return NULL;

	if (req->ioprio != next->ioprio)
		return NULL;

	/*
	 * If we are allowed to merge, then append bio list
	 * from next to rq and release next. merge_requests_fn
	 * will have updated segment counts, update sector
	 * counts here. Handle DISCARDs separately, as they
	 * have separate settings.
	 */

	switch (blk_try_req_merge(req, next)) {
	case ELEVATOR_DISCARD_MERGE:
		if (!req_attempt_discard_merge(q, req, next))
			return NULL;
		break;
	case ELEVATOR_BACK_MERGE:
		if (!ll_merge_requests_fn(q, req, next))
			return NULL;
		break;
	default:
		return NULL;
	}

	/*
	 * If failfast settings disagree or any of the two is already
	 * a mixed merge, mark both as mixed before proceeding.  This
	 * makes sure that all involved bios have mixable attributes
	 * set properly.
	 */
	if (((req->rq_flags | next->rq_flags) & RQF_MIXED_MERGE) ||
	    (req->cmd_flags & REQ_FAILFAST_MASK) !=
	    (next->cmd_flags & REQ_FAILFAST_MASK)) {
		blk_rq_set_mixed_merge(req);
		blk_rq_set_mixed_merge(next);
	}

	/*
	 * At this point we have either done a back merge or front merge. We
	 * need the smaller start_time_ns of the merged requests to be the
	 * current request for accounting purposes.
	 */
	if (next->start_time_ns < req->start_time_ns)
		req->start_time_ns = next->start_time_ns;

	req->biotail->bi_next = next->bio;
	req->biotail = next->biotail;

	req->__data_len += blk_rq_bytes(next);

	if (!blk_discard_mergable(req))
		elv_merge_requests(q, req, next);

	/*
	 * 'next' is going away, so update stats accordingly
	 */
	blk_account_io_merge_request(next);

	/*
	 * ownership of bio passed from next to req, return 'next' for
	 * the caller to free
	 */
	next->bio = NULL;
	return next;
}

struct request *attempt_back_merge(struct request_queue *q, struct request *rq)
{
	struct request *next = elv_latter_request(q, rq);

	if (next)
		return attempt_merge(q, rq, next);

	return NULL;
}

struct request *attempt_front_merge(struct request_queue *q, struct request *rq)
{
	struct request *prev = elv_former_request(q, rq);

	if (prev)
		return attempt_merge(q, prev, rq);

	return NULL;
}

int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
			  struct request *next)
{
	struct request *free;

	free = attempt_merge(q, rq, next);
	if (free) {
		blk_put_request(free);
		return 1;
	}

	return 0;
}

bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
{
	if (!rq_mergeable(rq) || !bio_mergeable(bio))
		return false;

	if (req_op(rq) != bio_op(bio))
		return false;

	/* different data direction or already started, don't merge */
	if (bio_data_dir(bio) != rq_data_dir(rq))
		return false;

	/* must be same device */
	if (rq->rq_disk != bio->bi_disk)
		return false;

	/* only merge integrity protected bio into ditto rq */
	if (blk_integrity_merge_bio(rq->q, rq, bio) == false)
		return false;

	/* Only merge if the crypt contexts are compatible */
	if (!bio_crypt_rq_ctx_compatible(rq, bio))
		return false;

	/* must be using the same buffer */
	if (req_op(rq) == REQ_OP_WRITE_SAME &&
	    !blk_write_same_mergeable(rq->bio, bio))
		return false;

	/*
	 * Don't allow merge of different write hints, or for a hint with
	 * non-hint IO.
	 */
	if (rq->write_hint != bio->bi_write_hint)
		return false;

	if (rq->ioprio != bio_prio(bio))
		return false;

	return true;
}

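/*
 * Example (hypothetical numbers): for a request covering sectors [100, 108),
 * a bio starting at sector 108 is a back merge candidate (100 + 8 == 108),
 * while an 8-sector bio starting at sector 92 is a front merge candidate
 * (100 - 8 == 92).
 */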
enum elv_merge blk_try_merge(struct request *rq, struct bio *bio)
{
	if (blk_discard_mergable(rq))
		return ELEVATOR_DISCARD_MERGE;
	else if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
		return ELEVATOR_BACK_MERGE;
	else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
		return ELEVATOR_FRONT_MERGE;
	return ELEVATOR_NO_MERGE;
}