// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to segment and merge handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-integrity.h>
#include <linux/scatterlist.h>

#include <trace/events/block.h>

#include "blk.h"
#include "blk-rq-qos.h"
#include "blk-throttle.h"

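/*
 * Helpers returning the first and last bvec of a bio in its multi-page bvec
 * representation. bio_get_last_bvec() has to walk the iterator because the
 * bio may end in the middle of an io vector.
 */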
static inline void bio_get_first_bvec(struct bio *bio, struct bio_vec *bv)
{
	*bv = mp_bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
}

static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv)
{
	struct bvec_iter iter = bio->bi_iter;
	int idx;

	bio_get_first_bvec(bio, bv);
	if (bv->bv_len == bio->bi_iter.bi_size)
		return;	/* this bio only has a single bvec */

	bio_advance_iter(bio, &iter, iter.bi_size);

	if (!iter.bi_bvec_done)
		idx = iter.bi_idx - 1;
	else	/* in the middle of bvec */
		idx = iter.bi_idx;

	*bv = bio->bi_io_vec[idx];

	/*
	 * iter.bi_bvec_done records actual length of the last bvec
	 * if this bio ends in the middle of one io vector
	 */
	if (iter.bi_bvec_done)
		bv->bv_len = iter.bi_bvec_done;
}

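/*
 * Check whether appending @next to @prev (the tail bio of @prev_rq, if one
 * is given) would create a memory gap that violates the queue's
 * virt_boundary mask, in which case the bios must not be merged.
 */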
static inline bool bio_will_gap(struct request_queue *q,
		struct request *prev_rq, struct bio *prev, struct bio *next)
{
	struct bio_vec pb, nb;

	if (!bio_has_data(prev) || !queue_virt_boundary(q))
		return false;

	/*
	 * Don't merge if the first bio starts at a non-zero offset; otherwise
	 * it is quite difficult to respect the sg gap limit. We work hard to
	 * merge a huge number of small single-bvec bios, as produced by e.g.
	 * mkfs.
	 */
	if (prev_rq)
		bio_get_first_bvec(prev_rq->bio, &pb);
	else
		bio_get_first_bvec(prev, &pb);
	if (pb.bv_offset & queue_virt_boundary(q))
		return true;

	/*
	 * We don't need to worry about the situation that the merged segment
	 * ends at an unaligned virt boundary:
	 *
	 * - if 'pb' ends aligned, the merged segment ends aligned
	 * - if 'pb' ends unaligned, the next bio must include
	 *   one single bvec of 'nb', otherwise the 'nb' can't
	 *   merge with 'pb'
	 */
	bio_get_last_bvec(prev, &pb);
	bio_get_first_bvec(next, &nb);
	if (biovec_phys_mergeable(q, &pb, &nb))
		return false;
	return __bvec_gap_to_prev(q, &pb, nb.bv_offset);
}

static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
{
	return bio_will_gap(req->q, req, req->biotail, bio);
}

static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
{
	return bio_will_gap(req->q, NULL, bio, req->bio);
}

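/*
 * Worked example for the alignment math below (numbers are illustrative
 * only): with granularity = 8 sectors, discard_alignment = 0,
 * max_discard_sectors rounded down to 16, and a bio starting at sector 4,
 * (4 + 16 - 0) % 8 == 4 sectors are trimmed off, so the split covers
 * sectors 4..15 and the next discard starts at sector 16, on a granularity
 * boundary.
 */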
static struct bio *blk_bio_discard_split(struct request_queue *q,
					 struct bio *bio,
					 struct bio_set *bs,
					 unsigned *nsegs)
{
	unsigned int max_discard_sectors, granularity;
	int alignment;
	sector_t tmp;
	unsigned split_sectors;

	*nsegs = 1;

	/* Zero-sector (unknown) and one-sector granularities are the same. */
	granularity = max(q->limits.discard_granularity >> 9, 1U);

	max_discard_sectors = min(q->limits.max_discard_sectors,
			bio_allowed_max_sectors(q));
	max_discard_sectors -= max_discard_sectors % granularity;

	if (unlikely(!max_discard_sectors)) {
		/* XXX: warn */
		return NULL;
	}

	if (bio_sectors(bio) <= max_discard_sectors)
		return NULL;

	split_sectors = max_discard_sectors;

	/*
	 * If the next starting sector would be misaligned, stop the discard at
	 * the previous aligned sector.
	 */
	alignment = (q->limits.discard_alignment >> 9) % granularity;

	tmp = bio->bi_iter.bi_sector + split_sectors - alignment;
	tmp = sector_div(tmp, granularity);

	if (split_sectors > tmp)
		split_sectors -= tmp;

	return bio_split(bio, split_sectors, GFP_NOIO, bs);
}

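/*
 * Write-zeroes bios carry no data, so no segment accounting is needed; they
 * are only capped at the queue's max_write_zeroes_sectors limit.
 */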
static struct bio *blk_bio_write_zeroes_split(struct request_queue *q,
		struct bio *bio, struct bio_set *bs, unsigned *nsegs)
{
	*nsegs = 0;

	if (!q->limits.max_write_zeroes_sectors)
		return NULL;

	if (bio_sectors(bio) <= q->limits.max_write_zeroes_sectors)
		return NULL;

	return bio_split(bio, q->limits.max_write_zeroes_sectors, GFP_NOIO, bs);
}

static struct bio *blk_bio_write_same_split(struct request_queue *q,
					    struct bio *bio,
					    struct bio_set *bs,
					    unsigned *nsegs)
{
	*nsegs = 1;

	if (!q->limits.max_write_same_sectors)
		return NULL;

	if (bio_sectors(bio) <= q->limits.max_write_same_sectors)
		return NULL;

	return bio_split(bio, q->limits.max_write_same_sectors, GFP_NOIO, bs);
}

/*
 * Return the maximum number of sectors from the start of a bio that may be
 * submitted as a single request to a block device. If enough sectors remain,
 * align the end to the physical block size. Otherwise align the end to the
 * logical block size. This approach minimizes the number of non-aligned
 * requests that are submitted to a block device if the start of a bio is not
 * aligned to a physical block boundary.
 */
static inline unsigned get_max_io_size(struct request_queue *q,
				       struct bio *bio)
{
	unsigned sectors = blk_max_size_offset(q, bio->bi_iter.bi_sector, 0);
	unsigned max_sectors = sectors;
	unsigned pbs = queue_physical_block_size(q) >> SECTOR_SHIFT;
	unsigned lbs = queue_logical_block_size(q) >> SECTOR_SHIFT;
	unsigned start_offset = bio->bi_iter.bi_sector & (pbs - 1);

	max_sectors += start_offset;
	max_sectors &= ~(pbs - 1);
	if (max_sectors > start_offset)
		return max_sectors - start_offset;

	return sectors & ~(lbs - 1);
}

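/*
 * A segment may neither exceed the queue's max segment size nor straddle the
 * segment boundary mask. For example (illustrative numbers), with a 64 KiB
 * boundary (mask 0xffff) and a physical address of 0x1f000, at most
 * 0x10000 - 0xf000 = 4 KiB remain until the boundary.
 */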
static inline unsigned get_max_segment_size(const struct request_queue *q,
					    struct page *start_page,
					    unsigned long offset)
{
	unsigned long mask = queue_segment_boundary(q);

	offset = mask & (page_to_phys(start_page) + offset);

	/*
	 * An overflow may be triggered when the page's physical address is
	 * zero on a 32-bit arch; use the queue's max segment size when that
	 * happens.
	 */
	return min_not_zero(mask - offset + 1,
			(unsigned long)queue_max_segment_size(q));
}

/**
 * bvec_split_segs - verify whether or not a bvec should be split in the middle
 * @q:        [in] request queue associated with the bio associated with @bv
 * @bv:       [in] bvec to examine
 * @nsegs:    [in,out] Number of segments in the bio being built. Incremented
 *            by the number of segments from @bv that may be appended to that
 *            bio without exceeding @max_segs
 * @sectors:  [in,out] Number of sectors in the bio being built. Incremented
 *            by the number of sectors from @bv that may be appended to that
 *            bio without exceeding @max_sectors
 * @max_segs: [in] upper bound for *@nsegs
 * @max_sectors: [in] upper bound for *@sectors
 *
 * When splitting a bio, it can happen that a bvec is encountered that is too
 * big to fit in a single segment and hence that it has to be split in the
 * middle. This function verifies whether or not that should happen. The value
 * %true is returned if and only if appending the entire @bv to a bio with
 * *@nsegs segments and *@sectors sectors would make that bio unacceptable for
 * the block driver.
 */
static bool bvec_split_segs(const struct request_queue *q,
			    const struct bio_vec *bv, unsigned *nsegs,
			    unsigned *sectors, unsigned max_segs,
			    unsigned max_sectors)
{
	unsigned max_len = (min(max_sectors, UINT_MAX >> 9) - *sectors) << 9;
	unsigned len = min(bv->bv_len, max_len);
	unsigned total_len = 0;
	unsigned seg_size = 0;

	while (len && *nsegs < max_segs) {
		seg_size = get_max_segment_size(q, bv->bv_page,
						bv->bv_offset + total_len);
		seg_size = min(seg_size, len);

		(*nsegs)++;
		total_len += seg_size;
		len -= seg_size;

		if ((bv->bv_offset + total_len) & queue_virt_boundary(q))
			break;
	}

	*sectors += total_len >> 9;

	/* tell the caller to split the bvec if it is too big to fit */
	return len > 0 || bv->bv_len > max_len;
}

/**
 * blk_bio_segment_split - split a bio in two bios
 * @q:    [in] request queue pointer
 * @bio:  [in] bio to be split
 * @bs:   [in] bio set to allocate the clone from
 * @segs: [out] number of segments in the bio with the first half of the sectors
 *
 * Clone @bio, update the bi_iter of the clone to represent the first sectors
 * of @bio and update @bio->bi_iter to represent the remaining sectors. The
 * following is guaranteed for the cloned bio:
 * - That it has at most get_max_io_size(@q, @bio) sectors.
 * - That it has at most queue_max_segments(@q) segments.
 *
 * Except for discard requests the cloned bio will point at the bi_io_vec of
 * the original bio. It is the responsibility of the caller to ensure that the
 * original bio is not freed before the cloned bio. The caller is also
 * responsible for ensuring that @bs is only destroyed after processing of the
 * split bio has finished.
 */
static struct bio *blk_bio_segment_split(struct request_queue *q,
					 struct bio *bio,
					 struct bio_set *bs,
					 unsigned *segs)
{
	struct bio_vec bv, bvprv, *bvprvp = NULL;
	struct bvec_iter iter;
	unsigned nsegs = 0, sectors = 0;
	const unsigned max_sectors = get_max_io_size(q, bio);
	const unsigned max_segs = queue_max_segments(q);

	bio_for_each_bvec(bv, bio, iter) {
		/*
		 * If the queue doesn't support SG gaps and adding this
		 * offset would create a gap, disallow it.
		 */
		if (bvprvp && bvec_gap_to_prev(q, bvprvp, bv.bv_offset))
			goto split;

		if (nsegs < max_segs &&
		    sectors + (bv.bv_len >> 9) <= max_sectors &&
		    bv.bv_offset + bv.bv_len <= PAGE_SIZE) {
			nsegs++;
			sectors += bv.bv_len >> 9;
		} else if (bvec_split_segs(q, &bv, &nsegs, &sectors, max_segs,
					   max_sectors)) {
			goto split;
		}

		bvprv = bv;
		bvprvp = &bvprv;
	}

	*segs = nsegs;
	return NULL;
split:
	*segs = nsegs;

	/*
	 * Bio splitting may cause subtle trouble such as hangs when doing
	 * sync iopoll in the direct IO path. Since the performance gain of
	 * iopoll for big IO can be trivial, disable iopoll when a split is
	 * needed.
	 */
	bio_clear_polled(bio);
	return bio_split(bio, sectors, GFP_NOIO, bs);
}

/**
 * __blk_queue_split - split a bio and submit the second half
 * @q:       [in] request_queue new bio is being queued at
 * @bio:     [in, out] bio to be split
 * @nr_segs: [out] number of segments in the first bio
 *
 * Split a bio into two bios, chain the two bios, submit the second half and
 * store a pointer to the first half in *@bio. If the second bio is still too
 * big it will be split by a recursive call to this function. Since this
 * function may allocate a new bio from q->bio_split, it is the responsibility
 * of the caller to ensure that q->bio_split is only released after processing
 * of the split bio has finished.
 */
void __blk_queue_split(struct request_queue *q, struct bio **bio,
		       unsigned int *nr_segs)
{
	struct bio *split = NULL;

	switch (bio_op(*bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
		split = blk_bio_discard_split(q, *bio, &q->bio_split, nr_segs);
		break;
	case REQ_OP_WRITE_ZEROES:
		split = blk_bio_write_zeroes_split(q, *bio, &q->bio_split,
				nr_segs);
		break;
	case REQ_OP_WRITE_SAME:
		split = blk_bio_write_same_split(q, *bio, &q->bio_split,
				nr_segs);
		break;
	default:
		split = blk_bio_segment_split(q, *bio, &q->bio_split, nr_segs);
		break;
	}

	if (split) {
		/* there is no chance to merge the split bio */
		split->bi_opf |= REQ_NOMERGE;

		bio_chain(split, *bio);
		trace_block_split(split, (*bio)->bi_iter.bi_sector);
		submit_bio_noacct(*bio);
		*bio = split;

		blk_throtl_charge_bio_split(*bio);
	}
}

/**
 * blk_queue_split - split a bio and submit the second half
 * @bio: [in, out] bio to be split
 *
 * Split a bio into two bios, chain the two bios, submit the second half and
 * store a pointer to the first half in *@bio. Since this function may allocate
 * a new bio from q->bio_split, it is the responsibility of the caller to ensure
 * that q->bio_split is only released after processing of the split bio has
 * finished.
 */
void blk_queue_split(struct bio **bio)
{
	struct request_queue *q = bdev_get_queue((*bio)->bi_bdev);
	unsigned int nr_segs;

	if (blk_may_split(q, *bio))
		__blk_queue_split(q, bio, &nr_segs);
}
EXPORT_SYMBOL(blk_queue_split);

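/*
 * Recompute the number of physical segments of a request from scratch, e.g.
 * after bios have been added to or removed from it. Discard requests are
 * counted per bio when the queue supports multi-range discards.
 */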
unsigned int blk_recalc_rq_segments(struct request *rq)
{
	unsigned int nr_phys_segs = 0;
	unsigned int nr_sectors = 0;
	struct req_iterator iter;
	struct bio_vec bv;

	if (!rq->bio)
		return 0;

	switch (bio_op(rq->bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
		if (queue_max_discard_segments(rq->q) > 1) {
			struct bio *bio = rq->bio;

			for_each_bio(bio)
				nr_phys_segs++;
			return nr_phys_segs;
		}
		return 1;
	case REQ_OP_WRITE_ZEROES:
		return 0;
	case REQ_OP_WRITE_SAME:
		return 1;
	}

	rq_for_each_bvec(bv, rq, iter)
		bvec_split_segs(rq->q, &bv, &nr_phys_segs, &nr_sectors,
				UINT_MAX, UINT_MAX);
	return nr_phys_segs;
}

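/*
 * Return the scatterlist entry the next segment should be written to:
 * either the first entry of @sglist or the successor of the last entry
 * filled in so far.
 */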
static inline struct scatterlist *blk_next_sg(struct scatterlist **sg,
		struct scatterlist *sglist)
{
	if (!*sg)
		return sglist;

	/*
	 * If the driver previously mapped a shorter list, we could see a
	 * termination bit prematurely unless it fully inits the sg table
	 * on each mapping. We KNOW that there must be more entries here
	 * or the driver would be buggy, so force clear the termination bit
	 * to avoid doing a full sg_init_table() in drivers for each command.
	 */
	sg_unmark_end(*sg);
	return sg_next(*sg);
}

static unsigned blk_bvec_map_sg(struct request_queue *q,
		struct bio_vec *bvec, struct scatterlist *sglist,
		struct scatterlist **sg)
{
	unsigned nbytes = bvec->bv_len;
	unsigned nsegs = 0, total = 0;

	while (nbytes > 0) {
		unsigned offset = bvec->bv_offset + total;
		unsigned len = min(get_max_segment_size(q, bvec->bv_page,
					offset), nbytes);
		struct page *page = bvec->bv_page;

		/*
		 * Unfortunately a fair number of drivers barf on scatterlists
		 * that have an offset larger than PAGE_SIZE, despite other
		 * subsystems dealing with that invariant just fine. For now
		 * stick to the legacy format where we never present those from
		 * the block layer, but the code below should be removed once
		 * these offenders (mostly MMC/SD drivers) are fixed.
		 */
		page += (offset >> PAGE_SHIFT);
		offset &= ~PAGE_MASK;

		*sg = blk_next_sg(sg, sglist);
		sg_set_page(*sg, page, len, offset);

		total += len;
		nbytes -= len;
		nsegs++;
	}

	return nsegs;
}

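/* Map a bvec that does not need splitting straight into a single sg entry. */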
static inline int __blk_bvec_map_sg(struct bio_vec bv,
		struct scatterlist *sglist, struct scatterlist **sg)
{
	*sg = blk_next_sg(sg, sglist);
	sg_set_page(*sg, bv.bv_page, bv.bv_len, bv.bv_offset);
	return 1;
}

/* only try to merge bvecs into one sg if they are from two bios */
static inline bool
__blk_segment_map_sg_merge(struct request_queue *q, struct bio_vec *bvec,
			   struct bio_vec *bvprv, struct scatterlist **sg)
{
	int nbytes = bvec->bv_len;

	if (!*sg)
		return false;

	if ((*sg)->length + nbytes > queue_max_segment_size(q))
		return false;

	if (!biovec_phys_mergeable(q, bvprv, bvec))
		return false;

	(*sg)->length += nbytes;

	return true;
}

static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
			     struct scatterlist *sglist,
			     struct scatterlist **sg)
{
	struct bio_vec bvec, bvprv = { NULL };
	struct bvec_iter iter;
	int nsegs = 0;
	bool new_bio = false;

	for_each_bio(bio) {
		bio_for_each_bvec(bvec, bio, iter) {
			/*
			 * Only try to merge bvecs from two different bios,
			 * given that bio-internal merging has already been
			 * done when adding pages to the bio.
			 */
			if (new_bio &&
			    __blk_segment_map_sg_merge(q, &bvec, &bvprv, sg))
				goto next_bvec;

			if (bvec.bv_offset + bvec.bv_len <= PAGE_SIZE)
				nsegs += __blk_bvec_map_sg(bvec, sglist, sg);
			else
				nsegs += blk_bvec_map_sg(q, &bvec, sglist, sg);
 next_bvec:
			new_bio = false;
		}
		if (likely(bio->bi_iter.bi_size)) {
			bvprv = bvec;
			new_bio = true;
		}
	}

	return nsegs;
}

/*
 * map a request to scatterlist, return number of sg entries setup. Caller
 * must make sure sg can hold rq->nr_phys_segments entries
 */
int __blk_rq_map_sg(struct request_queue *q, struct request *rq,
		struct scatterlist *sglist, struct scatterlist **last_sg)
{
	int nsegs = 0;

	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
		nsegs = __blk_bvec_map_sg(rq->special_vec, sglist, last_sg);
	else if (rq->bio && bio_op(rq->bio) == REQ_OP_WRITE_SAME)
		nsegs = __blk_bvec_map_sg(bio_iovec(rq->bio), sglist, last_sg);
	else if (rq->bio)
		nsegs = __blk_bios_map_sg(q, rq->bio, sglist, last_sg);

	if (*last_sg)
		sg_mark_end(*last_sg);

	/*
	 * Something must have gone wrong if the computed number of segments
	 * is bigger than the number of the request's physical segments.
	 */
	WARN_ON(nsegs > blk_rq_nr_phys_segments(rq));

	return nsegs;
}
EXPORT_SYMBOL(__blk_rq_map_sg);

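/*
 * Sketch of typical driver-side usage of the mapping above, under the
 * assumption that the driver goes through the blk_rq_map_sg() wrapper with a
 * preallocated table (the 'sgl' and 'nsegs' names here are hypothetical):
 *
 *	sg_init_table(sgl, queue_max_segments(q));
 *	nsegs = blk_rq_map_sg(q, rq, sgl);
 *	// the first 'nsegs' entries of 'sgl' are now ready for dma_map_sg()
 */
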
static inline unsigned int blk_rq_get_max_segments(struct request *rq)
{
	if (req_op(rq) == REQ_OP_DISCARD)
		return queue_max_discard_segments(rq->q);
	return queue_max_segments(rq->q);
}

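/*
 * Upper bound on the number of sectors that may be merged into @rq at
 * @offset: passthrough requests are only bounded by the hardware limit,
 * while devices with chunk_sectors set (e.g. zoned or striped ones) are
 * additionally capped so a request never crosses a chunk boundary.
 */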
static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
						  sector_t offset)
{
	struct request_queue *q = rq->q;

	if (blk_rq_is_passthrough(rq))
		return q->limits.max_hw_sectors;

	if (!q->limits.chunk_sectors ||
	    req_op(rq) == REQ_OP_DISCARD ||
	    req_op(rq) == REQ_OP_SECURE_ERASE)
		return blk_queue_get_max_sectors(q, req_op(rq));

	return min(blk_max_size_offset(q, offset, 0),
			blk_queue_get_max_sectors(q, req_op(rq)));
}

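/*
 * Check whether @bio's @nr_phys_segs segments can be added to @req without
 * exceeding the queue's segment limit, and update the request's segment
 * count if so. Returns 1 on success and 0 if the request must not grow.
 */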
static inline int ll_new_hw_segment(struct request *req, struct bio *bio,
		unsigned int nr_phys_segs)
{
	if (blk_integrity_merge_bio(req->q, req, bio) == false)
		goto no_merge;

	/* a discard request merge won't add a new segment */
	if (req_op(req) == REQ_OP_DISCARD)
		return 1;

	if (req->nr_phys_segments + nr_phys_segs > blk_rq_get_max_segments(req))
		goto no_merge;

	/*
	 * This will form the start of a new hw segment. Bump both
	 * counters.
	 */
	req->nr_phys_segments += nr_phys_segs;
	return 1;

no_merge:
	req_set_nomerge(req->q, req);
	return 0;
}

int ll_back_merge_fn(struct request *req, struct bio *bio, unsigned int nr_segs)
{
	if (req_gap_back_merge(req, bio))
		return 0;
	if (blk_integrity_rq(req) &&
	    integrity_req_gap_back_merge(req, bio))
		return 0;
	if (!bio_crypt_ctx_back_mergeable(req, bio))
		return 0;
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req))) {
		req_set_nomerge(req->q, req);
		return 0;
	}

	return ll_new_hw_segment(req, bio, nr_segs);
}

static int ll_front_merge_fn(struct request *req, struct bio *bio,
		unsigned int nr_segs)
{
	if (req_gap_front_merge(req, bio))
		return 0;
	if (blk_integrity_rq(req) &&
	    integrity_req_gap_front_merge(req, bio))
		return 0;
	if (!bio_crypt_ctx_front_mergeable(req, bio))
		return 0;
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req, bio->bi_iter.bi_sector)) {
		req_set_nomerge(req->q, req);
		return 0;
	}

	return ll_new_hw_segment(req, bio, nr_segs);
}

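/*
 * Multi-range discard merge: each bio in a merged discard request keeps its
 * own range, so the "segment" count here is the number of discard ranges
 * rather than the number of physical segments.
 */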
static bool req_attempt_discard_merge(struct request_queue *q, struct request *req,
		struct request *next)
{
	unsigned short segments = blk_rq_nr_discard_segments(req);

	if (segments >= queue_max_discard_segments(q))
		goto no_merge;
	if (blk_rq_sectors(req) + bio_sectors(next->bio) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
		goto no_merge;

	req->nr_phys_segments = segments + blk_rq_nr_discard_segments(next);
	return true;
no_merge:
	req_set_nomerge(q, req);
	return false;
}

static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
				struct request *next)
{
	int total_phys_segments;

	if (req_gap_back_merge(req, next->bio))
		return 0;

	/*
	 * Will it become too large?
	 */
	if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
		return 0;

	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
	if (total_phys_segments > blk_rq_get_max_segments(req))
		return 0;

	if (blk_integrity_merge_rq(q, req, next) == false)
		return 0;

	if (!bio_crypt_ctx_merge_rq(req, next))
		return 0;

	/* Merge is OK... */
	req->nr_phys_segments = total_phys_segments;
	return 1;
}

/**
 * blk_rq_set_mixed_merge - mark a request as mixed merge
 * @rq: request to mark as mixed merge
 *
 * Description:
 *     @rq is about to be mixed merged.  Make sure the attributes
 *     which can be mixed are set in each bio and mark @rq as mixed
 *     merged.
 */
void blk_rq_set_mixed_merge(struct request *rq)
{
	unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
	struct bio *bio;

	if (rq->rq_flags & RQF_MIXED_MERGE)
		return;

	/*
	 * @rq will no longer represent mixable attributes for all the
	 * contained bios. It will just track those of the first one.
	 * Distribute the attributes to each bio.
	 */
	for (bio = rq->bio; bio; bio = bio->bi_next) {
		WARN_ON_ONCE((bio->bi_opf & REQ_FAILFAST_MASK) &&
			     (bio->bi_opf & REQ_FAILFAST_MASK) != ff);
		bio->bi_opf |= ff;
	}
	rq->rq_flags |= RQF_MIXED_MERGE;
}

static void blk_account_io_merge_request(struct request *req)
{
	if (blk_do_io_stat(req)) {
		part_stat_lock();
		part_stat_inc(req->part, merges[op_stat_group(req_op(req))]);
		part_stat_unlock();
	}
}

static enum elv_merge blk_try_req_merge(struct request *req,
					struct request *next)
{
	if (blk_discard_mergable(req))
		return ELEVATOR_DISCARD_MERGE;
	else if (blk_rq_pos(req) + blk_rq_sectors(req) == blk_rq_pos(next))
		return ELEVATOR_BACK_MERGE;

	return ELEVATOR_NO_MERGE;
}

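/*
 * Two WRITE SAME bios are only mergeable if they replicate the very same
 * buffer, i.e. the same page at the same offset.
 */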
static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b)
{
	if (bio_page(a) == bio_page(b) && bio_offset(a) == bio_offset(b))
		return true;
	return false;
}

Jens Axboe | d6d4819 | 2008-01-29 14:04:06 +0100 | [diff] [blame] | 765 | /* |
Jens Axboe | b973cb7 | 2017-02-02 08:54:40 -0700 | [diff] [blame] | 766 | * For non-mq, this has to be called with the request spinlock acquired. |
| 767 | * For mq with scheduling, the appropriate queue wide lock should be held. |
Jens Axboe | d6d4819 | 2008-01-29 14:04:06 +0100 | [diff] [blame] | 768 | */ |
Jens Axboe | b973cb7 | 2017-02-02 08:54:40 -0700 | [diff] [blame] | 769 | static struct request *attempt_merge(struct request_queue *q, |
| 770 | struct request *req, struct request *next) |
Jens Axboe | d6d4819 | 2008-01-29 14:04:06 +0100 | [diff] [blame] | 771 | { |
| 772 | if (!rq_mergeable(req) || !rq_mergeable(next)) |
Jens Axboe | b973cb7 | 2017-02-02 08:54:40 -0700 | [diff] [blame] | 773 | return NULL; |
Jens Axboe | d6d4819 | 2008-01-29 14:04:06 +0100 | [diff] [blame] | 774 | |
Christoph Hellwig | 288dab8 | 2016-06-09 16:00:36 +0200 | [diff] [blame] | 775 | if (req_op(req) != req_op(next)) |
Jens Axboe | b973cb7 | 2017-02-02 08:54:40 -0700 | [diff] [blame] | 776 | return NULL; |
Martin K. Petersen | f31dc1c | 2012-09-18 12:19:26 -0400 | [diff] [blame] | 777 | |
Jens Axboe | d6d4819 | 2008-01-29 14:04:06 +0100 | [diff] [blame] | 778 | if (rq_data_dir(req) != rq_data_dir(next) |
Jens Axboe | 2081a56 | 2018-10-12 12:39:10 -0600 | [diff] [blame] | 779 | || req->rq_disk != next->rq_disk) |
Jens Axboe | b973cb7 | 2017-02-02 08:54:40 -0700 | [diff] [blame] | 780 | return NULL; |
Jens Axboe | d6d4819 | 2008-01-29 14:04:06 +0100 | [diff] [blame] | 781 | |
Mike Christie | 8fe0d47 | 2016-06-05 14:32:15 -0500 | [diff] [blame] | 782 | if (req_op(req) == REQ_OP_WRITE_SAME && |
Martin K. Petersen | 4363ac7 | 2012-09-18 12:19:27 -0400 | [diff] [blame] | 783 | !blk_write_same_mergeable(req->bio, next->bio)) |
Jens Axboe | b973cb7 | 2017-02-02 08:54:40 -0700 | [diff] [blame] | 784 | return NULL; |
Martin K. Petersen | 4363ac7 | 2012-09-18 12:19:27 -0400 | [diff] [blame] | 785 | |
Jens Axboe | d6d4819 | 2008-01-29 14:04:06 +0100 | [diff] [blame] | 786 | /* |
Jens Axboe | cb6934f | 2017-06-27 09:22:02 -0600 | [diff] [blame] | 787 | * Don't allow merge of different write hints, or for a hint with |
| 788 | * non-hint IO. |
| 789 | */ |
| 790 | if (req->write_hint != next->write_hint) |
| 791 | return NULL; |
| 792 | |
Damien Le Moal | 668ffc0 | 2018-11-20 10:52:37 +0900 | [diff] [blame] | 793 | if (req->ioprio != next->ioprio) |
| 794 | return NULL; |
| 795 | |
Jens Axboe | cb6934f | 2017-06-27 09:22:02 -0600 | [diff] [blame] | 796 | /* |
Jens Axboe | d6d4819 | 2008-01-29 14:04:06 +0100 | [diff] [blame] | 797 | * If we are allowed to merge, then append bio list |
| 798 | * from next to rq and release next. merge_requests_fn |
| 799 | * will have updated segment counts, update sector |
Jens Axboe | 445251d | 2018-02-01 14:01:02 -0700 | [diff] [blame] | 800 | * counts here. Handle DISCARDs separately, as they |
| 801 | * have separate settings. |
Jens Axboe | d6d4819 | 2008-01-29 14:04:06 +0100 | [diff] [blame] | 802 | */ |
Jianchao Wang | 69840466 | 2018-10-27 19:52:14 +0800 | [diff] [blame] | 803 | |
| 804 | switch (blk_try_req_merge(req, next)) { |
| 805 | case ELEVATOR_DISCARD_MERGE: |
Jens Axboe | 445251d | 2018-02-01 14:01:02 -0700 | [diff] [blame] | 806 | if (!req_attempt_discard_merge(q, req, next)) |
| 807 | return NULL; |
Jianchao Wang | 69840466 | 2018-10-27 19:52:14 +0800 | [diff] [blame] | 808 | break; |
| 809 | case ELEVATOR_BACK_MERGE: |
| 810 | if (!ll_merge_requests_fn(q, req, next)) |
| 811 | return NULL; |
| 812 | break; |
| 813 | default: |
Jens Axboe | b973cb7 | 2017-02-02 08:54:40 -0700 | [diff] [blame] | 814 | return NULL; |
Jianchao Wang | 69840466 | 2018-10-27 19:52:14 +0800 | [diff] [blame] | 815 | } |
Jens Axboe | d6d4819 | 2008-01-29 14:04:06 +0100 | [diff] [blame] | 816 | |
| 817 | /* |
Tejun Heo | 80a761f | 2009-07-03 17:48:17 +0900 | [diff] [blame] | 818 | * If failfast settings disagree or any of the two is already |
| 819 | * a mixed merge, mark both as mixed before proceeding. This |
| 820 | * makes sure that all involved bios have mixable attributes |
| 821 | * set properly. |
| 822 | */ |
Christoph Hellwig | e806402 | 2016-10-20 15:12:13 +0200 | [diff] [blame] | 823 | if (((req->rq_flags | next->rq_flags) & RQF_MIXED_MERGE) || |
Tejun Heo | 80a761f | 2009-07-03 17:48:17 +0900 | [diff] [blame] | 824 | (req->cmd_flags & REQ_FAILFAST_MASK) != |
| 825 | (next->cmd_flags & REQ_FAILFAST_MASK)) { |
| 826 | blk_rq_set_mixed_merge(req); |
| 827 | blk_rq_set_mixed_merge(next); |
| 828 | } |
| 829 | |
| 830 | /* |
Omar Sandoval | 522a777 | 2018-05-09 02:08:53 -0700 | [diff] [blame] | 831 | * At this point we have either done a back merge or front merge. We |
| 832 | * need the smaller start_time_ns of the merged requests to be the |
| 833 | * current request for accounting purposes. |
Jens Axboe | d6d4819 | 2008-01-29 14:04:06 +0100 | [diff] [blame] | 834 | */ |
Omar Sandoval | 522a777 | 2018-05-09 02:08:53 -0700 | [diff] [blame] | 835 | if (next->start_time_ns < req->start_time_ns) |
| 836 | req->start_time_ns = next->start_time_ns; |
Jens Axboe | d6d4819 | 2008-01-29 14:04:06 +0100 | [diff] [blame] | 837 | |
| 838 | req->biotail->bi_next = next->bio; |
| 839 | req->biotail = next->biotail; |
| 840 | |
Tejun Heo | a2dec7b | 2009-05-07 22:24:44 +0900 | [diff] [blame] | 841 | req->__data_len += blk_rq_bytes(next); |
Jens Axboe | d6d4819 | 2008-01-29 14:04:06 +0100 | [diff] [blame] | 842 | |
Ming Lei | 2a5cf35 | 2018-12-01 00:38:18 +0800 | [diff] [blame] | 843 | if (!blk_discard_mergable(req)) |
Jens Axboe | 445251d | 2018-02-01 14:01:02 -0700 | [diff] [blame] | 844 | elv_merge_requests(q, req, next); |
Jens Axboe | d6d4819 | 2008-01-29 14:04:06 +0100 | [diff] [blame] | 845 | |
Jerome Marchand | 42dad76 | 2009-04-22 14:01:49 +0200 | [diff] [blame] | 846 | /* |
| 847 | * 'next' is going away, so update stats accordingly |
| 848 | */ |
Konstantin Khlebnikov | b9c54f5 | 2020-05-27 07:24:15 +0200 | [diff] [blame] | 849 | blk_account_io_merge_request(next); |
Jens Axboe | d6d4819 | 2008-01-29 14:04:06 +0100 | [diff] [blame] | 850 | |
Christoph Hellwig | a54895f | 2020-12-03 17:21:39 +0100 | [diff] [blame] | 851 | trace_block_rq_merge(next); |
Jan Kara | f3bdc62 | 2020-06-17 15:58:23 +0200 | [diff] [blame] | 852 | |
Jens Axboe | e4d750c | 2017-02-03 09:48:28 -0700 | [diff] [blame] | 853 | /* |
| 854 | * ownership of bio passed from next to req, return 'next' for |
| 855 | * the caller to free |
| 856 | */ |
Boaz Harrosh | 1cd96c2 | 2009-03-24 12:35:07 +0100 | [diff] [blame] | 857 | next->bio = NULL; |
Jens Axboe | b973cb7 | 2017-02-02 08:54:40 -0700 | [diff] [blame] | 858 | return next; |
Jens Axboe | d6d4819 | 2008-01-29 14:04:06 +0100 | [diff] [blame] | 859 | } |
| 860 | |
static struct request *attempt_back_merge(struct request_queue *q,
		struct request *rq)
{
	struct request *next = elv_latter_request(q, rq);

	if (next)
		return attempt_merge(q, rq, next);

	return NULL;
}

static struct request *attempt_front_merge(struct request_queue *q,
		struct request *rq)
{
	struct request *prev = elv_former_request(q, rq);

	if (prev)
		return attempt_merge(q, prev, rq);

	return NULL;
}

/*
 * Try to merge 'next' into 'rq'. Return true if the merge happened, false
 * otherwise. The caller is responsible for freeing 'next' if the merge
 * happened.
 */
bool blk_attempt_req_merge(struct request_queue *q, struct request *rq,
			   struct request *next)
{
	return attempt_merge(q, rq, next);
}

| 894 | bool blk_rq_merge_ok(struct request *rq, struct bio *bio) |
| 895 | { |
Martin K. Petersen | e2a60da | 2012-09-18 12:19:25 -0400 | [diff] [blame] | 896 | if (!rq_mergeable(rq) || !bio_mergeable(bio)) |
Tejun Heo | 050c8ea | 2012-02-08 09:19:38 +0100 | [diff] [blame] | 897 | return false; |
| 898 | |
Christoph Hellwig | 288dab8 | 2016-06-09 16:00:36 +0200 | [diff] [blame] | 899 | if (req_op(rq) != bio_op(bio)) |
Martin K. Petersen | f31dc1c | 2012-09-18 12:19:26 -0400 | [diff] [blame] | 900 | return false; |
| 901 | |
Tejun Heo | 050c8ea | 2012-02-08 09:19:38 +0100 | [diff] [blame] | 902 | /* different data direction or already started, don't merge */ |
| 903 | if (bio_data_dir(bio) != rq_data_dir(rq)) |
| 904 | return false; |
| 905 | |
Jens Axboe | 2081a56 | 2018-10-12 12:39:10 -0600 | [diff] [blame] | 906 | /* must be same device */ |
Christoph Hellwig | 309dca30 | 2021-01-24 11:02:34 +0100 | [diff] [blame] | 907 | if (rq->rq_disk != bio->bi_bdev->bd_disk) |
Tejun Heo | 050c8ea | 2012-02-08 09:19:38 +0100 | [diff] [blame] | 908 | return false; |
| 909 | |
| 910 | /* only merge integrity protected bio into ditto rq */ |
Martin K. Petersen | 4eaf99b | 2014-09-26 19:20:06 -0400 | [diff] [blame] | 911 | if (blk_integrity_merge_bio(rq->q, rq, bio) == false) |
Tejun Heo | 050c8ea | 2012-02-08 09:19:38 +0100 | [diff] [blame] | 912 | return false; |
| 913 | |
Satya Tangirala | a892c8d | 2020-05-14 00:37:18 +0000 | [diff] [blame] | 914 | /* Only merge if the crypt contexts are compatible */ |
| 915 | if (!bio_crypt_rq_ctx_compatible(rq, bio)) |
| 916 | return false; |
| 917 | |
Martin K. Petersen | 4363ac7 | 2012-09-18 12:19:27 -0400 | [diff] [blame] | 918 | /* must be using the same buffer */ |
Mike Christie | 8fe0d47 | 2016-06-05 14:32:15 -0500 | [diff] [blame] | 919 | if (req_op(rq) == REQ_OP_WRITE_SAME && |
Martin K. Petersen | 4363ac7 | 2012-09-18 12:19:27 -0400 | [diff] [blame] | 920 | !blk_write_same_mergeable(rq->bio, bio)) |
| 921 | return false; |
| 922 | |
Jens Axboe | cb6934f | 2017-06-27 09:22:02 -0600 | [diff] [blame] | 923 | 	/*
| 924 | 	 * Don't allow merging IOs with different write hints, or a hinted
| 925 | 	 * IO with a non-hinted one.
| 926 | 	 */
| 927 | if (rq->write_hint != bio->bi_write_hint) |
| 928 | return false; |
| 929 | |
Damien Le Moal | 668ffc0 | 2018-11-20 10:52:37 +0900 | [diff] [blame] | 930 | if (rq->ioprio != bio_prio(bio)) |
| 931 | return false; |
| 932 | |
Tejun Heo | 050c8ea | 2012-02-08 09:19:38 +0100 | [diff] [blame] | 933 | return true; |
| 934 | } |
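
/*
 * A minimal userspace sketch of the guard chain above (hypothetical
 * types and fields, not kernel code): every compatibility property
 * must match before a merge is allowed, and any single mismatch
 * vetoes it.
 */
#include <stdbool.h>

struct io_desc {			/* hypothetical IO descriptor */
	int op;				/* operation type */
	int dir;			/* data direction */
	const void *disk;		/* target device */
	int prio;			/* IO priority */
};

static bool merge_ok(const struct io_desc *rq, const struct io_desc *bio)
{
	return rq->op == bio->op &&	/* same operation */
	       rq->dir == bio->dir &&	/* same direction */
	       rq->disk == bio->disk &&	/* same device */
	       rq->prio == bio->prio;	/* same priority */
}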
| 935 | |
Christoph Hellwig | 34fe7c0 | 2017-02-08 14:46:48 +0100 | [diff] [blame] | 936 | enum elv_merge blk_try_merge(struct request *rq, struct bio *bio) |
Tejun Heo | 050c8ea | 2012-02-08 09:19:38 +0100 | [diff] [blame] | 937 | { |
Jianchao Wang | 69840466 | 2018-10-27 19:52:14 +0800 | [diff] [blame] | 938 | if (blk_discard_mergable(rq)) |
Christoph Hellwig | 1e73973 | 2017-02-08 14:46:49 +0100 | [diff] [blame] | 939 | return ELEVATOR_DISCARD_MERGE; |
| 940 | else if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector) |
Tejun Heo | 050c8ea | 2012-02-08 09:19:38 +0100 | [diff] [blame] | 941 | return ELEVATOR_BACK_MERGE; |
Kent Overstreet | 4f024f3 | 2013-10-11 15:44:27 -0700 | [diff] [blame] | 942 | else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector) |
Tejun Heo | 050c8ea | 2012-02-08 09:19:38 +0100 | [diff] [blame] | 943 | return ELEVATOR_FRONT_MERGE; |
| 944 | return ELEVATOR_NO_MERGE; |
| 945 | } |
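
/*
 * A minimal userspace sketch of the sector arithmetic above
 * (hypothetical names, not kernel code): a bio back-merges when it
 * begins exactly where the request ends, and front-merges when it
 * ends exactly where the request begins. For example, a request
 * covering sectors [100, 108) back-merges a bio starting at 108 and
 * front-merges a bio covering [92, 100).
 */
#include <assert.h>

enum merge_dir { NO_MERGE, BACK_MERGE, FRONT_MERGE };

static enum merge_dir classify_merge(unsigned long long rq_pos,
				     unsigned int rq_sectors,
				     unsigned long long bio_sector,
				     unsigned int bio_sectors)
{
	if (rq_pos + rq_sectors == bio_sector)
		return BACK_MERGE;	/* bio starts at the rq's end */
	if (rq_pos == bio_sector + bio_sectors)
		return FRONT_MERGE;	/* bio ends at the rq's start */
	return NO_MERGE;
}

int main(void)
{
	assert(classify_merge(100, 8, 108, 8) == BACK_MERGE);
	assert(classify_merge(100, 8, 92, 8) == FRONT_MERGE);
	assert(classify_merge(100, 8, 200, 8) == NO_MERGE);
	return 0;
}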
Baolin Wang | 8e75637 | 2020-08-28 10:52:54 +0800 | [diff] [blame] | 946 | |
| 947 | static void blk_account_io_merge_bio(struct request *req) |
| 948 | { |
| 949 | if (!blk_do_io_stat(req)) |
| 950 | return; |
| 951 | |
| 952 | part_stat_lock(); |
| 953 | part_stat_inc(req->part, merges[op_stat_group(req_op(req))]); |
| 954 | part_stat_unlock(); |
| 955 | } |
| 956 | |
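/*
 * Tri-state merge result: BIO_MERGE_NONE means the bio was not a
 * merge candidate for this request at all (callers may keep scanning
 * other requests), while BIO_MERGE_FAILED means it was a candidate
 * but the merge could not be performed (callers should stop scanning
 * and allocate a new request for the bio).
 */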
Christoph Hellwig | eda5cc9 | 2020-10-06 09:07:19 +0200 | [diff] [blame] | 957 | enum bio_merge_status { |
| 958 | BIO_MERGE_OK, |
| 959 | BIO_MERGE_NONE, |
| 960 | BIO_MERGE_FAILED, |
| 961 | }; |
| 962 | |
| 963 | static enum bio_merge_status bio_attempt_back_merge(struct request *req, |
| 964 | struct bio *bio, unsigned int nr_segs) |
Baolin Wang | 8e75637 | 2020-08-28 10:52:54 +0800 | [diff] [blame] | 965 | { |
| 966 | const int ff = bio->bi_opf & REQ_FAILFAST_MASK; |
| 967 | |
| 968 | if (!ll_back_merge_fn(req, bio, nr_segs)) |
Baolin Wang | 7d7ca7c | 2020-08-28 10:52:56 +0800 | [diff] [blame] | 969 | return BIO_MERGE_FAILED; |
Baolin Wang | 8e75637 | 2020-08-28 10:52:54 +0800 | [diff] [blame] | 970 | |
Christoph Hellwig | e8a676d | 2020-12-03 17:21:36 +0100 | [diff] [blame] | 971 | trace_block_bio_backmerge(bio); |
Baolin Wang | 8e75637 | 2020-08-28 10:52:54 +0800 | [diff] [blame] | 972 | rq_qos_merge(req->q, req, bio); |
| 973 | |
| 974 | if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff) |
| 975 | blk_rq_set_mixed_merge(req); |
| 976 | |
| 977 | req->biotail->bi_next = bio; |
| 978 | req->biotail = bio; |
| 979 | req->__data_len += bio->bi_iter.bi_size; |
| 980 | |
| 981 | bio_crypt_free_ctx(bio); |
| 982 | |
| 983 | blk_account_io_merge_bio(req); |
Baolin Wang | 7d7ca7c | 2020-08-28 10:52:56 +0800 | [diff] [blame] | 984 | return BIO_MERGE_OK; |
Baolin Wang | 8e75637 | 2020-08-28 10:52:54 +0800 | [diff] [blame] | 985 | } |
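
/*
 * A minimal userspace sketch of the tail append above (hypothetical
 * stand-ins, not the kernel's struct bio/struct request): a back
 * merge is an O(1) append on the request's singly linked bio chain
 * through the cached tail pointer, plus a length update.
 */
#include <stddef.h>

struct sk_bio {				/* hypothetical bio stand-in */
	struct sk_bio *next;
	unsigned int len;
};

struct sk_req {				/* hypothetical request stand-in */
	struct sk_bio *bio;		/* head of the bio chain */
	struct sk_bio *biotail;		/* cached tail for O(1) appends */
	unsigned int data_len;
};

static void sk_back_merge(struct sk_req *req, struct sk_bio *bio)
{
	bio->next = NULL;
	req->biotail->next = bio;	/* link after the current tail */
	req->biotail = bio;		/* the bio becomes the new tail */
	req->data_len += bio->len;	/* the request grows by the bio */
}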
| 986 | |
Christoph Hellwig | eda5cc9 | 2020-10-06 09:07:19 +0200 | [diff] [blame] | 987 | static enum bio_merge_status bio_attempt_front_merge(struct request *req, |
| 988 | struct bio *bio, unsigned int nr_segs) |
Baolin Wang | 8e75637 | 2020-08-28 10:52:54 +0800 | [diff] [blame] | 989 | { |
| 990 | const int ff = bio->bi_opf & REQ_FAILFAST_MASK; |
| 991 | |
| 992 | if (!ll_front_merge_fn(req, bio, nr_segs)) |
Baolin Wang | 7d7ca7c | 2020-08-28 10:52:56 +0800 | [diff] [blame] | 993 | return BIO_MERGE_FAILED; |
Baolin Wang | 8e75637 | 2020-08-28 10:52:54 +0800 | [diff] [blame] | 994 | |
Christoph Hellwig | e8a676d | 2020-12-03 17:21:36 +0100 | [diff] [blame] | 995 | trace_block_bio_frontmerge(bio); |
Baolin Wang | 8e75637 | 2020-08-28 10:52:54 +0800 | [diff] [blame] | 996 | rq_qos_merge(req->q, req, bio); |
| 997 | |
| 998 | if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff) |
| 999 | blk_rq_set_mixed_merge(req); |
| 1000 | |
| 1001 | bio->bi_next = req->bio; |
| 1002 | req->bio = bio; |
| 1003 | |
| 1004 | req->__sector = bio->bi_iter.bi_sector; |
| 1005 | req->__data_len += bio->bi_iter.bi_size; |
| 1006 | |
| 1007 | bio_crypt_do_front_merge(req, bio); |
| 1008 | |
| 1009 | blk_account_io_merge_bio(req); |
Baolin Wang | 7d7ca7c | 2020-08-28 10:52:56 +0800 | [diff] [blame] | 1010 | return BIO_MERGE_OK; |
Baolin Wang | 8e75637 | 2020-08-28 10:52:54 +0800 | [diff] [blame] | 1011 | } |
| 1012 | |
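/*
 * Discards can merge without being contiguous: on queues that support
 * multi-range discards, each merged bio becomes its own discard range
 * (segment), so the gates below are the queue's discard segment limit
 * and the overall request size rather than sector adjacency.
 */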
Christoph Hellwig | eda5cc9 | 2020-10-06 09:07:19 +0200 | [diff] [blame] | 1013 | static enum bio_merge_status bio_attempt_discard_merge(struct request_queue *q, |
| 1014 | struct request *req, struct bio *bio) |
Baolin Wang | 8e75637 | 2020-08-28 10:52:54 +0800 | [diff] [blame] | 1015 | { |
| 1016 | unsigned short segments = blk_rq_nr_discard_segments(req); |
| 1017 | |
| 1018 | if (segments >= queue_max_discard_segments(q)) |
| 1019 | goto no_merge; |
| 1020 | if (blk_rq_sectors(req) + bio_sectors(bio) > |
| 1021 | blk_rq_get_max_sectors(req, blk_rq_pos(req))) |
| 1022 | goto no_merge; |
| 1023 | |
| 1024 | rq_qos_merge(q, req, bio); |
| 1025 | |
| 1026 | req->biotail->bi_next = bio; |
| 1027 | req->biotail = bio; |
| 1028 | req->__data_len += bio->bi_iter.bi_size; |
| 1029 | req->nr_phys_segments = segments + 1; |
| 1030 | |
| 1031 | blk_account_io_merge_bio(req); |
Baolin Wang | 7d7ca7c | 2020-08-28 10:52:56 +0800 | [diff] [blame] | 1032 | return BIO_MERGE_OK; |
Baolin Wang | 8e75637 | 2020-08-28 10:52:54 +0800 | [diff] [blame] | 1033 | no_merge: |
| 1034 | req_set_nomerge(q, req); |
Baolin Wang | 7d7ca7c | 2020-08-28 10:52:56 +0800 | [diff] [blame] | 1035 | return BIO_MERGE_FAILED; |
| 1036 | } |
| 1037 | |
| 1038 | static enum bio_merge_status blk_attempt_bio_merge(struct request_queue *q, |
| 1039 | struct request *rq, |
| 1040 | struct bio *bio, |
| 1041 | unsigned int nr_segs, |
| 1042 | bool sched_allow_merge) |
| 1043 | { |
| 1044 | if (!blk_rq_merge_ok(rq, bio)) |
| 1045 | return BIO_MERGE_NONE; |
| 1046 | |
| 1047 | switch (blk_try_merge(rq, bio)) { |
| 1048 | case ELEVATOR_BACK_MERGE: |
Baolin Wang | 265600b | 2020-09-02 09:45:25 +0800 | [diff] [blame] | 1049 | if (!sched_allow_merge || blk_mq_sched_allow_merge(q, rq, bio)) |
Baolin Wang | 7d7ca7c | 2020-08-28 10:52:56 +0800 | [diff] [blame] | 1050 | return bio_attempt_back_merge(rq, bio, nr_segs); |
| 1051 | break; |
| 1052 | case ELEVATOR_FRONT_MERGE: |
Baolin Wang | 265600b | 2020-09-02 09:45:25 +0800 | [diff] [blame] | 1053 | if (!sched_allow_merge || blk_mq_sched_allow_merge(q, rq, bio)) |
Baolin Wang | 7d7ca7c | 2020-08-28 10:52:56 +0800 | [diff] [blame] | 1054 | return bio_attempt_front_merge(rq, bio, nr_segs); |
| 1055 | break; |
| 1056 | case ELEVATOR_DISCARD_MERGE: |
| 1057 | return bio_attempt_discard_merge(q, rq, bio); |
| 1058 | default: |
| 1059 | return BIO_MERGE_NONE; |
| 1060 | } |
| 1061 | |
| 1062 | return BIO_MERGE_FAILED; |
Baolin Wang | 8e75637 | 2020-08-28 10:52:54 +0800 | [diff] [blame] | 1063 | } |
| 1064 | |
| 1065 | /** |
| 1066 | * blk_attempt_plug_merge - try to merge with %current's plugged list |
| 1067 | * @q: request_queue new bio is being queued at |
| 1068 | * @bio: new bio being queued |
| 1069 | * @nr_segs: number of segments in @bio |
Jens Axboe | 87c037d | 2021-10-18 10:07:09 -0600 | [diff] [blame] | 1070 |  * @same_queue_rq: output value, set to %true if an existing request
| 1071 |  * from the passed-in @q is already on the plug list
Baolin Wang | 8e75637 | 2020-08-28 10:52:54 +0800 | [diff] [blame] | 1072 | * |
Jens Axboe | d38a9c0 | 2021-10-14 07:24:07 -0600 | [diff] [blame] | 1073 | * Determine whether @bio being queued on @q can be merged with the previous |
| 1074 | * request on %current's plugged list. Returns %true if merge was successful, |
Baolin Wang | 8e75637 | 2020-08-28 10:52:54 +0800 | [diff] [blame] | 1075 | * otherwise %false. |
| 1076 | * |
| 1077 | * Plugging coalesces IOs from the same issuer for the same purpose without |
| 1078 | * going through @q->queue_lock. As such it's more of an issuing mechanism |
| 1079 |  * than scheduling: the request, while it may have elvpriv data, is not
| 1080 |  * added to the elevator at this point. In addition, we don't have
| 1081 |  * reliable access to the elevator outside the queue lock. Only check basic
| 1082 | * merging parameters without querying the elevator. |
| 1083 | * |
| 1084 | * Caller must ensure !blk_queue_nomerges(q) beforehand. |
| 1085 | */ |
| 1086 | bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio, |
Jens Axboe | 87c037d | 2021-10-18 10:07:09 -0600 | [diff] [blame] | 1087 | unsigned int nr_segs, bool *same_queue_rq) |
Baolin Wang | 8e75637 | 2020-08-28 10:52:54 +0800 | [diff] [blame] | 1088 | { |
| 1089 | struct blk_plug *plug; |
| 1090 | struct request *rq; |
Baolin Wang | 8e75637 | 2020-08-28 10:52:54 +0800 | [diff] [blame] | 1091 | |
| 1092 | plug = blk_mq_plug(q, bio); |
Jens Axboe | bc490f8 | 2021-10-18 10:12:12 -0600 | [diff] [blame] | 1093 | if (!plug || rq_list_empty(plug->mq_list)) |
Baolin Wang | 8e75637 | 2020-08-28 10:52:54 +0800 | [diff] [blame] | 1094 | return false; |
| 1095 | |
Jens Axboe | d38a9c0 | 2021-10-14 07:24:07 -0600 | [diff] [blame] | 1096 | 	/* check the most recently added entry for a quick merge attempt */
Jens Axboe | bc490f8 | 2021-10-18 10:12:12 -0600 | [diff] [blame] | 1097 | rq = rq_list_peek(&plug->mq_list); |
Jens Axboe | 87c037d | 2021-10-18 10:07:09 -0600 | [diff] [blame] | 1098 | if (rq->q == q) { |
Jens Axboe | d38a9c0 | 2021-10-14 07:24:07 -0600 | [diff] [blame] | 1099 | /* |
| 1100 | 		 * Only the blk-mq multiple-hardware-queue case checks for an rq on the
| 1101 | 		 * same queue; there should be only one such rq per queue on the plug list.
| 1102 | */ |
Jens Axboe | 87c037d | 2021-10-18 10:07:09 -0600 | [diff] [blame] | 1103 | *same_queue_rq = true; |
Ming Lei | a1cb653 | 2021-11-02 21:35:00 +0800 | [diff] [blame] | 1104 | |
| 1105 | if (blk_attempt_bio_merge(q, rq, bio, nr_segs, false) == |
| 1106 | BIO_MERGE_OK) |
| 1107 | return true; |
Baolin Wang | 8e75637 | 2020-08-28 10:52:54 +0800 | [diff] [blame] | 1108 | } |
Baolin Wang | 8e75637 | 2020-08-28 10:52:54 +0800 | [diff] [blame] | 1109 | return false; |
| 1110 | } |
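
/*
 * A minimal userspace sketch of the pattern above (hypothetical
 * types, not kernel code): peek only the most recently added request
 * on the plug list, and attempt the merge only when that request
 * targets the same queue as the incoming bio.
 */
#include <stdbool.h>
#include <stddef.h>

struct sk_rq {				/* hypothetical plugged request */
	struct sk_rq *next;
	const void *queue;
};

/* stub standing in for the real merge attempt (blk_attempt_bio_merge) */
static bool sk_try_merge(struct sk_rq *rq, const void *bio)
{
	(void)rq;
	(void)bio;
	return false;
}

static bool sk_plug_merge(struct sk_rq *plug_head, const void *queue,
			  const void *bio)
{
	struct sk_rq *rq = plug_head;	/* newest entry sits at the head */

	if (!rq || rq->queue != queue)	/* empty plug or different queue */
		return false;
	return sk_try_merge(rq, bio);
}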
Baolin Wang | bdc6a287 | 2020-08-28 10:52:55 +0800 | [diff] [blame] | 1111 | |
| 1112 | /* |
| 1113 | * Iterate list of requests and see if we can merge this bio with any |
| 1114 |  * Iterate the list of requests and see if we can merge this bio with any
| 1115 | */ |
| 1116 | bool blk_bio_list_merge(struct request_queue *q, struct list_head *list, |
| 1117 | struct bio *bio, unsigned int nr_segs) |
| 1118 | { |
| 1119 | struct request *rq; |
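	/* merge-scan budget: look at no more than 8 of the newest requests */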
| 1120 | int checked = 8; |
| 1121 | |
| 1122 | list_for_each_entry_reverse(rq, list, queuelist) { |
Baolin Wang | bdc6a287 | 2020-08-28 10:52:55 +0800 | [diff] [blame] | 1123 | if (!checked--) |
| 1124 | break; |
| 1125 | |
Baolin Wang | 7d7ca7c | 2020-08-28 10:52:56 +0800 | [diff] [blame] | 1126 | switch (blk_attempt_bio_merge(q, rq, bio, nr_segs, true)) { |
| 1127 | case BIO_MERGE_NONE: |
Baolin Wang | bdc6a287 | 2020-08-28 10:52:55 +0800 | [diff] [blame] | 1128 | continue; |
Baolin Wang | 7d7ca7c | 2020-08-28 10:52:56 +0800 | [diff] [blame] | 1129 | case BIO_MERGE_OK: |
| 1130 | return true; |
| 1131 | case BIO_MERGE_FAILED: |
| 1132 | return false; |
Baolin Wang | bdc6a287 | 2020-08-28 10:52:55 +0800 | [diff] [blame] | 1133 | } |
Baolin Wang | bdc6a287 | 2020-08-28 10:52:55 +0800 | [diff] [blame] | 1135 | } |
| 1136 | |
| 1137 | return false; |
| 1138 | } |
| 1139 | EXPORT_SYMBOL_GPL(blk_bio_list_merge); |
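
/*
 * A minimal userspace sketch of the bounded scan above (hypothetical
 * array-backed list, not kernel code): walk newest-first and give up
 * after a fixed budget of candidates so that per-bio merge probing
 * stays O(1) even on long lists.
 */
#include <stdbool.h>

static bool bounded_reverse_scan(const int *vals, int n, int target,
				 int budget)
{
	for (int i = n - 1; i >= 0; i--) {
		if (!budget--)		/* budget exhausted: give up */
			return false;
		if (vals[i] == target)	/* a "merge" hit */
			return true;
	}
	return false;
}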
Christoph Hellwig | eda5cc9 | 2020-10-06 09:07:19 +0200 | [diff] [blame] | 1140 | |
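/*
 * Note the two-phase structure below: once a bio merges into a
 * request, that request may have grown to abut its neighbour in the
 * elevator, so a request-request merge is attempted next; when that
 * second merge also succeeds, the absorbed request is handed back via
 * @merged_request so the caller can free it.
 */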
| 1141 | bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio, |
| 1142 | unsigned int nr_segs, struct request **merged_request) |
| 1143 | { |
| 1144 | struct request *rq; |
| 1145 | |
| 1146 | switch (elv_merge(q, &rq, bio)) { |
| 1147 | case ELEVATOR_BACK_MERGE: |
| 1148 | if (!blk_mq_sched_allow_merge(q, rq, bio)) |
| 1149 | return false; |
| 1150 | if (bio_attempt_back_merge(rq, bio, nr_segs) != BIO_MERGE_OK) |
| 1151 | return false; |
| 1152 | *merged_request = attempt_back_merge(q, rq); |
| 1153 | if (!*merged_request) |
| 1154 | elv_merged_request(q, rq, ELEVATOR_BACK_MERGE); |
| 1155 | return true; |
| 1156 | case ELEVATOR_FRONT_MERGE: |
| 1157 | if (!blk_mq_sched_allow_merge(q, rq, bio)) |
| 1158 | return false; |
| 1159 | if (bio_attempt_front_merge(rq, bio, nr_segs) != BIO_MERGE_OK) |
| 1160 | return false; |
| 1161 | *merged_request = attempt_front_merge(q, rq); |
| 1162 | if (!*merged_request) |
| 1163 | elv_merged_request(q, rq, ELEVATOR_FRONT_MERGE); |
| 1164 | return true; |
| 1165 | case ELEVATOR_DISCARD_MERGE: |
| 1166 | return bio_attempt_discard_merge(q, rq, bio) == BIO_MERGE_OK; |
| 1167 | default: |
| 1168 | return false; |
| 1169 | } |
| 1170 | } |
| 1171 | EXPORT_SYMBOL_GPL(blk_mq_sched_try_merge); |