// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to segment and merge handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include <trace/events/block.h>

#include "blk.h"

/*
 * Check if the two bvecs from two bios can be merged into one segment.  If
 * so, there is no need to check the gap between the two bios, since the 1st
 * bio and the 1st bvec of the 2nd bio can be handled in one segment.
 */
static inline bool bios_segs_mergeable(struct request_queue *q,
		struct bio *prev, struct bio_vec *prev_last_bv,
		struct bio_vec *next_first_bv)
{
	if (!biovec_phys_mergeable(q, prev_last_bv, next_first_bv))
		return false;
	if (prev->bi_seg_back_size + next_first_bv->bv_len >
			queue_max_segment_size(q))
		return false;
	return true;
}

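/*
 * Example (an assumption about a typical setup, not taken from this file):
 * a driver like NVMe sets a virt_boundary mask of 0xfff, i.e. a merged
 * segment may only cross a 4K virtual boundary at an aligned address.
 * bio_will_gap() below refuses a merge whenever the last bvec of 'prev' and
 * the first bvec of 'next' would leave such a hole, e.g. when 'prev' ends
 * at in-page offset 0x800 while 'next' starts at offset 0 of another page.
 */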
static inline bool bio_will_gap(struct request_queue *q,
		struct request *prev_rq, struct bio *prev, struct bio *next)
{
	struct bio_vec pb, nb;

	if (!bio_has_data(prev) || !queue_virt_boundary(q))
		return false;

	/*
	 * Don't merge if the 1st bio starts at a non-zero offset, otherwise
	 * it is quite difficult to respect the sg gap limit.  We work hard
	 * to merge a huge number of small single bios in the mkfs case.
	 */
	if (prev_rq)
		bio_get_first_bvec(prev_rq->bio, &pb);
	else
		bio_get_first_bvec(prev, &pb);
	if (pb.bv_offset & queue_virt_boundary(q))
		return true;

	/*
	 * We don't need to worry about the situation that the merged segment
	 * ends in unaligned virt boundary:
	 *
	 * - if 'pb' ends aligned, the merged segment ends aligned
	 * - if 'pb' ends unaligned, the next bio must include
	 *   one single bvec of 'nb', otherwise the 'nb' can't
	 *   merge with 'pb'
	 */
	bio_get_last_bvec(prev, &pb);
	bio_get_first_bvec(next, &nb);
	if (bios_segs_mergeable(q, prev, &pb, &nb))
		return false;
	return __bvec_gap_to_prev(q, &pb, nb.bv_offset);
}

static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
{
	return bio_will_gap(req->q, req, req->biotail, bio);
}

static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
{
	return bio_will_gap(req->q, NULL, bio, req->bio);
}

static struct bio *blk_bio_discard_split(struct request_queue *q,
					 struct bio *bio,
					 struct bio_set *bs,
					 unsigned *nsegs)
{
	unsigned int max_discard_sectors, granularity;
	int alignment;
	sector_t tmp;
	unsigned split_sectors;

	*nsegs = 1;

	/* Zero-sector (unknown) and one-sector granularities are the same. */
	granularity = max(q->limits.discard_granularity >> 9, 1U);

	max_discard_sectors = min(q->limits.max_discard_sectors,
			bio_allowed_max_sectors(q));
	max_discard_sectors -= max_discard_sectors % granularity;

	if (unlikely(!max_discard_sectors)) {
		/* XXX: warn */
		return NULL;
	}

	if (bio_sectors(bio) <= max_discard_sectors)
		return NULL;

	split_sectors = max_discard_sectors;

	/*
	 * If the next starting sector would be misaligned, stop the discard at
	 * the previous aligned sector.
	 */
	alignment = (q->limits.discard_alignment >> 9) % granularity;

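	/*
	 * Worked example (illustrative numbers only): with granularity =
	 * 2048 sectors, alignment = 0, bi_sector = 100 and split_sectors =
	 * 65536, tmp below ends up as (100 + 65536) % 2048 = 100, so
	 * split_sectors becomes 65436 and the remainder of the discard
	 * restarts at sector 65536, which is granularity aligned.
	 */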
	tmp = bio->bi_iter.bi_sector + split_sectors - alignment;
	tmp = sector_div(tmp, granularity);

	if (split_sectors > tmp)
		split_sectors -= tmp;

	return bio_split(bio, split_sectors, GFP_NOIO, bs);
}

static struct bio *blk_bio_write_zeroes_split(struct request_queue *q,
		struct bio *bio, struct bio_set *bs, unsigned *nsegs)
{
	*nsegs = 1;

	if (!q->limits.max_write_zeroes_sectors)
		return NULL;

	if (bio_sectors(bio) <= q->limits.max_write_zeroes_sectors)
		return NULL;

	return bio_split(bio, q->limits.max_write_zeroes_sectors, GFP_NOIO, bs);
}

static struct bio *blk_bio_write_same_split(struct request_queue *q,
					    struct bio *bio,
					    struct bio_set *bs,
					    unsigned *nsegs)
{
	*nsegs = 1;

	if (!q->limits.max_write_same_sectors)
		return NULL;

	if (bio_sectors(bio) <= q->limits.max_write_same_sectors)
		return NULL;

	return bio_split(bio, q->limits.max_write_same_sectors, GFP_NOIO, bs);
}

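/*
 * Maximum number of sectors a bio may carry when it starts at this sector,
 * rounded down so that a split stays aligned to the logical block size.
 * Worked example (illustrative numbers): with 4096-byte logical blocks,
 * mask = 0xfff and (mask >> 9) = 7, so the low three bits of the sector
 * count are cleared and the result is always a multiple of 8 sectors.
 */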
static inline unsigned get_max_io_size(struct request_queue *q,
				       struct bio *bio)
{
	unsigned sectors = blk_max_size_offset(q, bio->bi_iter.bi_sector);
	unsigned mask = queue_logical_block_size(q) - 1;

	/* aligned to logical block size */
	sectors &= ~(mask >> 9);

	return sectors;
}

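/*
 * Walk the bvecs of @bio and decide whether it has to be split so that the
 * front part respects the queue limits: maximum sectors for this starting
 * offset, maximum segment count, maximum segment size and the SG gap
 * (virt_boundary) constraint.  On a split the leading piece is carved off
 * with bio_split() and returned, leaving @bio as the remainder; NULL is
 * returned when no split is needed.
 */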
static struct bio *blk_bio_segment_split(struct request_queue *q,
					 struct bio *bio,
					 struct bio_set *bs,
					 unsigned *segs)
{
	struct bio_vec bv, bvprv, *bvprvp = NULL;
	struct bvec_iter iter;
	unsigned seg_size = 0, nsegs = 0, sectors = 0;
	unsigned front_seg_size = bio->bi_seg_front_size;
	bool do_split = true;
	struct bio *new = NULL;
	const unsigned max_sectors = get_max_io_size(q, bio);

	bio_for_each_segment(bv, bio, iter) {
		/*
		 * If the queue doesn't support SG gaps and adding this
		 * offset would create a gap, disallow it.
		 */
		if (bvprvp && bvec_gap_to_prev(q, bvprvp, bv.bv_offset))
			goto split;

		if (sectors + (bv.bv_len >> 9) > max_sectors) {
			/*
			 * Consider this a new segment if we're splitting in
			 * the middle of this vector.
			 */
			if (nsegs < queue_max_segments(q) &&
			    sectors < max_sectors) {
				nsegs++;
				sectors = max_sectors;
			}
			goto split;
		}

		if (bvprvp && blk_queue_cluster(q)) {
			if (seg_size + bv.bv_len > queue_max_segment_size(q))
				goto new_segment;
			if (!biovec_phys_mergeable(q, bvprvp, &bv))
				goto new_segment;

			seg_size += bv.bv_len;
			bvprv = bv;
			bvprvp = &bvprv;
			sectors += bv.bv_len >> 9;

			continue;
		}
new_segment:
		if (nsegs == queue_max_segments(q))
			goto split;

		if (nsegs == 1 && seg_size > front_seg_size)
			front_seg_size = seg_size;

		nsegs++;
		bvprv = bv;
		bvprvp = &bvprv;
		seg_size = bv.bv_len;
		sectors += bv.bv_len >> 9;

	}

	do_split = false;
split:
	*segs = nsegs;

	if (do_split) {
		new = bio_split(bio, sectors, GFP_NOIO, bs);
		if (new)
			bio = new;
	}

	if (nsegs == 1 && seg_size > front_seg_size)
		front_seg_size = seg_size;
	bio->bi_seg_front_size = front_seg_size;
	if (seg_size > bio->bi_seg_back_size)
		bio->bi_seg_back_size = seg_size;

	return do_split ? new : NULL;
}

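/*
 * Split *bio if it exceeds the queue's limits for its operation type.  When
 * a split happens, the front piece (which fits) is handed back in *bio for
 * the caller to process, while the remainder is resubmitted through
 * generic_make_request() and will be split again if it is still too big.
 * In both cases bi_phys_segments is filled in and BIO_SEG_VALID set, so
 * callers do not need to recount segments afterwards.
 */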
void blk_queue_split(struct request_queue *q, struct bio **bio)
{
	struct bio *split, *res;
	unsigned nsegs;

	switch (bio_op(*bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
		split = blk_bio_discard_split(q, *bio, &q->bio_split, &nsegs);
		break;
	case REQ_OP_WRITE_ZEROES:
		split = blk_bio_write_zeroes_split(q, *bio, &q->bio_split, &nsegs);
		break;
	case REQ_OP_WRITE_SAME:
		split = blk_bio_write_same_split(q, *bio, &q->bio_split, &nsegs);
		break;
	default:
		split = blk_bio_segment_split(q, *bio, &q->bio_split, &nsegs);
		break;
	}

	/* physical segments can be figured out during splitting */
	res = split ? split : *bio;
	res->bi_phys_segments = nsegs;
	bio_set_flag(res, BIO_SEG_VALID);

	if (split) {
		/* there is no chance to merge the split bio */
		split->bi_opf |= REQ_NOMERGE;

		/*
		 * Since we're recursing into make_request here, ensure
		 * that we mark this bio as already having entered the queue.
		 * If not, and the queue is going away, we can get stuck
		 * forever waiting for the queue reference to drop. But
		 * that will never happen, as we're already holding a
		 * reference to it.
		 */
		bio_set_flag(*bio, BIO_QUEUE_ENTERED);

		bio_chain(split, *bio);
		trace_block_split(q, split, (*bio)->bi_iter.bi_sector);
		generic_make_request(*bio);
		*bio = split;
	}
}
EXPORT_SYMBOL(blk_queue_split);

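/*
 * Count the physical segments of a bio chain.  When the queue supports
 * clustering, adjacent bvecs that are physically mergeable and still fit in
 * one max-sized segment are folded together; with no_sg_merge every bvec is
 * its own segment.  bi_seg_front_size/bi_seg_back_size of the first/last
 * bio are updated as a side effect so that later request merging can reuse
 * them.
 */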
static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
					     struct bio *bio,
					     bool no_sg_merge)
{
	struct bio_vec bv, bvprv = { NULL };
	int cluster, prev = 0;
	unsigned int seg_size, nr_phys_segs;
	struct bio *fbio, *bbio;
	struct bvec_iter iter;

	if (!bio)
		return 0;

	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
	case REQ_OP_WRITE_ZEROES:
		return 0;
	case REQ_OP_WRITE_SAME:
		return 1;
	}

	fbio = bio;
	cluster = blk_queue_cluster(q);
	seg_size = 0;
	nr_phys_segs = 0;
	for_each_bio(bio) {
		bio_for_each_segment(bv, bio, iter) {
			/*
			 * If SG merging is disabled, each bio vector is
			 * a segment
			 */
			if (no_sg_merge)
				goto new_segment;

			if (prev && cluster) {
				if (seg_size + bv.bv_len
				    > queue_max_segment_size(q))
					goto new_segment;
				if (!biovec_phys_mergeable(q, &bvprv, &bv))
					goto new_segment;

				seg_size += bv.bv_len;
				bvprv = bv;
				continue;
			}
new_segment:
			if (nr_phys_segs == 1 && seg_size >
			    fbio->bi_seg_front_size)
				fbio->bi_seg_front_size = seg_size;

			nr_phys_segs++;
			bvprv = bv;
			prev = 1;
			seg_size = bv.bv_len;
		}
		bbio = bio;
	}

	if (nr_phys_segs == 1 && seg_size > fbio->bi_seg_front_size)
		fbio->bi_seg_front_size = seg_size;
	if (seg_size > bbio->bi_seg_back_size)
		bbio->bi_seg_back_size = seg_size;

	return nr_phys_segs;
}

void blk_recalc_rq_segments(struct request *rq)
{
	bool no_sg_merge = !!test_bit(QUEUE_FLAG_NO_SG_MERGE,
			&rq->q->queue_flags);

	rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio,
			no_sg_merge);
}

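/*
 * (Re)compute bi_phys_segments for a single bio and mark it BIO_SEG_VALID.
 * With QUEUE_FLAG_NO_SG_MERGE set and the bvec count already below the
 * queue's segment limit, bi_vcnt (or bio_segments() for a cloned bio) is
 * used directly as a cheap estimate instead of walking all bvecs.
 */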
void blk_recount_segments(struct request_queue *q, struct bio *bio)
{
	unsigned short seg_cnt;

	/* estimate segment number by bi_vcnt for non-cloned bio */
	if (bio_flagged(bio, BIO_CLONED))
		seg_cnt = bio_segments(bio);
	else
		seg_cnt = bio->bi_vcnt;

	if (test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags) &&
			(seg_cnt < queue_max_segments(q)))
		bio->bi_phys_segments = seg_cnt;
	else {
		struct bio *nxt = bio->bi_next;

		bio->bi_next = NULL;
		bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio, false);
		bio->bi_next = nxt;
	}

	bio_set_flag(bio, BIO_SEG_VALID);
}
EXPORT_SYMBOL(blk_recount_segments);

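/*
 * Check whether the tail of @bio and the head of @nxt could be folded into
 * one physical segment: clustering must be enabled, the combined back/front
 * sizes must still fit in one max-sized segment, and the two boundary bvecs
 * must be physically mergeable.
 */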
static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
				   struct bio *nxt)
{
	struct bio_vec end_bv = { NULL }, nxt_bv;

	if (!blk_queue_cluster(q))
		return 0;

	if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
	    queue_max_segment_size(q))
		return 0;

	if (!bio_has_data(bio))
		return 1;

	bio_get_last_bvec(bio, &end_bv);
	bio_get_first_bvec(nxt, &nxt_bv);

	return biovec_phys_mergeable(q, &end_bv, &nxt_bv);
}

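/*
 * Append one bvec to the scatterlist being built: either grow the current
 * sg entry (clustering enabled, physically mergeable, still within the max
 * segment size) or start a new one.  *bvprv, *sg and *nsegs carry the state
 * across calls.
 */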
static inline void
__blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
		     struct scatterlist *sglist, struct bio_vec *bvprv,
		     struct scatterlist **sg, int *nsegs, int *cluster)
{

	int nbytes = bvec->bv_len;

	if (*sg && *cluster) {
		if ((*sg)->length + nbytes > queue_max_segment_size(q))
			goto new_segment;
		if (!biovec_phys_mergeable(q, bvprv, bvec))
			goto new_segment;

		(*sg)->length += nbytes;
	} else {
new_segment:
		if (!*sg)
			*sg = sglist;
		else {
			/*
			 * If the driver previously mapped a shorter
			 * list, we could see a termination bit
			 * prematurely unless it fully inits the sg
			 * table on each mapping. We KNOW that there
			 * must be more entries here or the driver
			 * would be buggy, so force clear the
			 * termination bit to avoid doing a full
			 * sg_init_table() in drivers for each command.
			 */
			sg_unmark_end(*sg);
			*sg = sg_next(*sg);
		}

		sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset);
		(*nsegs)++;
	}
	*bvprv = *bvec;
}

static inline int __blk_bvec_map_sg(struct request_queue *q, struct bio_vec bv,
		struct scatterlist *sglist, struct scatterlist **sg)
{
	*sg = sglist;
	sg_set_page(*sg, bv.bv_page, bv.bv_len, bv.bv_offset);
	return 1;
}

static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
			     struct scatterlist *sglist,
			     struct scatterlist **sg)
{
	struct bio_vec bvec, bvprv = { NULL };
	struct bvec_iter iter;
	int cluster = blk_queue_cluster(q), nsegs = 0;

	for_each_bio(bio)
		bio_for_each_segment(bvec, bio, iter)
			__blk_segment_map_sg(q, &bvec, sglist, &bvprv, sg,
					     &nsegs, &cluster);

	return nsegs;
}

/*
 * map a request to scatterlist, return number of sg entries set up. Caller
 * must make sure sg can hold rq->nr_phys_segments entries
 */
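/*
 * Sketch of typical driver usage (illustrative only; MY_MAX_SEGS and the
 * surrounding driver code are hypothetical, not part of this file):
 *
 *	struct scatterlist sgl[MY_MAX_SEGS];
 *	int nents;
 *
 *	sg_init_table(sgl, MY_MAX_SEGS);
 *	nents = blk_rq_map_sg(q, rq, sgl);
 *	nents = dma_map_sg(dev, sgl, nents, rq_data_dir(rq) == WRITE ?
 *			   DMA_TO_DEVICE : DMA_FROM_DEVICE);
 *	// program the controller with 'nents' address/length pairs
 */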
int blk_rq_map_sg(struct request_queue *q, struct request *rq,
		  struct scatterlist *sglist)
{
	struct scatterlist *sg = NULL;
	int nsegs = 0;

	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
		nsegs = __blk_bvec_map_sg(q, rq->special_vec, sglist, &sg);
	else if (rq->bio && bio_op(rq->bio) == REQ_OP_WRITE_SAME)
		nsegs = __blk_bvec_map_sg(q, bio_iovec(rq->bio), sglist, &sg);
	else if (rq->bio)
		nsegs = __blk_bios_map_sg(q, rq->bio, sglist, &sg);

	if (unlikely(rq->rq_flags & RQF_COPY_USER) &&
	    (blk_rq_bytes(rq) & q->dma_pad_mask)) {
		unsigned int pad_len =
			(q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;

		sg->length += pad_len;
		rq->extra_len += pad_len;
	}

	if (q->dma_drain_size && q->dma_drain_needed(rq)) {
		if (op_is_write(req_op(rq)))
			memset(q->dma_drain_buffer, 0, q->dma_drain_size);

		sg_unmark_end(sg);
		sg = sg_next(sg);
		sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
			    q->dma_drain_size,
			    ((unsigned long)q->dma_drain_buffer) &
			    (PAGE_SIZE - 1));
		nsegs++;
		rq->extra_len += q->dma_drain_size;
	}

	if (sg)
		sg_mark_end(sg);

	/*
	 * Something must have gone wrong if the computed number of
	 * segments is bigger than the number of req's physical segments
	 */
	WARN_ON(nsegs > blk_rq_nr_phys_segments(rq));

	return nsegs;
}
EXPORT_SYMBOL(blk_rq_map_sg);

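/*
 * Common tail of the ll_*_merge_fn() helpers below: account the physical
 * segments that @bio would add to @req and accept the merge, or mark the
 * request non-mergeable when the segment count or integrity constraints
 * would be exceeded.
 */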
static inline int ll_new_hw_segment(struct request_queue *q,
				    struct request *req,
				    struct bio *bio)
{
	int nr_phys_segs = bio_phys_segments(q, bio);

	if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(q))
		goto no_merge;

	if (blk_integrity_merge_bio(q, req, bio) == false)
		goto no_merge;

	/*
	 * This will form the start of a new hw segment.  Bump the
	 * physical segment counter.
	 */
	req->nr_phys_segments += nr_phys_segs;
	return 1;

no_merge:
	req_set_nomerge(q, req);
	return 0;
}

int ll_back_merge_fn(struct request_queue *q, struct request *req,
		     struct bio *bio)
{
	if (req_gap_back_merge(req, bio))
		return 0;
	if (blk_integrity_rq(req) &&
	    integrity_req_gap_back_merge(req, bio))
		return 0;
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req))) {
		req_set_nomerge(q, req);
		return 0;
	}
	if (!bio_flagged(req->biotail, BIO_SEG_VALID))
		blk_recount_segments(q, req->biotail);
	if (!bio_flagged(bio, BIO_SEG_VALID))
		blk_recount_segments(q, bio);

	return ll_new_hw_segment(q, req, bio);
}

int ll_front_merge_fn(struct request_queue *q, struct request *req,
		      struct bio *bio)
{

	if (req_gap_front_merge(req, bio))
		return 0;
	if (blk_integrity_rq(req) &&
	    integrity_req_gap_front_merge(req, bio))
		return 0;
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req, bio->bi_iter.bi_sector)) {
		req_set_nomerge(q, req);
		return 0;
	}
	if (!bio_flagged(bio, BIO_SEG_VALID))
		blk_recount_segments(q, bio);
	if (!bio_flagged(req->bio, BIO_SEG_VALID))
		blk_recount_segments(q, req->bio);

	return ll_new_hw_segment(q, req, bio);
}

/*
 * blk-mq uses req->special to carry normal driver per-request payload; it
 * does not indicate a prepared command that we cannot merge with.
 */
static bool req_no_special_merge(struct request *req)
{
	struct request_queue *q = req->q;

	return !q->mq_ops && req->special;
}

static bool req_attempt_discard_merge(struct request_queue *q, struct request *req,
		struct request *next)
{
	unsigned short segments = blk_rq_nr_discard_segments(req);

	if (segments >= queue_max_discard_segments(q))
		goto no_merge;
	if (blk_rq_sectors(req) + bio_sectors(next->bio) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
		goto no_merge;

	req->nr_phys_segments = segments + blk_rq_nr_discard_segments(next);
	return true;
no_merge:
	req_set_nomerge(q, req);
	return false;
}

static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
				struct request *next)
{
	int total_phys_segments;
	unsigned int seg_size =
		req->biotail->bi_seg_back_size + next->bio->bi_seg_front_size;

	/*
	 * First check if either of the requests is a re-queued request.
	 * We can't merge them if so.
	 */
	if (req_no_special_merge(req) || req_no_special_merge(next))
		return 0;

	if (req_gap_back_merge(req, next->bio))
		return 0;

	/*
	 * Will it become too large?
	 */
	if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
		return 0;

	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
	if (blk_phys_contig_segment(q, req->biotail, next->bio)) {
		if (req->nr_phys_segments == 1)
			req->bio->bi_seg_front_size = seg_size;
		if (next->nr_phys_segments == 1)
			next->biotail->bi_seg_back_size = seg_size;
		total_phys_segments--;
	}

	if (total_phys_segments > queue_max_segments(q))
		return 0;

	if (blk_integrity_merge_rq(q, req, next) == false)
		return 0;

	/* Merge is OK... */
	req->nr_phys_segments = total_phys_segments;
	return 1;
}

/**
 * blk_rq_set_mixed_merge - mark a request as mixed merge
 * @rq: request to mark as mixed merge
 *
 * Description:
 *     @rq is about to be mixed merged.  Make sure the attributes
 *     which can be mixed are set in each bio and mark @rq as mixed
 *     merged.
 */
void blk_rq_set_mixed_merge(struct request *rq)
{
	unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
	struct bio *bio;

	if (rq->rq_flags & RQF_MIXED_MERGE)
		return;

	/*
	 * @rq will no longer represent mixable attributes for all the
	 * contained bios.  It will just track those of the first one.
	 * Distribute the attributes to each bio.
	 */
	for (bio = rq->bio; bio; bio = bio->bi_next) {
		WARN_ON_ONCE((bio->bi_opf & REQ_FAILFAST_MASK) &&
			     (bio->bi_opf & REQ_FAILFAST_MASK) != ff);
		bio->bi_opf |= ff;
	}
	rq->rq_flags |= RQF_MIXED_MERGE;
}

static void blk_account_io_merge(struct request *req)
{
	if (blk_do_io_stat(req)) {
		struct hd_struct *part;
		int cpu;

		cpu = part_stat_lock();
		part = req->part;

		part_round_stats(req->q, cpu, part);
		part_dec_in_flight(req->q, part, rq_data_dir(req));

		hd_struct_put(part);
		part_stat_unlock();
	}
}
/*
 * Two cases of handling DISCARD merge:
 * If max_discard_segments > 1, the driver treats every bio
 * as a range and sends them to the controller together.  The ranges
 * need not be contiguous.
 * Otherwise, the bios/requests are handled the same as others,
 * which must be contiguous.
 */
static inline bool blk_discard_mergable(struct request *req)
{
	if (req_op(req) == REQ_OP_DISCARD &&
	    queue_max_discard_segments(req->q) > 1)
		return true;
	return false;
}

enum elv_merge blk_try_req_merge(struct request *req, struct request *next)
{
	if (blk_discard_mergable(req))
		return ELEVATOR_DISCARD_MERGE;
	else if (blk_rq_pos(req) + blk_rq_sectors(req) == blk_rq_pos(next))
		return ELEVATOR_BACK_MERGE;

	return ELEVATOR_NO_MERGE;
}

/*
 * For non-mq, this has to be called with the request spinlock acquired.
 * For mq with scheduling, the appropriate queue wide lock should be held.
 */
static struct request *attempt_merge(struct request_queue *q,
				     struct request *req, struct request *next)
{
	if (!q->mq_ops)
		lockdep_assert_held(q->queue_lock);

	if (!rq_mergeable(req) || !rq_mergeable(next))
		return NULL;

	if (req_op(req) != req_op(next))
		return NULL;

	if (rq_data_dir(req) != rq_data_dir(next)
	    || req->rq_disk != next->rq_disk
	    || req_no_special_merge(next))
		return NULL;

	if (req_op(req) == REQ_OP_WRITE_SAME &&
	    !blk_write_same_mergeable(req->bio, next->bio))
		return NULL;

	/*
	 * Don't allow merge of different write hints, or for a hint with
	 * non-hint IO.
	 */
	if (req->write_hint != next->write_hint)
		return NULL;

	/*
	 * If we are allowed to merge, then append the bio list from next
	 * to rq and release next.  merge_requests_fn will have updated the
	 * segment counts; update the sector counts here.  Handle DISCARDs
	 * separately, as they have separate settings.
	 */

	switch (blk_try_req_merge(req, next)) {
	case ELEVATOR_DISCARD_MERGE:
		if (!req_attempt_discard_merge(q, req, next))
			return NULL;
		break;
	case ELEVATOR_BACK_MERGE:
		if (!ll_merge_requests_fn(q, req, next))
			return NULL;
		break;
	default:
		return NULL;
	}

	/*
	 * If failfast settings disagree or any of the two is already
	 * a mixed merge, mark both as mixed before proceeding.  This
	 * makes sure that all involved bios have mixable attributes
	 * set properly.
	 */
	if (((req->rq_flags | next->rq_flags) & RQF_MIXED_MERGE) ||
	    (req->cmd_flags & REQ_FAILFAST_MASK) !=
	    (next->cmd_flags & REQ_FAILFAST_MASK)) {
		blk_rq_set_mixed_merge(req);
		blk_rq_set_mixed_merge(next);
	}

	/*
	 * At this point we have either done a back merge or front merge. We
	 * need the smaller start_time_ns of the merged requests to be the
	 * current request for accounting purposes.
	 */
	if (next->start_time_ns < req->start_time_ns)
		req->start_time_ns = next->start_time_ns;

	req->biotail->bi_next = next->bio;
	req->biotail = next->biotail;

	req->__data_len += blk_rq_bytes(next);

	if (req_op(req) != REQ_OP_DISCARD)
		elv_merge_requests(q, req, next);

	/*
	 * 'next' is going away, so update stats accordingly
	 */
	blk_account_io_merge(next);

	req->ioprio = ioprio_best(req->ioprio, next->ioprio);
	if (blk_rq_cpu_valid(next))
		req->cpu = next->cpu;

	/*
	 * ownership of bio passed from next to req, return 'next' for
	 * the caller to free
	 */
	next->bio = NULL;
	return next;
}

struct request *attempt_back_merge(struct request_queue *q, struct request *rq)
{
	struct request *next = elv_latter_request(q, rq);

	if (next)
		return attempt_merge(q, rq, next);

	return NULL;
}

struct request *attempt_front_merge(struct request_queue *q, struct request *rq)
{
	struct request *prev = elv_former_request(q, rq);

	if (prev)
		return attempt_merge(q, prev, rq);

	return NULL;
}

int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
			  struct request *next)
{
	struct elevator_queue *e = q->elevator;
	struct request *free;

	if (!e->uses_mq && e->type->ops.sq.elevator_allow_rq_merge_fn)
		if (!e->type->ops.sq.elevator_allow_rq_merge_fn(q, rq, next))
			return 0;

	free = attempt_merge(q, rq, next);
	if (free) {
		__blk_put_request(q, free);
		return 1;
	}

	return 0;
}

bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
{
	if (!rq_mergeable(rq) || !bio_mergeable(bio))
		return false;

	if (req_op(rq) != bio_op(bio))
		return false;

	/* different data direction or already started, don't merge */
	if (bio_data_dir(bio) != rq_data_dir(rq))
		return false;

	/* must be same device and not a special request */
	if (rq->rq_disk != bio->bi_disk || req_no_special_merge(rq))
		return false;

	/* only merge integrity protected bio into ditto rq */
	if (blk_integrity_merge_bio(rq->q, rq, bio) == false)
		return false;

	/* must be using the same buffer */
	if (req_op(rq) == REQ_OP_WRITE_SAME &&
	    !blk_write_same_mergeable(rq->bio, bio))
		return false;

	/*
	 * Don't allow merge of different write hints, or for a hint with
	 * non-hint IO.
	 */
	if (rq->write_hint != bio->bi_write_hint)
		return false;

	return true;
}

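/*
 * Decide how @bio lines up with @rq purely by position.  Worked example
 * (illustrative numbers): a request covering sectors 0..7 and a bio
 * starting at sector 8 is a back merge (0 + 8 == 8); a request starting at
 * sector 8 and an 8-sector bio at sector 0 is a front merge (8 - 8 == 0).
 * DISCARDs on queues with max_discard_segments > 1 do not need to be
 * contiguous and always report ELEVATOR_DISCARD_MERGE.
 */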
enum elv_merge blk_try_merge(struct request *rq, struct bio *bio)
{
	if (blk_discard_mergable(rq))
		return ELEVATOR_DISCARD_MERGE;
	else if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
		return ELEVATOR_BACK_MERGE;
	else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
		return ELEVATOR_FRONT_MERGE;
	return ELEVATOR_NO_MERGE;
}