/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2001 Jens Axboe <axboe@suse.de>
 */
#ifndef __LINUX_BIO_H
#define __LINUX_BIO_H

#include <linux/highmem.h>
#include <linux/mempool.h>
#include <linux/ioprio.h>
/* struct bio, bio_vec and BIO_* flags are defined in blk_types.h */
#include <linux/blk_types.h>

#define BIO_DEBUG

#ifdef BIO_DEBUG
#define BIO_BUG_ON	BUG_ON
#else
#define BIO_BUG_ON
#endif

#define BIO_MAX_PAGES		256

#define bio_prio(bio)			(bio)->bi_ioprio
#define bio_set_prio(bio, prio)		((bio)->bi_ioprio = prio)

#define bio_iter_iovec(bio, iter)				\
	bvec_iter_bvec((bio)->bi_io_vec, (iter))

#define bio_iter_page(bio, iter)				\
	bvec_iter_page((bio)->bi_io_vec, (iter))
#define bio_iter_len(bio, iter)					\
	bvec_iter_len((bio)->bi_io_vec, (iter))
#define bio_iter_offset(bio, iter)				\
	bvec_iter_offset((bio)->bi_io_vec, (iter))

#define bio_page(bio)		bio_iter_page((bio), (bio)->bi_iter)
#define bio_offset(bio)		bio_iter_offset((bio), (bio)->bi_iter)
#define bio_iovec(bio)		bio_iter_iovec((bio), (bio)->bi_iter)

#define bio_multiple_segments(bio)				\
	((bio)->bi_iter.bi_size != bio_iovec(bio).bv_len)

#define bvec_iter_sectors(iter)	((iter).bi_size >> 9)
#define bvec_iter_end_sector(iter) ((iter).bi_sector + bvec_iter_sectors((iter)))

#define bio_sectors(bio)	bvec_iter_sectors((bio)->bi_iter)
#define bio_end_sector(bio)	bvec_iter_end_sector((bio)->bi_iter)

/*
 * Return the data direction, READ or WRITE.
 */
#define bio_data_dir(bio) \
	(op_is_write(bio_op(bio)) ? WRITE : READ)

/*
 * Check whether this bio carries any data or not. A NULL bio is allowed.
 */
static inline bool bio_has_data(struct bio *bio)
{
	if (bio &&
	    bio->bi_iter.bi_size &&
	    bio_op(bio) != REQ_OP_DISCARD &&
	    bio_op(bio) != REQ_OP_SECURE_ERASE &&
	    bio_op(bio) != REQ_OP_WRITE_ZEROES)
		return true;

	return false;
}

static inline bool bio_no_advance_iter(const struct bio *bio)
{
	return bio_op(bio) == REQ_OP_DISCARD ||
	       bio_op(bio) == REQ_OP_SECURE_ERASE ||
	       bio_op(bio) == REQ_OP_WRITE_SAME ||
	       bio_op(bio) == REQ_OP_WRITE_ZEROES;
}

static inline bool bio_mergeable(struct bio *bio)
{
	if (bio->bi_opf & REQ_NOMERGE_FLAGS)
		return false;

	return true;
}

static inline unsigned int bio_cur_bytes(struct bio *bio)
{
	if (bio_has_data(bio))
		return bio_iovec(bio).bv_len;
	else /* dataless requests such as discard */
		return bio->bi_iter.bi_size;
}

static inline void *bio_data(struct bio *bio)
{
	if (bio_has_data(bio))
		return page_address(bio_page(bio)) + bio_offset(bio);

	return NULL;
}

/**
 * bio_full - check if the bio is full
 * @bio:	bio to check
 * @len:	length of one segment to be added
 *
 * Return true if @bio is full, i.e. if a segment of @len bytes cannot be
 * added to it; otherwise return false.
 */
static inline bool bio_full(struct bio *bio, unsigned len)
{
	if (bio->bi_vcnt >= bio->bi_max_vecs)
		return true;

	if (bio->bi_iter.bi_size > UINT_MAX - len)
		return true;

	return false;
}
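
/*
 * Example (an illustrative sketch, not kernel code): filling a bio one
 * page at a time and stopping once another PAGE_SIZE segment would no
 * longer fit. "pages", "nr_pages" and "i" are assumed to exist in the
 * caller; __bio_add_page() is declared later in this file.
 *
 *	while (i < nr_pages && !bio_full(bio, PAGE_SIZE))
 *		__bio_add_page(bio, pages[i++], PAGE_SIZE, 0);
 */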

static inline bool bio_next_segment(const struct bio *bio,
				    struct bvec_iter_all *iter)
{
	if (iter->idx >= bio->bi_vcnt)
		return false;

	bvec_advance(&bio->bi_io_vec[iter->idx], iter);
	return true;
}

/*
 * Drivers should _never_ use the "all" version: the bio may have been split
 * before it got to the driver, and the driver won't own all of it.
 */
#define bio_for_each_segment_all(bvl, bio, iter) \
	for (bvl = bvec_init_iter_all(&iter); bio_next_segment((bio), &iter); )
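
/*
 * Example (sketch): walking every segment of a bio its owner allocated
 * itself, e.g. to dirty the pages once a READ completes; this mirrors
 * the kind of walk bio_set_pages_dirty() performs.
 *
 *	struct bio_vec *bvec;
 *	struct bvec_iter_all iter_all;
 *
 *	bio_for_each_segment_all(bvec, bio, iter_all)
 *		set_page_dirty_lock(bvec->bv_page);
 */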

static inline void bio_advance_iter(const struct bio *bio,
				    struct bvec_iter *iter, unsigned int bytes)
{
	iter->bi_sector += bytes >> 9;

	if (bio_no_advance_iter(bio))
		iter->bi_size -= bytes;
	else
		bvec_iter_advance(bio->bi_io_vec, iter, bytes);
		/* TODO: It is reasonable to complete bio with error here. */
}

#define __bio_for_each_segment(bvl, bio, iter, start)			\
	for (iter = (start);						\
	     (iter).bi_size &&						\
		((bvl = bio_iter_iovec((bio), (iter))), 1);		\
	     bio_advance_iter((bio), &(iter), (bvl).bv_len))

#define bio_for_each_segment(bvl, bio, iter)				\
	__bio_for_each_segment(bvl, bio, iter, (bio)->bi_iter)
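
/*
 * Example (sketch): zeroing the data a bio covers, segment by segment;
 * compare zero_fill_bio_iter() declared later in this file.
 *
 *	struct bio_vec bv;
 *	struct bvec_iter iter;
 *
 *	bio_for_each_segment(bv, bio, iter) {
 *		char *p = kmap_atomic(bv.bv_page);
 *
 *		memset(p + bv.bv_offset, 0, bv.bv_len);
 *		kunmap_atomic(p);
 *	}
 */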

#define __bio_for_each_bvec(bvl, bio, iter, start)			\
	for (iter = (start);						\
	     (iter).bi_size &&						\
		((bvl = mp_bvec_iter_bvec((bio)->bi_io_vec, (iter))), 1); \
	     bio_advance_iter((bio), &(iter), (bvl).bv_len))

/* iterate over multi-page bvec */
#define bio_for_each_bvec(bvl, bio, iter)			\
	__bio_for_each_bvec(bvl, bio, iter, (bio)->bi_iter)
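
/*
 * Example (sketch): a driver mapping multi-page bvecs into hardware
 * segments. Unlike bio_for_each_segment(), each bvl here may span
 * several pages. my_map_segment() is hypothetical.
 *
 *	struct bio_vec bvl;
 *	struct bvec_iter iter;
 *
 *	bio_for_each_bvec(bvl, bio, iter)
 *		my_map_segment(bvl.bv_page, bvl.bv_offset, bvl.bv_len);
 */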

/*
 * Iterate over all multi-page bvecs. Drivers shouldn't use this version for
 * the same reasons as bio_for_each_segment_all().
 */
#define bio_for_each_bvec_all(bvl, bio, i)		\
	for (i = 0, bvl = bio_first_bvec_all(bio);	\
	     i < (bio)->bi_vcnt; i++, bvl++)

#define bio_iter_last(bvec, iter) ((iter).bi_size == (bvec).bv_len)

static inline unsigned bio_segments(struct bio *bio)
{
	unsigned segs = 0;
	struct bio_vec bv;
	struct bvec_iter iter;

	/*
	 * We special case discard/write same/write zeroes, because they
	 * interpret bi_size differently:
	 */

	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
	case REQ_OP_WRITE_ZEROES:
		return 0;
	case REQ_OP_WRITE_SAME:
		return 1;
	default:
		break;
	}

	bio_for_each_segment(bv, bio, iter)
		segs++;

	return segs;
}

/*
 * Get a reference to a bio, so it won't disappear. The intended use is
 * something like:
 *
 *	bio_get(bio);
 *	submit_bio(rw, bio);
 *	if (bio->bi_flags ...)
 *		do_something
 *	bio_put(bio);
 *
 * Without the bio_get(), the I/O could complete before submit_bio() returns,
 * and the bio would then already be freed when the if (bio->bi_flags ...)
 * check runs.
 */
static inline void bio_get(struct bio *bio)
{
	bio->bi_flags |= (1 << BIO_REFFED);
	smp_mb__before_atomic();
	atomic_inc(&bio->__bi_cnt);
}

static inline void bio_cnt_set(struct bio *bio, unsigned int count)
{
	if (count != 1) {
		bio->bi_flags |= (1 << BIO_REFFED);
		smp_mb();
	}
	atomic_set(&bio->__bi_cnt, count);
}

static inline bool bio_flagged(struct bio *bio, unsigned int bit)
{
	return (bio->bi_flags & (1U << bit)) != 0;
}

static inline void bio_set_flag(struct bio *bio, unsigned int bit)
{
	bio->bi_flags |= (1U << bit);
}

static inline void bio_clear_flag(struct bio *bio, unsigned int bit)
{
	bio->bi_flags &= ~(1U << bit);
}

static inline void bio_get_first_bvec(struct bio *bio, struct bio_vec *bv)
{
	*bv = bio_iovec(bio);
}

static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv)
{
	struct bvec_iter iter = bio->bi_iter;
	int idx;

	if (unlikely(!bio_multiple_segments(bio))) {
		*bv = bio_iovec(bio);
		return;
	}

	bio_advance_iter(bio, &iter, iter.bi_size);

	if (!iter.bi_bvec_done)
		idx = iter.bi_idx - 1;
	else	/* in the middle of bvec */
		idx = iter.bi_idx;

	*bv = bio->bi_io_vec[idx];

	/*
	 * iter.bi_bvec_done records the actual length of the last bvec
	 * if this bio ends in the middle of one io vector.
	 */
	if (iter.bi_bvec_done)
		bv->bv_len = iter.bi_bvec_done;
}

static inline struct bio_vec *bio_first_bvec_all(struct bio *bio)
{
	WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
	return bio->bi_io_vec;
}

static inline struct page *bio_first_page_all(struct bio *bio)
{
	return bio_first_bvec_all(bio)->bv_page;
}

static inline struct bio_vec *bio_last_bvec_all(struct bio *bio)
{
	WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
	return &bio->bi_io_vec[bio->bi_vcnt - 1];
}

enum bip_flags {
	BIP_BLOCK_INTEGRITY	= 1 << 0, /* block layer owns integrity data */
	BIP_MAPPED_INTEGRITY	= 1 << 1, /* ref tag has been remapped */
	BIP_CTRL_NOCHECK	= 1 << 2, /* disable HBA integrity checking */
	BIP_DISK_NOCHECK	= 1 << 3, /* disable disk integrity checking */
	BIP_IP_CHECKSUM		= 1 << 4, /* IP checksum */
};

/*
 * bio integrity payload
 */
struct bio_integrity_payload {
	struct bio		*bip_bio;	/* parent bio */

	struct bvec_iter	bip_iter;

	unsigned short		bip_slab;	/* slab the bip came from */
	unsigned short		bip_vcnt;	/* # of integrity bio_vecs */
	unsigned short		bip_max_vcnt;	/* integrity bio_vec slots */
	unsigned short		bip_flags;	/* control flags */

	struct bvec_iter	bio_iter;	/* for rewinding parent bio */

	struct work_struct	bip_work;	/* I/O completion */

	struct bio_vec		*bip_vec;
	struct bio_vec		bip_inline_vecs[];	/* embedded bvec array */
};

#if defined(CONFIG_BLK_DEV_INTEGRITY)

static inline struct bio_integrity_payload *bio_integrity(struct bio *bio)
{
	if (bio->bi_opf & REQ_INTEGRITY)
		return bio->bi_integrity;

	return NULL;
}

static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);

	if (bip)
		return bip->bip_flags & flag;

	return false;
}

static inline sector_t bip_get_seed(struct bio_integrity_payload *bip)
{
	return bip->bip_iter.bi_sector;
}

static inline void bip_set_seed(struct bio_integrity_payload *bip,
				sector_t seed)
{
	bip->bip_iter.bi_sector = seed;
}

#endif /* CONFIG_BLK_DEV_INTEGRITY */

extern void bio_trim(struct bio *bio, int offset, int size);
extern struct bio *bio_split(struct bio *bio, int sectors,
			     gfp_t gfp, struct bio_set *bs);

/**
 * bio_next_split - get next @sectors from a bio, splitting if necessary
 * @bio:	bio to split
 * @sectors:	number of sectors to split from the front of @bio
 * @gfp:	gfp mask
 * @bs:		bio set to allocate from
 *
 * Returns a bio representing the next @sectors of @bio - if the bio is smaller
 * than @sectors, returns the original bio unchanged.
 */
static inline struct bio *bio_next_split(struct bio *bio, int sectors,
					 gfp_t gfp, struct bio_set *bs)
{
	if (sectors >= bio_sectors(bio))
		return bio;

	return bio_split(bio, sectors, gfp, bs);
}
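
/*
 * Example (a hedged sketch of the usual stacking-driver pattern): carve
 * a bio into pieces of at most "max_sectors", chaining each front piece
 * to the advancing remainder so completions propagate to the parent.
 * "split_bs" is an assumed bio_set and my_submit() a hypothetical
 * per-piece submission helper.
 *
 *	struct bio *split;
 *
 *	do {
 *		split = bio_next_split(bio, max_sectors, GFP_NOIO, &split_bs);
 *		if (split != bio)
 *			bio_chain(split, bio);
 *		my_submit(split);
 *	} while (split != bio);
 */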

enum {
	BIOSET_NEED_BVECS = BIT(0),
	BIOSET_NEED_RESCUER = BIT(1),
};
extern int bioset_init(struct bio_set *, unsigned int, unsigned int, int flags);
extern void bioset_exit(struct bio_set *);
extern int biovec_init_pool(mempool_t *pool, int pool_entries);
extern int bioset_init_from_src(struct bio_set *bs, struct bio_set *src);

extern struct bio *bio_alloc_bioset(gfp_t, unsigned int, struct bio_set *);
extern void bio_put(struct bio *);

extern void __bio_clone_fast(struct bio *, struct bio *);
extern struct bio *bio_clone_fast(struct bio *, gfp_t, struct bio_set *);

extern struct bio_set fs_bio_set;

static inline struct bio *bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
{
	return bio_alloc_bioset(gfp_mask, nr_iovecs, &fs_bio_set);
}

static inline struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned int nr_iovecs)
{
	return bio_alloc_bioset(gfp_mask, nr_iovecs, NULL);
}
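
/*
 * Example (sketch): allocate a single-segment bio from fs_bio_set, aim
 * it at a device and read one page synchronously. "bdev", "page" and
 * "sector" are assumed to exist in the caller; error handling is elided.
 * bio_set_dev(), bio_add_page() and submit_bio_wait() appear later in
 * this file.
 *
 *	struct bio *bio = bio_alloc(GFP_KERNEL, 1);
 *
 *	bio_set_dev(bio, bdev);
 *	bio->bi_iter.bi_sector = sector;
 *	bio->bi_opf = REQ_OP_READ;
 *	bio_add_page(bio, page, PAGE_SIZE, 0);
 *	ret = submit_bio_wait(bio);
 *	bio_put(bio);
 */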

extern blk_qc_t submit_bio(struct bio *);

extern void bio_endio(struct bio *);

static inline void bio_io_error(struct bio *bio)
{
	bio->bi_status = BLK_STS_IOERR;
	bio_endio(bio);
}

static inline void bio_wouldblock_error(struct bio *bio)
{
	bio_set_flag(bio, BIO_QUIET);
	bio->bi_status = BLK_STS_AGAIN;
	bio_endio(bio);
}
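
/*
 * Example (sketch): a minimal ->bi_end_io handler for asynchronous
 * submission. "struct my_ctx" is hypothetical; the handler owns the
 * final bio_put().
 *
 *	static void my_end_io(struct bio *bio)
 *	{
 *		struct my_ctx *ctx = bio->bi_private;
 *
 *		if (bio->bi_status)
 *			ctx->error = blk_status_to_errno(bio->bi_status);
 *		complete(&ctx->done);
 *		bio_put(bio);
 *	}
 */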

struct request_queue;

extern int submit_bio_wait(struct bio *bio);
extern void bio_advance(struct bio *, unsigned);

extern void bio_init(struct bio *bio, struct bio_vec *table,
		     unsigned short max_vecs);
extern void bio_uninit(struct bio *);
extern void bio_reset(struct bio *);
void bio_chain(struct bio *, struct bio *);

extern int bio_add_page(struct bio *, struct page *, unsigned int, unsigned int);
extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
			   unsigned int, unsigned int);
bool __bio_try_merge_page(struct bio *bio, struct page *page,
		unsigned int len, unsigned int off, bool *same_page);
void __bio_add_page(struct bio *bio, struct page *page,
		unsigned int len, unsigned int off);
int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter);
void bio_release_pages(struct bio *bio, bool mark_dirty);
extern void bio_set_pages_dirty(struct bio *bio);
extern void bio_check_pages_dirty(struct bio *bio);

extern void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
			       struct bio *src, struct bvec_iter *src_iter);
extern void bio_copy_data(struct bio *dst, struct bio *src);
extern void bio_list_copy_data(struct bio *dst, struct bio *src);
extern void bio_free_pages(struct bio *bio);
void zero_fill_bio_iter(struct bio *bio, struct bvec_iter iter);
void bio_truncate(struct bio *bio, unsigned new_size);
void guard_bio_eod(struct bio *bio);

static inline void zero_fill_bio(struct bio *bio)
{
	zero_fill_bio_iter(bio, bio->bi_iter);
}

extern struct bio_vec *bvec_alloc(gfp_t, int, unsigned long *, mempool_t *);
extern void bvec_free(mempool_t *, struct bio_vec *, unsigned int);
extern unsigned int bvec_nr_vecs(unsigned short idx);
extern const char *bio_devname(struct bio *bio, char *buffer);

#define bio_set_dev(bio, bdev) 			\
do {						\
	if ((bio)->bi_disk != (bdev)->bd_disk)	\
		bio_clear_flag(bio, BIO_THROTTLED);\
	(bio)->bi_disk = (bdev)->bd_disk;	\
	(bio)->bi_partno = (bdev)->bd_partno;	\
	bio_associate_blkg(bio);		\
} while (0)

#define bio_copy_dev(dst, src)			\
do {						\
	(dst)->bi_disk = (src)->bi_disk;	\
	(dst)->bi_partno = (src)->bi_partno;	\
	bio_clone_blkg_association(dst, src);	\
} while (0)

#define bio_dev(bio) \
	disk_devt((bio)->bi_disk)

#ifdef CONFIG_BLK_CGROUP
void bio_associate_blkg(struct bio *bio);
void bio_associate_blkg_from_css(struct bio *bio,
				 struct cgroup_subsys_state *css);
void bio_clone_blkg_association(struct bio *dst, struct bio *src);
#else	/* CONFIG_BLK_CGROUP */
static inline void bio_associate_blkg(struct bio *bio) { }
static inline void bio_associate_blkg_from_css(struct bio *bio,
					       struct cgroup_subsys_state *css)
{ }
static inline void bio_clone_blkg_association(struct bio *dst,
					      struct bio *src) { }
#endif	/* CONFIG_BLK_CGROUP */

#ifdef CONFIG_HIGHMEM
/*
 * Remember: never ever re-enable interrupts between a bvec_kmap_irq and
 * the matching bvec_kunmap_irq!
 */
static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
{
	unsigned long addr;

	/*
	 * might not be a highmem page, but the preempt/irq count
	 * balancing is a lot nicer this way
	 */
	local_irq_save(*flags);
	addr = (unsigned long) kmap_atomic(bvec->bv_page);

	BUG_ON(addr & ~PAGE_MASK);

	return (char *) addr + bvec->bv_offset;
}

static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
{
	unsigned long ptr = (unsigned long) buffer & PAGE_MASK;

	kunmap_atomic((void *) ptr);
	local_irq_restore(*flags);
}

#else
static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
{
	return page_address(bvec->bv_page) + bvec->bv_offset;
}

static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
{
	*flags = 0;
}
#endif
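
/*
 * Example (sketch): copying the bytes of one bvec out while the mapping
 * is live, with interrupts off for the duration. "dst" is assumed.
 *
 *	unsigned long flags;
 *	char *buf = bvec_kmap_irq(bvec, &flags);
 *
 *	memcpy(dst, buf, bvec->bv_len);
 *	bvec_kunmap_irq(buf, &flags);
 */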

/*
 * BIO list management for use by remapping drivers (e.g. DM or MD) and loop.
 *
 * A bio_list anchors a singly-linked list of bios chained through the bi_next
 * member of the bio.  The bio_list also caches the last list member to allow
 * fast access to the tail.
 */
struct bio_list {
	struct bio *head;
	struct bio *tail;
};

static inline int bio_list_empty(const struct bio_list *bl)
{
	return bl->head == NULL;
}

static inline void bio_list_init(struct bio_list *bl)
{
	bl->head = bl->tail = NULL;
}

#define BIO_EMPTY_LIST	{ NULL, NULL }

#define bio_list_for_each(bio, bl) \
	for (bio = (bl)->head; bio; bio = bio->bi_next)

static inline unsigned bio_list_size(const struct bio_list *bl)
{
	unsigned sz = 0;
	struct bio *bio;

	bio_list_for_each(bio, bl)
		sz++;

	return sz;
}

static inline void bio_list_add(struct bio_list *bl, struct bio *bio)
{
	bio->bi_next = NULL;

	if (bl->tail)
		bl->tail->bi_next = bio;
	else
		bl->head = bio;

	bl->tail = bio;
}

static inline void bio_list_add_head(struct bio_list *bl, struct bio *bio)
{
	bio->bi_next = bl->head;

	bl->head = bio;

	if (!bl->tail)
		bl->tail = bio;
}

static inline void bio_list_merge(struct bio_list *bl, struct bio_list *bl2)
{
	if (!bl2->head)
		return;

	if (bl->tail)
		bl->tail->bi_next = bl2->head;
	else
		bl->head = bl2->head;

	bl->tail = bl2->tail;
}

static inline void bio_list_merge_head(struct bio_list *bl,
				       struct bio_list *bl2)
{
	if (!bl2->head)
		return;

	if (bl->head)
		bl2->tail->bi_next = bl->head;
	else
		bl->tail = bl2->tail;

	bl->head = bl2->head;
}

static inline struct bio *bio_list_peek(struct bio_list *bl)
{
	return bl->head;
}

static inline struct bio *bio_list_pop(struct bio_list *bl)
{
	struct bio *bio = bl->head;

	if (bio) {
		bl->head = bl->head->bi_next;
		if (!bl->head)
			bl->tail = NULL;

		bio->bi_next = NULL;
	}

	return bio;
}

static inline struct bio *bio_list_get(struct bio_list *bl)
{
	struct bio *bio = bl->head;

	bl->head = bl->tail = NULL;

	return bio;
}
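
/*
 * Example (sketch): deferring bios onto a list and draining them in
 * submission order, as remapping drivers commonly do. "some_bio" is
 * assumed to exist in the caller.
 *
 *	struct bio_list defer;
 *	struct bio *bio;
 *
 *	bio_list_init(&defer);
 *	bio_list_add(&defer, some_bio);
 *	while ((bio = bio_list_pop(&defer)))
 *		submit_bio(bio);
 */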

/*
 * Increment chain count for the bio. Make sure the CHAIN flag update
 * is visible before the raised count.
 */
static inline void bio_inc_remaining(struct bio *bio)
{
	bio_set_flag(bio, BIO_CHAIN);
	smp_mb__before_atomic();
	atomic_inc(&bio->__bi_remaining);
}

/*
 * bio_set is used to allow other portions of the IO system to
 * allocate their own private memory pools for bio and iovec structures.
 * These memory pools in turn all allocate from the bio_slab
 * and the bvec_slabs[].
 */
#define BIO_POOL_SIZE 2

struct bio_set {
	struct kmem_cache *bio_slab;
	unsigned int front_pad;

	mempool_t bio_pool;
	mempool_t bvec_pool;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	mempool_t bio_integrity_pool;
	mempool_t bvec_integrity_pool;
#endif

	/*
	 * Deadlock avoidance for stacking block drivers: see comments in
	 * bio_alloc_bioset() for details
	 */
	spinlock_t		rescue_lock;
	struct bio_list		rescue_list;
	struct work_struct	rescue_work;
	struct workqueue_struct	*rescue_workqueue;
};

struct biovec_slab {
	int nr_vecs;
	char *name;
	struct kmem_cache *slab;
};

static inline bool bioset_initialized(struct bio_set *bs)
{
	return bs->bio_slab != NULL;
}
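
/*
 * Example (sketch): a driver-private bio_set, with front_pad sized so a
 * wrapper struct can precede each allocated bio. "struct my_io" is
 * hypothetical.
 *
 *	static struct bio_set my_bio_set;
 *
 *	if (bioset_init(&my_bio_set, BIO_POOL_SIZE,
 *			offsetof(struct my_io, bio), BIOSET_NEED_BVECS))
 *		return -ENOMEM;
 *	...
 *	bioset_exit(&my_bio_set);
 */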

/*
 * A small number of entries is fine; this is not going to be performance
 * critical. Basically we just need to survive.
 */
#define BIO_SPLIT_ENTRIES 2

#if defined(CONFIG_BLK_DEV_INTEGRITY)

#define bip_for_each_vec(bvl, bip, iter)				\
	for_each_bvec(bvl, (bip)->bip_vec, iter, (bip)->bip_iter)

#define bio_for_each_integrity_vec(_bvl, _bio, _iter)			\
	for_each_bio(_bio)						\
		bip_for_each_vec(_bvl, _bio->bi_integrity, _iter)

extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int);
extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int);
extern bool bio_integrity_prep(struct bio *);
extern void bio_integrity_advance(struct bio *, unsigned int);
extern void bio_integrity_trim(struct bio *);
extern int bio_integrity_clone(struct bio *, struct bio *, gfp_t);
extern int bioset_integrity_create(struct bio_set *, int);
extern void bioset_integrity_free(struct bio_set *);
extern void bio_integrity_init(void);

#else /* CONFIG_BLK_DEV_INTEGRITY */

static inline void *bio_integrity(struct bio *bio)
{
	return NULL;
}

static inline int bioset_integrity_create(struct bio_set *bs, int pool_size)
{
	return 0;
}

static inline void bioset_integrity_free(struct bio_set *bs)
{
	return;
}

static inline bool bio_integrity_prep(struct bio *bio)
{
	return true;
}

static inline int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
				      gfp_t gfp_mask)
{
	return 0;
}

static inline void bio_integrity_advance(struct bio *bio,
					 unsigned int bytes_done)
{
	return;
}

static inline void bio_integrity_trim(struct bio *bio)
{
	return;
}

static inline void bio_integrity_init(void)
{
	return;
}

static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag)
{
	return false;
}

static inline void *bio_integrity_alloc(struct bio *bio, gfp_t gfp,
					unsigned int nr)
{
	return ERR_PTR(-EINVAL);
}

static inline int bio_integrity_add_page(struct bio *bio, struct page *page,
					 unsigned int len, unsigned int offset)
{
	return 0;
}

#endif /* CONFIG_BLK_DEV_INTEGRITY */

/*
 * Mark a bio as polled. Note that for async polled IO, the caller must
 * expect -EWOULDBLOCK if we cannot allocate a request (or other resources).
 * We cannot block waiting for requests on polled IO, as those completions
 * must be found by the caller. This is different than IRQ driven IO, where
 * it's safe to wait for IO to complete.
 */
static inline void bio_set_polled(struct bio *bio, struct kiocb *kiocb)
{
	bio->bi_opf |= REQ_HIPRI;
	if (!is_sync_kiocb(kiocb))
		bio->bi_opf |= REQ_NOWAIT;
}

#endif /* __LINUX_BIO_H */