/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BLK_INTERNAL_H
#define BLK_INTERNAL_H

#include <linux/blk-crypto.h>
#include <linux/memblock.h>	/* for max_pfn/max_low_pfn */
#include <xen/xen.h>
#include "blk-crypto-internal.h"

struct elevator_type;

/* Max future timer expiry for timeouts */
#define BLK_MAX_TIMEOUT		(5 * HZ)

extern struct dentry *blk_debugfs_root;

struct blk_flush_queue {
	unsigned int		flush_pending_idx:1;
	unsigned int		flush_running_idx:1;
	blk_status_t		rq_status;
	unsigned long		flush_pending_since;
	struct list_head	flush_queue[2];
	struct list_head	flush_data_in_flight;
	struct request		*flush_rq;

	spinlock_t		mq_flush_lock;
};

extern struct kmem_cache *blk_requestq_cachep;
extern struct kmem_cache *blk_requestq_srcu_cachep;
extern struct kobj_type blk_queue_ktype;
extern struct ida blk_queue_ida;

static inline void __blk_get_queue(struct request_queue *q)
{
	kobject_get(&q->kobj);
}

bool is_flush_rq(struct request *req);

struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size,
					      gfp_t flags);
void blk_free_flush_queue(struct blk_flush_queue *q);

void blk_freeze_queue(struct request_queue *q);
void __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic);
void blk_queue_start_drain(struct request_queue *q);
int __bio_queue_enter(struct request_queue *q, struct bio *bio);
bool submit_bio_checks(struct bio *bio);

static inline bool blk_try_enter_queue(struct request_queue *q, bool pm)
{
	rcu_read_lock();
	if (!percpu_ref_tryget_live_rcu(&q->q_usage_counter))
		goto fail;

	/*
	 * The code that increments the pm_only counter must ensure that the
	 * counter is globally visible before the queue is unfrozen.
	 */
	if (blk_queue_pm_only(q) &&
	    (!pm || queue_rpm_status(q) == RPM_SUSPENDED))
		goto fail_put;

	rcu_read_unlock();
	return true;

fail_put:
	blk_queue_exit(q);
fail:
	rcu_read_unlock();
	return false;
}

static inline int bio_queue_enter(struct bio *bio)
{
	struct request_queue *q = bdev_get_queue(bio->bi_bdev);

	if (blk_try_enter_queue(q, false))
		return 0;
	return __bio_queue_enter(q, bio);
}
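
/*
 * Illustrative usage sketch (not taken from a real caller): submitters
 * bracket queue access with bio_queue_enter()/blk_queue_exit():
 *
 *	if (bio_queue_enter(bio))
 *		return;		// queue unavailable, bio already ended
 *	... issue the bio ...
 *	blk_queue_exit(q);
 */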

#define BIO_INLINE_VECS 4
struct bio_vec *bvec_alloc(mempool_t *pool, unsigned short *nr_vecs,
		gfp_t gfp_mask);
void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned short nr_vecs);

static inline bool biovec_phys_mergeable(struct request_queue *q,
		struct bio_vec *vec1, struct bio_vec *vec2)
{
	unsigned long mask = queue_segment_boundary(q);
	phys_addr_t addr1 = page_to_phys(vec1->bv_page) + vec1->bv_offset;
	phys_addr_t addr2 = page_to_phys(vec2->bv_page) + vec2->bv_offset;

	if (addr1 + vec1->bv_len != addr2)
		return false;
	if (xen_domain() && !xen_biovec_phys_mergeable(vec1, vec2->bv_page))
		return false;
	if ((addr1 | mask) != ((addr2 + vec2->bv_len - 1) | mask))
		return false;
	return true;
}

static inline bool __bvec_gap_to_prev(struct request_queue *q,
		struct bio_vec *bprv, unsigned int offset)
{
	return (offset & queue_virt_boundary(q)) ||
		((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
}

/*
 * Check if adding a bio_vec after bprv with offset would create a gap in
 * the SG list. Most drivers don't care about this, but some do.
 */
static inline bool bvec_gap_to_prev(struct request_queue *q,
		struct bio_vec *bprv, unsigned int offset)
{
	if (!queue_virt_boundary(q))
		return false;
	return __bvec_gap_to_prev(q, bprv, offset);
}
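
/*
 * Example (illustrative): NVMe PCIe sets the virt boundary to its controller
 * page size minus one (typically 4095).  A bvec whose offset is not aligned
 * to that boundary, or that follows a bvec which does not end on such a
 * boundary, counts as a gap and prevents merging.
 */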

static inline bool rq_mergeable(struct request *rq)
{
	if (blk_rq_is_passthrough(rq))
		return false;

	if (req_op(rq) == REQ_OP_FLUSH)
		return false;

	if (req_op(rq) == REQ_OP_WRITE_ZEROES)
		return false;

	if (req_op(rq) == REQ_OP_ZONE_APPEND)
		return false;

	if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
		return false;
	if (rq->rq_flags & RQF_NOMERGE_FLAGS)
		return false;

	return true;
}

/*
 * There are two different ways to handle DISCARD merges:
 *  1) If max_discard_segments > 1, the driver treats every bio as a range and
 *     sends the bios to the controller together. The ranges don't need to be
 *     contiguous.
 *  2) Otherwise, the bios are merged like normal read/write requests, so the
 *     ranges need to be contiguous.
 */
static inline bool blk_discard_mergable(struct request *req)
{
	if (req_op(req) == REQ_OP_DISCARD &&
	    queue_max_discard_segments(req->q) > 1)
		return true;
	return false;
}
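
/*
 * For instance (illustrative, not exhaustive): a driver whose hardware can
 * take multiple ranges in one command (e.g. NVMe Dataset Management) sets
 * max_discard_segments > 1 and gets non-contiguous discard bios merged into
 * a single multi-range request; a driver that leaves it at 1 only ever sees
 * contiguous discard merges.
 */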

#ifdef CONFIG_BLK_DEV_INTEGRITY
void blk_flush_integrity(void);
bool __bio_integrity_endio(struct bio *);
void bio_integrity_free(struct bio *bio);
static inline bool bio_integrity_endio(struct bio *bio)
{
	if (bio_integrity(bio))
		return __bio_integrity_endio(bio);
	return true;
}

bool blk_integrity_merge_rq(struct request_queue *, struct request *,
		struct request *);
bool blk_integrity_merge_bio(struct request_queue *, struct request *,
		struct bio *);

static inline bool integrity_req_gap_back_merge(struct request *req,
		struct bio *next)
{
	struct bio_integrity_payload *bip = bio_integrity(req->bio);
	struct bio_integrity_payload *bip_next = bio_integrity(next);

	return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

static inline bool integrity_req_gap_front_merge(struct request *req,
		struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct bio_integrity_payload *bip_next = bio_integrity(req->bio);

	return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

int blk_integrity_add(struct gendisk *disk);
void blk_integrity_del(struct gendisk *);
#else /* CONFIG_BLK_DEV_INTEGRITY */
static inline bool blk_integrity_merge_rq(struct request_queue *rq,
		struct request *r1, struct request *r2)
{
	return true;
}
static inline bool blk_integrity_merge_bio(struct request_queue *rq,
		struct request *r, struct bio *b)
{
	return true;
}
static inline bool integrity_req_gap_back_merge(struct request *req,
		struct bio *next)
{
	return false;
}
static inline bool integrity_req_gap_front_merge(struct request *req,
		struct bio *bio)
{
	return false;
}

static inline void blk_flush_integrity(void)
{
}
static inline bool bio_integrity_endio(struct bio *bio)
{
	return true;
}
static inline void bio_integrity_free(struct bio *bio)
{
}
static inline int blk_integrity_add(struct gendisk *disk)
{
	return 0;
}
static inline void blk_integrity_del(struct gendisk *disk)
{
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */

unsigned long blk_rq_timeout(unsigned long timeout);
void blk_add_timer(struct request *req);
const char *blk_status_to_str(blk_status_t status);

bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs);
bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
			struct bio *bio, unsigned int nr_segs);

/*
 * Plug flush limits
 */
#define BLK_MAX_REQUEST_COUNT	32
#define BLK_PLUG_FLUSH_SIZE	(128 * 1024)

/*
 * Internal elevator interface
 */
#define ELV_ON_HASH(rq) ((rq)->rq_flags & RQF_HASHED)

void blk_insert_flush(struct request *rq);

int elevator_switch_mq(struct request_queue *q,
			struct elevator_type *new_e);
void elevator_exit(struct request_queue *q);
int elv_register_queue(struct request_queue *q, bool uevent);
void elv_unregister_queue(struct request_queue *q);

ssize_t part_size_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_stat_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_inflight_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_fail_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_fail_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count);
ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
ssize_t part_timeout_store(struct device *, struct device_attribute *,
		const char *, size_t);

static inline bool blk_may_split(struct request_queue *q, struct bio *bio)
{
	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
	case REQ_OP_WRITE_ZEROES:
	case REQ_OP_WRITE_SAME:
		return true; /* non-trivial splitting decisions */
	default:
		break;
	}

	/*
	 * All drivers must accept single-segment bios that are <= PAGE_SIZE.
	 * This is a quick and dirty check that relies on the fact that
	 * bi_io_vec[0] is always valid if a bio has data.  The check might
	 * lead to occasional false negatives when bios are cloned, but
	 * compared to the performance impact of the cloned bios themselves
	 * the extra trip through the split code doesn't matter anyway.
	 */
	return q->limits.chunk_sectors || bio->bi_vcnt != 1 ||
		bio->bi_io_vec->bv_len + bio->bi_io_vec->bv_offset > PAGE_SIZE;
}
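
/*
 * Quick worked example (illustrative, assumes 4 KiB pages): a freshly built
 * READ bio with a single bvec of bv_offset 0 and bv_len 4096, on a queue
 * with chunk_sectors == 0, fails every test above, so blk_may_split()
 * returns false and __blk_queue_split() is skipped entirely.
 */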

void __blk_queue_split(struct request_queue *q, struct bio **bio,
		unsigned int *nr_segs);
int ll_back_merge_fn(struct request *req, struct bio *bio,
		unsigned int nr_segs);
bool blk_attempt_req_merge(struct request_queue *q, struct request *rq,
		struct request *next);
unsigned int blk_recalc_rq_segments(struct request *rq);
void blk_rq_set_mixed_merge(struct request *rq);
bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
enum elv_merge blk_try_merge(struct request *rq, struct bio *bio);

int blk_dev_init(void);

/*
 * Contribute to IO statistics IFF:
 *
 *	a) it's attached to a gendisk, and
 *	b) the queue had IO stats enabled when this request was started
 */
static inline bool blk_do_io_stat(struct request *rq)
{
	return (rq->rq_flags & RQF_IO_STAT) && rq->q->disk;
}

void update_io_ticks(struct block_device *part, unsigned long now, bool end);

static inline void req_set_nomerge(struct request_queue *q, struct request *req)
{
	req->cmd_flags |= REQ_NOMERGE;
	if (req == q->last_merge)
		q->last_merge = NULL;
}

/*
 * The max size one bio can handle is UINT_MAX because bvec_iter.bi_size
 * is defined as 'unsigned int'.  In addition it has to be aligned to the
 * logical block size, which is the minimum unit accepted by the hardware.
 */
static inline unsigned int bio_allowed_max_sectors(struct request_queue *q)
{
	return round_down(UINT_MAX, queue_logical_block_size(q)) >> 9;
}
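
/*
 * bio_allowed_max_sectors() example (illustrative): with 512-byte logical
 * blocks this is round_down(0xffffffff, 512) >> 9 == 8388607 sectors, i.e.
 * just under 4 GiB per bio.
 */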

/*
 * The max bio size which is aligned to q->limits.discard_granularity.  This
 * is a hint for splitting large discard bios in the generic block layer; if
 * the device driver then needs to split the discard bio into smaller ones,
 * their bi_size will most likely already be aligned to the
 * discard_granularity of the device's queue.
 */
static inline unsigned int bio_aligned_discard_max_sectors(
					struct request_queue *q)
{
	return round_down(UINT_MAX, q->limits.discard_granularity) >>
			SECTOR_SHIFT;
}
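
/*
 * bio_aligned_discard_max_sectors() example (illustrative): with a
 * discard_granularity of 1 MiB this is round_down(0xffffffff, 1 << 20) >> 9
 * == 8386560 sectors (4095 MiB).
 */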

/*
 * Internal io_context interface
 */
struct io_cq *ioc_find_get_icq(struct request_queue *q);
struct io_cq *ioc_lookup_icq(struct request_queue *q);
#ifdef CONFIG_BLK_ICQ
void ioc_clear_queue(struct request_queue *q);
#else
static inline void ioc_clear_queue(struct request_queue *q)
{
}
#endif /* CONFIG_BLK_ICQ */

#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
extern ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page);
extern ssize_t blk_throtl_sample_time_store(struct request_queue *q,
	const char *page, size_t count);
extern void blk_throtl_bio_endio(struct bio *bio);
extern void blk_throtl_stat_add(struct request *rq, u64 time);
#else
static inline void blk_throtl_bio_endio(struct bio *bio) { }
static inline void blk_throtl_stat_add(struct request *rq, u64 time) { }
#endif

void __blk_queue_bounce(struct request_queue *q, struct bio **bio);

static inline bool blk_queue_may_bounce(struct request_queue *q)
{
	return IS_ENABLED(CONFIG_BOUNCE) &&
		q->limits.bounce == BLK_BOUNCE_HIGH &&
		max_low_pfn >= max_pfn;
}

static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
{
	if (unlikely(blk_queue_may_bounce(q) && bio_has_data(*bio)))
		__blk_queue_bounce(q, bio);
}

#ifdef CONFIG_BLK_CGROUP_IOLATENCY
extern int blk_iolatency_init(struct request_queue *q);
#else
static inline int blk_iolatency_init(struct request_queue *q) { return 0; }
#endif

struct bio *blk_next_bio(struct bio *bio, unsigned int nr_pages, gfp_t gfp);

#ifdef CONFIG_BLK_DEV_ZONED
void blk_queue_free_zone_bitmaps(struct request_queue *q);
void blk_queue_clear_zone_settings(struct request_queue *q);
#else
static inline void blk_queue_free_zone_bitmaps(struct request_queue *q) {}
static inline void blk_queue_clear_zone_settings(struct request_queue *q) {}
#endif

int blk_alloc_ext_minor(void);
void blk_free_ext_minor(unsigned int minor);
#define ADDPART_FLAG_NONE	0
#define ADDPART_FLAG_RAID	1
#define ADDPART_FLAG_WHOLEDISK	2
int bdev_add_partition(struct gendisk *disk, int partno, sector_t start,
		sector_t length);
int bdev_del_partition(struct gendisk *disk, int partno);
int bdev_resize_partition(struct gendisk *disk, int partno, sector_t start,
		sector_t length);

int bio_add_hw_page(struct request_queue *q, struct bio *bio,
		struct page *page, unsigned int len, unsigned int offset,
		unsigned int max_sectors, bool *same_page);

static inline struct kmem_cache *blk_get_queue_kmem_cache(bool srcu)
{
	if (srcu)
		return blk_requestq_srcu_cachep;
	return blk_requestq_cachep;
}
struct request_queue *blk_alloc_queue(int node_id, bool alloc_srcu);

int disk_scan_partitions(struct gendisk *disk, fmode_t mode);

int disk_alloc_events(struct gendisk *disk);
void disk_add_events(struct gendisk *disk);
void disk_del_events(struct gendisk *disk);
void disk_release_events(struct gendisk *disk);
extern struct device_attribute dev_attr_events;
extern struct device_attribute dev_attr_events_async;
extern struct device_attribute dev_attr_events_poll_msecs;

static inline void bio_clear_polled(struct bio *bio)
{
	/* can't support alloc cache if we turn off polling */
	bio_clear_flag(bio, BIO_PERCPU_CACHE);
	bio->bi_opf &= ~REQ_POLLED;
}

long blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg);
long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg);

extern const struct address_space_operations def_blk_aops;

int disk_register_independent_access_ranges(struct gendisk *disk,
				struct blk_independent_access_ranges *new_iars);
void disk_unregister_independent_access_ranges(struct gendisk *disk);

#ifdef CONFIG_FAIL_MAKE_REQUEST
bool should_fail_request(struct block_device *part, unsigned int bytes);
#else /* CONFIG_FAIL_MAKE_REQUEST */
static inline bool should_fail_request(struct block_device *part,
					unsigned int bytes)
{
	return false;
}
#endif /* CONFIG_FAIL_MAKE_REQUEST */

/*
 * Optimized request reference counting. Ideally we'd make timeouts be more
 * clever, as that's the only reason we need references at all... But until
 * this happens, this is faster than using refcount_t. Also see:
 *
 * abc54d634334 ("io_uring: switch to atomic_t for io_kiocb reference count")
 */
#define req_ref_zero_or_close_to_overflow(req)	\
	((unsigned int) atomic_read(&(req->ref)) + 127u <= 127u)

static inline bool req_ref_inc_not_zero(struct request *req)
{
	return atomic_inc_not_zero(&req->ref);
}

static inline bool req_ref_put_and_test(struct request *req)
{
	WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
	return atomic_dec_and_test(&req->ref);
}

static inline void req_ref_set(struct request *req, int value)
{
	atomic_set(&req->ref, value);
}

static inline int req_ref_read(struct request *req)
{
	return atomic_read(&req->ref);
}
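
/*
 * Usage sketch (illustrative, not lifted verbatim from blk-mq): the request
 * owner holds the initial reference; anyone else (e.g. the timeout handler)
 * may only touch a request it managed to grab with req_ref_inc_not_zero(),
 * and whoever sees req_ref_put_and_test() return true does the actual free:
 *
 *	if (!req_ref_inc_not_zero(rq))
 *		return;				// request already gone
 *	... inspect rq ...
 *	if (req_ref_put_and_test(rq))
 *		__blk_mq_free_request(rq);	// assumed free path
 */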

#endif /* BLK_INTERNAL_H */