/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BLK_MQ_H
#define BLK_MQ_H

#include <linux/blkdev.h>
#include <linux/sbitmap.h>
#include <linux/srcu.h>

struct blk_mq_tags;
struct blk_flush_queue;

/**
 * struct blk_mq_hw_ctx - State for a hardware queue facing the hardware
 * block device
 */
struct blk_mq_hw_ctx {
	struct {
		/** @lock: Protects the dispatch list. */
		spinlock_t lock;
		/**
		 * @dispatch: Used for requests that are ready to be
		 * dispatched to the hardware but for some reason (e.g. lack of
		 * resources) could not be sent to the hardware. As soon as the
		 * driver can send new requests, requests on this list will
		 * be sent first, to provide fairer dispatch.
		 */
		struct list_head dispatch;
		/**
		 * @state: BLK_MQ_S_* flags. Defines the state of the hw
		 * queue (active, scheduled to restart, stopped).
		 */
		unsigned long state;
	} ____cacheline_aligned_in_smp;

	/**
	 * @run_work: Used for scheduling a hardware queue run at a later time.
	 */
	struct delayed_work run_work;
	/** @cpumask: Map of available CPUs where this hctx can run. */
	cpumask_var_t cpumask;
	/**
	 * @next_cpu: Used by blk_mq_hctx_next_cpu() for round-robin CPU
	 * selection from @cpumask.
	 */
	int next_cpu;
	/**
	 * @next_cpu_batch: Counter of how many works are left in the batch
	 * before changing to the next CPU.
	 */
	int next_cpu_batch;

	/** @flags: BLK_MQ_F_* flags. Defines the behaviour of the queue. */
	unsigned long flags;

	/**
	 * @sched_data: Pointer owned by the IO scheduler attached to a request
	 * queue. It's up to the IO scheduler how to use this pointer.
	 */
	void *sched_data;
	/**
	 * @queue: Pointer to the request queue that owns this hardware context.
	 */
	struct request_queue *queue;
	/** @fq: Queue of requests that need to perform a flush operation. */
	struct blk_flush_queue *fq;

	/**
	 * @driver_data: Pointer to data owned by the block driver that created
	 * this hctx.
	 */
	void *driver_data;

	/**
	 * @ctx_map: Bitmap for each software queue. If a bit is set, there is
	 * a pending request in that software queue.
	 */
	struct sbitmap ctx_map;

	/**
	 * @dispatch_from: Software queue to be used when no scheduler was
	 * selected.
	 */
	struct blk_mq_ctx *dispatch_from;
	/**
	 * @dispatch_busy: Number used by blk_mq_update_dispatch_busy() to
	 * decide if the hw_queue is busy, using an Exponentially Weighted
	 * Moving Average algorithm.
	 */
	unsigned int dispatch_busy;

	/** @type: HCTX_TYPE_* flags. Type of hardware queue. */
	unsigned short type;
	/** @nr_ctx: Number of software queues. */
	unsigned short nr_ctx;
	/** @ctxs: Array of software queues. */
	struct blk_mq_ctx **ctxs;

	/** @dispatch_wait_lock: Lock for the dispatch_wait queue. */
	spinlock_t dispatch_wait_lock;
	/**
	 * @dispatch_wait: Waitqueue to put requests on when there is no tag
	 * available at the moment, to wait for another try in the future.
	 */
	wait_queue_entry_t dispatch_wait;

	/**
	 * @wait_index: Index of the next available dispatch_wait queue to
	 * insert requests.
	 */
	atomic_t wait_index;

	/**
	 * @tags: Tags owned by the block driver. A tag in this set is only
	 * assigned when a request is dispatched from a hardware queue.
	 */
	struct blk_mq_tags *tags;
	/**
	 * @sched_tags: Tags owned by the I/O scheduler. If there is an I/O
	 * scheduler associated with a request queue, a tag is assigned when
	 * that request is allocated. Else, this member is not used.
	 */
	struct blk_mq_tags *sched_tags;

	/** @queued: Number of queued requests. */
	unsigned long queued;
	/** @run: Number of dispatched requests. */
	unsigned long run;
#define BLK_MQ_MAX_DISPATCH_ORDER	7
	/** @dispatched: Number of requests dispatched, per dispatch batch size. */
	unsigned long dispatched[BLK_MQ_MAX_DISPATCH_ORDER];

	/** @numa_node: NUMA node the storage adapter has been connected to. */
	unsigned int numa_node;
	/** @queue_num: Index of this hardware queue. */
	unsigned int queue_num;

	/**
	 * @nr_active: Number of active requests. Only used when a tag set is
	 * shared across request queues.
	 */
	atomic_t nr_active;

	/** @cpuhp_online: List to store requests if a CPU is going offline. */
	struct hlist_node cpuhp_online;
	/** @cpuhp_dead: List to store requests if some CPU dies. */
	struct hlist_node cpuhp_dead;
	/** @kobj: Kernel object for sysfs. */
	struct kobject kobj;

	/** @poll_considered: Count of how many times blk_poll() was called. */
	unsigned long poll_considered;
	/** @poll_invoked: Count of how many requests blk_poll() polled. */
	unsigned long poll_invoked;
	/** @poll_success: Count of how many polled requests were completed. */
	unsigned long poll_success;

#ifdef CONFIG_BLK_DEBUG_FS
	/**
	 * @debugfs_dir: debugfs directory for this hardware queue. Named
	 * hctx<queue_num>.
	 */
	struct dentry *debugfs_dir;
	/** @sched_debugfs_dir: debugfs directory for the scheduler. */
	struct dentry *sched_debugfs_dir;
#endif

	/**
	 * @hctx_list: if this hctx is not in use, this is an entry in
	 * q->unused_hctx_list.
	 */
	struct list_head hctx_list;

	/**
	 * @srcu: Sleepable RCU. Use as lock when type of the hardware queue is
	 * blocking (BLK_MQ_F_BLOCKING). Must be the last member - see also
	 * blk_mq_hw_ctx_size().
	 */
	struct srcu_struct srcu[];
};

/**
 * struct blk_mq_queue_map - Map software queues to hardware queues
 * @mq_map:       CPU ID to hardware queue index map. This is an array
 *	with nr_cpu_ids elements. Each element has a value in the range
 *	[@queue_offset, @queue_offset + @nr_queues).
 * @nr_queues:    Number of hardware queues to map CPU IDs onto.
 * @queue_offset: First hardware queue to map onto. Used by the PCIe NVMe
 *	driver to map each hardware queue type (enum hctx_type) onto a distinct
 *	set of hardware queues.
 */
struct blk_mq_queue_map {
	unsigned int *mq_map;
	unsigned int nr_queues;
	unsigned int queue_offset;
};

/**
 * enum hctx_type - Type of hardware queue
 * @HCTX_TYPE_DEFAULT:	All I/O not otherwise accounted for.
 * @HCTX_TYPE_READ:	Just for READ I/O.
 * @HCTX_TYPE_POLL:	Polled I/O of any kind.
 * @HCTX_MAX_TYPES:	Number of types of hctx.
 */
enum hctx_type {
	HCTX_TYPE_DEFAULT,
	HCTX_TYPE_READ,
	HCTX_TYPE_POLL,

	HCTX_MAX_TYPES,
};
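
/*
 * Illustrative sketch, not part of this header: a driver that is happy with
 * the default CPU-to-queue spreading can implement its .map_queues callback
 * (see struct blk_mq_ops below) by handing each map to blk_mq_map_queues(),
 * declared later in this file. "example_map_queues" is a made-up name.
 *
 *	static int example_map_queues(struct blk_mq_tag_set *set)
 *	{
 *		return blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
 *	}
 */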

/**
 * struct blk_mq_tag_set - tag set that can be shared between request queues
 * @map:	   One or more ctx -> hctx mappings. One map exists for each
 *		   hardware queue type (enum hctx_type) that the driver wishes
 *		   to support. There are no restrictions on maps being of the
 *		   same size, and it's perfectly legal to share maps between
 *		   types.
 * @nr_maps:	   Number of elements in the @map array. A number in the range
 *		   [1, HCTX_MAX_TYPES].
 * @ops:	   Pointers to functions that implement block driver behavior.
 * @nr_hw_queues:  Number of hardware queues supported by the block driver that
 *		   owns this data structure.
 * @queue_depth:   Number of tags per hardware queue, reserved tags included.
 * @reserved_tags: Number of tags to set aside for BLK_MQ_REQ_RESERVED tag
 *		   allocations.
 * @cmd_size:	   Number of additional bytes to allocate per request. The block
 *		   driver owns these additional bytes.
 * @numa_node:	   NUMA node the storage adapter has been connected to.
 * @timeout:	   Request processing timeout in jiffies.
 * @flags:	   Zero or more BLK_MQ_F_* flags.
 * @driver_data:   Pointer to data owned by the block driver that created this
 *		   tag set.
 * @tags:	   Tag sets. One tag set per hardware queue. Has @nr_hw_queues
 *		   elements.
 * @tag_list_lock: Serializes tag_list accesses.
 * @tag_list:	   List of the request queues that use this tag set. See also
 *		   request_queue.tag_set_list.
 */
struct blk_mq_tag_set {
	struct blk_mq_queue_map map[HCTX_MAX_TYPES];
	unsigned int nr_maps;
	const struct blk_mq_ops *ops;
	unsigned int nr_hw_queues;
	unsigned int queue_depth;
	unsigned int reserved_tags;
	unsigned int cmd_size;
	int numa_node;
	unsigned int timeout;
	unsigned int flags;
	void *driver_data;

	struct blk_mq_tags **tags;

	struct mutex tag_list_lock;
	struct list_head tag_list;
};
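
/*
 * Illustrative sketch, not part of this header: typical tag set setup for a
 * simple single-queue driver. All "example_*" names and the per-request PDU
 * type are assumptions made up for the example; real drivers supply their own.
 *
 *	static struct blk_mq_tag_set example_tag_set;
 *
 *	static int example_init(void)
 *	{
 *		struct request_queue *q;
 *		int ret;
 *
 *		memset(&example_tag_set, 0, sizeof(example_tag_set));
 *		example_tag_set.ops = &example_mq_ops;
 *		example_tag_set.nr_hw_queues = 1;
 *		example_tag_set.queue_depth = 128;
 *		example_tag_set.numa_node = NUMA_NO_NODE;
 *		example_tag_set.cmd_size = sizeof(struct example_cmd);
 *		example_tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
 *
 *		ret = blk_mq_alloc_tag_set(&example_tag_set);
 *		if (ret)
 *			return ret;
 *
 *		q = blk_mq_init_queue(&example_tag_set);
 *		if (IS_ERR(q)) {
 *			blk_mq_free_tag_set(&example_tag_set);
 *			return PTR_ERR(q);
 *		}
 *
 *		... register the disk that uses q ...
 *		return 0;
 *	}
 */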

/**
 * struct blk_mq_queue_data - Data about a request inserted in a queue
 *
 * @rq:   Request pointer.
 * @last: If it is the last request in the queue.
 */
struct blk_mq_queue_data {
	struct request *rq;
	bool last;
};

typedef blk_status_t (queue_rq_fn)(struct blk_mq_hw_ctx *,
		const struct blk_mq_queue_data *);
typedef void (commit_rqs_fn)(struct blk_mq_hw_ctx *);
typedef bool (get_budget_fn)(struct blk_mq_hw_ctx *);
typedef void (put_budget_fn)(struct blk_mq_hw_ctx *);
typedef enum blk_eh_timer_return (timeout_fn)(struct request *, bool);
typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int);
typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
typedef int (init_request_fn)(struct blk_mq_tag_set *set, struct request *,
		unsigned int, unsigned int);
typedef void (exit_request_fn)(struct blk_mq_tag_set *set, struct request *,
		unsigned int);

typedef bool (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *,
		bool);
typedef bool (busy_tag_iter_fn)(struct request *, void *, bool);
typedef int (poll_fn)(struct blk_mq_hw_ctx *);
typedef int (map_queues_fn)(struct blk_mq_tag_set *set);
typedef bool (busy_fn)(struct request_queue *);
typedef void (complete_fn)(struct request *);
typedef void (cleanup_rq_fn)(struct request *);

/**
 * struct blk_mq_ops - Callback functions that implement block driver
 * behaviour.
 */
struct blk_mq_ops {
	/**
	 * @queue_rq: Queue a new request from block IO.
	 */
	queue_rq_fn *queue_rq;

	/**
	 * @commit_rqs: If a driver uses bd->last to judge when to submit
	 * requests to hardware, it must define this function. In case of errors
	 * that make us stop issuing further requests, this hook serves the
	 * purpose of kicking the hardware (which the last request otherwise
	 * would have done).
	 */
	commit_rqs_fn *commit_rqs;

	/**
	 * @get_budget: Reserve a budget before queueing a request. Once
	 * .queue_rq is run, it is the driver's responsibility to release the
	 * reserved budget. The failure case of .get_budget also has to be
	 * handled to avoid I/O deadlocks.
	 */
	get_budget_fn *get_budget;
	/**
	 * @put_budget: Release the reserved budget.
	 */
	put_budget_fn *put_budget;

	/**
	 * @timeout: Called on request timeout.
	 */
	timeout_fn *timeout;

	/**
	 * @poll: Called to poll for completion of a specific tag.
	 */
	poll_fn *poll;

	/**
	 * @complete: Mark the request as complete.
	 */
	complete_fn *complete;

	/**
	 * @init_hctx: Called when the block layer side of a hardware queue has
	 * been set up, allowing the driver to allocate/init matching
	 * structures.
	 */
	init_hctx_fn *init_hctx;
	/**
	 * @exit_hctx: Ditto for exit/teardown.
	 */
	exit_hctx_fn *exit_hctx;

	/**
	 * @init_request: Called for every command allocated by the block layer
	 * to allow the driver to set up driver specific data.
	 *
	 * Tags greater than or equal to queue_depth are used for setting up
	 * flush requests.
	 */
	init_request_fn *init_request;
	/**
	 * @exit_request: Ditto for exit/teardown.
	 */
	exit_request_fn *exit_request;

	/**
	 * @initialize_rq_fn: Called from inside blk_get_request().
	 */
	void (*initialize_rq_fn)(struct request *rq);

	/**
	 * @cleanup_rq: Called before freeing a request that has not completed
	 * yet; usually used to free the driver's private data.
	 */
	cleanup_rq_fn *cleanup_rq;

	/**
	 * @busy: If set, returns whether or not this queue currently is busy.
	 */
	busy_fn *busy;

	/**
	 * @map_queues: This allows drivers to specify their own queue mapping
	 * by overriding the setup-time function that builds the mq_map.
	 */
	map_queues_fn *map_queues;

#ifdef CONFIG_BLK_DEBUG_FS
	/**
	 * @show_rq: Used by the debugfs implementation to show driver-specific
	 * information about a request.
	 */
	void (*show_rq)(struct seq_file *m, struct request *rq);
#endif
};
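
/*
 * Illustrative sketch, not part of this header: the overall shape of a
 * minimal .queue_rq implementation, assuming the tag set was created with
 * cmd_size = sizeof(struct example_cmd). "example_submit" and
 * "example_kick_hw" stand in for whatever submission path and doorbell the
 * real hardware needs.
 *
 *	static blk_status_t example_queue_rq(struct blk_mq_hw_ctx *hctx,
 *					     const struct blk_mq_queue_data *bd)
 *	{
 *		struct request *rq = bd->rq;
 *		struct example_cmd *cmd = blk_mq_rq_to_pdu(rq);
 *
 *		blk_mq_start_request(rq);
 *
 *		if (!example_submit(hctx->driver_data, cmd))
 *			return BLK_STS_RESOURCE;
 *
 *		if (bd->last)
 *			example_kick_hw(hctx->driver_data);
 *		return BLK_STS_OK;
 *	}
 */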

enum {
	BLK_MQ_F_SHOULD_MERGE	= 1 << 0,
	BLK_MQ_F_TAG_SHARED	= 1 << 1,
	/*
	 * Set when this device requires underlying blk-mq device for
	 * completing IO:
	 */
	BLK_MQ_F_STACKING	= 1 << 2,
	BLK_MQ_F_BLOCKING	= 1 << 5,
	BLK_MQ_F_NO_SCHED	= 1 << 6,
	BLK_MQ_F_ALLOC_POLICY_START_BIT = 8,
	BLK_MQ_F_ALLOC_POLICY_BITS = 1,

	BLK_MQ_S_STOPPED	= 0,
	BLK_MQ_S_TAG_ACTIVE	= 1,
	BLK_MQ_S_SCHED_RESTART	= 2,

	/* hw queue is inactive after all its CPUs become offline */
	BLK_MQ_S_INACTIVE	= 3,

	BLK_MQ_MAX_DEPTH	= 10240,

	BLK_MQ_CPU_WORK_BATCH	= 8,
};
#define BLK_MQ_FLAG_TO_ALLOC_POLICY(flags) \
	((flags >> BLK_MQ_F_ALLOC_POLICY_START_BIT) & \
		((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1))
#define BLK_ALLOC_POLICY_TO_MQ_FLAG(policy) \
	((policy & ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1)) \
		<< BLK_MQ_F_ALLOC_POLICY_START_BIT)
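
/*
 * Illustrative sketch, not part of this header: a driver that wants FIFO tag
 * allocation encodes the policy into the tag set flags with the macro above,
 * for example:
 *
 *	set->flags = BLK_MQ_F_SHOULD_MERGE |
 *		     BLK_ALLOC_POLICY_TO_MQ_FLAG(BLK_TAG_ALLOC_FIFO);
 *
 * BLK_TAG_ALLOC_FIFO comes from <linux/blkdev.h>.
 */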

struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
struct request_queue *blk_mq_init_queue_data(struct blk_mq_tag_set *set,
		void *queuedata);
struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
						  struct request_queue *q,
						  bool elevator_init);
struct request_queue *blk_mq_init_sq_queue(struct blk_mq_tag_set *set,
					   const struct blk_mq_ops *ops,
					   unsigned int queue_depth,
					   unsigned int set_flags);
void blk_mq_unregister_dev(struct device *, struct request_queue *);

int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set);
void blk_mq_free_tag_set(struct blk_mq_tag_set *set);

void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);

void blk_mq_free_request(struct request *rq);

bool blk_mq_queue_inflight(struct request_queue *q);

enum {
	/* return when out of requests */
	BLK_MQ_REQ_NOWAIT	= (__force blk_mq_req_flags_t)(1 << 0),
	/* allocate from reserved pool */
	BLK_MQ_REQ_RESERVED	= (__force blk_mq_req_flags_t)(1 << 1),
	/* allocate internal/sched tag */
	BLK_MQ_REQ_INTERNAL	= (__force blk_mq_req_flags_t)(1 << 2),
	/* set RQF_PREEMPT */
	BLK_MQ_REQ_PREEMPT	= (__force blk_mq_req_flags_t)(1 << 3),
};

struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
		blk_mq_req_flags_t flags);
struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
		unsigned int op, blk_mq_req_flags_t flags,
		unsigned int hctx_idx);
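
/*
 * Illustrative sketch, not part of this header: allocating a driver-private
 * (passthrough) request from the reserved tag pool and freeing it again.
 * Everything between allocation and free is elided.
 *
 *	struct request *rq;
 *
 *	rq = blk_mq_alloc_request(q, REQ_OP_DRV_OUT, BLK_MQ_REQ_RESERVED);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *
 *	... fill in the request and its PDU, then issue it ...
 *
 *	blk_mq_free_request(rq);
 */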
struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag);

enum {
	BLK_MQ_UNIQUE_TAG_BITS = 16,
	BLK_MQ_UNIQUE_TAG_MASK = (1 << BLK_MQ_UNIQUE_TAG_BITS) - 1,
};

u32 blk_mq_unique_tag(struct request *rq);

static inline u16 blk_mq_unique_tag_to_hwq(u32 unique_tag)
{
	return unique_tag >> BLK_MQ_UNIQUE_TAG_BITS;
}

static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag)
{
	return unique_tag & BLK_MQ_UNIQUE_TAG_MASK;
}
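
/*
 * Illustrative sketch, not part of this header: drivers that need one tag
 * value which is unique across all hardware queues (SCSI HBAs, for example)
 * combine and split it with the helpers above.
 *
 *	u32 unique = blk_mq_unique_tag(rq);
 *	u16 hwq = blk_mq_unique_tag_to_hwq(unique);
 *	u16 tag = blk_mq_unique_tag_to_tag(unique);
 */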

/**
 * blk_mq_rq_state() - read the current MQ_RQ_* state of a request
 * @rq: target request.
 */
static inline enum mq_rq_state blk_mq_rq_state(struct request *rq)
{
	return READ_ONCE(rq->state);
}

static inline int blk_mq_request_started(struct request *rq)
{
	return blk_mq_rq_state(rq) != MQ_RQ_IDLE;
}

static inline int blk_mq_request_completed(struct request *rq)
{
	return blk_mq_rq_state(rq) == MQ_RQ_COMPLETE;
}
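
/*
 * Illustrative sketch, not part of this header: a .timeout handler can use
 * the helpers above to skip requests whose completion raced with the timer.
 * "example_abort_cmd" is a made-up name for the driver's abort path.
 *
 *	static enum blk_eh_timer_return example_timeout(struct request *rq,
 *							bool reserved)
 *	{
 *		if (blk_mq_request_completed(rq))
 *			return BLK_EH_DONE;
 *
 *		example_abort_cmd(blk_mq_rq_to_pdu(rq));
 *		return BLK_EH_RESET_TIMER;
 *	}
 */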

void blk_mq_start_request(struct request *rq);
void blk_mq_end_request(struct request *rq, blk_status_t error);
void __blk_mq_end_request(struct request *rq, blk_status_t error);

void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list);
void blk_mq_kick_requeue_list(struct request_queue *q);
void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
bool blk_mq_complete_request(struct request *rq);
void blk_mq_force_complete_rq(struct request *rq);
bool blk_mq_bio_list_merge(struct request_queue *q, struct list_head *list,
			   struct bio *bio, unsigned int nr_segs);
bool blk_mq_queue_stopped(struct request_queue *q);
void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_stop_hw_queues(struct request_queue *q);
void blk_mq_start_hw_queues(struct request_queue *q);
void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
void blk_mq_quiesce_queue(struct request_queue *q);
void blk_mq_unquiesce_queue(struct request_queue *q);
void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_run_hw_queues(struct request_queue *q, bool async);
void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs);
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
		busy_tag_iter_fn *fn, void *priv);
void blk_mq_tagset_wait_completed_request(struct blk_mq_tag_set *tagset);
void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_unfreeze_queue(struct request_queue *q);
void blk_freeze_queue_start(struct request_queue *q);
void blk_mq_freeze_queue_wait(struct request_queue *q);
int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
				     unsigned long timeout);
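
/*
 * Illustrative sketch, not part of this header: the usual pattern for a
 * queue-state change that must not race with request processing is to
 * bracket the update with a freeze/unfreeze pair.
 *
 *	blk_mq_freeze_queue(q);
 *	... no requests are in flight here; update limits, resize tags, ...
 *	blk_mq_unfreeze_queue(q);
 */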

int blk_mq_map_queues(struct blk_mq_queue_map *qmap);
void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues);

void blk_mq_quiesce_queue_nowait(struct request_queue *q);

unsigned int blk_mq_rq_cpu(struct request *rq);

/**
 * blk_mq_rq_from_pdu - cast a PDU to a request
 * @pdu: the PDU (Protocol Data Unit) to be cast
 *
 * Return: request
 *
 * Driver command data is immediately after the request. So subtract request
 * size to get back to the original request.
 */
static inline struct request *blk_mq_rq_from_pdu(void *pdu)
{
	return pdu - sizeof(struct request);
}

/**
 * blk_mq_rq_to_pdu - cast a request to a PDU
 * @rq: the request to be cast
 *
 * Return: pointer to the PDU
 *
 * Driver command data is immediately after the request. So add request to get
 * the PDU.
 */
static inline void *blk_mq_rq_to_pdu(struct request *rq)
{
	return rq + 1;
}
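
/*
 * Illustrative sketch, not part of this header: with cmd_size set to
 * sizeof(struct example_cmd) in the tag set, the two helpers above convert
 * between a request and the driver's per-request data in both directions.
 *
 *	struct example_cmd *cmd = blk_mq_rq_to_pdu(rq);
 *
 * and, e.g. in a completion path that only has the PDU at hand:
 *
 *	struct request *rq = blk_mq_rq_from_pdu(cmd);
 *
 *	blk_mq_end_request(rq, BLK_STS_OK);
 */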

#define queue_for_each_hw_ctx(q, hctx, i)				\
	for ((i) = 0; (i) < (q)->nr_hw_queues &&			\
	     ({ hctx = (q)->queue_hw_ctx[i]; 1; }); (i)++)

#define hctx_for_each_ctx(hctx, ctx, i)					\
	for ((i) = 0; (i) < (hctx)->nr_ctx &&				\
	     ({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++)
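
/*
 * Illustrative sketch, not part of this header: walking every hardware queue
 * of a request queue with the iterator above.
 *
 *	struct blk_mq_hw_ctx *hctx;
 *	int i;
 *
 *	queue_for_each_hw_ctx(q, hctx, i)
 *		pr_info("hctx %u maps %u software queues\n",
 *			hctx->queue_num, hctx->nr_ctx);
 */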
Jens Axboe | 320ae51 | 2013-10-24 09:20:05 +0100 | [diff] [blame] | 575 | |
Sagi Grimberg | 7b7ab78 | 2018-12-14 11:06:06 -0800 | [diff] [blame] | 576 | static inline blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx, |
| 577 | struct request *rq) |
| 578 | { |
| 579 | if (rq->tag != -1) |
| 580 | return rq->tag | (hctx->queue_num << BLK_QC_T_SHIFT); |
| 581 | |
| 582 | return rq->internal_tag | (hctx->queue_num << BLK_QC_T_SHIFT) | |
| 583 | BLK_QC_T_INTERNAL; |
| 584 | } |
| 585 | |
Ming Lei | 226b4fc | 2019-07-25 10:04:59 +0800 | [diff] [blame] | 586 | static inline void blk_mq_cleanup_rq(struct request *rq) |
| 587 | { |
| 588 | if (rq->q->mq_ops->cleanup_rq) |
| 589 | rq->q->mq_ops->cleanup_rq(rq); |
| 590 | } |
| 591 | |
Christoph Hellwig | 8cf7961 | 2020-04-25 09:53:36 +0200 | [diff] [blame] | 592 | blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio); |
| 593 | |
Jens Axboe | 320ae51 | 2013-10-24 09:20:05 +0100 | [diff] [blame] | 594 | #endif |