/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BLK_MQ_H
#define BLK_MQ_H

#include <linux/blkdev.h>
#include <linux/sbitmap.h>
#include <linux/srcu.h>

struct blk_mq_tags;
struct blk_flush_queue;

/**
 * struct blk_mq_hw_ctx - State for a hardware queue facing the hardware block device
 */
struct blk_mq_hw_ctx {
	struct {
		spinlock_t		lock;
		struct list_head	dispatch;
		unsigned long		state;		/* BLK_MQ_S_* flags */
	} ____cacheline_aligned_in_smp;

	struct delayed_work	run_work;
	cpumask_var_t		cpumask;
	int			next_cpu;
	int			next_cpu_batch;

	unsigned long		flags;		/* BLK_MQ_F_* flags */

	void			*sched_data;
	struct request_queue	*queue;
	struct blk_flush_queue	*fq;

	void			*driver_data;

	struct sbitmap		ctx_map;

	struct blk_mq_ctx	*dispatch_from;
	unsigned int		dispatch_busy;

	unsigned short		type;
	unsigned short		nr_ctx;
	struct blk_mq_ctx	**ctxs;

	spinlock_t		dispatch_wait_lock;
	wait_queue_entry_t	dispatch_wait;
	atomic_t		wait_index;

	struct blk_mq_tags	*tags;
	struct blk_mq_tags	*sched_tags;

	unsigned long		queued;
	unsigned long		run;
#define BLK_MQ_MAX_DISPATCH_ORDER	7
	unsigned long		dispatched[BLK_MQ_MAX_DISPATCH_ORDER];

	unsigned int		numa_node;
	unsigned int		queue_num;

	atomic_t		nr_active;
	unsigned int		nr_expired;

	struct hlist_node	cpuhp_dead;
	struct kobject		kobj;

	unsigned long		poll_considered;
	unsigned long		poll_invoked;
	unsigned long		poll_success;

#ifdef CONFIG_BLK_DEBUG_FS
	struct dentry		*debugfs_dir;
	struct dentry		*sched_debugfs_dir;
#endif

	/* Must be the last member - see also blk_mq_hw_ctx_size(). */
	struct srcu_struct	srcu[0];
};
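
/*
 * Illustrative sketch (not part of the upstream header): drivers
 * typically stash per-hardware-queue state in hctx->driver_data from
 * their ->init_hctx() callback.  The "mydev" names are assumptions for
 * the example:
 *
 *	static int mydev_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
 *				   unsigned int hctx_idx)
 *	{
 *		struct mydev *dev = data;
 *
 *		hctx->driver_data = &dev->hw_queues[hctx_idx];
 *		return 0;
 *	}
 */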

struct blk_mq_queue_map {
	unsigned int *mq_map;
	unsigned int nr_queues;
};

enum {
	HCTX_MAX_TYPES = 1,
};

struct blk_mq_tag_set {
	struct blk_mq_queue_map	map[HCTX_MAX_TYPES];
	const struct blk_mq_ops	*ops;
	unsigned int		nr_hw_queues;	/* nr hw queues across maps */
	unsigned int		queue_depth;	/* max hw supported */
	unsigned int		reserved_tags;
	unsigned int		cmd_size;	/* per-request extra data */
	int			numa_node;
	unsigned int		timeout;
	unsigned int		flags;		/* BLK_MQ_F_* */
	void			*driver_data;

	struct blk_mq_tags	**tags;

	struct mutex		tag_list_lock;
	struct list_head	tag_list;
};

struct blk_mq_queue_data {
	struct request *rq;
	bool last;
};

typedef blk_status_t (queue_rq_fn)(struct blk_mq_hw_ctx *,
		const struct blk_mq_queue_data *);
typedef bool (get_budget_fn)(struct blk_mq_hw_ctx *);
typedef void (put_budget_fn)(struct blk_mq_hw_ctx *);
typedef enum blk_eh_timer_return (timeout_fn)(struct request *, bool);
typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int);
typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
typedef int (init_request_fn)(struct blk_mq_tag_set *set, struct request *,
		unsigned int, unsigned int);
typedef void (exit_request_fn)(struct blk_mq_tag_set *set, struct request *,
		unsigned int);

typedef void (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *,
		bool);
typedef void (busy_tag_iter_fn)(struct request *, void *, bool);
typedef int (poll_fn)(struct blk_mq_hw_ctx *, unsigned int);
typedef int (map_queues_fn)(struct blk_mq_tag_set *set);
typedef bool (busy_fn)(struct request_queue *);
typedef void (complete_fn)(struct request *);

struct blk_mq_ops {
	/*
	 * Queue request
	 */
	queue_rq_fn		*queue_rq;

	/*
	 * Reserve budget before queueing a request.  Once .queue_rq is
	 * run, it is the driver's responsibility to release the reserved
	 * budget.  The failure case of .get_budget must also be handled
	 * to avoid I/O deadlocks.
	 */
	get_budget_fn		*get_budget;
	put_budget_fn		*put_budget;

	/*
	 * Called on request timeout
	 */
	timeout_fn		*timeout;

	/*
	 * Called to poll for completion of a specific tag.
	 */
	poll_fn			*poll;

	complete_fn		*complete;

	/*
	 * Called when the block layer side of a hardware queue has been
	 * set up, allowing the driver to allocate/init matching structures.
	 * Ditto for exit/teardown.
	 */
	init_hctx_fn		*init_hctx;
	exit_hctx_fn		*exit_hctx;

	/*
	 * Called for every command allocated by the block layer to allow
	 * the driver to set up driver specific data.
	 *
	 * A tag greater than or equal to queue_depth is reserved for
	 * setting up the flush request.
	 *
	 * Ditto for exit/teardown.
	 */
	init_request_fn		*init_request;
	exit_request_fn		*exit_request;
	/* Called from inside blk_get_request() */
	void (*initialize_rq_fn)(struct request *rq);

	/*
	 * If set, returns whether or not this queue is currently busy
	 */
	busy_fn			*busy;

	map_queues_fn		*map_queues;

#ifdef CONFIG_BLK_DEBUG_FS
	/*
	 * Used by the debugfs implementation to show driver-specific
	 * information about a request.
	 */
	void (*show_rq)(struct seq_file *m, struct request *rq);
#endif
};
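
/*
 * Illustrative sketch (not part of the upstream header): a minimal
 * ->queue_rq() implementation and ops table for a hypothetical driver.
 * The "mydev" names and mydev_submit() are assumptions for the example:
 *
 *	static blk_status_t mydev_queue_rq(struct blk_mq_hw_ctx *hctx,
 *					   const struct blk_mq_queue_data *bd)
 *	{
 *		struct mydev *dev = hctx->queue->queuedata;
 *
 *		blk_mq_start_request(bd->rq);
 *		if (!mydev_submit(dev, bd->rq))
 *			return BLK_STS_RESOURCE;
 *		return BLK_STS_OK;
 *	}
 *
 *	static const struct blk_mq_ops mydev_mq_ops = {
 *		.queue_rq = mydev_queue_rq,
 *	};
 */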

enum {
	BLK_MQ_F_SHOULD_MERGE	= 1 << 0,
	BLK_MQ_F_TAG_SHARED	= 1 << 1,
	BLK_MQ_F_SG_MERGE	= 1 << 2,
	BLK_MQ_F_BLOCKING	= 1 << 5,
	BLK_MQ_F_NO_SCHED	= 1 << 6,
	BLK_MQ_F_ALLOC_POLICY_START_BIT = 8,
	BLK_MQ_F_ALLOC_POLICY_BITS = 1,

	BLK_MQ_S_STOPPED	= 0,
	BLK_MQ_S_TAG_ACTIVE	= 1,
	BLK_MQ_S_SCHED_RESTART	= 2,

	BLK_MQ_MAX_DEPTH	= 10240,

	BLK_MQ_CPU_WORK_BATCH	= 8,
};
#define BLK_MQ_FLAG_TO_ALLOC_POLICY(flags) \
	((flags >> BLK_MQ_F_ALLOC_POLICY_START_BIT) & \
		((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1))
#define BLK_ALLOC_POLICY_TO_MQ_FLAG(policy) \
	((policy & ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1)) \
		<< BLK_MQ_F_ALLOC_POLICY_START_BIT)
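
/*
 * Example (sketch): a driver selecting a tag allocation policy encodes
 * it into the BLK_MQ_F_* flags word; BLK_TAG_ALLOC_FIFO comes from
 * <linux/blkdev.h>:
 *
 *	set->flags = BLK_MQ_F_SHOULD_MERGE |
 *		     BLK_ALLOC_POLICY_TO_MQ_FLAG(BLK_TAG_ALLOC_FIFO);
 */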

struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
						  struct request_queue *q);
struct request_queue *blk_mq_init_sq_queue(struct blk_mq_tag_set *set,
						const struct blk_mq_ops *ops,
						unsigned int queue_depth,
						unsigned int set_flags);
int blk_mq_register_dev(struct device *, struct request_queue *);
void blk_mq_unregister_dev(struct device *, struct request_queue *);

int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set);
void blk_mq_free_tag_set(struct blk_mq_tag_set *set);
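
/*
 * Illustrative setup sketch: fill out a tag set, allocate it, then
 * create a queue.  The sizes, mydev_mq_ops and struct mydev_cmd are
 * assumptions for the example; error handling is trimmed:
 *
 *	memset(&dev->tag_set, 0, sizeof(dev->tag_set));
 *	dev->tag_set.ops = &mydev_mq_ops;
 *	dev->tag_set.nr_hw_queues = 1;
 *	dev->tag_set.queue_depth = 64;
 *	dev->tag_set.numa_node = NUMA_NO_NODE;
 *	dev->tag_set.cmd_size = sizeof(struct mydev_cmd);
 *	dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
 *
 *	ret = blk_mq_alloc_tag_set(&dev->tag_set);
 *	if (ret)
 *		return ret;
 *	q = blk_mq_init_queue(&dev->tag_set);
 *	if (IS_ERR(q)) {
 *		blk_mq_free_tag_set(&dev->tag_set);
 *		return PTR_ERR(q);
 *	}
 *
 * For the common single-queue case, blk_mq_init_sq_queue() wraps the
 * tag set allocation and queue creation in one call.
 */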

void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);

void blk_mq_free_request(struct request *rq);
bool blk_mq_can_queue(struct blk_mq_hw_ctx *);

enum {
	/* return when out of requests */
	BLK_MQ_REQ_NOWAIT	= (__force blk_mq_req_flags_t)(1 << 0),
	/* allocate from reserved pool */
	BLK_MQ_REQ_RESERVED	= (__force blk_mq_req_flags_t)(1 << 1),
	/* allocate internal/sched tag */
	BLK_MQ_REQ_INTERNAL	= (__force blk_mq_req_flags_t)(1 << 2),
	/* set RQF_PREEMPT */
	BLK_MQ_REQ_PREEMPT	= (__force blk_mq_req_flags_t)(1 << 3),
};

struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
		blk_mq_req_flags_t flags);
struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
		unsigned int op, blk_mq_req_flags_t flags,
		unsigned int hctx_idx);
struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag);
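
/*
 * Example (sketch): allocating a driver-private request without
 * sleeping; REQ_OP_DRV_OUT is one possible opcode:
 *
 *	rq = blk_mq_alloc_request(q, REQ_OP_DRV_OUT, BLK_MQ_REQ_NOWAIT);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 */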

enum {
	BLK_MQ_UNIQUE_TAG_BITS = 16,
	BLK_MQ_UNIQUE_TAG_MASK = (1 << BLK_MQ_UNIQUE_TAG_BITS) - 1,
};

u32 blk_mq_unique_tag(struct request *rq);

static inline u16 blk_mq_unique_tag_to_hwq(u32 unique_tag)
{
	return unique_tag >> BLK_MQ_UNIQUE_TAG_BITS;
}

static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag)
{
	return unique_tag & BLK_MQ_UNIQUE_TAG_MASK;
}
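
/*
 * Example: a unique tag packs the hardware queue index into the upper
 * 16 bits and the per-queue tag into the lower 16, so it splits as:
 *
 *	u32 unique = blk_mq_unique_tag(rq);
 *	u16 hwq = blk_mq_unique_tag_to_hwq(unique);
 *	u16 tag = blk_mq_unique_tag_to_tag(unique);
 */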

int blk_mq_request_started(struct request *rq);
void blk_mq_start_request(struct request *rq);
void blk_mq_end_request(struct request *rq, blk_status_t error);
void __blk_mq_end_request(struct request *rq, blk_status_t error);

void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list);
void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
				bool kick_requeue_list);
void blk_mq_kick_requeue_list(struct request_queue *q);
void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
void blk_mq_complete_request(struct request *rq);
bool blk_mq_bio_list_merge(struct request_queue *q, struct list_head *list,
			   struct bio *bio);
bool blk_mq_queue_stopped(struct request_queue *q);
void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_stop_hw_queues(struct request_queue *q);
void blk_mq_start_hw_queues(struct request_queue *q);
void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
void blk_mq_quiesce_queue(struct request_queue *q);
void blk_mq_unquiesce_queue(struct request_queue *q);
void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
bool blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_run_hw_queues(struct request_queue *q, bool async);
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
		busy_tag_iter_fn *fn, void *priv);
void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_unfreeze_queue(struct request_queue *q);
void blk_freeze_queue_start(struct request_queue *q);
void blk_mq_freeze_queue_wait(struct request_queue *q);
int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
				     unsigned long timeout);
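
/*
 * Example (sketch): freezing drains all in-flight I/O and blocks new
 * submissions, which lets a driver update queue state safely:
 *
 *	blk_mq_freeze_queue(q);
 *	... update limits or other queue state ...
 *	blk_mq_unfreeze_queue(q);
 */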

int blk_mq_map_queues(struct blk_mq_queue_map *qmap);
void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues);

void blk_mq_quiesce_queue_nowait(struct request_queue *q);

unsigned int blk_mq_rq_cpu(struct request *rq);

/**
 * blk_mq_mark_complete() - Set request state to complete
 * @rq: request to set to complete state
 *
 * Returns true if request state was successfully set to complete. If
 * successful, the caller is responsible for seeing this request is ended, as
 * blk_mq_complete_request will not work again.
 */
static inline bool blk_mq_mark_complete(struct request *rq)
{
	return cmpxchg(&rq->state, MQ_RQ_IN_FLIGHT, MQ_RQ_COMPLETE) ==
			MQ_RQ_IN_FLIGHT;
}
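
/*
 * Illustrative use (sketch): a timeout handler can claim a request so
 * that a racing normal completion cannot also end it; the error
 * handling shown is hypothetical:
 *
 *	if (blk_mq_mark_complete(rq))
 *		mydev_fail_request(rq);
 */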

/*
 * Driver command data is immediately after the request. So subtract request
 * size to get back to the original request, add request size to get the PDU.
 */
static inline struct request *blk_mq_rq_from_pdu(void *pdu)
{
	return pdu - sizeof(struct request);
}
static inline void *blk_mq_rq_to_pdu(struct request *rq)
{
	return rq + 1;
}
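
/*
 * Example: with cmd_size set to sizeof(struct mydev_cmd) in the tag set
 * (a hypothetical per-request PDU type), the two helpers are inverses:
 *
 *	struct mydev_cmd *cmd = blk_mq_rq_to_pdu(rq);
 *	struct request *same_rq = blk_mq_rq_from_pdu(cmd);
 */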

#define queue_for_each_hw_ctx(q, hctx, i)				\
	for ((i) = 0; (i) < (q)->nr_hw_queues &&			\
	     ({ hctx = (q)->queue_hw_ctx[i]; 1; }); (i)++)

#define hctx_for_each_ctx(hctx, ctx, i)					\
	for ((i) = 0; (i) < (hctx)->nr_ctx &&				\
	     ({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++)
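
/*
 * Example: walking every hardware queue of a request queue:
 *
 *	struct blk_mq_hw_ctx *hctx;
 *	int i;
 *
 *	queue_for_each_hw_ctx(q, hctx, i)
 *		pr_info("hctx %u on node %u\n", hctx->queue_num,
 *			hctx->numa_node);
 */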

#endif