Christoph Hellwig | 3dcf60b | 2019-04-30 14:42:43 -0400 | [diff] [blame] | 1 | // SPDX-License-Identifier: GPL-2.0 |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 2 | /* |
| 3 | * MQ Deadline i/o scheduler - adaptation of the legacy deadline scheduler, |
| 4 | * for the blk-mq scheduling framework |
| 5 | * |
| 6 | * Copyright (C) 2016 Jens Axboe <axboe@kernel.dk> |
| 7 | */ |
| 8 | #include <linux/kernel.h> |
| 9 | #include <linux/fs.h> |
| 10 | #include <linux/blkdev.h> |
| 11 | #include <linux/blk-mq.h> |
| 12 | #include <linux/elevator.h> |
| 13 | #include <linux/bio.h> |
| 14 | #include <linux/module.h> |
| 15 | #include <linux/slab.h> |
| 16 | #include <linux/init.h> |
| 17 | #include <linux/compiler.h> |
| 18 | #include <linux/rbtree.h> |
| 19 | #include <linux/sbitmap.h> |
| 20 | |
Chaitanya Kulkarni | b357e4a | 2021-02-21 21:29:59 -0800 | [diff] [blame] | 21 | #include <trace/events/block.h> |
| 22 | |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 23 | #include "blk.h" |
| 24 | #include "blk-mq.h" |
Omar Sandoval | daaadb3 | 2017-05-04 00:31:34 -0700 | [diff] [blame] | 25 | #include "blk-mq-debugfs.h" |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 26 | #include "blk-mq-tag.h" |
| 27 | #include "blk-mq-sched.h" |
| 28 | |
| 29 | /* |
Mauro Carvalho Chehab | 898bd37 | 2019-04-18 19:45:00 -0300 | [diff] [blame] | 30 | * See Documentation/block/deadline-iosched.rst |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 31 | */ |
| 32 | static const int read_expire = HZ / 2; /* max time before a read is submitted. */ |
| 33 | static const int write_expire = 5 * HZ; /* ditto for writes, these limits are SOFT! */ |
| 34 | static const int writes_starved = 2; /* max times reads can starve a write */ |
| 35 | static const int fifo_batch = 16; /* # of sequential requests treated as one |
| 36 | by the above parameters. For throughput. */ |
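/*
 * Example with the defaults above: a read inserted at time t should normally
 * be dispatched by t + 500 ms and a write by t + 5 s. Expiry is only checked
 * between batches of up to fifo_batch (16) requests, which is why the limits
 * above are soft rather than hard guarantees.
 */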
| 37 | |
Bart Van Assche | 004a26b | 2021-06-17 17:44:49 -0700 | [diff] [blame] | 38 | enum dd_data_dir { |
| 39 | DD_READ = READ, |
| 40 | DD_WRITE = WRITE, |
| 41 | }; |
| 42 | |
| 43 | enum { DD_DIR_COUNT = 2 }; |
| 44 | |
Bart Van Assche | c807ab5 | 2021-06-17 17:44:53 -0700 | [diff] [blame] | 45 | enum dd_prio { |
| 46 | DD_RT_PRIO = 0, |
| 47 | DD_BE_PRIO = 1, |
| 48 | DD_IDLE_PRIO = 2, |
| 49 | DD_PRIO_MAX = 2, |
| 50 | }; |
| 51 | |
| 52 | enum { DD_PRIO_COUNT = 3 }; |
| 53 | |
Tejun Heo | 0f78399 | 2021-08-11 07:41:45 -1000 | [diff] [blame] | 54 | /* I/O statistics per I/O priority. */ |
| 55 | struct io_stats_per_prio { |
| 56 | local_t inserted; |
| 57 | local_t merged; |
| 58 | local_t dispatched; |
| 59 | local_t completed; |
| 60 | }; |
| 61 | |
Bart Van Assche | 38ba64d | 2021-06-17 17:44:54 -0700 | [diff] [blame] | 62 | /* I/O statistics for all I/O priorities (enum dd_prio). */ |
| 63 | struct io_stats { |
| 64 | struct io_stats_per_prio stats[DD_PRIO_COUNT]; |
| 65 | }; |
| 66 | |
Bart Van Assche | c807ab5 | 2021-06-17 17:44:53 -0700 | [diff] [blame] | 67 | /* |
| 68 | * Deadline scheduler data per I/O priority (enum dd_prio). Requests are |
| 69 | * present on both sort_list[] and fifo_list[]. |
| 70 | */ |
| 71 | struct dd_per_prio { |
| 72 | struct list_head dispatch; |
| 73 | struct rb_root sort_list[DD_DIR_COUNT]; |
| 74 | struct list_head fifo_list[DD_DIR_COUNT]; |
 | 75 | /* Next request in sector sort order. Read, write or both are NULL. */ |
| 76 | struct request *next_rq[DD_DIR_COUNT]; |
| 77 | }; |
| 78 | |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 79 | struct deadline_data { |
| 80 | /* |
| 81 | * run time data |
| 82 | */ |
| 83 | |
Bart Van Assche | c807ab5 | 2021-06-17 17:44:53 -0700 | [diff] [blame] | 84 | struct dd_per_prio per_prio[DD_PRIO_COUNT]; |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 85 | |
Bart Van Assche | d672d32 | 2021-06-17 17:44:52 -0700 | [diff] [blame] | 86 | /* Data direction of latest dispatched request. */ |
| 87 | enum dd_data_dir last_dir; |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 88 | unsigned int batching; /* number of sequential requests made */ |
| 89 | unsigned int starved; /* times reads have starved writes */ |
| 90 | |
Bart Van Assche | 38ba64d | 2021-06-17 17:44:54 -0700 | [diff] [blame] | 91 | struct io_stats __percpu *stats; |
| 92 | |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 93 | /* |
| 94 | * settings that change how the i/o scheduler behaves |
| 95 | */ |
Bart Van Assche | 004a26b | 2021-06-17 17:44:49 -0700 | [diff] [blame] | 96 | int fifo_expire[DD_DIR_COUNT]; |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 97 | int fifo_batch; |
| 98 | int writes_starved; |
| 99 | int front_merges; |
Bart Van Assche | 0775758 | 2021-06-17 17:44:51 -0700 | [diff] [blame] | 100 | u32 async_depth; |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 101 | |
| 102 | spinlock_t lock; |
Damien Le Moal | 5700f69 | 2017-12-21 15:43:40 +0900 | [diff] [blame] | 103 | spinlock_t zone_lock; |
Bart Van Assche | c807ab5 | 2021-06-17 17:44:53 -0700 | [diff] [blame] | 104 | }; |
| 105 | |
Bart Van Assche | 38ba64d | 2021-06-17 17:44:54 -0700 | [diff] [blame] | 106 | /* Count one event of type 'event_type' with I/O priority 'prio'. */ |
| 107 | #define dd_count(dd, event_type, prio) do { \ |
| 108 | struct io_stats *io_stats = get_cpu_ptr((dd)->stats); \ |
| 109 | \ |
| 110 | BUILD_BUG_ON(!__same_type((dd), struct deadline_data *)); \ |
| 111 | BUILD_BUG_ON(!__same_type((prio), enum dd_prio)); \ |
| 112 | local_inc(&io_stats->stats[(prio)].event_type); \ |
| 113 | put_cpu_ptr(io_stats); \ |
| 114 | } while (0) |
| 115 | |
| 116 | /* |
| 117 | * Returns the total number of dd_count(dd, event_type, prio) calls across all |
| 118 | * CPUs. No locking or barriers since it is fine if the returned sum is slightly |
| 119 | * outdated. |
| 120 | */ |
| 121 | #define dd_sum(dd, event_type, prio) ({ \ |
| 122 | unsigned int cpu; \ |
| 123 | u32 sum = 0; \ |
| 124 | \ |
| 125 | BUILD_BUG_ON(!__same_type((dd), struct deadline_data *)); \ |
| 126 | BUILD_BUG_ON(!__same_type((prio), enum dd_prio)); \ |
| 127 | for_each_present_cpu(cpu) \ |
| 128 | sum += local_read(&per_cpu_ptr((dd)->stats, cpu)-> \ |
| 129 | stats[(prio)].event_type); \ |
| 130 | sum; \ |
| 131 | }) |
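/*
 * Example: dd_count(dd, inserted, DD_BE_PRIO) increments the per-CPU
 * "inserted" counter for best-effort requests, and dd_sum(dd, inserted,
 * DD_BE_PRIO) later adds the per-CPU counters into an approximate total.
 * dd_queued() below combines two such sums (inserted - completed).
 */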
| 132 | |
Bart Van Assche | c807ab5 | 2021-06-17 17:44:53 -0700 | [diff] [blame] | 133 | /* Maps an I/O priority class to a deadline scheduler priority. */ |
| 134 | static const enum dd_prio ioprio_class_to_prio[] = { |
| 135 | [IOPRIO_CLASS_NONE] = DD_BE_PRIO, |
| 136 | [IOPRIO_CLASS_RT] = DD_RT_PRIO, |
| 137 | [IOPRIO_CLASS_BE] = DD_BE_PRIO, |
| 138 | [IOPRIO_CLASS_IDLE] = DD_IDLE_PRIO, |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 139 | }; |
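/*
 * Example: a request with an IOPRIO_CLASS_RT priority is queued in
 * dd->per_prio[DD_RT_PRIO]. Since dd_dispatch_request() tries the priority
 * levels in ascending order, such a request is dispatched before best-effort
 * (DD_BE_PRIO) and idle (DD_IDLE_PRIO) requests queued at the same time.
 */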
| 140 | |
| 141 | static inline struct rb_root * |
Bart Van Assche | c807ab5 | 2021-06-17 17:44:53 -0700 | [diff] [blame] | 142 | deadline_rb_root(struct dd_per_prio *per_prio, struct request *rq) |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 143 | { |
Bart Van Assche | c807ab5 | 2021-06-17 17:44:53 -0700 | [diff] [blame] | 144 | return &per_prio->sort_list[rq_data_dir(rq)]; |
| 145 | } |
| 146 | |
| 147 | /* |
| 148 | * Returns the I/O priority class (IOPRIO_CLASS_*) that has been assigned to a |
| 149 | * request. |
| 150 | */ |
| 151 | static u8 dd_rq_ioclass(struct request *rq) |
| 152 | { |
| 153 | return IOPRIO_PRIO_CLASS(req_get_ioprio(rq)); |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 154 | } |
| 155 | |
| 156 | /* |
| 157 | * get the request after `rq' in sector-sorted order |
| 158 | */ |
| 159 | static inline struct request * |
| 160 | deadline_latter_request(struct request *rq) |
| 161 | { |
| 162 | struct rb_node *node = rb_next(&rq->rb_node); |
| 163 | |
| 164 | if (node) |
| 165 | return rb_entry_rq(node); |
| 166 | |
| 167 | return NULL; |
| 168 | } |
| 169 | |
| 170 | static void |
Bart Van Assche | c807ab5 | 2021-06-17 17:44:53 -0700 | [diff] [blame] | 171 | deadline_add_rq_rb(struct dd_per_prio *per_prio, struct request *rq) |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 172 | { |
Bart Van Assche | c807ab5 | 2021-06-17 17:44:53 -0700 | [diff] [blame] | 173 | struct rb_root *root = deadline_rb_root(per_prio, rq); |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 174 | |
| 175 | elv_rb_add(root, rq); |
| 176 | } |
| 177 | |
| 178 | static inline void |
Bart Van Assche | c807ab5 | 2021-06-17 17:44:53 -0700 | [diff] [blame] | 179 | deadline_del_rq_rb(struct dd_per_prio *per_prio, struct request *rq) |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 180 | { |
Bart Van Assche | 004a26b | 2021-06-17 17:44:49 -0700 | [diff] [blame] | 181 | const enum dd_data_dir data_dir = rq_data_dir(rq); |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 182 | |
Bart Van Assche | c807ab5 | 2021-06-17 17:44:53 -0700 | [diff] [blame] | 183 | if (per_prio->next_rq[data_dir] == rq) |
| 184 | per_prio->next_rq[data_dir] = deadline_latter_request(rq); |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 185 | |
Bart Van Assche | c807ab5 | 2021-06-17 17:44:53 -0700 | [diff] [blame] | 186 | elv_rb_del(deadline_rb_root(per_prio, rq), rq); |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 187 | } |
| 188 | |
| 189 | /* |
| 190 | * remove rq from rbtree and fifo. |
| 191 | */ |
Bart Van Assche | c807ab5 | 2021-06-17 17:44:53 -0700 | [diff] [blame] | 192 | static void deadline_remove_request(struct request_queue *q, |
| 193 | struct dd_per_prio *per_prio, |
| 194 | struct request *rq) |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 195 | { |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 196 | list_del_init(&rq->queuelist); |
| 197 | |
| 198 | /* |
| 199 | * We might not be on the rbtree, if we are doing an insert merge |
| 200 | */ |
| 201 | if (!RB_EMPTY_NODE(&rq->rb_node)) |
Bart Van Assche | c807ab5 | 2021-06-17 17:44:53 -0700 | [diff] [blame] | 202 | deadline_del_rq_rb(per_prio, rq); |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 203 | |
| 204 | elv_rqhash_del(q, rq); |
| 205 | if (q->last_merge == rq) |
| 206 | q->last_merge = NULL; |
| 207 | } |
| 208 | |
| 209 | static void dd_request_merged(struct request_queue *q, struct request *req, |
Christoph Hellwig | 34fe7c0 | 2017-02-08 14:46:48 +0100 | [diff] [blame] | 210 | enum elv_merge type) |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 211 | { |
| 212 | struct deadline_data *dd = q->elevator->elevator_data; |
Bart Van Assche | c807ab5 | 2021-06-17 17:44:53 -0700 | [diff] [blame] | 213 | const u8 ioprio_class = dd_rq_ioclass(req); |
| 214 | const enum dd_prio prio = ioprio_class_to_prio[ioprio_class]; |
| 215 | struct dd_per_prio *per_prio = &dd->per_prio[prio]; |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 216 | |
| 217 | /* |
| 218 | * if the merge was a front merge, we need to reposition request |
| 219 | */ |
| 220 | if (type == ELEVATOR_FRONT_MERGE) { |
Bart Van Assche | c807ab5 | 2021-06-17 17:44:53 -0700 | [diff] [blame] | 221 | elv_rb_del(deadline_rb_root(per_prio, req), req); |
| 222 | deadline_add_rq_rb(per_prio, req); |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 223 | } |
| 224 | } |
| 225 | |
Bart Van Assche | 46eae2e | 2021-06-17 17:44:45 -0700 | [diff] [blame] | 226 | /* |
| 227 | * Callback function that is invoked after @next has been merged into @req. |
| 228 | */ |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 229 | static void dd_merged_requests(struct request_queue *q, struct request *req, |
| 230 | struct request *next) |
| 231 | { |
Bart Van Assche | 38ba64d | 2021-06-17 17:44:54 -0700 | [diff] [blame] | 232 | struct deadline_data *dd = q->elevator->elevator_data; |
Bart Van Assche | c807ab5 | 2021-06-17 17:44:53 -0700 | [diff] [blame] | 233 | const u8 ioprio_class = dd_rq_ioclass(next); |
| 234 | const enum dd_prio prio = ioprio_class_to_prio[ioprio_class]; |
| 235 | |
Bart Van Assche | 38ba64d | 2021-06-17 17:44:54 -0700 | [diff] [blame] | 236 | dd_count(dd, merged, prio); |
| 237 | |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 238 | /* |
| 239 | * if next expires before rq, assign its expire time to rq |
| 240 | * and move into next position (next will be deleted) in fifo |
| 241 | */ |
| 242 | if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) { |
| 243 | if (time_before((unsigned long)next->fifo_time, |
| 244 | (unsigned long)req->fifo_time)) { |
| 245 | list_move(&req->queuelist, &next->queuelist); |
| 246 | req->fifo_time = next->fifo_time; |
| 247 | } |
| 248 | } |
| 249 | |
| 250 | /* |
| 251 | * kill knowledge of next, this one is a goner |
| 252 | */ |
Bart Van Assche | c807ab5 | 2021-06-17 17:44:53 -0700 | [diff] [blame] | 253 | deadline_remove_request(q, &dd->per_prio[prio], next); |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 254 | } |
| 255 | |
| 256 | /* |
| 257 | * move an entry to dispatch queue |
| 258 | */ |
| 259 | static void |
Bart Van Assche | c807ab5 | 2021-06-17 17:44:53 -0700 | [diff] [blame] | 260 | deadline_move_request(struct deadline_data *dd, struct dd_per_prio *per_prio, |
| 261 | struct request *rq) |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 262 | { |
Bart Van Assche | 004a26b | 2021-06-17 17:44:49 -0700 | [diff] [blame] | 263 | const enum dd_data_dir data_dir = rq_data_dir(rq); |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 264 | |
Bart Van Assche | c807ab5 | 2021-06-17 17:44:53 -0700 | [diff] [blame] | 265 | per_prio->next_rq[data_dir] = deadline_latter_request(rq); |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 266 | |
| 267 | /* |
| 268 | * take it off the sort and fifo list |
| 269 | */ |
Bart Van Assche | c807ab5 | 2021-06-17 17:44:53 -0700 | [diff] [blame] | 270 | deadline_remove_request(rq->q, per_prio, rq); |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 271 | } |
| 272 | |
Bart Van Assche | 38ba64d | 2021-06-17 17:44:54 -0700 | [diff] [blame] | 273 | /* Number of requests queued for a given priority level. */ |
| 274 | static u32 dd_queued(struct deadline_data *dd, enum dd_prio prio) |
| 275 | { |
| 276 | return dd_sum(dd, inserted, prio) - dd_sum(dd, completed, prio); |
| 277 | } |
| 278 | |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 279 | /* |
| 280 | * deadline_check_fifo returns 0 if there are no expired requests on the fifo, |
 | 281 | * 1 otherwise. Requires !list_empty(&per_prio->fifo_list[data_dir]) |
| 282 | */ |
Bart Van Assche | c807ab5 | 2021-06-17 17:44:53 -0700 | [diff] [blame] | 283 | static inline int deadline_check_fifo(struct dd_per_prio *per_prio, |
Bart Van Assche | 004a26b | 2021-06-17 17:44:49 -0700 | [diff] [blame] | 284 | enum dd_data_dir data_dir) |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 285 | { |
Bart Van Assche | c807ab5 | 2021-06-17 17:44:53 -0700 | [diff] [blame] | 286 | struct request *rq = rq_entry_fifo(per_prio->fifo_list[data_dir].next); |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 287 | |
| 288 | /* |
| 289 | * rq is expired! |
| 290 | */ |
| 291 | if (time_after_eq(jiffies, (unsigned long)rq->fifo_time)) |
| 292 | return 1; |
| 293 | |
| 294 | return 0; |
| 295 | } |
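/*
 * Example: a read inserted while jiffies == 1000 gets fifo_time = 1000 +
 * dd->fifo_expire[DD_READ] (HZ / 2 by default), and deadline_check_fifo()
 * starts returning 1 once jiffies reaches that value. time_after_eq() keeps
 * the comparison correct across jiffies wrap-around.
 */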
| 296 | |
| 297 | /* |
Damien Le Moal | bf09ce5 | 2017-12-21 15:43:39 +0900 | [diff] [blame] | 298 | * For the specified data direction, return the next request to |
| 299 | * dispatch using arrival ordered lists. |
| 300 | */ |
| 301 | static struct request * |
Bart Van Assche | c807ab5 | 2021-06-17 17:44:53 -0700 | [diff] [blame] | 302 | deadline_fifo_request(struct deadline_data *dd, struct dd_per_prio *per_prio, |
| 303 | enum dd_data_dir data_dir) |
Damien Le Moal | bf09ce5 | 2017-12-21 15:43:39 +0900 | [diff] [blame] | 304 | { |
Damien Le Moal | 5700f69 | 2017-12-21 15:43:40 +0900 | [diff] [blame] | 305 | struct request *rq; |
| 306 | unsigned long flags; |
| 307 | |
Bart Van Assche | c807ab5 | 2021-06-17 17:44:53 -0700 | [diff] [blame] | 308 | if (list_empty(&per_prio->fifo_list[data_dir])) |
Damien Le Moal | bf09ce5 | 2017-12-21 15:43:39 +0900 | [diff] [blame] | 309 | return NULL; |
| 310 | |
Bart Van Assche | c807ab5 | 2021-06-17 17:44:53 -0700 | [diff] [blame] | 311 | rq = rq_entry_fifo(per_prio->fifo_list[data_dir].next); |
Bart Van Assche | 004a26b | 2021-06-17 17:44:49 -0700 | [diff] [blame] | 312 | if (data_dir == DD_READ || !blk_queue_is_zoned(rq->q)) |
Damien Le Moal | 5700f69 | 2017-12-21 15:43:40 +0900 | [diff] [blame] | 313 | return rq; |
| 314 | |
| 315 | /* |
| 316 | * Look for a write request that can be dispatched, that is one with |
| 317 | * an unlocked target zone. |
| 318 | */ |
| 319 | spin_lock_irqsave(&dd->zone_lock, flags); |
Bart Van Assche | c807ab5 | 2021-06-17 17:44:53 -0700 | [diff] [blame] | 320 | list_for_each_entry(rq, &per_prio->fifo_list[DD_WRITE], queuelist) { |
Damien Le Moal | 5700f69 | 2017-12-21 15:43:40 +0900 | [diff] [blame] | 321 | if (blk_req_can_dispatch_to_zone(rq)) |
| 322 | goto out; |
| 323 | } |
| 324 | rq = NULL; |
| 325 | out: |
| 326 | spin_unlock_irqrestore(&dd->zone_lock, flags); |
| 327 | |
| 328 | return rq; |
Damien Le Moal | bf09ce5 | 2017-12-21 15:43:39 +0900 | [diff] [blame] | 329 | } |
| 330 | |
| 331 | /* |
| 332 | * For the specified data direction, return the next request to |
| 333 | * dispatch using sector position sorted lists. |
| 334 | */ |
| 335 | static struct request * |
Bart Van Assche | c807ab5 | 2021-06-17 17:44:53 -0700 | [diff] [blame] | 336 | deadline_next_request(struct deadline_data *dd, struct dd_per_prio *per_prio, |
| 337 | enum dd_data_dir data_dir) |
Damien Le Moal | bf09ce5 | 2017-12-21 15:43:39 +0900 | [diff] [blame] | 338 | { |
Damien Le Moal | 5700f69 | 2017-12-21 15:43:40 +0900 | [diff] [blame] | 339 | struct request *rq; |
| 340 | unsigned long flags; |
| 341 | |
Bart Van Assche | c807ab5 | 2021-06-17 17:44:53 -0700 | [diff] [blame] | 342 | rq = per_prio->next_rq[data_dir]; |
Damien Le Moal | 5700f69 | 2017-12-21 15:43:40 +0900 | [diff] [blame] | 343 | if (!rq) |
| 344 | return NULL; |
| 345 | |
Bart Van Assche | 004a26b | 2021-06-17 17:44:49 -0700 | [diff] [blame] | 346 | if (data_dir == DD_READ || !blk_queue_is_zoned(rq->q)) |
Damien Le Moal | 5700f69 | 2017-12-21 15:43:40 +0900 | [diff] [blame] | 347 | return rq; |
| 348 | |
| 349 | /* |
| 350 | * Look for a write request that can be dispatched, that is one with |
| 351 | * an unlocked target zone. |
| 352 | */ |
| 353 | spin_lock_irqsave(&dd->zone_lock, flags); |
| 354 | while (rq) { |
| 355 | if (blk_req_can_dispatch_to_zone(rq)) |
| 356 | break; |
| 357 | rq = deadline_latter_request(rq); |
| 358 | } |
| 359 | spin_unlock_irqrestore(&dd->zone_lock, flags); |
| 360 | |
| 361 | return rq; |
Damien Le Moal | bf09ce5 | 2017-12-21 15:43:39 +0900 | [diff] [blame] | 362 | } |
| 363 | |
| 364 | /* |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 365 | * deadline_dispatch_requests selects the best request according to |
Jens Axboe | 7b05bf7 | 2021-08-26 12:59:44 -0600 | [diff] [blame^] | 366 | * read/write expire, fifo_batch, etc |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 367 | */ |
Bart Van Assche | c807ab5 | 2021-06-17 17:44:53 -0700 | [diff] [blame] | 368 | static struct request *__dd_dispatch_request(struct deadline_data *dd, |
Jens Axboe | 7b05bf7 | 2021-08-26 12:59:44 -0600 | [diff] [blame^] | 369 | struct dd_per_prio *per_prio) |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 370 | { |
Damien Le Moal | bf09ce5 | 2017-12-21 15:43:39 +0900 | [diff] [blame] | 371 | struct request *rq, *next_rq; |
Bart Van Assche | 004a26b | 2021-06-17 17:44:49 -0700 | [diff] [blame] | 372 | enum dd_data_dir data_dir; |
Bart Van Assche | 38ba64d | 2021-06-17 17:44:54 -0700 | [diff] [blame] | 373 | enum dd_prio prio; |
| 374 | u8 ioprio_class; |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 375 | |
Bart Van Assche | 3bd473f | 2021-06-17 17:44:46 -0700 | [diff] [blame] | 376 | lockdep_assert_held(&dd->lock); |
| 377 | |
Bart Van Assche | c807ab5 | 2021-06-17 17:44:53 -0700 | [diff] [blame] | 378 | if (!list_empty(&per_prio->dispatch)) { |
| 379 | rq = list_first_entry(&per_prio->dispatch, struct request, |
| 380 | queuelist); |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 381 | list_del_init(&rq->queuelist); |
| 382 | goto done; |
| 383 | } |
| 384 | |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 385 | /* |
| 386 | * batches are currently reads XOR writes |
| 387 | */ |
Bart Van Assche | c807ab5 | 2021-06-17 17:44:53 -0700 | [diff] [blame] | 388 | rq = deadline_next_request(dd, per_prio, dd->last_dir); |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 389 | if (rq && dd->batching < dd->fifo_batch) |
 | 390 | /* we have a next request and are still entitled to batch */ |
| 391 | goto dispatch_request; |
| 392 | |
| 393 | /* |
| 394 | * at this point we are not running a batch. select the appropriate |
| 395 | * data direction (read / write) |
| 396 | */ |
| 397 | |
Bart Van Assche | c807ab5 | 2021-06-17 17:44:53 -0700 | [diff] [blame] | 398 | if (!list_empty(&per_prio->fifo_list[DD_READ])) { |
| 399 | BUG_ON(RB_EMPTY_ROOT(&per_prio->sort_list[DD_READ])); |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 400 | |
Bart Van Assche | c807ab5 | 2021-06-17 17:44:53 -0700 | [diff] [blame] | 401 | if (deadline_fifo_request(dd, per_prio, DD_WRITE) && |
Damien Le Moal | 5700f69 | 2017-12-21 15:43:40 +0900 | [diff] [blame] | 402 | (dd->starved++ >= dd->writes_starved)) |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 403 | goto dispatch_writes; |
| 404 | |
Bart Van Assche | 004a26b | 2021-06-17 17:44:49 -0700 | [diff] [blame] | 405 | data_dir = DD_READ; |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 406 | |
| 407 | goto dispatch_find_request; |
| 408 | } |
| 409 | |
| 410 | /* |
 | 411 | * there are either no reads, or writes have been starved |
| 412 | */ |
| 413 | |
Bart Van Assche | c807ab5 | 2021-06-17 17:44:53 -0700 | [diff] [blame] | 414 | if (!list_empty(&per_prio->fifo_list[DD_WRITE])) { |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 415 | dispatch_writes: |
Bart Van Assche | c807ab5 | 2021-06-17 17:44:53 -0700 | [diff] [blame] | 416 | BUG_ON(RB_EMPTY_ROOT(&per_prio->sort_list[DD_WRITE])); |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 417 | |
| 418 | dd->starved = 0; |
| 419 | |
Bart Van Assche | 004a26b | 2021-06-17 17:44:49 -0700 | [diff] [blame] | 420 | data_dir = DD_WRITE; |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 421 | |
| 422 | goto dispatch_find_request; |
| 423 | } |
| 424 | |
| 425 | return NULL; |
| 426 | |
| 427 | dispatch_find_request: |
| 428 | /* |
| 429 | * we are not running a batch, find best request for selected data_dir |
| 430 | */ |
Bart Van Assche | c807ab5 | 2021-06-17 17:44:53 -0700 | [diff] [blame] | 431 | next_rq = deadline_next_request(dd, per_prio, data_dir); |
| 432 | if (deadline_check_fifo(per_prio, data_dir) || !next_rq) { |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 433 | /* |
| 434 | * A deadline has expired, the last request was in the other |
| 435 | * direction, or we have run out of higher-sectored requests. |
| 436 | * Start again from the request with the earliest expiry time. |
| 437 | */ |
Bart Van Assche | c807ab5 | 2021-06-17 17:44:53 -0700 | [diff] [blame] | 438 | rq = deadline_fifo_request(dd, per_prio, data_dir); |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 439 | } else { |
| 440 | /* |
| 441 | * The last req was the same dir and we have a next request in |
| 442 | * sort order. No expired requests so continue on from here. |
| 443 | */ |
Damien Le Moal | bf09ce5 | 2017-12-21 15:43:39 +0900 | [diff] [blame] | 444 | rq = next_rq; |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 445 | } |
| 446 | |
Damien Le Moal | 5700f69 | 2017-12-21 15:43:40 +0900 | [diff] [blame] | 447 | /* |
| 448 | * For a zoned block device, if we only have writes queued and none of |
| 449 | * them can be dispatched, rq will be NULL. |
| 450 | */ |
| 451 | if (!rq) |
| 452 | return NULL; |
| 453 | |
Bart Van Assche | d672d32 | 2021-06-17 17:44:52 -0700 | [diff] [blame] | 454 | dd->last_dir = data_dir; |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 455 | dd->batching = 0; |
| 456 | |
| 457 | dispatch_request: |
| 458 | /* |
| 459 | * rq is the selected appropriate request. |
| 460 | */ |
| 461 | dd->batching++; |
Bart Van Assche | c807ab5 | 2021-06-17 17:44:53 -0700 | [diff] [blame] | 462 | deadline_move_request(dd, per_prio, rq); |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 463 | done: |
Bart Van Assche | 38ba64d | 2021-06-17 17:44:54 -0700 | [diff] [blame] | 464 | ioprio_class = dd_rq_ioclass(rq); |
| 465 | prio = ioprio_class_to_prio[ioprio_class]; |
| 466 | dd_count(dd, dispatched, prio); |
Damien Le Moal | 5700f69 | 2017-12-21 15:43:40 +0900 | [diff] [blame] | 467 | /* |
| 468 | * If the request needs its target zone locked, do it. |
| 469 | */ |
| 470 | blk_req_zone_write_lock(rq); |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 471 | rq->rq_flags |= RQF_STARTED; |
| 472 | return rq; |
| 473 | } |
| 474 | |
Jens Axboe | ca11f20 | 2018-01-06 09:23:11 -0700 | [diff] [blame] | 475 | /* |
Bart Van Assche | 46eae2e | 2021-06-17 17:44:45 -0700 | [diff] [blame] | 476 | * Called from blk_mq_run_hw_queue() -> __blk_mq_sched_dispatch_requests(). |
| 477 | * |
Jens Axboe | ca11f20 | 2018-01-06 09:23:11 -0700 | [diff] [blame] | 478 | * One confusing aspect here is that we get called for a specific |
Damien Le Moal | 7211aef8 | 2018-12-17 15:14:05 +0900 | [diff] [blame] | 479 | * hardware queue, but we may return a request that is for a |
Jens Axboe | ca11f20 | 2018-01-06 09:23:11 -0700 | [diff] [blame] | 480 | * different hardware queue. This is because mq-deadline has shared |
| 481 | * state for all hardware queues, in terms of sorting, FIFOs, etc. |
| 482 | */ |
Jens Axboe | c13660a | 2017-01-26 12:40:07 -0700 | [diff] [blame] | 483 | static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx) |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 484 | { |
| 485 | struct deadline_data *dd = hctx->queue->elevator->elevator_data; |
Jens Axboe | 7b05bf7 | 2021-08-26 12:59:44 -0600 | [diff] [blame^] | 486 | struct request *rq; |
Bart Van Assche | c807ab5 | 2021-06-17 17:44:53 -0700 | [diff] [blame] | 487 | enum dd_prio prio; |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 488 | |
| 489 | spin_lock(&dd->lock); |
Bart Van Assche | fb926032 | 2021-06-17 17:44:56 -0700 | [diff] [blame] | 490 | for (prio = 0; prio <= DD_PRIO_MAX; prio++) { |
Jens Axboe | 7b05bf7 | 2021-08-26 12:59:44 -0600 | [diff] [blame^] | 491 | rq = __dd_dispatch_request(dd, &dd->per_prio[prio]); |
| 492 | if (rq) |
Bart Van Assche | c807ab5 | 2021-06-17 17:44:53 -0700 | [diff] [blame] | 493 | break; |
| 494 | } |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 495 | spin_unlock(&dd->lock); |
Jens Axboe | c13660a | 2017-01-26 12:40:07 -0700 | [diff] [blame] | 496 | |
| 497 | return rq; |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 498 | } |
| 499 | |
Bart Van Assche | 0775758 | 2021-06-17 17:44:51 -0700 | [diff] [blame] | 500 | /* |
| 501 | * Called by __blk_mq_alloc_request(). The shallow_depth value set by this |
| 502 | * function is used by __blk_mq_get_tag(). |
| 503 | */ |
| 504 | static void dd_limit_depth(unsigned int op, struct blk_mq_alloc_data *data) |
| 505 | { |
| 506 | struct deadline_data *dd = data->q->elevator->elevator_data; |
| 507 | |
| 508 | /* Do not throttle synchronous reads. */ |
| 509 | if (op_is_sync(op) && !op_is_write(op)) |
| 510 | return; |
| 511 | |
| 512 | /* |
| 513 | * Throttle asynchronous requests and writes such that these requests |
| 514 | * do not block the allocation of synchronous requests. |
| 515 | */ |
| 516 | data->shallow_depth = dd->async_depth; |
| 517 | } |
| 518 | |
| 519 | /* Called by blk_mq_update_nr_requests(). */ |
| 520 | static void dd_depth_updated(struct blk_mq_hw_ctx *hctx) |
| 521 | { |
| 522 | struct request_queue *q = hctx->queue; |
| 523 | struct deadline_data *dd = q->elevator->elevator_data; |
| 524 | struct blk_mq_tags *tags = hctx->sched_tags; |
| 525 | |
| 526 | dd->async_depth = max(1UL, 3 * q->nr_requests / 4); |
| 527 | |
| 528 | sbitmap_queue_min_shallow_depth(tags->bitmap_tags, dd->async_depth); |
| 529 | } |
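/*
 * Example: with q->nr_requests == 256, async_depth becomes 192, so
 * asynchronous requests and writes can consume at most roughly three
 * quarters of the scheduler tags while the rest remain available for
 * synchronous reads (see dd_limit_depth() above).
 */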
| 530 | |
| 531 | /* Called by blk_mq_init_hctx() and blk_mq_init_sched(). */ |
| 532 | static int dd_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx) |
| 533 | { |
| 534 | dd_depth_updated(hctx); |
| 535 | return 0; |
| 536 | } |
| 537 | |
Bart Van Assche | 3e9a99e | 2021-06-17 17:44:48 -0700 | [diff] [blame] | 538 | static void dd_exit_sched(struct elevator_queue *e) |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 539 | { |
| 540 | struct deadline_data *dd = e->elevator_data; |
Bart Van Assche | c807ab5 | 2021-06-17 17:44:53 -0700 | [diff] [blame] | 541 | enum dd_prio prio; |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 542 | |
Bart Van Assche | c807ab5 | 2021-06-17 17:44:53 -0700 | [diff] [blame] | 543 | for (prio = 0; prio <= DD_PRIO_MAX; prio++) { |
| 544 | struct dd_per_prio *per_prio = &dd->per_prio[prio]; |
| 545 | |
| 546 | WARN_ON_ONCE(!list_empty(&per_prio->fifo_list[DD_READ])); |
| 547 | WARN_ON_ONCE(!list_empty(&per_prio->fifo_list[DD_WRITE])); |
| 548 | } |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 549 | |
Bart Van Assche | 38ba64d | 2021-06-17 17:44:54 -0700 | [diff] [blame] | 550 | free_percpu(dd->stats); |
| 551 | |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 552 | kfree(dd); |
| 553 | } |
| 554 | |
| 555 | /* |
Tejun Heo | 0f78399 | 2021-08-11 07:41:45 -1000 | [diff] [blame] | 556 | * initialize elevator private data (deadline_data). |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 557 | */ |
Bart Van Assche | 3e9a99e | 2021-06-17 17:44:48 -0700 | [diff] [blame] | 558 | static int dd_init_sched(struct request_queue *q, struct elevator_type *e) |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 559 | { |
| 560 | struct deadline_data *dd; |
| 561 | struct elevator_queue *eq; |
Bart Van Assche | c807ab5 | 2021-06-17 17:44:53 -0700 | [diff] [blame] | 562 | enum dd_prio prio; |
| 563 | int ret = -ENOMEM; |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 564 | |
| 565 | eq = elevator_alloc(q, e); |
| 566 | if (!eq) |
Bart Van Assche | c807ab5 | 2021-06-17 17:44:53 -0700 | [diff] [blame] | 567 | return ret; |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 568 | |
| 569 | dd = kzalloc_node(sizeof(*dd), GFP_KERNEL, q->node); |
Bart Van Assche | c807ab5 | 2021-06-17 17:44:53 -0700 | [diff] [blame] | 570 | if (!dd) |
| 571 | goto put_eq; |
| 572 | |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 573 | eq->elevator_data = dd; |
| 574 | |
Bart Van Assche | 38ba64d | 2021-06-17 17:44:54 -0700 | [diff] [blame] | 575 | dd->stats = alloc_percpu_gfp(typeof(*dd->stats), |
| 576 | GFP_KERNEL | __GFP_ZERO); |
| 577 | if (!dd->stats) |
| 578 | goto free_dd; |
| 579 | |
Bart Van Assche | c807ab5 | 2021-06-17 17:44:53 -0700 | [diff] [blame] | 580 | for (prio = 0; prio <= DD_PRIO_MAX; prio++) { |
| 581 | struct dd_per_prio *per_prio = &dd->per_prio[prio]; |
| 582 | |
| 583 | INIT_LIST_HEAD(&per_prio->dispatch); |
| 584 | INIT_LIST_HEAD(&per_prio->fifo_list[DD_READ]); |
| 585 | INIT_LIST_HEAD(&per_prio->fifo_list[DD_WRITE]); |
| 586 | per_prio->sort_list[DD_READ] = RB_ROOT; |
| 587 | per_prio->sort_list[DD_WRITE] = RB_ROOT; |
| 588 | } |
Bart Van Assche | 004a26b | 2021-06-17 17:44:49 -0700 | [diff] [blame] | 589 | dd->fifo_expire[DD_READ] = read_expire; |
| 590 | dd->fifo_expire[DD_WRITE] = write_expire; |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 591 | dd->writes_starved = writes_starved; |
| 592 | dd->front_merges = 1; |
Bart Van Assche | d672d32 | 2021-06-17 17:44:52 -0700 | [diff] [blame] | 593 | dd->last_dir = DD_WRITE; |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 594 | dd->fifo_batch = fifo_batch; |
| 595 | spin_lock_init(&dd->lock); |
Damien Le Moal | 5700f69 | 2017-12-21 15:43:40 +0900 | [diff] [blame] | 596 | spin_lock_init(&dd->zone_lock); |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 597 | |
| 598 | q->elevator = eq; |
| 599 | return 0; |
Bart Van Assche | c807ab5 | 2021-06-17 17:44:53 -0700 | [diff] [blame] | 600 | |
Bart Van Assche | 38ba64d | 2021-06-17 17:44:54 -0700 | [diff] [blame] | 601 | free_dd: |
| 602 | kfree(dd); |
| 603 | |
Bart Van Assche | c807ab5 | 2021-06-17 17:44:53 -0700 | [diff] [blame] | 604 | put_eq: |
| 605 | kobject_put(&eq->kobj); |
| 606 | return ret; |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 607 | } |
| 608 | |
Bart Van Assche | 46eae2e | 2021-06-17 17:44:45 -0700 | [diff] [blame] | 609 | /* |
| 610 | * Try to merge @bio into an existing request. If @bio has been merged into |
| 611 | * an existing request, store the pointer to that request into *@rq. |
| 612 | */ |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 613 | static int dd_request_merge(struct request_queue *q, struct request **rq, |
| 614 | struct bio *bio) |
| 615 | { |
| 616 | struct deadline_data *dd = q->elevator->elevator_data; |
Bart Van Assche | c807ab5 | 2021-06-17 17:44:53 -0700 | [diff] [blame] | 617 | const u8 ioprio_class = IOPRIO_PRIO_CLASS(bio->bi_ioprio); |
| 618 | const enum dd_prio prio = ioprio_class_to_prio[ioprio_class]; |
| 619 | struct dd_per_prio *per_prio = &dd->per_prio[prio]; |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 620 | sector_t sector = bio_end_sector(bio); |
| 621 | struct request *__rq; |
| 622 | |
| 623 | if (!dd->front_merges) |
| 624 | return ELEVATOR_NO_MERGE; |
| 625 | |
Bart Van Assche | c807ab5 | 2021-06-17 17:44:53 -0700 | [diff] [blame] | 626 | __rq = elv_rb_find(&per_prio->sort_list[bio_data_dir(bio)], sector); |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 627 | if (__rq) { |
| 628 | BUG_ON(sector != blk_rq_pos(__rq)); |
| 629 | |
| 630 | if (elv_bio_merge_ok(__rq, bio)) { |
| 631 | *rq = __rq; |
| 632 | return ELEVATOR_FRONT_MERGE; |
| 633 | } |
| 634 | } |
| 635 | |
| 636 | return ELEVATOR_NO_MERGE; |
| 637 | } |
| 638 | |
Bart Van Assche | 46eae2e | 2021-06-17 17:44:45 -0700 | [diff] [blame] | 639 | /* |
| 640 | * Attempt to merge a bio into an existing request. This function is called |
| 641 | * before @bio is associated with a request. |
| 642 | */ |
Omar Sandoval | efed9a3 | 2021-05-10 17:05:35 -0700 | [diff] [blame] | 643 | static bool dd_bio_merge(struct request_queue *q, struct bio *bio, |
Christoph Hellwig | 14ccb66 | 2019-06-06 12:29:01 +0200 | [diff] [blame] | 644 | unsigned int nr_segs) |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 645 | { |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 646 | struct deadline_data *dd = q->elevator->elevator_data; |
Jens Axboe | e4d750c | 2017-02-03 09:48:28 -0700 | [diff] [blame] | 647 | struct request *free = NULL; |
| 648 | bool ret; |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 649 | |
| 650 | spin_lock(&dd->lock); |
Christoph Hellwig | 14ccb66 | 2019-06-06 12:29:01 +0200 | [diff] [blame] | 651 | ret = blk_mq_sched_try_merge(q, bio, nr_segs, &free); |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 652 | spin_unlock(&dd->lock); |
| 653 | |
Jens Axboe | e4d750c | 2017-02-03 09:48:28 -0700 | [diff] [blame] | 654 | if (free) |
| 655 | blk_mq_free_request(free); |
| 656 | |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 657 | return ret; |
| 658 | } |
| 659 | |
| 660 | /* |
| 661 | * add rq to rbtree and fifo |
| 662 | */ |
| 663 | static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, |
| 664 | bool at_head) |
| 665 | { |
| 666 | struct request_queue *q = hctx->queue; |
| 667 | struct deadline_data *dd = q->elevator->elevator_data; |
Bart Van Assche | 004a26b | 2021-06-17 17:44:49 -0700 | [diff] [blame] | 668 | const enum dd_data_dir data_dir = rq_data_dir(rq); |
Bart Van Assche | c807ab5 | 2021-06-17 17:44:53 -0700 | [diff] [blame] | 669 | u16 ioprio = req_get_ioprio(rq); |
| 670 | u8 ioprio_class = IOPRIO_PRIO_CLASS(ioprio); |
| 671 | struct dd_per_prio *per_prio; |
| 672 | enum dd_prio prio; |
Jan Kara | fd2ef39 | 2021-06-23 11:36:34 +0200 | [diff] [blame] | 673 | LIST_HEAD(free); |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 674 | |
Bart Van Assche | 3bd473f | 2021-06-17 17:44:46 -0700 | [diff] [blame] | 675 | lockdep_assert_held(&dd->lock); |
| 676 | |
Damien Le Moal | 5700f69 | 2017-12-21 15:43:40 +0900 | [diff] [blame] | 677 | /* |
| 678 | * This may be a requeue of a write request that has locked its |
| 679 | * target zone. If it is the case, this releases the zone lock. |
| 680 | */ |
| 681 | blk_req_zone_write_unlock(rq); |
| 682 | |
Bart Van Assche | c807ab5 | 2021-06-17 17:44:53 -0700 | [diff] [blame] | 683 | prio = ioprio_class_to_prio[ioprio_class]; |
Bart Van Assche | 38ba64d | 2021-06-17 17:44:54 -0700 | [diff] [blame] | 684 | dd_count(dd, inserted, prio); |
Bart Van Assche | b6d2b05 | 2021-08-24 10:05:20 -0700 | [diff] [blame] | 685 | rq->elv.priv[0] = (void *)(uintptr_t)1; |
Bart Van Assche | c807ab5 | 2021-06-17 17:44:53 -0700 | [diff] [blame] | 686 | |
Jan Kara | fd2ef39 | 2021-06-23 11:36:34 +0200 | [diff] [blame] | 687 | if (blk_mq_sched_try_insert_merge(q, rq, &free)) { |
| 688 | blk_mq_free_requests(&free); |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 689 | return; |
Jan Kara | fd2ef39 | 2021-06-23 11:36:34 +0200 | [diff] [blame] | 690 | } |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 691 | |
Chaitanya Kulkarni | b357e4a | 2021-02-21 21:29:59 -0800 | [diff] [blame] | 692 | trace_block_rq_insert(rq); |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 693 | |
Bart Van Assche | c807ab5 | 2021-06-17 17:44:53 -0700 | [diff] [blame] | 694 | per_prio = &dd->per_prio[prio]; |
Lin Feng | 7687b38 | 2021-04-15 11:43:26 +0800 | [diff] [blame] | 695 | if (at_head) { |
Bart Van Assche | c807ab5 | 2021-06-17 17:44:53 -0700 | [diff] [blame] | 696 | list_add(&rq->queuelist, &per_prio->dispatch); |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 697 | } else { |
Bart Van Assche | c807ab5 | 2021-06-17 17:44:53 -0700 | [diff] [blame] | 698 | deadline_add_rq_rb(per_prio, rq); |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 699 | |
| 700 | if (rq_mergeable(rq)) { |
| 701 | elv_rqhash_add(q, rq); |
| 702 | if (!q->last_merge) |
| 703 | q->last_merge = rq; |
| 704 | } |
| 705 | |
| 706 | /* |
| 707 | * set expire time and add to fifo list |
| 708 | */ |
| 709 | rq->fifo_time = jiffies + dd->fifo_expire[data_dir]; |
Bart Van Assche | c807ab5 | 2021-06-17 17:44:53 -0700 | [diff] [blame] | 710 | list_add_tail(&rq->queuelist, &per_prio->fifo_list[data_dir]); |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 711 | } |
| 712 | } |
| 713 | |
Bart Van Assche | 46eae2e | 2021-06-17 17:44:45 -0700 | [diff] [blame] | 714 | /* |
| 715 | * Called from blk_mq_sched_insert_request() or blk_mq_sched_insert_requests(). |
| 716 | */ |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 717 | static void dd_insert_requests(struct blk_mq_hw_ctx *hctx, |
| 718 | struct list_head *list, bool at_head) |
| 719 | { |
| 720 | struct request_queue *q = hctx->queue; |
| 721 | struct deadline_data *dd = q->elevator->elevator_data; |
| 722 | |
| 723 | spin_lock(&dd->lock); |
| 724 | while (!list_empty(list)) { |
| 725 | struct request *rq; |
| 726 | |
| 727 | rq = list_first_entry(list, struct request, queuelist); |
| 728 | list_del_init(&rq->queuelist); |
| 729 | dd_insert_request(hctx, rq, at_head); |
| 730 | } |
| 731 | spin_unlock(&dd->lock); |
| 732 | } |
| 733 | |
Bart Van Assche | b6d2b05 | 2021-08-24 10:05:20 -0700 | [diff] [blame] | 734 | /* Callback from inside blk_mq_rq_ctx_init(). */ |
Christoph Hellwig | 5d9c305 | 2020-05-29 15:53:08 +0200 | [diff] [blame] | 735 | static void dd_prepare_request(struct request *rq) |
Damien Le Moal | f3bc78d | 2018-02-28 09:35:29 -0800 | [diff] [blame] | 736 | { |
Bart Van Assche | b6d2b05 | 2021-08-24 10:05:20 -0700 | [diff] [blame] | 737 | rq->elv.priv[0] = NULL; |
Damien Le Moal | f3bc78d | 2018-02-28 09:35:29 -0800 | [diff] [blame] | 738 | } |
| 739 | |
| 740 | /* |
Bart Van Assche | 46eae2e | 2021-06-17 17:44:45 -0700 | [diff] [blame] | 741 | * Callback from inside blk_mq_free_request(). |
| 742 | * |
Damien Le Moal | 5700f69 | 2017-12-21 15:43:40 +0900 | [diff] [blame] | 743 | * For zoned block devices, write unlock the target zone of |
| 744 | * completed write requests. Do this while holding the zone lock |
| 745 | * spinlock so that the zone is never unlocked while deadline_fifo_request() |
Damien Le Moal | f3bc78d | 2018-02-28 09:35:29 -0800 | [diff] [blame] | 746 | * or deadline_next_request() are executing. This function is called for |
| 747 | * all requests, whether or not these requests complete successfully. |
Damien Le Moal | cb8acab | 2019-08-28 13:40:20 +0900 | [diff] [blame] | 748 | * |
| 749 | * For a zoned block device, __dd_dispatch_request() may have stopped |
| 750 | * dispatching requests if all the queued requests are write requests directed |
| 751 | * at zones that are already locked due to on-going write requests. To ensure |
| 752 | * write request dispatch progress in this case, mark the queue as needing a |
| 753 | * restart to ensure that the queue is run again after completion of the |
| 754 | * request and zones being unlocked. |
Damien Le Moal | 5700f69 | 2017-12-21 15:43:40 +0900 | [diff] [blame] | 755 | */ |
Damien Le Moal | f3bc78d | 2018-02-28 09:35:29 -0800 | [diff] [blame] | 756 | static void dd_finish_request(struct request *rq) |
Damien Le Moal | 5700f69 | 2017-12-21 15:43:40 +0900 | [diff] [blame] | 757 | { |
| 758 | struct request_queue *q = rq->q; |
Bart Van Assche | c807ab5 | 2021-06-17 17:44:53 -0700 | [diff] [blame] | 759 | struct deadline_data *dd = q->elevator->elevator_data; |
| 760 | const u8 ioprio_class = dd_rq_ioclass(rq); |
| 761 | const enum dd_prio prio = ioprio_class_to_prio[ioprio_class]; |
| 762 | struct dd_per_prio *per_prio = &dd->per_prio[prio]; |
Damien Le Moal | 5700f69 | 2017-12-21 15:43:40 +0900 | [diff] [blame] | 763 | |
Bart Van Assche | b6d2b05 | 2021-08-24 10:05:20 -0700 | [diff] [blame] | 764 | /* |
| 765 | * The block layer core may call dd_finish_request() without having |
| 766 | * called dd_insert_requests(). Hence only update statistics for |
| 767 | * requests for which dd_insert_requests() has been called. See also |
| 768 | * blk_mq_request_bypass_insert(). |
| 769 | */ |
| 770 | if (rq->elv.priv[0]) |
| 771 | dd_count(dd, completed, prio); |
Bart Van Assche | 38ba64d | 2021-06-17 17:44:54 -0700 | [diff] [blame] | 772 | |
Damien Le Moal | 5700f69 | 2017-12-21 15:43:40 +0900 | [diff] [blame] | 773 | if (blk_queue_is_zoned(q)) { |
Damien Le Moal | 5700f69 | 2017-12-21 15:43:40 +0900 | [diff] [blame] | 774 | unsigned long flags; |
| 775 | |
| 776 | spin_lock_irqsave(&dd->zone_lock, flags); |
| 777 | blk_req_zone_write_unlock(rq); |
Bart Van Assche | c807ab5 | 2021-06-17 17:44:53 -0700 | [diff] [blame] | 778 | if (!list_empty(&per_prio->fifo_list[DD_WRITE])) |
Damien Le Moal | cb8acab | 2019-08-28 13:40:20 +0900 | [diff] [blame] | 779 | blk_mq_sched_mark_restart_hctx(rq->mq_hctx); |
Damien Le Moal | 5700f69 | 2017-12-21 15:43:40 +0900 | [diff] [blame] | 780 | spin_unlock_irqrestore(&dd->zone_lock, flags); |
| 781 | } |
| 782 | } |
| 783 | |
Bart Van Assche | c807ab5 | 2021-06-17 17:44:53 -0700 | [diff] [blame] | 784 | static bool dd_has_work_for_prio(struct dd_per_prio *per_prio) |
| 785 | { |
| 786 | return !list_empty_careful(&per_prio->dispatch) || |
| 787 | !list_empty_careful(&per_prio->fifo_list[DD_READ]) || |
| 788 | !list_empty_careful(&per_prio->fifo_list[DD_WRITE]); |
| 789 | } |
| 790 | |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 791 | static bool dd_has_work(struct blk_mq_hw_ctx *hctx) |
| 792 | { |
| 793 | struct deadline_data *dd = hctx->queue->elevator->elevator_data; |
Bart Van Assche | c807ab5 | 2021-06-17 17:44:53 -0700 | [diff] [blame] | 794 | enum dd_prio prio; |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 795 | |
Bart Van Assche | c807ab5 | 2021-06-17 17:44:53 -0700 | [diff] [blame] | 796 | for (prio = 0; prio <= DD_PRIO_MAX; prio++) |
| 797 | if (dd_has_work_for_prio(&dd->per_prio[prio])) |
| 798 | return true; |
| 799 | |
| 800 | return false; |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 801 | } |
| 802 | |
| 803 | /* |
| 804 | * sysfs parts below |
| 805 | */ |
Bart Van Assche | d6d7f01 | 2021-06-17 17:44:50 -0700 | [diff] [blame] | 806 | #define SHOW_INT(__FUNC, __VAR) \ |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 807 | static ssize_t __FUNC(struct elevator_queue *e, char *page) \ |
| 808 | { \ |
| 809 | struct deadline_data *dd = e->elevator_data; \ |
Bart Van Assche | d6d7f01 | 2021-06-17 17:44:50 -0700 | [diff] [blame] | 810 | \ |
| 811 | return sysfs_emit(page, "%d\n", __VAR); \ |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 812 | } |
Bart Van Assche | d6d7f01 | 2021-06-17 17:44:50 -0700 | [diff] [blame] | 813 | #define SHOW_JIFFIES(__FUNC, __VAR) SHOW_INT(__FUNC, jiffies_to_msecs(__VAR)) |
| 814 | SHOW_JIFFIES(deadline_read_expire_show, dd->fifo_expire[DD_READ]); |
| 815 | SHOW_JIFFIES(deadline_write_expire_show, dd->fifo_expire[DD_WRITE]); |
| 816 | SHOW_INT(deadline_writes_starved_show, dd->writes_starved); |
| 817 | SHOW_INT(deadline_front_merges_show, dd->front_merges); |
Bart Van Assche | 0775758 | 2021-06-17 17:44:51 -0700 | [diff] [blame] | 818 | SHOW_INT(deadline_async_depth_show, dd->async_depth); |
Bart Van Assche | d6d7f01 | 2021-06-17 17:44:50 -0700 | [diff] [blame] | 819 | SHOW_INT(deadline_fifo_batch_show, dd->fifo_batch); |
| 820 | #undef SHOW_INT |
| 821 | #undef SHOW_JIFFIES |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 822 | |
| 823 | #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \ |
| 824 | static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \ |
| 825 | { \ |
| 826 | struct deadline_data *dd = e->elevator_data; \ |
Bart Van Assche | d6d7f01 | 2021-06-17 17:44:50 -0700 | [diff] [blame] | 827 | int __data, __ret; \ |
| 828 | \ |
| 829 | __ret = kstrtoint(page, 0, &__data); \ |
| 830 | if (__ret < 0) \ |
| 831 | return __ret; \ |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 832 | if (__data < (MIN)) \ |
| 833 | __data = (MIN); \ |
| 834 | else if (__data > (MAX)) \ |
| 835 | __data = (MAX); \ |
Bart Van Assche | d6d7f01 | 2021-06-17 17:44:50 -0700 | [diff] [blame] | 836 | *(__PTR) = __CONV(__data); \ |
weiping zhang | 235f8da | 2017-08-25 01:11:33 +0800 | [diff] [blame] | 837 | return count; \ |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 838 | } |
Bart Van Assche | d6d7f01 | 2021-06-17 17:44:50 -0700 | [diff] [blame] | 839 | #define STORE_INT(__FUNC, __PTR, MIN, MAX) \ |
| 840 | STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, ) |
| 841 | #define STORE_JIFFIES(__FUNC, __PTR, MIN, MAX) \ |
| 842 | STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, msecs_to_jiffies) |
| 843 | STORE_JIFFIES(deadline_read_expire_store, &dd->fifo_expire[DD_READ], 0, INT_MAX); |
| 844 | STORE_JIFFIES(deadline_write_expire_store, &dd->fifo_expire[DD_WRITE], 0, INT_MAX); |
| 845 | STORE_INT(deadline_writes_starved_store, &dd->writes_starved, INT_MIN, INT_MAX); |
| 846 | STORE_INT(deadline_front_merges_store, &dd->front_merges, 0, 1); |
Bart Van Assche | 0775758 | 2021-06-17 17:44:51 -0700 | [diff] [blame] | 847 | STORE_INT(deadline_async_depth_store, &dd->async_depth, 1, INT_MAX); |
Bart Van Assche | d6d7f01 | 2021-06-17 17:44:50 -0700 | [diff] [blame] | 848 | STORE_INT(deadline_fifo_batch_store, &dd->fifo_batch, 0, INT_MAX); |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 849 | #undef STORE_FUNCTION |
Bart Van Assche | d6d7f01 | 2021-06-17 17:44:50 -0700 | [diff] [blame] | 850 | #undef STORE_INT |
| 851 | #undef STORE_JIFFIES |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 852 | |
| 853 | #define DD_ATTR(name) \ |
Joe Perches | 5657a81 | 2018-05-24 13:38:59 -0600 | [diff] [blame] | 854 | __ATTR(name, 0644, deadline_##name##_show, deadline_##name##_store) |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 855 | |
| 856 | static struct elv_fs_entry deadline_attrs[] = { |
| 857 | DD_ATTR(read_expire), |
| 858 | DD_ATTR(write_expire), |
| 859 | DD_ATTR(writes_starved), |
| 860 | DD_ATTR(front_merges), |
Bart Van Assche | 0775758 | 2021-06-17 17:44:51 -0700 | [diff] [blame] | 861 | DD_ATTR(async_depth), |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 862 | DD_ATTR(fifo_batch), |
| 863 | __ATTR_NULL |
| 864 | }; |
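/*
 * These attributes appear under /sys/block/<disk>/queue/iosched/ while
 * mq-deadline is the active scheduler. For example, reading read_expire
 * reports the read deadline in milliseconds and writing it updates
 * dd->fifo_expire[DD_READ] (converted to jiffies by the store macro).
 */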
| 865 | |
Omar Sandoval | daaadb3 | 2017-05-04 00:31:34 -0700 | [diff] [blame] | 866 | #ifdef CONFIG_BLK_DEBUG_FS |
Bart Van Assche | c807ab5 | 2021-06-17 17:44:53 -0700 | [diff] [blame] | 867 | #define DEADLINE_DEBUGFS_DDIR_ATTRS(prio, data_dir, name) \ |
Omar Sandoval | daaadb3 | 2017-05-04 00:31:34 -0700 | [diff] [blame] | 868 | static void *deadline_##name##_fifo_start(struct seq_file *m, \ |
| 869 | loff_t *pos) \ |
| 870 | __acquires(&dd->lock) \ |
| 871 | { \ |
| 872 | struct request_queue *q = m->private; \ |
| 873 | struct deadline_data *dd = q->elevator->elevator_data; \ |
Bart Van Assche | c807ab5 | 2021-06-17 17:44:53 -0700 | [diff] [blame] | 874 | struct dd_per_prio *per_prio = &dd->per_prio[prio]; \ |
Omar Sandoval | daaadb3 | 2017-05-04 00:31:34 -0700 | [diff] [blame] | 875 | \ |
| 876 | spin_lock(&dd->lock); \ |
Bart Van Assche | c807ab5 | 2021-06-17 17:44:53 -0700 | [diff] [blame] | 877 | return seq_list_start(&per_prio->fifo_list[data_dir], *pos); \ |
Omar Sandoval | daaadb3 | 2017-05-04 00:31:34 -0700 | [diff] [blame] | 878 | } \ |
| 879 | \ |
| 880 | static void *deadline_##name##_fifo_next(struct seq_file *m, void *v, \ |
| 881 | loff_t *pos) \ |
| 882 | { \ |
| 883 | struct request_queue *q = m->private; \ |
| 884 | struct deadline_data *dd = q->elevator->elevator_data; \ |
Bart Van Assche | c807ab5 | 2021-06-17 17:44:53 -0700 | [diff] [blame] | 885 | struct dd_per_prio *per_prio = &dd->per_prio[prio]; \ |
Omar Sandoval | daaadb3 | 2017-05-04 00:31:34 -0700 | [diff] [blame] | 886 | \ |
Bart Van Assche | c807ab5 | 2021-06-17 17:44:53 -0700 | [diff] [blame] | 887 | return seq_list_next(v, &per_prio->fifo_list[data_dir], pos); \ |
Omar Sandoval | daaadb3 | 2017-05-04 00:31:34 -0700 | [diff] [blame] | 888 | } \ |
| 889 | \ |
| 890 | static void deadline_##name##_fifo_stop(struct seq_file *m, void *v) \ |
| 891 | __releases(&dd->lock) \ |
| 892 | { \ |
| 893 | struct request_queue *q = m->private; \ |
| 894 | struct deadline_data *dd = q->elevator->elevator_data; \ |
| 895 | \ |
| 896 | spin_unlock(&dd->lock); \ |
| 897 | } \ |
| 898 | \ |
| 899 | static const struct seq_operations deadline_##name##_fifo_seq_ops = { \ |
| 900 | .start = deadline_##name##_fifo_start, \ |
| 901 | .next = deadline_##name##_fifo_next, \ |
| 902 | .stop = deadline_##name##_fifo_stop, \ |
| 903 | .show = blk_mq_debugfs_rq_show, \ |
| 904 | }; \ |
| 905 | \ |
| 906 | static int deadline_##name##_next_rq_show(void *data, \ |
| 907 | struct seq_file *m) \ |
| 908 | { \ |
| 909 | struct request_queue *q = data; \ |
| 910 | struct deadline_data *dd = q->elevator->elevator_data; \ |
Bart Van Assche | c807ab5 | 2021-06-17 17:44:53 -0700 | [diff] [blame] | 911 | struct dd_per_prio *per_prio = &dd->per_prio[prio]; \ |
| 912 | struct request *rq = per_prio->next_rq[data_dir]; \ |
Omar Sandoval | daaadb3 | 2017-05-04 00:31:34 -0700 | [diff] [blame] | 913 | \ |
| 914 | if (rq) \ |
| 915 | __blk_mq_debugfs_rq_show(m, rq); \ |
| 916 | return 0; \ |
| 917 | } |
Bart Van Assche | c807ab5 | 2021-06-17 17:44:53 -0700 | [diff] [blame] | 918 | |
| 919 | DEADLINE_DEBUGFS_DDIR_ATTRS(DD_RT_PRIO, DD_READ, read0); |
| 920 | DEADLINE_DEBUGFS_DDIR_ATTRS(DD_RT_PRIO, DD_WRITE, write0); |
| 921 | DEADLINE_DEBUGFS_DDIR_ATTRS(DD_BE_PRIO, DD_READ, read1); |
| 922 | DEADLINE_DEBUGFS_DDIR_ATTRS(DD_BE_PRIO, DD_WRITE, write1); |
| 923 | DEADLINE_DEBUGFS_DDIR_ATTRS(DD_IDLE_PRIO, DD_READ, read2); |
| 924 | DEADLINE_DEBUGFS_DDIR_ATTRS(DD_IDLE_PRIO, DD_WRITE, write2); |
Omar Sandoval | daaadb3 | 2017-05-04 00:31:34 -0700 | [diff] [blame] | 925 | #undef DEADLINE_DEBUGFS_DDIR_ATTRS |
| 926 | |
| 927 | static int deadline_batching_show(void *data, struct seq_file *m) |
| 928 | { |
| 929 | struct request_queue *q = data; |
| 930 | struct deadline_data *dd = q->elevator->elevator_data; |
| 931 | |
| 932 | seq_printf(m, "%u\n", dd->batching); |
| 933 | return 0; |
| 934 | } |
| 935 | |
| 936 | static int deadline_starved_show(void *data, struct seq_file *m) |
| 937 | { |
| 938 | struct request_queue *q = data; |
| 939 | struct deadline_data *dd = q->elevator->elevator_data; |
| 940 | |
| 941 | seq_printf(m, "%u\n", dd->starved); |
| 942 | return 0; |
| 943 | } |
| 944 | |
Bart Van Assche | 0775758 | 2021-06-17 17:44:51 -0700 | [diff] [blame] | 945 | static int dd_async_depth_show(void *data, struct seq_file *m) |
| 946 | { |
| 947 | struct request_queue *q = data; |
| 948 | struct deadline_data *dd = q->elevator->elevator_data; |
| 949 | |
| 950 | seq_printf(m, "%u\n", dd->async_depth); |
| 951 | return 0; |
| 952 | } |
| 953 | |
Bart Van Assche | 38ba64d | 2021-06-17 17:44:54 -0700 | [diff] [blame] | 954 | static int dd_queued_show(void *data, struct seq_file *m) |
| 955 | { |
| 956 | struct request_queue *q = data; |
| 957 | struct deadline_data *dd = q->elevator->elevator_data; |
| 958 | |
| 959 | seq_printf(m, "%u %u %u\n", dd_queued(dd, DD_RT_PRIO), |
| 960 | dd_queued(dd, DD_BE_PRIO), |
| 961 | dd_queued(dd, DD_IDLE_PRIO)); |
| 962 | return 0; |
| 963 | } |
| 964 | |
| 965 | /* Number of requests owned by the block driver for a given priority. */ |
| 966 | static u32 dd_owned_by_driver(struct deadline_data *dd, enum dd_prio prio) |
| 967 | { |
| 968 | return dd_sum(dd, dispatched, prio) + dd_sum(dd, merged, prio) |
| 969 | - dd_sum(dd, completed, prio); |
| 970 | } |
| 971 | |
| 972 | static int dd_owned_by_driver_show(void *data, struct seq_file *m) |
| 973 | { |
| 974 | struct request_queue *q = data; |
| 975 | struct deadline_data *dd = q->elevator->elevator_data; |
| 976 | |
| 977 | seq_printf(m, "%u %u %u\n", dd_owned_by_driver(dd, DD_RT_PRIO), |
| 978 | dd_owned_by_driver(dd, DD_BE_PRIO), |
| 979 | dd_owned_by_driver(dd, DD_IDLE_PRIO)); |
| 980 | return 0; |
| 981 | } |
| 982 | |
Bart Van Assche | c807ab5 | 2021-06-17 17:44:53 -0700 | [diff] [blame] | 983 | #define DEADLINE_DISPATCH_ATTR(prio) \ |
| 984 | static void *deadline_dispatch##prio##_start(struct seq_file *m, \ |
| 985 | loff_t *pos) \ |
| 986 | __acquires(&dd->lock) \ |
| 987 | { \ |
| 988 | struct request_queue *q = m->private; \ |
| 989 | struct deadline_data *dd = q->elevator->elevator_data; \ |
| 990 | struct dd_per_prio *per_prio = &dd->per_prio[prio]; \ |
| 991 | \ |
| 992 | spin_lock(&dd->lock); \ |
| 993 | return seq_list_start(&per_prio->dispatch, *pos); \ |
| 994 | } \ |
| 995 | \ |
| 996 | static void *deadline_dispatch##prio##_next(struct seq_file *m, \ |
| 997 | void *v, loff_t *pos) \ |
| 998 | { \ |
| 999 | struct request_queue *q = m->private; \ |
| 1000 | struct deadline_data *dd = q->elevator->elevator_data; \ |
| 1001 | struct dd_per_prio *per_prio = &dd->per_prio[prio]; \ |
| 1002 | \ |
| 1003 | return seq_list_next(v, &per_prio->dispatch, pos); \ |
| 1004 | } \ |
| 1005 | \ |
| 1006 | static void deadline_dispatch##prio##_stop(struct seq_file *m, void *v) \ |
| 1007 | __releases(&dd->lock) \ |
| 1008 | { \ |
| 1009 | struct request_queue *q = m->private; \ |
| 1010 | struct deadline_data *dd = q->elevator->elevator_data; \ |
| 1011 | \ |
| 1012 | spin_unlock(&dd->lock); \ |
| 1013 | } \ |
| 1014 | \ |
| 1015 | static const struct seq_operations deadline_dispatch##prio##_seq_ops = { \ |
| 1016 | .start = deadline_dispatch##prio##_start, \ |
| 1017 | .next = deadline_dispatch##prio##_next, \ |
| 1018 | .stop = deadline_dispatch##prio##_stop, \ |
| 1019 | .show = blk_mq_debugfs_rq_show, \ |
Omar Sandoval | daaadb3 | 2017-05-04 00:31:34 -0700 | [diff] [blame] | 1020 | } |
| 1021 | |
Bart Van Assche | c807ab5 | 2021-06-17 17:44:53 -0700 | [diff] [blame] | 1022 | DEADLINE_DISPATCH_ATTR(0); |
| 1023 | DEADLINE_DISPATCH_ATTR(1); |
| 1024 | DEADLINE_DISPATCH_ATTR(2); |
| 1025 | #undef DEADLINE_DISPATCH_ATTR |
Omar Sandoval | daaadb3 | 2017-05-04 00:31:34 -0700 | [diff] [blame] | 1026 | |
Bart Van Assche | c807ab5 | 2021-06-17 17:44:53 -0700 | [diff] [blame] | 1027 | #define DEADLINE_QUEUE_DDIR_ATTRS(name) \ |
| 1028 | {#name "_fifo_list", 0400, \ |
| 1029 | .seq_ops = &deadline_##name##_fifo_seq_ops} |
| 1030 | #define DEADLINE_NEXT_RQ_ATTR(name) \ |
Omar Sandoval | daaadb3 | 2017-05-04 00:31:34 -0700 | [diff] [blame] | 1031 | {#name "_next_rq", 0400, deadline_##name##_next_rq_show} |
| 1032 | static const struct blk_mq_debugfs_attr deadline_queue_debugfs_attrs[] = { |
Bart Van Assche | c807ab5 | 2021-06-17 17:44:53 -0700 | [diff] [blame] | 1033 | DEADLINE_QUEUE_DDIR_ATTRS(read0), |
| 1034 | DEADLINE_QUEUE_DDIR_ATTRS(write0), |
| 1035 | DEADLINE_QUEUE_DDIR_ATTRS(read1), |
| 1036 | DEADLINE_QUEUE_DDIR_ATTRS(write1), |
| 1037 | DEADLINE_QUEUE_DDIR_ATTRS(read2), |
| 1038 | DEADLINE_QUEUE_DDIR_ATTRS(write2), |
| 1039 | DEADLINE_NEXT_RQ_ATTR(read0), |
| 1040 | DEADLINE_NEXT_RQ_ATTR(write0), |
| 1041 | DEADLINE_NEXT_RQ_ATTR(read1), |
| 1042 | DEADLINE_NEXT_RQ_ATTR(write1), |
| 1043 | DEADLINE_NEXT_RQ_ATTR(read2), |
| 1044 | DEADLINE_NEXT_RQ_ATTR(write2), |
Omar Sandoval | daaadb3 | 2017-05-04 00:31:34 -0700 | [diff] [blame] | 1045 | {"batching", 0400, deadline_batching_show}, |
| 1046 | {"starved", 0400, deadline_starved_show}, |
Bart Van Assche | 0775758 | 2021-06-17 17:44:51 -0700 | [diff] [blame] | 1047 | {"async_depth", 0400, dd_async_depth_show}, |
Bart Van Assche | c807ab5 | 2021-06-17 17:44:53 -0700 | [diff] [blame] | 1048 | {"dispatch0", 0400, .seq_ops = &deadline_dispatch0_seq_ops}, |
| 1049 | {"dispatch1", 0400, .seq_ops = &deadline_dispatch1_seq_ops}, |
| 1050 | {"dispatch2", 0400, .seq_ops = &deadline_dispatch2_seq_ops}, |
Bart Van Assche | 38ba64d | 2021-06-17 17:44:54 -0700 | [diff] [blame] | 1051 | {"owned_by_driver", 0400, dd_owned_by_driver_show}, |
| 1052 | {"queued", 0400, dd_queued_show}, |
Omar Sandoval | daaadb3 | 2017-05-04 00:31:34 -0700 | [diff] [blame] | 1053 | {}, |
| 1054 | }; |
| 1055 | #undef DEADLINE_QUEUE_DDIR_ATTRS |
| 1056 | #endif |
| 1057 | |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 1058 | static struct elevator_type mq_deadline = { |
Jens Axboe | f9cd4bf | 2018-11-01 16:41:41 -0600 | [diff] [blame] | 1059 | .ops = { |
Bart Van Assche | 0775758 | 2021-06-17 17:44:51 -0700 | [diff] [blame] | 1060 | .depth_updated = dd_depth_updated, |
| 1061 | .limit_depth = dd_limit_depth, |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 1062 | .insert_requests = dd_insert_requests, |
Jens Axboe | c13660a | 2017-01-26 12:40:07 -0700 | [diff] [blame] | 1063 | .dispatch_request = dd_dispatch_request, |
Damien Le Moal | f3bc78d | 2018-02-28 09:35:29 -0800 | [diff] [blame] | 1064 | .prepare_request = dd_prepare_request, |
| 1065 | .finish_request = dd_finish_request, |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 1066 | .next_request = elv_rb_latter_request, |
| 1067 | .former_request = elv_rb_former_request, |
| 1068 | .bio_merge = dd_bio_merge, |
| 1069 | .request_merge = dd_request_merge, |
| 1070 | .requests_merged = dd_merged_requests, |
| 1071 | .request_merged = dd_request_merged, |
| 1072 | .has_work = dd_has_work, |
Bart Van Assche | 3e9a99e | 2021-06-17 17:44:48 -0700 | [diff] [blame] | 1073 | .init_sched = dd_init_sched, |
| 1074 | .exit_sched = dd_exit_sched, |
Bart Van Assche | 0775758 | 2021-06-17 17:44:51 -0700 | [diff] [blame] | 1075 | .init_hctx = dd_init_hctx, |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 1076 | }, |
| 1077 | |
Omar Sandoval | daaadb3 | 2017-05-04 00:31:34 -0700 | [diff] [blame] | 1078 | #ifdef CONFIG_BLK_DEBUG_FS |
| 1079 | .queue_debugfs_attrs = deadline_queue_debugfs_attrs, |
| 1080 | #endif |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 1081 | .elevator_attrs = deadline_attrs, |
| 1082 | .elevator_name = "mq-deadline", |
Jens Axboe | 4d740bc | 2017-10-25 09:47:20 -0600 | [diff] [blame] | 1083 | .elevator_alias = "deadline", |
Damien Le Moal | 68c43f1 | 2019-09-05 18:51:31 +0900 | [diff] [blame] | 1084 | .elevator_features = ELEVATOR_F_ZBD_SEQ_WRITE, |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 1085 | .elevator_owner = THIS_MODULE, |
| 1086 | }; |
Ben Hutchings | 7de967e | 2017-08-13 18:03:15 +0100 | [diff] [blame] | 1087 | MODULE_ALIAS("mq-deadline-iosched"); |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 1088 | |
| 1089 | static int __init deadline_init(void) |
| 1090 | { |
Tejun Heo | 0f78399 | 2021-08-11 07:41:45 -1000 | [diff] [blame] | 1091 | return elv_register(&mq_deadline); |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 1092 | } |
| 1093 | |
| 1094 | static void __exit deadline_exit(void) |
| 1095 | { |
| 1096 | elv_unregister(&mq_deadline); |
| 1097 | } |
| 1098 | |
| 1099 | module_init(deadline_init); |
| 1100 | module_exit(deadline_exit); |
| 1101 | |
Bart Van Assche | c807ab5 | 2021-06-17 17:44:53 -0700 | [diff] [blame] | 1102 | MODULE_AUTHOR("Jens Axboe, Damien Le Moal and Bart Van Assche"); |
Jens Axboe | 945ffb6 | 2017-01-14 17:11:11 -0700 | [diff] [blame] | 1103 | MODULE_LICENSE("GPL"); |
| 1104 | MODULE_DESCRIPTION("MQ deadline IO scheduler"); |