/*
 * Deadline i/o scheduler.
 *
 * Copyright (C) 2002 Jens Axboe <axboe@suse.de>
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/rbtree.h>

/*
 * See Documentation/block/deadline-iosched.txt
 */
static const int read_expire = HZ / 2;  /* max time before a read is submitted. */
static const int write_expire = 5 * HZ; /* ditto for writes, these limits are SOFT! */
static const int writes_starved = 2;    /* max times reads can starve a write */
static const int fifo_batch = 16;       /* # of sequential requests treated as one
					   by the above parameters. For throughput. */

struct deadline_data {
	/*
	 * run time data
	 */

	/*
	 * requests (deadline_rq s) are present on both sort_list and fifo_list
	 */
	struct rb_root sort_list[2];
	struct list_head fifo_list[2];

	/*
	 * next in sort order. read, write or both are NULL
	 */
	struct deadline_rq *next_drq[2];
	unsigned int batching;		/* number of sequential requests made */
	sector_t last_sector;		/* head position */
	unsigned int starved;		/* times reads have starved writes */

	/*
	 * settings that change how the i/o scheduler behaves
	 */
	int fifo_expire[2];
	int fifo_batch;
	int writes_starved;
	int front_merges;

	mempool_t *drq_pool;
};

/*
 * pre-request data.
 */
struct deadline_rq {
	struct request *request;

	/*
	 * expire fifo
	 */
	struct list_head fifo;
	unsigned long expires;
};

static void deadline_move_request(struct deadline_data *dd, struct deadline_rq *drq);

static kmem_cache_t *drq_pool;

#define RQ_DATA(rq)	((struct deadline_rq *) (rq)->elevator_private)

#define RQ_RB_ROOT(dd, rq)	(&(dd)->sort_list[rq_data_dir((rq))])
#define DRQ_RB_ROOT(dd, drq)	RQ_RB_ROOT((dd), (drq)->request)

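/*
 * add rq to the per-direction sort_list rbtree; if a request at the same
 * sector (an alias) is already queued, move the alias to the dispatch
 * queue and retry the insert
 */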
static void
deadline_add_drq_rb(struct deadline_data *dd, struct request *rq)
{
	struct rb_root *root = RQ_RB_ROOT(dd, rq);
	struct request *__alias;

retry:
	__alias = elv_rb_add(root, rq);
	if (unlikely(__alias)) {
		deadline_move_request(dd, RQ_DATA(__alias));
		goto retry;
	}
}

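/*
 * remove drq from the sort_list rbtree; if the cached next_drq hint points
 * at it, advance the hint to the next request in sector order first
 */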
static inline void
deadline_del_drq_rb(struct deadline_data *dd, struct deadline_rq *drq)
{
	struct request *rq = drq->request;
	const int data_dir = rq_data_dir(rq);

	if (dd->next_drq[data_dir] == drq) {
		struct rb_node *rbnext = rb_next(&rq->rb_node);

		dd->next_drq[data_dir] = NULL;
		if (rbnext)
			dd->next_drq[data_dir] = RQ_DATA(rb_entry_rq(rbnext));
	}

	elv_rb_del(RQ_RB_ROOT(dd, rq), rq);
}

/*
 * add drq to rbtree and fifo
 */
static void
deadline_add_request(struct request_queue *q, struct request *rq)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	struct deadline_rq *drq = RQ_DATA(rq);
	const int data_dir = rq_data_dir(drq->request);

	deadline_add_drq_rb(dd, rq);

	/*
	 * set expire time and add to fifo list
	 */
	drq->expires = jiffies + dd->fifo_expire[data_dir];
	list_add_tail(&drq->fifo, &dd->fifo_list[data_dir]);
}

/*
 * remove rq from rbtree and fifo.
 */
static void deadline_remove_request(request_queue_t *q, struct request *rq)
{
	struct deadline_rq *drq = RQ_DATA(rq);
	struct deadline_data *dd = q->elevator->elevator_data;

	list_del_init(&drq->fifo);
	deadline_del_drq_rb(dd, drq);
}

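/*
 * see if bio can be front merged with a request already in the sort_list;
 * back merges are left to the elevator core
 */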
static int
deadline_merge(request_queue_t *q, struct request **req, struct bio *bio)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	struct request *__rq;
	int ret;

	/*
	 * check for front merge
	 */
	if (dd->front_merges) {
		sector_t sector = bio->bi_sector + bio_sectors(bio);

		__rq = elv_rb_find(&dd->sort_list[bio_data_dir(bio)], sector);
		if (__rq) {
			BUG_ON(sector != __rq->sector);

			if (elv_rq_merge_ok(__rq, bio)) {
				ret = ELEVATOR_FRONT_MERGE;
				goto out;
			}
		}
	}

	return ELEVATOR_NO_MERGE;
out:
	*req = __rq;
	return ret;
}

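/*
 * called after a bio has been merged into req; a front merge changes the
 * starting sector (the rb sort key), so req must be repositioned in the
 * rbtree
 */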
static void deadline_merged_request(request_queue_t *q, struct request *req,
				    int type)
{
	struct deadline_data *dd = q->elevator->elevator_data;

	/*
	 * if the merge was a front merge, we need to reposition request
	 */
	if (type == ELEVATOR_FRONT_MERGE) {
		elv_rb_del(RQ_RB_ROOT(dd, req), req);
		deadline_add_drq_rb(dd, req);
	}
}

static void
deadline_merged_requests(request_queue_t *q, struct request *req,
			 struct request *next)
{
	struct deadline_rq *drq = RQ_DATA(req);
	struct deadline_rq *dnext = RQ_DATA(next);

	BUG_ON(!drq);
	BUG_ON(!dnext);

	/*
	 * if dnext expires before drq, assign its expire time to drq
	 * and move into dnext position (dnext will be deleted) in fifo
	 */
	if (!list_empty(&drq->fifo) && !list_empty(&dnext->fifo)) {
		if (time_before(dnext->expires, drq->expires)) {
			list_move(&drq->fifo, &dnext->fifo);
			drq->expires = dnext->expires;
		}
	}

	/*
	 * kill knowledge of next, this one is a goner
	 */
	deadline_remove_request(q, next);
}

/*
 * move request from sort list to dispatch queue.
 */
static inline void
deadline_move_to_dispatch(struct deadline_data *dd, struct deadline_rq *drq)
{
	request_queue_t *q = drq->request->q;

	deadline_remove_request(q, drq->request);
	elv_dispatch_add_tail(q, drq->request);
}

/*
 * move an entry to dispatch queue
 */
static void
deadline_move_request(struct deadline_data *dd, struct deadline_rq *drq)
{
	struct request *rq = drq->request;
	const int data_dir = rq_data_dir(rq);
	struct rb_node *rbnext = rb_next(&rq->rb_node);

	dd->next_drq[READ] = NULL;
	dd->next_drq[WRITE] = NULL;

	if (rbnext)
		dd->next_drq[data_dir] = RQ_DATA(rb_entry_rq(rbnext));

	dd->last_sector = drq->request->sector + drq->request->nr_sectors;

	/*
	 * take it off the sort and fifo list, move
	 * to dispatch queue
	 */
	deadline_move_to_dispatch(dd, drq);
}

#define list_entry_fifo(ptr)	list_entry((ptr), struct deadline_rq, fifo)

/*
 * deadline_check_fifo returns 0 if there are no expired requests on the
 * fifo, 1 otherwise. Requires !list_empty(&dd->fifo_list[data_dir])
 */
static inline int deadline_check_fifo(struct deadline_data *dd, int ddir)
{
	struct deadline_rq *drq = list_entry_fifo(dd->fifo_list[ddir].next);

	/*
	 * drq is expired!
	 */
	if (time_after(jiffies, drq->expires))
		return 1;

	return 0;
}

/*
 * deadline_dispatch_requests selects the best request according to
 * read/write expire, fifo_batch, etc
 */
static int deadline_dispatch_requests(request_queue_t *q, int force)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	const int reads = !list_empty(&dd->fifo_list[READ]);
	const int writes = !list_empty(&dd->fifo_list[WRITE]);
	struct deadline_rq *drq;
	int data_dir;

	/*
	 * batches are currently reads XOR writes
	 */
	if (dd->next_drq[WRITE])
		drq = dd->next_drq[WRITE];
	else
		drq = dd->next_drq[READ];

	if (drq) {
		/* we have a "next request" */

		if (dd->last_sector != drq->request->sector)
			/* end the batch on a non sequential request */
			dd->batching += dd->fifo_batch;

		if (dd->batching < dd->fifo_batch)
			/* we are still entitled to batch */
			goto dispatch_request;
	}

	/*
	 * at this point we are not running a batch. select the appropriate
	 * data direction (read / write)
	 */

	if (reads) {
		BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[READ]));

		if (writes && (dd->starved++ >= dd->writes_starved))
			goto dispatch_writes;

		data_dir = READ;

		goto dispatch_find_request;
	}

	/*
	 * there are either no reads or writes have been starved
	 */

	if (writes) {
dispatch_writes:
		BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[WRITE]));

		dd->starved = 0;

		data_dir = WRITE;

		goto dispatch_find_request;
	}

	return 0;

dispatch_find_request:
	/*
	 * we are not running a batch, find best request for selected data_dir
	 */
	if (deadline_check_fifo(dd, data_dir)) {
		/* An expired request exists - satisfy it */
		dd->batching = 0;
		drq = list_entry_fifo(dd->fifo_list[data_dir].next);

	} else if (dd->next_drq[data_dir]) {
		/*
		 * The last req was the same dir and we have a next request in
		 * sort order. No expired requests so continue on from here.
		 */
		drq = dd->next_drq[data_dir];
	} else {
		struct rb_node *n;

		/*
		 * The last req was the other direction or we have run out of
		 * higher-sectored requests. Go back to the lowest sectored
		 * request (1 way elevator) and start a new batch.
		 */
		dd->batching = 0;
		n = rb_first(&dd->sort_list[data_dir]);
		if (n)
			drq = RQ_DATA(rb_entry_rq(n));
	}

dispatch_request:
	/*
	 * drq is the selected appropriate request.
	 */
	dd->batching++;
	deadline_move_request(dd, drq);

	return 1;
}

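/*
 * the scheduler has nothing left to dispatch when both fifo lists are empty
 */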
static int deadline_queue_empty(request_queue_t *q)
{
	struct deadline_data *dd = q->elevator->elevator_data;

	return list_empty(&dd->fifo_list[WRITE])
		&& list_empty(&dd->fifo_list[READ]);
}

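/*
 * free the per-queue scheduler data; both fifo lists must already be empty
 */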
static void deadline_exit_queue(elevator_t *e)
{
	struct deadline_data *dd = e->elevator_data;

	BUG_ON(!list_empty(&dd->fifo_list[READ]));
	BUG_ON(!list_empty(&dd->fifo_list[WRITE]));

	mempool_destroy(dd->drq_pool);
	kfree(dd);
}

/*
 * initialize elevator private data (deadline_data), and alloc a drq for
 * each request on the free lists
 */
static void *deadline_init_queue(request_queue_t *q, elevator_t *e)
{
	struct deadline_data *dd;

	if (!drq_pool)
		return NULL;

	dd = kmalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
	if (!dd)
		return NULL;
	memset(dd, 0, sizeof(*dd));

	dd->drq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
					   mempool_free_slab, drq_pool, q->node);
	if (!dd->drq_pool) {
		kfree(dd);
		return NULL;
	}

	INIT_LIST_HEAD(&dd->fifo_list[READ]);
	INIT_LIST_HEAD(&dd->fifo_list[WRITE]);
	dd->sort_list[READ] = RB_ROOT;
	dd->sort_list[WRITE] = RB_ROOT;
	dd->fifo_expire[READ] = read_expire;
	dd->fifo_expire[WRITE] = write_expire;
	dd->writes_starved = writes_starved;
	dd->front_merges = 1;
	dd->fifo_batch = fifo_batch;
	return dd;
}

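/*
 * release the deadline_rq attached to a completed request back to the mempool
 */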
static void deadline_put_request(request_queue_t *q, struct request *rq)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	struct deadline_rq *drq = RQ_DATA(rq);

	mempool_free(drq, dd->drq_pool);
	rq->elevator_private = NULL;
}

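/*
 * allocate a deadline_rq from the mempool and attach it to rq;
 * returns 0 on success, 1 if the allocation failed
 */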
static int
deadline_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
		     gfp_t gfp_mask)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	struct deadline_rq *drq;

	drq = mempool_alloc(dd->drq_pool, gfp_mask);
	if (drq) {
		memset(drq, 0, sizeof(*drq));
		drq->request = rq;

		INIT_LIST_HEAD(&drq->fifo);

		rq->elevator_private = drq;
		return 0;
	}

	return 1;
}

/*
 * sysfs parts below
 */

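/*
 * helpers converting a single integer tunable to and from its sysfs text form
 */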
static ssize_t
deadline_var_show(int var, char *page)
{
	return sprintf(page, "%d\n", var);
}

static ssize_t
deadline_var_store(int *var, const char *page, size_t count)
{
	char *p = (char *) page;

	*var = simple_strtol(p, &p, 10);
	return count;
}

#define SHOW_FUNCTION(__FUNC, __VAR, __CONV)				\
static ssize_t __FUNC(elevator_t *e, char *page)			\
{									\
	struct deadline_data *dd = e->elevator_data;			\
	int __data = __VAR;						\
	if (__CONV)							\
		__data = jiffies_to_msecs(__data);			\
	return deadline_var_show(__data, (page));			\
}
SHOW_FUNCTION(deadline_read_expire_show, dd->fifo_expire[READ], 1);
SHOW_FUNCTION(deadline_write_expire_show, dd->fifo_expire[WRITE], 1);
SHOW_FUNCTION(deadline_writes_starved_show, dd->writes_starved, 0);
SHOW_FUNCTION(deadline_front_merges_show, dd->front_merges, 0);
SHOW_FUNCTION(deadline_fifo_batch_show, dd->fifo_batch, 0);
#undef SHOW_FUNCTION

#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)			\
static ssize_t __FUNC(elevator_t *e, const char *page, size_t count)	\
{									\
	struct deadline_data *dd = e->elevator_data;			\
	int __data;							\
	int ret = deadline_var_store(&__data, (page), count);		\
	if (__data < (MIN))						\
		__data = (MIN);						\
	else if (__data > (MAX))					\
		__data = (MAX);						\
	if (__CONV)							\
		*(__PTR) = msecs_to_jiffies(__data);			\
	else								\
		*(__PTR) = __data;					\
	return ret;							\
}
STORE_FUNCTION(deadline_read_expire_store, &dd->fifo_expire[READ], 0, INT_MAX, 1);
STORE_FUNCTION(deadline_write_expire_store, &dd->fifo_expire[WRITE], 0, INT_MAX, 1);
STORE_FUNCTION(deadline_writes_starved_store, &dd->writes_starved, INT_MIN, INT_MAX, 0);
STORE_FUNCTION(deadline_front_merges_store, &dd->front_merges, 0, 1, 0);
STORE_FUNCTION(deadline_fifo_batch_store, &dd->fifo_batch, 0, INT_MAX, 0);
#undef STORE_FUNCTION

#define DD_ATTR(name) \
	__ATTR(name, S_IRUGO|S_IWUSR, deadline_##name##_show, \
				      deadline_##name##_store)

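/*
 * tunables exported via sysfs; with deadline active they appear under
 * /sys/block/<dev>/queue/iosched/ (the expire values are shown in
 * milliseconds)
 */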
static struct elv_fs_entry deadline_attrs[] = {
	DD_ATTR(read_expire),
	DD_ATTR(write_expire),
	DD_ATTR(writes_starved),
	DD_ATTR(front_merges),
	DD_ATTR(fifo_batch),
	__ATTR_NULL
};

static struct elevator_type iosched_deadline = {
	.ops = {
		.elevator_merge_fn =		deadline_merge,
		.elevator_merged_fn =		deadline_merged_request,
		.elevator_merge_req_fn =	deadline_merged_requests,
		.elevator_dispatch_fn =		deadline_dispatch_requests,
		.elevator_add_req_fn =		deadline_add_request,
		.elevator_queue_empty_fn =	deadline_queue_empty,
		.elevator_former_req_fn =	elv_rb_former_request,
		.elevator_latter_req_fn =	elv_rb_latter_request,
		.elevator_set_req_fn =		deadline_set_request,
		.elevator_put_req_fn =		deadline_put_request,
		.elevator_init_fn =		deadline_init_queue,
		.elevator_exit_fn =		deadline_exit_queue,
	},

	.elevator_attrs = deadline_attrs,
	.elevator_name = "deadline",
	.elevator_owner = THIS_MODULE,
};

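/*
 * module init/exit: create the drq slab cache and register the elevator
 * type; the cache is destroyed again if registration fails
 */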
static int __init deadline_init(void)
{
	int ret;

	drq_pool = kmem_cache_create("deadline_drq", sizeof(struct deadline_rq),
				     0, 0, NULL, NULL);

	if (!drq_pool)
		return -ENOMEM;

	ret = elv_register(&iosched_deadline);
	if (ret)
		kmem_cache_destroy(drq_pool);

	return ret;
}

static void __exit deadline_exit(void)
{
	kmem_cache_destroy(drq_pool);
	elv_unregister(&iosched_deadline);
}

module_init(deadline_init);
module_exit(deadline_exit);

MODULE_AUTHOR("Jens Axboe");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("deadline IO scheduler");