/*
 * Block device elevator/IO-scheduler.
 *
 * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *
 * 30042000 Jens Axboe <axboe@kernel.dk> :
 *
 * Split the elevator a bit so that it is possible to choose a different
 * one or even write a new "plug in". There are three pieces:
 * - elevator_fn, inserts a new request in the queue list
 * - elevator_merge_fn, decides whether a new buffer can be merged with
 *   an existing request
 * - elevator_dequeue_fn, called when a request is taken off the active list
 *
 * 20082000 Dave Jones <davej@suse.de> :
 * Removed tests for max-bomb-segments, which was breaking elvtune
 *  when run without -bN
 *
 * Jens:
 * - Rework again to work with bio instead of buffer_heads
 * - loose bi_dev comparisons, partition handling is right now
 * - completely modularize elevator setup and teardown
 *
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/blktrace_api.h>
#include <linux/hash.h>
#include <linux/uaccess.h>
#include <linux/pm_runtime.h>
#include <linux/blk-cgroup.h>

#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq-sched.h"
#include "blk-pm.h"
#include "blk-wbt.h"

static DEFINE_SPINLOCK(elv_list_lock);
static LIST_HEAD(elv_list);

/*
 * Merge hash stuff.
 */
#define rq_hash_key(rq)		(blk_rq_pos(rq) + blk_rq_sectors(rq))
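
/*
 * The hash key is the request's *end* sector (start + size), so a
 * back-merge candidate for a bio can be found by looking up the bio's
 * start sector - see elv_rqhash_find() and its use in elv_merge().
 */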

/*
 * Query io scheduler to see if the current process issuing bio may be
 * merged with rq.
 */
static int elv_iosched_allow_bio_merge(struct request *rq, struct bio *bio)
{
	struct request_queue *q = rq->q;
	struct elevator_queue *e = q->elevator;

	if (e->uses_mq && e->type->ops.mq.allow_merge)
		return e->type->ops.mq.allow_merge(q, rq, bio);
	else if (!e->uses_mq && e->type->ops.sq.elevator_allow_bio_merge_fn)
		return e->type->ops.sq.elevator_allow_bio_merge_fn(q, rq, bio);

	return 1;
}

/*
 * can we safely merge with this request?
 */
bool elv_bio_merge_ok(struct request *rq, struct bio *bio)
{
	if (!blk_rq_merge_ok(rq, bio))
		return false;

	if (!elv_iosched_allow_bio_merge(rq, bio))
		return false;

	return true;
}
EXPORT_SYMBOL(elv_bio_merge_ok);

static bool elevator_match(const struct elevator_type *e, const char *name)
{
	if (!strcmp(e->elevator_name, name))
		return true;
	if (e->elevator_alias && !strcmp(e->elevator_alias, name))
		return true;

	return false;
}
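
/*
 * Note: elevator_alias lets a scheduler answer to a second, legacy name
 * as well - mq-deadline, for instance, also matches "deadline" this way.
 */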

/*
 * Return the scheduler with name 'name' and a matching 'mq' capability.
 */
static struct elevator_type *elevator_find(const char *name, bool mq)
{
	struct elevator_type *e;

	list_for_each_entry(e, &elv_list, list) {
		if (elevator_match(e, name) && (mq == e->uses_mq))
			return e;
	}

	return NULL;
}

static void elevator_put(struct elevator_type *e)
{
	module_put(e->elevator_owner);
}

static struct elevator_type *elevator_get(struct request_queue *q,
					  const char *name, bool try_loading)
{
	struct elevator_type *e;

	spin_lock(&elv_list_lock);

	e = elevator_find(name, q->mq_ops != NULL);
	if (!e && try_loading) {
		spin_unlock(&elv_list_lock);
		request_module("%s-iosched", name);
		spin_lock(&elv_list_lock);
		e = elevator_find(name, q->mq_ops != NULL);
	}

	if (e && !try_module_get(e->elevator_owner))
		e = NULL;

	spin_unlock(&elv_list_lock);
	return e;
}

static char chosen_elevator[ELV_NAME_MAX];

static int __init elevator_setup(char *str)
{
	/*
	 * Be backwards-compatible with previous kernels, so users
	 * won't get the wrong elevator.
	 */
	strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
	return 1;
}

__setup("elevator=", elevator_setup);
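
/*
 * Example: booting with "elevator=deadline" on the kernel command line
 * selects the legacy deadline scheduler as the default for non-mq
 * devices (the parameter has no effect on blk-mq devices, see below).
 */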

/* called during boot to load the elevator chosen by the elevator param */
void __init load_default_elevator_module(void)
{
	struct elevator_type *e;

	if (!chosen_elevator[0])
		return;

	/*
	 * The boot parameter is deprecated and has never been supported
	 * for MQ. Only look for non-mq schedulers from here.
	 */
	spin_lock(&elv_list_lock);
	e = elevator_find(chosen_elevator, false);
	spin_unlock(&elv_list_lock);

	if (!e)
		request_module("%s-iosched", chosen_elevator);
}

static struct kobj_type elv_ktype;

struct elevator_queue *elevator_alloc(struct request_queue *q,
				  struct elevator_type *e)
{
	struct elevator_queue *eq;

	eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, q->node);
	if (unlikely(!eq))
		return NULL;

	eq->type = e;
	kobject_init(&eq->kobj, &elv_ktype);
	mutex_init(&eq->sysfs_lock);
	hash_init(eq->hash);
	eq->uses_mq = e->uses_mq;

	return eq;
}
EXPORT_SYMBOL(elevator_alloc);

static void elevator_release(struct kobject *kobj)
{
	struct elevator_queue *e;

	e = container_of(kobj, struct elevator_queue, kobj);
	elevator_put(e->type);
	kfree(e);
}

/*
 * Use the default elevator specified by config boot param for non-mq devices,
 * or by config option. Don't try to load modules as we could be running off
 * async and request_module() isn't allowed from async.
 */
int elevator_init(struct request_queue *q)
{
	struct elevator_type *e = NULL;
	int err = 0;

	/*
	 * q->sysfs_lock must be held to provide mutual exclusion between
	 * elevator_switch() and here.
	 */
	mutex_lock(&q->sysfs_lock);
	if (unlikely(q->elevator))
		goto out_unlock;

	if (*chosen_elevator) {
		e = elevator_get(q, chosen_elevator, false);
		if (!e)
			printk(KERN_ERR "I/O scheduler %s not found\n",
							chosen_elevator);
	}

	if (!e)
		e = elevator_get(q, CONFIG_DEFAULT_IOSCHED, false);
	if (!e) {
		printk(KERN_ERR
			"Default I/O scheduler not found. Using noop.\n");
		e = elevator_get(q, "noop", false);
	}

	err = e->ops.sq.elevator_init_fn(q, e);
	if (err)
		elevator_put(e);
out_unlock:
	mutex_unlock(&q->sysfs_lock);
	return err;
}

void elevator_exit(struct request_queue *q, struct elevator_queue *e)
{
	mutex_lock(&e->sysfs_lock);
	if (e->uses_mq && e->type->ops.mq.exit_sched)
		blk_mq_exit_sched(q, e);
	else if (!e->uses_mq && e->type->ops.sq.elevator_exit_fn)
		e->type->ops.sq.elevator_exit_fn(e);
	mutex_unlock(&e->sysfs_lock);

	kobject_put(&e->kobj);
}

static inline void __elv_rqhash_del(struct request *rq)
{
	hash_del(&rq->hash);
	rq->rq_flags &= ~RQF_HASHED;
}

void elv_rqhash_del(struct request_queue *q, struct request *rq)
{
	if (ELV_ON_HASH(rq))
		__elv_rqhash_del(rq);
}
EXPORT_SYMBOL_GPL(elv_rqhash_del);

void elv_rqhash_add(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	BUG_ON(ELV_ON_HASH(rq));
	hash_add(e->hash, &rq->hash, rq_hash_key(rq));
	rq->rq_flags |= RQF_HASHED;
}
EXPORT_SYMBOL_GPL(elv_rqhash_add);

void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
{
	__elv_rqhash_del(rq);
	elv_rqhash_add(q, rq);
}

struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
{
	struct elevator_queue *e = q->elevator;
	struct hlist_node *next;
	struct request *rq;

	hash_for_each_possible_safe(e->hash, rq, next, hash, offset) {
		BUG_ON(!ELV_ON_HASH(rq));

		if (unlikely(!rq_mergeable(rq))) {
			__elv_rqhash_del(rq);
			continue;
		}

		if (rq_hash_key(rq) == offset)
			return rq;
	}

	return NULL;
}
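
/*
 * Note that lookups above also opportunistically prune entries that have
 * become unmergeable since they were hashed, so the hash stays clean
 * without a separate sweep.
 */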

/*
 * RB-tree support functions for inserting/lookup/removal of requests
 * in a sorted RB tree.
 */
void elv_rb_add(struct rb_root *root, struct request *rq)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct request *__rq;

	while (*p) {
		parent = *p;
		__rq = rb_entry(parent, struct request, rb_node);

		if (blk_rq_pos(rq) < blk_rq_pos(__rq))
			p = &(*p)->rb_left;
		else if (blk_rq_pos(rq) >= blk_rq_pos(__rq))
			p = &(*p)->rb_right;
	}

	rb_link_node(&rq->rb_node, parent, p);
	rb_insert_color(&rq->rb_node, root);
}
EXPORT_SYMBOL(elv_rb_add);

void elv_rb_del(struct rb_root *root, struct request *rq)
{
	BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
	rb_erase(&rq->rb_node, root);
	RB_CLEAR_NODE(&rq->rb_node);
}
EXPORT_SYMBOL(elv_rb_del);

struct request *elv_rb_find(struct rb_root *root, sector_t sector)
{
	struct rb_node *n = root->rb_node;
	struct request *rq;

	while (n) {
		rq = rb_entry(n, struct request, rb_node);

		if (sector < blk_rq_pos(rq))
			n = n->rb_left;
		else if (sector > blk_rq_pos(rq))
			n = n->rb_right;
		else
			return rq;
	}

	return NULL;
}
EXPORT_SYMBOL(elv_rb_find);
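
/*
 * A minimal usage sketch (the sort_list field is hypothetical, not part
 * of this file): an elevator typically keeps its pending requests
 * sector-sorted in one of these trees and probes it for a front-merge
 * candidate at the bio's end sector, e.g.:
 *
 *	struct request *__rq;
 *
 *	elv_rb_add(&dd->sort_list, rq);
 *	__rq = elv_rb_find(&dd->sort_list, bio_end_sector(bio));
 *	if (__rq && elv_bio_merge_ok(__rq, bio))
 *		return ELEVATOR_FRONT_MERGE;
 *
 * The deadline scheduler's merge hook does essentially this.
 */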

/*
 * Insert rq into dispatch queue of q.  Queue lock must be held on
 * entry.  rq is sorted into the dispatch queue.  To be used by
 * specific elevators.
 */
void elv_dispatch_sort(struct request_queue *q, struct request *rq)
{
	sector_t boundary;
	struct list_head *entry;

	if (q->last_merge == rq)
		q->last_merge = NULL;

	elv_rqhash_del(q, rq);

	q->nr_sorted--;

	boundary = q->end_sector;
	list_for_each_prev(entry, &q->queue_head) {
		struct request *pos = list_entry_rq(entry);

		if (req_op(rq) != req_op(pos))
			break;
		if (rq_data_dir(rq) != rq_data_dir(pos))
			break;
		if (pos->rq_flags & (RQF_STARTED | RQF_SOFTBARRIER))
			break;
		if (blk_rq_pos(rq) >= boundary) {
			if (blk_rq_pos(pos) < boundary)
				continue;
		} else {
			if (blk_rq_pos(pos) >= boundary)
				break;
		}
		if (blk_rq_pos(rq) >= blk_rq_pos(pos))
			break;
	}

	list_add(&rq->queuelist, entry);
}
EXPORT_SYMBOL(elv_dispatch_sort);
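
/*
 * The boundary handling above makes this a one-way elevator: requests at
 * or beyond q->end_sector (the last dispatched position) sort ahead of
 * those below it, so the dispatch list ascends from the head's current
 * position before wrapping around.
 */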

/*
 * Insert rq into dispatch queue of q.  Queue lock must be held on
 * entry.  rq is added to the back of the dispatch queue. To be used by
 * specific elevators.
 */
void elv_dispatch_add_tail(struct request_queue *q, struct request *rq)
{
	if (q->last_merge == rq)
		q->last_merge = NULL;

	elv_rqhash_del(q, rq);

	q->nr_sorted--;

	q->end_sector = rq_end_sector(rq);
	q->boundary_rq = rq;
	list_add_tail(&rq->queuelist, &q->queue_head);
}
EXPORT_SYMBOL(elv_dispatch_add_tail);

enum elv_merge elv_merge(struct request_queue *q, struct request **req,
		struct bio *bio)
{
	struct elevator_queue *e = q->elevator;
	struct request *__rq;

	/*
	 * Levels of merges:
	 *	nomerges:  No merges at all attempted
	 *	noxmerges: Only simple one-hit cache try
	 *	merges:	   All merge tries attempted
	 */
	if (blk_queue_nomerges(q) || !bio_mergeable(bio))
		return ELEVATOR_NO_MERGE;

	/*
	 * First try one-hit cache.
	 */
	if (q->last_merge && elv_bio_merge_ok(q->last_merge, bio)) {
		enum elv_merge ret = blk_try_merge(q->last_merge, bio);

		if (ret != ELEVATOR_NO_MERGE) {
			*req = q->last_merge;
			return ret;
		}
	}

	if (blk_queue_noxmerges(q))
		return ELEVATOR_NO_MERGE;

	/*
	 * See if our hash lookup can find a potential backmerge.
	 */
	__rq = elv_rqhash_find(q, bio->bi_iter.bi_sector);
	if (__rq && elv_bio_merge_ok(__rq, bio)) {
		*req = __rq;
		return ELEVATOR_BACK_MERGE;
	}

	if (e->uses_mq && e->type->ops.mq.request_merge)
		return e->type->ops.mq.request_merge(q, req, bio);
	else if (!e->uses_mq && e->type->ops.sq.elevator_merge_fn)
		return e->type->ops.sq.elevator_merge_fn(q, req, bio);

	return ELEVATOR_NO_MERGE;
}
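
/*
 * In short, elv_merge() escalates through three probes: the cached
 * q->last_merge hit, a hash lookup keyed on the bio's start sector for a
 * back merge, and finally the scheduler's own merge hook (typically a
 * front-merge search, as in the elv_rb_find() sketch above).
 */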

/*
 * Attempt to do an insertion back merge. Only check for the case where
 * we can append 'rq' to an existing request, so we can throw 'rq' away
 * afterwards.
 *
 * Returns true if we merged, false otherwise
 */
bool elv_attempt_insert_merge(struct request_queue *q, struct request *rq)
{
	struct request *__rq;
	bool ret;

	if (blk_queue_nomerges(q))
		return false;

	/*
	 * First try one-hit cache.
	 */
	if (q->last_merge && blk_attempt_req_merge(q, q->last_merge, rq))
		return true;

	if (blk_queue_noxmerges(q))
		return false;

	ret = false;
	/*
	 * See if our hash lookup can find a potential backmerge.
	 */
	while (1) {
		__rq = elv_rqhash_find(q, blk_rq_pos(rq));
		if (!__rq || !blk_attempt_req_merge(q, __rq, rq))
			break;

		/* The merged request could be merged with others, try again */
		ret = true;
		rq = __rq;
	}

	return ret;
}
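
/*
 * The loop above chains merges: once 'rq' has been absorbed into __rq,
 * the combined request may itself now abut an earlier one, so the lookup
 * restarts from the survivor until no further back merge is possible.
 */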

void elv_merged_request(struct request_queue *q, struct request *rq,
		enum elv_merge type)
{
	struct elevator_queue *e = q->elevator;

	if (e->uses_mq && e->type->ops.mq.request_merged)
		e->type->ops.mq.request_merged(q, rq, type);
	else if (!e->uses_mq && e->type->ops.sq.elevator_merged_fn)
		e->type->ops.sq.elevator_merged_fn(q, rq, type);

	if (type == ELEVATOR_BACK_MERGE)
		elv_rqhash_reposition(q, rq);

	q->last_merge = rq;
}

void elv_merge_requests(struct request_queue *q, struct request *rq,
			     struct request *next)
{
	struct elevator_queue *e = q->elevator;
	bool next_sorted = false;

	if (e->uses_mq && e->type->ops.mq.requests_merged)
		e->type->ops.mq.requests_merged(q, rq, next);
	else if (e->type->ops.sq.elevator_merge_req_fn) {
		next_sorted = (__force bool)(next->rq_flags & RQF_SORTED);
		if (next_sorted)
			e->type->ops.sq.elevator_merge_req_fn(q, rq, next);
	}

	elv_rqhash_reposition(q, rq);

	if (next_sorted) {
		elv_rqhash_del(q, next);
		q->nr_sorted--;
	}

	q->last_merge = rq;
}

void elv_bio_merged(struct request_queue *q, struct request *rq,
			struct bio *bio)
{
	struct elevator_queue *e = q->elevator;

	if (WARN_ON_ONCE(e->uses_mq))
		return;

	if (e->type->ops.sq.elevator_bio_merged_fn)
		e->type->ops.sq.elevator_bio_merged_fn(q, rq, bio);
}

void elv_requeue_request(struct request_queue *q, struct request *rq)
{
	/*
	 * it already went through dequeue, we need to decrement the
	 * in_flight count again
	 */
	if (blk_account_rq(rq)) {
		q->in_flight[rq_is_sync(rq)]--;
		if (rq->rq_flags & RQF_SORTED)
			elv_deactivate_rq(q, rq);
	}

	rq->rq_flags &= ~RQF_STARTED;

	blk_pm_requeue_request(rq);

	__elv_add_request(q, rq, ELEVATOR_INSERT_REQUEUE);
}

void elv_drain_elevator(struct request_queue *q)
{
	struct elevator_queue *e = q->elevator;
	static int printed;

	if (WARN_ON_ONCE(e->uses_mq))
		return;

	lockdep_assert_held(q->queue_lock);

	while (e->type->ops.sq.elevator_dispatch_fn(q, 1))
		;
	if (q->nr_sorted && !blk_queue_is_zoned(q) && printed++ < 10) {
		printk(KERN_ERR "%s: forced dispatching is broken "
		       "(nr_sorted=%u), please report this\n",
		       q->elevator->type->elevator_name, q->nr_sorted);
	}
}

void __elv_add_request(struct request_queue *q, struct request *rq, int where)
{
	trace_block_rq_insert(q, rq);

	blk_pm_add_request(q, rq);

	rq->q = q;

	if (rq->rq_flags & RQF_SOFTBARRIER) {
		/* barriers are scheduling boundary, update end_sector */
		if (!blk_rq_is_passthrough(rq)) {
			q->end_sector = rq_end_sector(rq);
			q->boundary_rq = rq;
		}
	} else if (!(rq->rq_flags & RQF_ELVPRIV) &&
		    (where == ELEVATOR_INSERT_SORT ||
		     where == ELEVATOR_INSERT_SORT_MERGE))
		where = ELEVATOR_INSERT_BACK;

	switch (where) {
	case ELEVATOR_INSERT_REQUEUE:
	case ELEVATOR_INSERT_FRONT:
		rq->rq_flags |= RQF_SOFTBARRIER;
		list_add(&rq->queuelist, &q->queue_head);
		break;

	case ELEVATOR_INSERT_BACK:
		rq->rq_flags |= RQF_SOFTBARRIER;
		elv_drain_elevator(q);
		list_add_tail(&rq->queuelist, &q->queue_head);
		/*
		 * We kick the queue here for the following reasons.
		 * - The elevator might have returned NULL previously
		 *   to delay requests and returned them now.  As the
		 *   queue wasn't empty before this request, ll_rw_blk
		 *   won't run the queue on return, resulting in hang.
		 * - Usually, back inserted requests won't be merged
		 *   with anything.  There's no point in delaying queue
		 *   processing.
		 */
		__blk_run_queue(q);
		break;

	case ELEVATOR_INSERT_SORT_MERGE:
		/*
		 * If we succeed in merging this request with one in the
		 * queue already, we are done - rq has now been freed,
		 * so no need to do anything further.
		 */
		if (elv_attempt_insert_merge(q, rq))
			break;
		/* fall through */
	case ELEVATOR_INSERT_SORT:
		BUG_ON(blk_rq_is_passthrough(rq));
		rq->rq_flags |= RQF_SORTED;
		q->nr_sorted++;
		if (rq_mergeable(rq)) {
			elv_rqhash_add(q, rq);
			if (!q->last_merge)
				q->last_merge = rq;
		}

		/*
		 * Some ioscheds (cfq) run q->request_fn directly, so
		 * rq cannot be accessed after calling
		 * elevator_add_req_fn.
		 */
		q->elevator->type->ops.sq.elevator_add_req_fn(q, rq);
		break;

	case ELEVATOR_INSERT_FLUSH:
		rq->rq_flags |= RQF_SOFTBARRIER;
		blk_insert_flush(rq);
		break;
	default:
		printk(KERN_ERR "%s: bad insertion point %d\n",
		       __func__, where);
		BUG();
	}
}
EXPORT_SYMBOL(__elv_add_request);
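
/*
 * Summary of the insertion types handled above: FRONT and REQUEUE go to
 * the head of the dispatch list, BACK drains the elevator and appends to
 * the tail, SORT hands the request to the scheduler, SORT_MERGE first
 * tries to merge it away, and FLUSH routes it through the flush machinery.
 */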

void elv_add_request(struct request_queue *q, struct request *rq, int where)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__elv_add_request(q, rq, where);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(elv_add_request);

struct request *elv_latter_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->uses_mq && e->type->ops.mq.next_request)
		return e->type->ops.mq.next_request(q, rq);
	else if (!e->uses_mq && e->type->ops.sq.elevator_latter_req_fn)
		return e->type->ops.sq.elevator_latter_req_fn(q, rq);

	return NULL;
}

struct request *elv_former_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->uses_mq && e->type->ops.mq.former_request)
		return e->type->ops.mq.former_request(q, rq);
	if (!e->uses_mq && e->type->ops.sq.elevator_former_req_fn)
		return e->type->ops.sq.elevator_former_req_fn(q, rq);
	return NULL;
}

int elv_set_request(struct request_queue *q, struct request *rq,
		    struct bio *bio, gfp_t gfp_mask)
{
	struct elevator_queue *e = q->elevator;

	if (WARN_ON_ONCE(e->uses_mq))
		return 0;

	if (e->type->ops.sq.elevator_set_req_fn)
		return e->type->ops.sq.elevator_set_req_fn(q, rq, bio, gfp_mask);
	return 0;
}

void elv_put_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (WARN_ON_ONCE(e->uses_mq))
		return;

	if (e->type->ops.sq.elevator_put_req_fn)
		e->type->ops.sq.elevator_put_req_fn(rq);
}

int elv_may_queue(struct request_queue *q, unsigned int op)
{
	struct elevator_queue *e = q->elevator;

	if (WARN_ON_ONCE(e->uses_mq))
		return 0;

	if (e->type->ops.sq.elevator_may_queue_fn)
		return e->type->ops.sq.elevator_may_queue_fn(q, op);

	return ELV_MQUEUE_MAY;
}

void elv_completed_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (WARN_ON_ONCE(e->uses_mq))
		return;

	/*
	 * request is released from the driver, io must be done
	 */
	if (blk_account_rq(rq)) {
		q->in_flight[rq_is_sync(rq)]--;
		if ((rq->rq_flags & RQF_SORTED) &&
		    e->type->ops.sq.elevator_completed_req_fn)
			e->type->ops.sq.elevator_completed_req_fn(q, rq);
	}
}

#define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)

static ssize_t
elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct elv_fs_entry *entry = to_elv(attr);
	struct elevator_queue *e;
	ssize_t error;

	if (!entry->show)
		return -EIO;

	e = container_of(kobj, struct elevator_queue, kobj);
	mutex_lock(&e->sysfs_lock);
	error = e->type ? entry->show(e, page) : -ENOENT;
	mutex_unlock(&e->sysfs_lock);
	return error;
}

static ssize_t
elv_attr_store(struct kobject *kobj, struct attribute *attr,
	       const char *page, size_t length)
{
	struct elv_fs_entry *entry = to_elv(attr);
	struct elevator_queue *e;
	ssize_t error;

	if (!entry->store)
		return -EIO;

	e = container_of(kobj, struct elevator_queue, kobj);
	mutex_lock(&e->sysfs_lock);
	error = e->type ? entry->store(e, page, length) : -ENOENT;
	mutex_unlock(&e->sysfs_lock);
	return error;
}

static const struct sysfs_ops elv_sysfs_ops = {
	.show	= elv_attr_show,
	.store	= elv_attr_store,
};

static struct kobj_type elv_ktype = {
	.sysfs_ops	= &elv_sysfs_ops,
	.release	= elevator_release,
};

int elv_register_queue(struct request_queue *q)
{
	struct elevator_queue *e = q->elevator;
	int error;

	lockdep_assert_held(&q->sysfs_lock);

	error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched");
	if (!error) {
		struct elv_fs_entry *attr = e->type->elevator_attrs;
		if (attr) {
			while (attr->attr.name) {
				if (sysfs_create_file(&e->kobj, &attr->attr))
					break;
				attr++;
			}
		}
		kobject_uevent(&e->kobj, KOBJ_ADD);
		e->registered = 1;
		if (!e->uses_mq && e->type->ops.sq.elevator_registered_fn)
			e->type->ops.sq.elevator_registered_fn(q);
	}
	return error;
}

void elv_unregister_queue(struct request_queue *q)
{
	lockdep_assert_held(&q->sysfs_lock);

	if (q) {
		struct elevator_queue *e = q->elevator;

		kobject_uevent(&e->kobj, KOBJ_REMOVE);
		kobject_del(&e->kobj);
		e->registered = 0;
		/* Re-enable throttling in case elevator disabled it */
		wbt_enable_default(q);
	}
}

int elv_register(struct elevator_type *e)
{
	char *def = "";

	/* create icq_cache if requested */
	if (e->icq_size) {
		if (WARN_ON(e->icq_size < sizeof(struct io_cq)) ||
		    WARN_ON(e->icq_align < __alignof__(struct io_cq)))
			return -EINVAL;

		snprintf(e->icq_cache_name, sizeof(e->icq_cache_name),
			 "%s_io_cq", e->elevator_name);
		e->icq_cache = kmem_cache_create(e->icq_cache_name, e->icq_size,
						 e->icq_align, 0, NULL);
		if (!e->icq_cache)
			return -ENOMEM;
	}

	/* register, don't allow duplicate names */
	spin_lock(&elv_list_lock);
	if (elevator_find(e->elevator_name, e->uses_mq)) {
		spin_unlock(&elv_list_lock);
		kmem_cache_destroy(e->icq_cache);
		return -EBUSY;
	}
	list_add_tail(&e->list, &elv_list);
	spin_unlock(&elv_list_lock);

	/* print pretty message */
	if (elevator_match(e, chosen_elevator) ||
	    (!*chosen_elevator &&
	     elevator_match(e, CONFIG_DEFAULT_IOSCHED)))
		def = " (default)";

	printk(KERN_INFO "io scheduler %s registered%s\n", e->elevator_name,
	       def);
	return 0;
}
EXPORT_SYMBOL_GPL(elv_register);

void elv_unregister(struct elevator_type *e)
{
	/* unregister */
	spin_lock(&elv_list_lock);
	list_del_init(&e->list);
	spin_unlock(&elv_list_lock);

	/*
	 * Destroy icq_cache if it exists.  icq's are RCU managed.  Make
	 * sure all RCU operations are complete before proceeding.
	 */
	if (e->icq_cache) {
		rcu_barrier();
		kmem_cache_destroy(e->icq_cache);
		e->icq_cache = NULL;
	}
}
EXPORT_SYMBOL_GPL(elv_unregister);

int elevator_switch_mq(struct request_queue *q,
			      struct elevator_type *new_e)
{
	int ret;

	lockdep_assert_held(&q->sysfs_lock);

	if (q->elevator) {
		if (q->elevator->registered)
			elv_unregister_queue(q);
		ioc_clear_queue(q);
		elevator_exit(q, q->elevator);
	}

	ret = blk_mq_init_sched(q, new_e);
	if (ret)
		goto out;

	if (new_e) {
		ret = elv_register_queue(q);
		if (ret) {
			elevator_exit(q, q->elevator);
			goto out;
		}
	}

	if (new_e)
		blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);
	else
		blk_add_trace_msg(q, "elv switch: none");

out:
	return ret;
}

/*
 * For blk-mq devices with a single hardware queue, default to mq-deadline
 * if it is available. If it isn't available, or the device has multiple
 * queues, default to "none".
 */
int elevator_init_mq(struct request_queue *q)
{
	struct elevator_type *e;
	int err = 0;

	if (q->nr_hw_queues != 1)
		return 0;

	/*
	 * q->sysfs_lock must be held to provide mutual exclusion between
	 * elevator_switch() and here.
	 */
	mutex_lock(&q->sysfs_lock);
	if (unlikely(q->elevator))
		goto out_unlock;

	e = elevator_get(q, "mq-deadline", false);
	if (!e)
		goto out_unlock;

	err = blk_mq_init_sched(q, e);
	if (err)
		elevator_put(e);
out_unlock:
	mutex_unlock(&q->sysfs_lock);
	return err;
}


/*
 * Switch to new_e io scheduler.  Be careful not to introduce deadlocks -
 * we don't free the old io scheduler before we have allocated what we
 * need for the new one.  This way we have a chance of going back to the
 * old one if the new one fails init for some reason.
 */
static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
{
	struct elevator_queue *old = q->elevator;
	bool old_registered = false;
	int err;

	lockdep_assert_held(&q->sysfs_lock);

	if (q->mq_ops) {
		blk_mq_freeze_queue(q);
		blk_mq_quiesce_queue(q);

		err = elevator_switch_mq(q, new_e);

		blk_mq_unquiesce_queue(q);
		blk_mq_unfreeze_queue(q);

		return err;
	}

	/*
	 * Turn on BYPASS and drain all requests w/ elevator private data.
	 * Block layer doesn't call into a quiesced elevator - all requests
	 * are directly put on the dispatch list without elevator data
	 * using INSERT_BACK.  All requests have SOFTBARRIER set and no
	 * merge happens either.
	 */
	if (old) {
		old_registered = old->registered;

		blk_queue_bypass_start(q);

		/* unregister and clear all auxiliary data of the old elevator */
		if (old_registered)
			elv_unregister_queue(q);

		ioc_clear_queue(q);
	}

	/* allocate, init and register new elevator */
	err = new_e->ops.sq.elevator_init_fn(q, new_e);
	if (err)
		goto fail_init;

	err = elv_register_queue(q);
	if (err)
		goto fail_register;

	/* done, kill the old one and finish */
	if (old) {
		elevator_exit(q, old);
		blk_queue_bypass_end(q);
	}

	blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);

	return 0;

fail_register:
	elevator_exit(q, q->elevator);
fail_init:
	/* switch failed, restore and re-register old elevator */
	if (old) {
		q->elevator = old;
		elv_register_queue(q);
		blk_queue_bypass_end(q);
	}

	return err;
}

/*
 * Switch this queue to the given IO scheduler.
 */
static int __elevator_change(struct request_queue *q, const char *name)
{
	char elevator_name[ELV_NAME_MAX];
	struct elevator_type *e;

	/* Make sure queue is not in the middle of being removed */
	if (!test_bit(QUEUE_FLAG_REGISTERED, &q->queue_flags))
		return -ENOENT;

	/*
	 * Special case for mq, turn off scheduling
	 */
	if (q->mq_ops && !strncmp(name, "none", 4))
		return elevator_switch(q, NULL);

	strlcpy(elevator_name, name, sizeof(elevator_name));
	e = elevator_get(q, strstrip(elevator_name), true);
	if (!e)
		return -EINVAL;

	if (q->elevator && elevator_match(q->elevator->type, elevator_name)) {
		elevator_put(e);
		return 0;
	}

	return elevator_switch(q, e);
}
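
/*
 * __elevator_change() is what backs writes to the sysfs scheduler
 * attribute, e.g. (device name for illustration only):
 *
 *	echo mq-deadline > /sys/block/sda/queue/scheduler
 *
 * On blk-mq devices, writing "none" switches scheduling off entirely.
 */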

static inline bool elv_support_iosched(struct request_queue *q)
{
	if (q->mq_ops && q->tag_set && (q->tag_set->flags &
				BLK_MQ_F_NO_SCHED))
		return false;
	return true;
}

ssize_t elv_iosched_store(struct request_queue *q, const char *name,
			  size_t count)
{
	int ret;

	if (!(q->mq_ops || q->request_fn) || !elv_support_iosched(q))
		return count;

	ret = __elevator_change(q, name);
	if (!ret)
		return count;

	return ret;
}

ssize_t elv_iosched_show(struct request_queue *q, char *name)
{
	struct elevator_queue *e = q->elevator;
	struct elevator_type *elv = NULL;
	struct elevator_type *__e;
	bool uses_mq = q->mq_ops != NULL;
	int len = 0;

	if (!queue_is_rq_based(q))
		return sprintf(name, "none\n");

	if (!q->elevator)
		len += sprintf(name+len, "[none] ");
	else
		elv = e->type;

	spin_lock(&elv_list_lock);
	list_for_each_entry(__e, &elv_list, list) {
		if (elv && elevator_match(elv, __e->elevator_name) &&
		    (__e->uses_mq == uses_mq)) {
			len += sprintf(name+len, "[%s] ", elv->elevator_name);
			continue;
		}
		if (__e->uses_mq && q->mq_ops && elv_support_iosched(q))
			len += sprintf(name+len, "%s ", __e->elevator_name);
		else if (!__e->uses_mq && !q->mq_ops)
			len += sprintf(name+len, "%s ", __e->elevator_name);
	}
	spin_unlock(&elv_list_lock);

	if (q->mq_ops && q->elevator)
		len += sprintf(name+len, "none");

	len += sprintf(len+name, "\n");
	return len;
}

struct request *elv_rb_former_request(struct request_queue *q,
				      struct request *rq)
{
	struct rb_node *rbprev = rb_prev(&rq->rb_node);

	if (rbprev)
		return rb_entry_rq(rbprev);

	return NULL;
}
EXPORT_SYMBOL(elv_rb_former_request);

struct request *elv_rb_latter_request(struct request_queue *q,
				      struct request *rq)
{
	struct rb_node *rbnext = rb_next(&rq->rb_node);

	if (rbnext)
		return rb_entry_rq(rbnext);

	return NULL;
}
EXPORT_SYMBOL(elv_rb_latter_request);