// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1994, Karl Keyte: Added support for disk statistics
 * Elevator latency, (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
 * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
 * - July 2000
 * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - may 2001
 */

/*
 * This handles all read/write requests to block devices
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/blk-pm.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/kernel_stat.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/fault-inject.h>
#include <linux/list_sort.h>
#include <linux/delay.h>
#include <linux/ratelimit.h>
#include <linux/pm_runtime.h>
#include <linux/blk-cgroup.h>
#include <linux/t10-pi.h>
#include <linux/debugfs.h>
#include <linux/bpf.h>
#include <linux/psi.h>
#include <linux/sched/sysctl.h>
#include <linux/blk-crypto.h>

#define CREATE_TRACE_POINTS
#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-sched.h"
#include "blk-pm.h"
#include "blk-rq-qos.h"

struct dentry *blk_debugfs_root;

EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_split);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);

DEFINE_IDA(blk_queue_ida);

/*
 * For queue allocation
 */
struct kmem_cache *blk_requestq_cachep;

/*
 * Controlling structure to kblockd
 */
static struct workqueue_struct *kblockd_workqueue;

/**
 * blk_queue_flag_set - atomically set a queue flag
 * @flag: flag to be set
 * @q: request queue
 */
void blk_queue_flag_set(unsigned int flag, struct request_queue *q)
{
	set_bit(flag, &q->queue_flags);
}
EXPORT_SYMBOL(blk_queue_flag_set);

/**
 * blk_queue_flag_clear - atomically clear a queue flag
 * @flag: flag to be cleared
 * @q: request queue
 */
void blk_queue_flag_clear(unsigned int flag, struct request_queue *q)
{
	clear_bit(flag, &q->queue_flags);
}
EXPORT_SYMBOL(blk_queue_flag_clear);

/**
 * blk_queue_flag_test_and_set - atomically test and set a queue flag
 * @flag: flag to be set
 * @q: request queue
 *
 * Returns the previous value of @flag - 0 if the flag was not set and 1 if
 * the flag was already set.
 */
bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q)
{
	return test_and_set_bit(flag, &q->queue_flags);
}
EXPORT_SYMBOL_GPL(blk_queue_flag_test_and_set);
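
/*
 * Illustrative usage of the flag helpers above (a hypothetical caller, not
 * part of this file): a driver that wants to perform one-time work the first
 * time a flag is raised could do something like
 *
 *	if (!blk_queue_flag_test_and_set(QUEUE_FLAG_NONROT, q))
 *		pr_info("queue marked non-rotational for the first time\n");
 *	...
 *	blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
 */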

void blk_rq_init(struct request_queue *q, struct request *rq)
{
	memset(rq, 0, sizeof(*rq));

	INIT_LIST_HEAD(&rq->queuelist);
	rq->q = q;
	rq->__sector = (sector_t) -1;
	INIT_HLIST_NODE(&rq->hash);
	RB_CLEAR_NODE(&rq->rb_node);
	rq->tag = BLK_MQ_NO_TAG;
	rq->internal_tag = BLK_MQ_NO_TAG;
	rq->start_time_ns = ktime_get_ns();
	rq->part = NULL;
	refcount_set(&rq->ref, 1);
	blk_crypto_rq_set_defaults(rq);
}
EXPORT_SYMBOL(blk_rq_init);

#define REQ_OP_NAME(name) [REQ_OP_##name] = #name
static const char *const blk_op_name[] = {
	REQ_OP_NAME(READ),
	REQ_OP_NAME(WRITE),
	REQ_OP_NAME(FLUSH),
	REQ_OP_NAME(DISCARD),
	REQ_OP_NAME(SECURE_ERASE),
	REQ_OP_NAME(ZONE_RESET),
	REQ_OP_NAME(ZONE_RESET_ALL),
	REQ_OP_NAME(ZONE_OPEN),
	REQ_OP_NAME(ZONE_CLOSE),
	REQ_OP_NAME(ZONE_FINISH),
	REQ_OP_NAME(ZONE_APPEND),
	REQ_OP_NAME(WRITE_SAME),
	REQ_OP_NAME(WRITE_ZEROES),
	REQ_OP_NAME(SCSI_IN),
	REQ_OP_NAME(SCSI_OUT),
	REQ_OP_NAME(DRV_IN),
	REQ_OP_NAME(DRV_OUT),
};
#undef REQ_OP_NAME

/**
 * blk_op_str - Return the string XXX corresponding to REQ_OP_XXX
 * @op: REQ_OP_XXX.
 *
 * Description: Centralized block layer function to convert REQ_OP_XXX into
 * string format. Useful for debugging and tracing bios or requests. For an
 * invalid REQ_OP_XXX it returns the string "UNKNOWN".
 */
inline const char *blk_op_str(unsigned int op)
{
	const char *op_str = "UNKNOWN";

	if (op < ARRAY_SIZE(blk_op_name) && blk_op_name[op])
		op_str = blk_op_name[op];

	return op_str;
}
EXPORT_SYMBOL_GPL(blk_op_str);
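
/*
 * Example results of blk_op_str() (illustrative only):
 *
 *	blk_op_str(REQ_OP_WRITE)	-> "WRITE"
 *	blk_op_str(REQ_OP_ZONE_APPEND)	-> "ZONE_APPEND"
 *	blk_op_str(0xff)		-> "UNKNOWN"	(out of range)
 */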

static const struct {
	int		errno;
	const char	*name;
} blk_errors[] = {
	[BLK_STS_OK]		= { 0,		"" },
	[BLK_STS_NOTSUPP]	= { -EOPNOTSUPP, "operation not supported" },
	[BLK_STS_TIMEOUT]	= { -ETIMEDOUT,	"timeout" },
	[BLK_STS_NOSPC]		= { -ENOSPC,	"critical space allocation" },
	[BLK_STS_TRANSPORT]	= { -ENOLINK,	"recoverable transport" },
	[BLK_STS_TARGET]	= { -EREMOTEIO,	"critical target" },
	[BLK_STS_NEXUS]		= { -EBADE,	"critical nexus" },
	[BLK_STS_MEDIUM]	= { -ENODATA,	"critical medium" },
	[BLK_STS_PROTECTION]	= { -EILSEQ,	"protection" },
	[BLK_STS_RESOURCE]	= { -ENOMEM,	"kernel resource" },
	[BLK_STS_DEV_RESOURCE]	= { -EBUSY,	"device resource" },
	[BLK_STS_AGAIN]		= { -EAGAIN,	"nonblocking retry" },

	/* device mapper special case, should not leak out: */
	[BLK_STS_DM_REQUEUE]	= { -EREMCHG, "dm internal retry" },

	/* zone device specific errors */
	[BLK_STS_ZONE_OPEN_RESOURCE]	= { -ETOOMANYREFS, "open zones exceeded" },
	[BLK_STS_ZONE_ACTIVE_RESOURCE]	= { -EOVERFLOW, "active zones exceeded" },

	/* everything else not covered above: */
	[BLK_STS_IOERR]		= { -EIO,	"I/O" },
};

blk_status_t errno_to_blk_status(int errno)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(blk_errors); i++) {
		if (blk_errors[i].errno == errno)
			return (__force blk_status_t)i;
	}

	return BLK_STS_IOERR;
}
EXPORT_SYMBOL_GPL(errno_to_blk_status);

int blk_status_to_errno(blk_status_t status)
{
	int idx = (__force int)status;

	if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
		return -EIO;
	return blk_errors[idx].errno;
}
EXPORT_SYMBOL_GPL(blk_status_to_errno);
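
/*
 * Example of the mapping above (illustrative only): errno_to_blk_status(-ENOSPC)
 * yields BLK_STS_NOSPC and blk_status_to_errno(BLK_STS_NOSPC) yields -ENOSPC,
 * while an errno without a dedicated entry, e.g. -EINVAL, falls back to
 * BLK_STS_IOERR and therefore reads back as -EIO.
 */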

static void print_req_error(struct request *req, blk_status_t status,
		const char *caller)
{
	int idx = (__force int)status;

	if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
		return;

	printk_ratelimited(KERN_ERR
		"%s: %s error, dev %s, sector %llu op 0x%x:(%s) flags 0x%x "
		"phys_seg %u prio class %u\n",
		caller, blk_errors[idx].name,
		req->rq_disk ? req->rq_disk->disk_name : "?",
		blk_rq_pos(req), req_op(req), blk_op_str(req_op(req)),
		req->cmd_flags & ~REQ_OP_MASK,
		req->nr_phys_segments,
		IOPRIO_PRIO_CLASS(req->ioprio));
}

static void req_bio_endio(struct request *rq, struct bio *bio,
			  unsigned int nbytes, blk_status_t error)
{
	if (error)
		bio->bi_status = error;

	if (unlikely(rq->rq_flags & RQF_QUIET))
		bio_set_flag(bio, BIO_QUIET);

	bio_advance(bio, nbytes);

	if (req_op(rq) == REQ_OP_ZONE_APPEND && error == BLK_STS_OK) {
		/*
		 * Partial zone append completions cannot be supported as the
		 * BIO fragments may end up not being written sequentially.
		 */
		if (bio->bi_iter.bi_size)
			bio->bi_status = BLK_STS_IOERR;
		else
			bio->bi_iter.bi_sector = rq->__sector;
	}

	/* don't actually finish bio if it's part of flush sequence */
	if (bio->bi_iter.bi_size == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ))
		bio_endio(bio);
}

void blk_dump_rq_flags(struct request *rq, char *msg)
{
	printk(KERN_INFO "%s: dev %s: flags=%llx\n", msg,
		rq->rq_disk ? rq->rq_disk->disk_name : "?",
		(unsigned long long) rq->cmd_flags);

	printk(KERN_INFO "  sector %llu, nr/cnr %u/%u\n",
	       (unsigned long long)blk_rq_pos(rq),
	       blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
	printk(KERN_INFO "  bio %p, biotail %p, len %u\n",
	       rq->bio, rq->biotail, blk_rq_bytes(rq));
}
EXPORT_SYMBOL(blk_dump_rq_flags);

/**
 * blk_sync_queue - cancel any pending callbacks on a queue
 * @q: the queue
 *
 * Description:
 *     The block layer may perform asynchronous callback activity
 *     on a queue, such as calling the unplug function after a timeout.
 *     A block device may call blk_sync_queue to ensure that any
 *     such activity is cancelled, thus allowing it to release resources
 *     that the callbacks might use. The caller must already have made sure
 *     that its ->submit_bio will not re-add plugging prior to calling
 *     this function.
 *
 *     This function does not cancel any asynchronous activity arising
 *     out of elevator or throttling code. That would require elevator_exit()
 *     and blkcg_exit_queue() to be called with queue lock initialized.
 *
 */
void blk_sync_queue(struct request_queue *q)
{
	del_timer_sync(&q->timeout);
	cancel_work_sync(&q->timeout_work);
}
EXPORT_SYMBOL(blk_sync_queue);

/**
 * blk_set_pm_only - increment pm_only counter
 * @q: request queue pointer
 */
void blk_set_pm_only(struct request_queue *q)
{
	atomic_inc(&q->pm_only);
}
EXPORT_SYMBOL_GPL(blk_set_pm_only);

void blk_clear_pm_only(struct request_queue *q)
{
	int pm_only;

	pm_only = atomic_dec_return(&q->pm_only);
	WARN_ON_ONCE(pm_only < 0);
	if (pm_only == 0)
		wake_up_all(&q->mq_freeze_wq);
}
EXPORT_SYMBOL_GPL(blk_clear_pm_only);

/**
 * blk_put_queue - decrement the request_queue refcount
 * @q: the request_queue structure to decrement the refcount for
 *
 * Decrements the refcount of the request_queue kobject. When this reaches 0
 * we'll have blk_release_queue() called.
 *
 * Context: Any context, but the last reference must not be dropped from
 *          atomic context.
 */
void blk_put_queue(struct request_queue *q)
{
	kobject_put(&q->kobj);
}
EXPORT_SYMBOL(blk_put_queue);

void blk_set_queue_dying(struct request_queue *q)
{
	blk_queue_flag_set(QUEUE_FLAG_DYING, q);

	/*
	 * When queue DYING flag is set, we need to block new req
	 * entering queue, so we call blk_freeze_queue_start() to
	 * prevent I/O from crossing blk_queue_enter().
	 */
	blk_freeze_queue_start(q);

	if (queue_is_mq(q))
		blk_mq_wake_waiters(q);

	/* Make blk_queue_enter() reexamine the DYING flag. */
	wake_up_all(&q->mq_freeze_wq);
}
EXPORT_SYMBOL_GPL(blk_set_queue_dying);

/**
 * blk_cleanup_queue - shutdown a request queue
 * @q: request queue to shutdown
 *
 * Mark @q DYING, drain all pending requests, mark @q DEAD, destroy and
 * put it.  All future requests will be failed immediately with -ENODEV.
 *
 * Context: can sleep
 */
void blk_cleanup_queue(struct request_queue *q)
{
	/* cannot be called from atomic context */
	might_sleep();

	WARN_ON_ONCE(blk_queue_registered(q));

	/* mark @q DYING, no new request or merges will be allowed afterwards */
	blk_set_queue_dying(q);

	blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);

	/*
	 * Drain all requests queued before DYING marking. Set DEAD flag to
	 * prevent that blk_mq_run_hw_queues() accesses the hardware queues
	 * after draining finished.
	 */
	blk_freeze_queue(q);

	rq_qos_exit(q);

	blk_queue_flag_set(QUEUE_FLAG_DEAD, q);

	/* for synchronous bio-based driver finish in-flight integrity i/o */
	blk_flush_integrity();

	/* @q won't process any more request, flush async actions */
	del_timer_sync(&q->backing_dev_info->laptop_mode_wb_timer);
	blk_sync_queue(q);

	if (queue_is_mq(q))
		blk_mq_exit_queue(q);

	/*
	 * In theory, request pool of sched_tags belongs to request queue.
	 * However, the current implementation requires tag_set for freeing
	 * requests, so free the pool now.
	 *
	 * Queue has become frozen, there can't be any in-queue requests, so
	 * it is safe to free requests now.
	 */
	mutex_lock(&q->sysfs_lock);
	if (q->elevator)
		blk_mq_sched_free_requests(q);
	mutex_unlock(&q->sysfs_lock);

	percpu_ref_exit(&q->q_usage_counter);

	/* @q is and will stay empty, shutdown and put */
	blk_put_queue(q);
}
EXPORT_SYMBOL(blk_cleanup_queue);

/**
 * blk_queue_enter() - try to increase q->q_usage_counter
 * @q: request queue pointer
 * @flags: BLK_MQ_REQ_NOWAIT and/or BLK_MQ_REQ_PM
 */
int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
{
	const bool pm = flags & BLK_MQ_REQ_PM;

	while (true) {
		bool success = false;

		rcu_read_lock();
		if (percpu_ref_tryget_live(&q->q_usage_counter)) {
			/*
			 * The code that increments the pm_only counter is
			 * responsible for ensuring that that counter is
			 * globally visible before the queue is unfrozen.
			 */
			if ((pm && queue_rpm_status(q) != RPM_SUSPENDED) ||
			    !blk_queue_pm_only(q)) {
				success = true;
			} else {
				percpu_ref_put(&q->q_usage_counter);
			}
		}
		rcu_read_unlock();

		if (success)
			return 0;

		if (flags & BLK_MQ_REQ_NOWAIT)
			return -EBUSY;

		/*
		 * read pair of barrier in blk_freeze_queue_start(),
		 * we need to order reading __PERCPU_REF_DEAD flag of
		 * .q_usage_counter and reading .mq_freeze_depth or
		 * queue dying flag, otherwise the following wait may
		 * never return if the two reads are reordered.
		 */
		smp_rmb();

		wait_event(q->mq_freeze_wq,
			   (!q->mq_freeze_depth &&
			    blk_pm_resume_queue(pm, q)) ||
			   blk_queue_dying(q));
		if (blk_queue_dying(q))
			return -ENODEV;
	}
}

static inline int bio_queue_enter(struct bio *bio)
{
	struct request_queue *q = bio->bi_bdev->bd_disk->queue;
	bool nowait = bio->bi_opf & REQ_NOWAIT;
	int ret;

	ret = blk_queue_enter(q, nowait ? BLK_MQ_REQ_NOWAIT : 0);
	if (unlikely(ret)) {
		if (nowait && !blk_queue_dying(q))
			bio_wouldblock_error(bio);
		else
			bio_io_error(bio);
	}

	return ret;
}

void blk_queue_exit(struct request_queue *q)
{
	percpu_ref_put(&q->q_usage_counter);
}
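
/*
 * blk_queue_enter()/blk_queue_exit() bracket work that must not race with
 * queue freezing or teardown.  A hypothetical caller (sketch only, not part
 * of this file) would pair them like this:
 *
 *	if (blk_queue_enter(q, BLK_MQ_REQ_NOWAIT))
 *		return -EBUSY;
 *	... do work against q ...
 *	blk_queue_exit(q);
 */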

static void blk_queue_usage_counter_release(struct percpu_ref *ref)
{
	struct request_queue *q =
		container_of(ref, struct request_queue, q_usage_counter);

	wake_up_all(&q->mq_freeze_wq);
}

static void blk_rq_timed_out_timer(struct timer_list *t)
{
	struct request_queue *q = from_timer(q, t, timeout);

	kblockd_schedule_work(&q->timeout_work);
}

static void blk_timeout_work(struct work_struct *work)
{
}

struct request_queue *blk_alloc_queue(int node_id)
{
	struct request_queue *q;
	int ret;

	q = kmem_cache_alloc_node(blk_requestq_cachep,
				GFP_KERNEL | __GFP_ZERO, node_id);
	if (!q)
		return NULL;

	q->last_merge = NULL;

	q->id = ida_simple_get(&blk_queue_ida, 0, 0, GFP_KERNEL);
	if (q->id < 0)
		goto fail_q;

	ret = bioset_init(&q->bio_split, BIO_POOL_SIZE, 0, 0);
	if (ret)
		goto fail_id;

	q->backing_dev_info = bdi_alloc(node_id);
	if (!q->backing_dev_info)
		goto fail_split;

	q->stats = blk_alloc_queue_stats();
	if (!q->stats)
		goto fail_stats;

	q->node = node_id;

	atomic_set(&q->nr_active_requests_shared_sbitmap, 0);

	timer_setup(&q->backing_dev_info->laptop_mode_wb_timer,
		    laptop_mode_timer_fn, 0);
	timer_setup(&q->timeout, blk_rq_timed_out_timer, 0);
	INIT_WORK(&q->timeout_work, blk_timeout_work);
	INIT_LIST_HEAD(&q->icq_list);
#ifdef CONFIG_BLK_CGROUP
	INIT_LIST_HEAD(&q->blkg_list);
#endif

	kobject_init(&q->kobj, &blk_queue_ktype);

	mutex_init(&q->debugfs_mutex);
	mutex_init(&q->sysfs_lock);
	mutex_init(&q->sysfs_dir_lock);
	spin_lock_init(&q->queue_lock);

	init_waitqueue_head(&q->mq_freeze_wq);
	mutex_init(&q->mq_freeze_lock);

	/*
	 * Init percpu_ref in atomic mode so that it's faster to shutdown.
	 * See blk_register_queue() for details.
	 */
	if (percpu_ref_init(&q->q_usage_counter,
				blk_queue_usage_counter_release,
				PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
		goto fail_bdi;

	if (blkcg_init_queue(q))
		goto fail_ref;

	blk_queue_dma_alignment(q, 511);
	blk_set_default_limits(&q->limits);
	q->nr_requests = BLKDEV_MAX_RQ;

	return q;

fail_ref:
	percpu_ref_exit(&q->q_usage_counter);
fail_bdi:
	blk_free_queue_stats(q->stats);
fail_stats:
	bdi_put(q->backing_dev_info);
fail_split:
	bioset_exit(&q->bio_split);
fail_id:
	ida_simple_remove(&blk_queue_ida, q->id);
fail_q:
	kmem_cache_free(blk_requestq_cachep, q);
	return NULL;
}
EXPORT_SYMBOL(blk_alloc_queue);

/**
 * blk_get_queue - increment the request_queue refcount
 * @q: the request_queue structure to increment the refcount for
 *
 * Increment the refcount of the request_queue kobject.
 *
 * Context: Any context.
 */
bool blk_get_queue(struct request_queue *q)
{
	if (likely(!blk_queue_dying(q))) {
		__blk_get_queue(q);
		return true;
	}

	return false;
}
EXPORT_SYMBOL(blk_get_queue);

/**
 * blk_get_request - allocate a request
 * @q: request queue to allocate a request for
 * @op: operation (REQ_OP_*) and REQ_* flags, e.g. REQ_SYNC.
 * @flags: BLK_MQ_REQ_* flags, e.g. BLK_MQ_REQ_NOWAIT.
 */
struct request *blk_get_request(struct request_queue *q, unsigned int op,
				blk_mq_req_flags_t flags)
{
	struct request *req;

	WARN_ON_ONCE(op & REQ_NOWAIT);
	WARN_ON_ONCE(flags & ~(BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_PM));

	req = blk_mq_alloc_request(q, op, flags);
	if (!IS_ERR(req) && q->mq_ops->initialize_rq_fn)
		q->mq_ops->initialize_rq_fn(req);

	return req;
}
EXPORT_SYMBOL(blk_get_request);

void blk_put_request(struct request *req)
{
	blk_mq_free_request(req);
}
EXPORT_SYMBOL(blk_put_request);

static void handle_bad_sector(struct bio *bio, sector_t maxsector)
{
	char b[BDEVNAME_SIZE];

	pr_info_ratelimited("attempt to access beyond end of device\n"
			    "%s: rw=%d, want=%llu, limit=%llu\n",
			    bio_devname(bio, b), bio->bi_opf,
			    bio_end_sector(bio), maxsector);
}

#ifdef CONFIG_FAIL_MAKE_REQUEST

static DECLARE_FAULT_ATTR(fail_make_request);

static int __init setup_fail_make_request(char *str)
{
	return setup_fault_attr(&fail_make_request, str);
}
__setup("fail_make_request=", setup_fail_make_request);

static bool should_fail_request(struct block_device *part, unsigned int bytes)
{
	return part->bd_make_it_fail && should_fail(&fail_make_request, bytes);
}

static int __init fail_make_request_debugfs(void)
{
	struct dentry *dir = fault_create_debugfs_attr("fail_make_request",
						NULL, &fail_make_request);

	return PTR_ERR_OR_ZERO(dir);
}

late_initcall(fail_make_request_debugfs);

#else /* CONFIG_FAIL_MAKE_REQUEST */

static inline bool should_fail_request(struct block_device *part,
					unsigned int bytes)
{
	return false;
}

#endif /* CONFIG_FAIL_MAKE_REQUEST */

static inline bool bio_check_ro(struct bio *bio)
{
	if (op_is_write(bio_op(bio)) && bdev_read_only(bio->bi_bdev)) {
		char b[BDEVNAME_SIZE];

		if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
			return false;

		WARN_ONCE(1,
		       "Trying to write to read-only block-device %s (partno %d)\n",
			bio_devname(bio, b), bio->bi_bdev->bd_partno);
		/* Older lvm-tools actually trigger this */
		return false;
	}

	return false;
}

static noinline int should_fail_bio(struct bio *bio)
{
	if (should_fail_request(bdev_whole(bio->bi_bdev), bio->bi_iter.bi_size))
		return -EIO;
	return 0;
}
ALLOW_ERROR_INJECTION(should_fail_bio, ERRNO);

/*
 * Check whether this bio extends beyond the end of the device or partition.
 * This may well happen - the kernel calls bread() without checking the size of
 * the device, e.g., when mounting a file system.
 */
static inline int bio_check_eod(struct bio *bio)
{
	sector_t maxsector = bdev_nr_sectors(bio->bi_bdev);
	unsigned int nr_sectors = bio_sectors(bio);

	if (nr_sectors && maxsector &&
	    (nr_sectors > maxsector ||
	     bio->bi_iter.bi_sector > maxsector - nr_sectors)) {
		handle_bad_sector(bio, maxsector);
		return -EIO;
	}
	return 0;
}
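
/*
 * Worked example for the check above (illustrative only): on a device of
 * 100 sectors (maxsector == 100), a 4-sector bio starting at sector 98 is
 * rejected because 98 > 100 - 4, while the same bio starting at sector 96
 * is allowed and covers sectors 96..99.
 */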

/*
 * Remap block n of partition p to block n+start(p) of the disk.
 */
static int blk_partition_remap(struct bio *bio)
{
	struct block_device *p = bio->bi_bdev;

	if (unlikely(should_fail_request(p, bio->bi_iter.bi_size)))
		return -EIO;
	if (bio_sectors(bio)) {
		bio->bi_iter.bi_sector += p->bd_start_sect;
		trace_block_bio_remap(bio, p->bd_dev,
				      bio->bi_iter.bi_sector -
				      p->bd_start_sect);
	}
	bio_set_flag(bio, BIO_REMAPPED);
	return 0;
}

/*
 * Check write append to a zoned block device.
 */
static inline blk_status_t blk_check_zone_append(struct request_queue *q,
						 struct bio *bio)
{
	sector_t pos = bio->bi_iter.bi_sector;
	int nr_sectors = bio_sectors(bio);

	/* Only applicable to zoned block devices */
	if (!blk_queue_is_zoned(q))
		return BLK_STS_NOTSUPP;

	/* The bio sector must point to the start of a sequential zone */
	if (pos & (blk_queue_zone_sectors(q) - 1) ||
	    !blk_queue_zone_is_seq(q, pos))
		return BLK_STS_IOERR;

	/*
	 * Not allowed to cross zone boundaries. Otherwise, the BIO will be
	 * split and could result in non-contiguous sectors being written in
	 * different zones.
	 */
	if (nr_sectors > q->limits.chunk_sectors)
		return BLK_STS_IOERR;

	/* Make sure the BIO is small enough and will not get split */
	if (nr_sectors > q->limits.max_zone_append_sectors)
		return BLK_STS_IOERR;

	bio->bi_opf |= REQ_NOMERGE;

	return BLK_STS_OK;
}

static noinline_for_stack bool submit_bio_checks(struct bio *bio)
{
	struct block_device *bdev = bio->bi_bdev;
	struct request_queue *q = bdev->bd_disk->queue;
	blk_status_t status = BLK_STS_IOERR;
	struct blk_plug *plug;

	might_sleep();

	plug = blk_mq_plug(q, bio);
	if (plug && plug->nowait)
		bio->bi_opf |= REQ_NOWAIT;

	/*
	 * For a REQ_NOWAIT based request, return -EOPNOTSUPP
	 * if queue does not support NOWAIT.
	 */
	if ((bio->bi_opf & REQ_NOWAIT) && !blk_queue_nowait(q))
		goto not_supported;

	if (should_fail_bio(bio))
		goto end_io;
	if (unlikely(bio_check_ro(bio)))
		goto end_io;
	if (unlikely(bio_check_eod(bio)))
		goto end_io;
	if (bio->bi_bdev->bd_partno && !bio_flagged(bio, BIO_REMAPPED) &&
	    unlikely(blk_partition_remap(bio)))
		goto end_io;

	/*
	 * Filter flush bio's early so that bio based drivers without flush
	 * support don't have to worry about them.
	 */
	if (op_is_flush(bio->bi_opf) &&
	    !test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
		bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA);
		if (!bio_sectors(bio)) {
			status = BLK_STS_OK;
			goto end_io;
		}
	}

	if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
		bio->bi_opf &= ~REQ_HIPRI;

	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
		if (!blk_queue_discard(q))
			goto not_supported;
		break;
	case REQ_OP_SECURE_ERASE:
		if (!blk_queue_secure_erase(q))
			goto not_supported;
		break;
	case REQ_OP_WRITE_SAME:
		if (!q->limits.max_write_same_sectors)
			goto not_supported;
		break;
	case REQ_OP_ZONE_APPEND:
		status = blk_check_zone_append(q, bio);
		if (status != BLK_STS_OK)
			goto end_io;
		break;
	case REQ_OP_ZONE_RESET:
	case REQ_OP_ZONE_OPEN:
	case REQ_OP_ZONE_CLOSE:
	case REQ_OP_ZONE_FINISH:
		if (!blk_queue_is_zoned(q))
			goto not_supported;
		break;
	case REQ_OP_ZONE_RESET_ALL:
		if (!blk_queue_is_zoned(q) || !blk_queue_zone_resetall(q))
			goto not_supported;
		break;
	case REQ_OP_WRITE_ZEROES:
		if (!q->limits.max_write_zeroes_sectors)
			goto not_supported;
		break;
	default:
		break;
	}

	/*
	 * Various block parts want %current->io_context, so allocate it up
	 * front rather than dealing with lots of pain to allocate it only
	 * where needed. This may fail and the block layer knows how to live
	 * with it.
	 */
	if (unlikely(!current->io_context))
		create_task_io_context(current, GFP_ATOMIC, q->node);

	if (blk_throtl_bio(bio)) {
		blkcg_bio_issue_init(bio);
		return false;
	}

	blk_cgroup_bio_start(bio);
	blkcg_bio_issue_init(bio);

	if (!bio_flagged(bio, BIO_TRACE_COMPLETION)) {
		trace_block_bio_queue(bio);
		/* Now that enqueuing has been traced, we need to trace
		 * completion as well.
		 */
		bio_set_flag(bio, BIO_TRACE_COMPLETION);
	}
	return true;

not_supported:
	status = BLK_STS_NOTSUPP;
end_io:
	bio->bi_status = status;
	bio_endio(bio);
	return false;
}

static blk_qc_t __submit_bio(struct bio *bio)
{
	struct gendisk *disk = bio->bi_bdev->bd_disk;
	blk_qc_t ret = BLK_QC_T_NONE;

	if (blk_crypto_bio_prep(&bio)) {
		if (!disk->fops->submit_bio)
			return blk_mq_submit_bio(bio);
		ret = disk->fops->submit_bio(bio);
	}
	blk_queue_exit(disk->queue);
	return ret;
}

/*
 * The loop in this function may be a bit non-obvious, and so deserves some
 * explanation:
 *
 *  - Before entering the loop, bio->bi_next is NULL (as all callers ensure
 *    that), so we have a list with a single bio.
 *  - We pretend that we have just taken it off a longer list, so we assign
 *    bio_list to a pointer to the bio_list_on_stack, thus initialising the
 *    bio_list of new bios to be added.  ->submit_bio() may indeed add some more
 *    bios through a recursive call to submit_bio_noacct.  If it did, we find a
 *    non-NULL value in bio_list and re-enter the loop from the top.
 *  - In this case we really did just take the bio of the top of the list (no
 *    pretending) and so remove it from bio_list, and call into ->submit_bio()
 *    again.
 *
 * bio_list_on_stack[0] contains bios submitted by the current ->submit_bio.
 * bio_list_on_stack[1] contains bios that were submitted before the current
 *	->submit_bio, but that haven't been processed yet.
 */
static blk_qc_t __submit_bio_noacct(struct bio *bio)
{
	struct bio_list bio_list_on_stack[2];
	blk_qc_t ret = BLK_QC_T_NONE;

	BUG_ON(bio->bi_next);

	bio_list_init(&bio_list_on_stack[0]);
	current->bio_list = bio_list_on_stack;

	do {
		struct request_queue *q = bio->bi_bdev->bd_disk->queue;
		struct bio_list lower, same;

		if (unlikely(bio_queue_enter(bio) != 0))
			continue;

		/*
		 * Create a fresh bio_list for all subordinate requests.
		 */
		bio_list_on_stack[1] = bio_list_on_stack[0];
		bio_list_init(&bio_list_on_stack[0]);

		ret = __submit_bio(bio);

		/*
		 * Sort new bios into those for a lower level and those for the
		 * same level.
		 */
		bio_list_init(&lower);
		bio_list_init(&same);
		while ((bio = bio_list_pop(&bio_list_on_stack[0])) != NULL)
			if (q == bio->bi_bdev->bd_disk->queue)
				bio_list_add(&same, bio);
			else
				bio_list_add(&lower, bio);

		/*
		 * Now assemble so we handle the lowest level first.
		 */
		bio_list_merge(&bio_list_on_stack[0], &lower);
		bio_list_merge(&bio_list_on_stack[0], &same);
		bio_list_merge(&bio_list_on_stack[0], &bio_list_on_stack[1]);
	} while ((bio = bio_list_pop(&bio_list_on_stack[0])));

	current->bio_list = NULL;
	return ret;
}

static blk_qc_t __submit_bio_noacct_mq(struct bio *bio)
{
	struct bio_list bio_list[2] = { };
	blk_qc_t ret = BLK_QC_T_NONE;

	current->bio_list = bio_list;

	do {
		struct gendisk *disk = bio->bi_bdev->bd_disk;

		if (unlikely(bio_queue_enter(bio) != 0))
			continue;

		if (!blk_crypto_bio_prep(&bio)) {
			blk_queue_exit(disk->queue);
			ret = BLK_QC_T_NONE;
			continue;
		}

		ret = blk_mq_submit_bio(bio);
	} while ((bio = bio_list_pop(&bio_list[0])));

	current->bio_list = NULL;
	return ret;
}

/**
 * submit_bio_noacct - re-submit a bio to the block device layer for I/O
 * @bio: The bio describing the location in memory and on the device.
 *
 * This is a version of submit_bio() that shall only be used for I/O that is
 * resubmitted to lower level drivers by stacking block drivers. All file
 * systems and other upper level users of the block layer should use
 * submit_bio() instead.
 */
blk_qc_t submit_bio_noacct(struct bio *bio)
{
	if (!submit_bio_checks(bio))
		return BLK_QC_T_NONE;

	/*
	 * We only want one ->submit_bio to be active at a time, else stack
	 * usage with stacked devices could be a problem. Use current->bio_list
	 * to collect a list of requests submitted by a ->submit_bio method while
	 * it is active, and then process them after it returned.
	 */
	if (current->bio_list) {
		bio_list_add(&current->bio_list[0], bio);
		return BLK_QC_T_NONE;
	}

	if (!bio->bi_bdev->bd_disk->fops->submit_bio)
		return __submit_bio_noacct_mq(bio);
	return __submit_bio_noacct(bio);
}
EXPORT_SYMBOL(submit_bio_noacct);

/**
 * submit_bio - submit a bio to the block device layer for I/O
 * @bio: The &struct bio which describes the I/O
 *
 * submit_bio() is used to submit I/O requests to block devices. It is passed a
 * fully set up &struct bio that describes the I/O that needs to be done. The
 * bio will be sent to the device described by the bi_bdev field.
 *
 * The success/failure status of the request, along with notification of
 * completion, is delivered asynchronously through the ->bi_end_io() callback
 * in @bio. The bio must NOT be touched by the caller until ->bi_end_io() has
 * been called.
 */
blk_qc_t submit_bio(struct bio *bio)
{
	if (blkcg_punt_bio_submit(bio))
		return BLK_QC_T_NONE;

	/*
	 * If it's a regular read/write or a barrier with data attached,
	 * go through the normal accounting stuff before submission.
	 */
	if (bio_has_data(bio)) {
		unsigned int count;

		if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME))
			count = queue_logical_block_size(
					bio->bi_bdev->bd_disk->queue) >> 9;
		else
			count = bio_sectors(bio);

		if (op_is_write(bio_op(bio))) {
			count_vm_events(PGPGOUT, count);
		} else {
			task_io_account_read(bio->bi_iter.bi_size);
			count_vm_events(PGPGIN, count);
		}

		if (unlikely(block_dump)) {
			char b[BDEVNAME_SIZE];
			printk(KERN_DEBUG "%s(%d): %s block %Lu on %s (%u sectors)\n",
			current->comm, task_pid_nr(current),
				op_is_write(bio_op(bio)) ? "WRITE" : "READ",
				(unsigned long long)bio->bi_iter.bi_sector,
				bio_devname(bio, b), count);
		}
	}

	/*
	 * If we're reading data that is part of the userspace workingset, count
	 * submission time as memory stall. When the device is congested, or
	 * the submitting cgroup IO-throttled, submission can be a significant
	 * part of overall IO time.
	 */
	if (unlikely(bio_op(bio) == REQ_OP_READ &&
	    bio_flagged(bio, BIO_WORKINGSET))) {
		unsigned long pflags;
		blk_qc_t ret;

		psi_memstall_enter(&pflags);
		ret = submit_bio_noacct(bio);
		psi_memstall_leave(&pflags);

		return ret;
	}

	return submit_bio_noacct(bio);
}
EXPORT_SYMBOL(submit_bio);
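
/*
 * Typical submit_bio() usage from a filesystem or other upper layer looks
 * roughly like the following sketch (not code from this file; error handling
 * is omitted and my_end_io is a hypothetical completion handler):
 *
 *	struct bio *bio = bio_alloc(GFP_NOIO, 1);
 *
 *	bio_set_dev(bio, bdev);
 *	bio->bi_iter.bi_sector = sector;
 *	bio->bi_opf = REQ_OP_READ;
 *	bio_add_page(bio, page, PAGE_SIZE, 0);
 *	bio->bi_end_io = my_end_io;
 *	submit_bio(bio);
 */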
1119
Jens Axboe1052b8a2018-11-26 08:21:49 -07001120/**
Hannes Reineckebf4e6b42015-11-26 08:46:57 +01001121 * blk_cloned_rq_check_limits - Helper function to check a cloned request
Guoqing Jiang0d720312020-03-09 22:41:33 +01001122 * for the new queue limits
Kiyoshi Ueda82124d62008-09-18 10:45:38 -04001123 * @q: the queue
1124 * @rq: the request being checked
1125 *
1126 * Description:
1127 * @rq may have been made based on weaker limitations of upper-level queues
1128 * in request stacking drivers, and it may violate the limitation of @q.
1129 * Since the block layer and the underlying device driver trust @rq
1130 * after it is inserted into @q, it should be checked against @q before
1131 * the insertion using this generic function.
1132 *
Kiyoshi Ueda82124d62008-09-18 10:45:38 -04001133 * Request stacking drivers like request-based dm may change the queue
Hannes Reineckebf4e6b42015-11-26 08:46:57 +01001134 * limits when retrying requests on other queues. Those requests need
1135 * to be checked against the new queue limits again during dispatch.
Kiyoshi Ueda82124d62008-09-18 10:45:38 -04001136 */
Ritika Srivastava143d2602020-09-01 13:17:30 -07001137static blk_status_t blk_cloned_rq_check_limits(struct request_queue *q,
Hannes Reineckebf4e6b42015-11-26 08:46:57 +01001138 struct request *rq)
Kiyoshi Ueda82124d62008-09-18 10:45:38 -04001139{
Ritika Srivastava8327cce52020-09-01 13:17:31 -07001140 unsigned int max_sectors = blk_queue_get_max_sectors(q, req_op(rq));
1141
1142 if (blk_rq_sectors(rq) > max_sectors) {
1143 /*
1144 * SCSI device does not have a good way to return if
1145 * Write Same/Zero is actually supported. If a device rejects
1146 * a non-read/write command (discard, write same, etc.) the
1147 * low-level device driver will set the relevant queue limit to
1148 * 0 to prevent blk-lib from issuing more of the offending
1149 * operations. Commands queued prior to the queue limit being
1150 * reset need to be completed with BLK_STS_NOTSUPP to avoid I/O
1151 * errors being propagated to upper layers.
1152 */
1153 if (max_sectors == 0)
1154 return BLK_STS_NOTSUPP;
1155
John Pittman61939b12019-05-23 17:49:39 -04001156 printk(KERN_ERR "%s: over max size limit. (%u > %u)\n",
Ritika Srivastava8327cce52020-09-01 13:17:31 -07001157 __func__, blk_rq_sectors(rq), max_sectors);
Ritika Srivastava143d2602020-09-01 13:17:30 -07001158 return BLK_STS_IOERR;
Kiyoshi Ueda82124d62008-09-18 10:45:38 -04001159 }
1160
1161 /*
1162 * queue's settings related to segment counting like q->bounce_pfn
1163 * may differ from those of other stacking queues.
1164 * Recalculate it to check the request correctly against this queue's
1165 * limits.
1166 */
Christoph Hellwige9cd19c2019-06-06 12:29:02 +02001167 rq->nr_phys_segments = blk_recalc_rq_segments(rq);
Martin K. Petersen8a783622010-02-26 00:20:39 -05001168 if (rq->nr_phys_segments > queue_max_segments(q)) {
John Pittman61939b12019-05-23 17:49:39 -04001169 printk(KERN_ERR "%s: over max segments limit. (%hu > %hu)\n",
1170 __func__, rq->nr_phys_segments, queue_max_segments(q));
Ritika Srivastava143d2602020-09-01 13:17:30 -07001171 return BLK_STS_IOERR;
Kiyoshi Ueda82124d62008-09-18 10:45:38 -04001172 }
1173
Ritika Srivastava143d2602020-09-01 13:17:30 -07001174 return BLK_STS_OK;
Kiyoshi Ueda82124d62008-09-18 10:45:38 -04001175}
Kiyoshi Ueda82124d62008-09-18 10:45:38 -04001176
1177/**
1178 * blk_insert_cloned_request - Helper for stacking drivers to submit a request
1179 * @q: the queue to submit the request
1180 * @rq: the request being queued
1181 */
Christoph Hellwig2a842ac2017-06-03 09:38:04 +02001182blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *rq)
Kiyoshi Ueda82124d62008-09-18 10:45:38 -04001183{
Ritika Srivastava8327cce52020-09-01 13:17:31 -07001184 blk_status_t ret;
1185
1186 ret = blk_cloned_rq_check_limits(q, rq);
1187 if (ret != BLK_STS_OK)
1188 return ret;
Kiyoshi Ueda82124d62008-09-18 10:45:38 -04001189
Akinobu Mitab2c9cd32011-07-26 16:09:03 -07001190 if (rq->rq_disk &&
Christoph Hellwig8446fe92020-11-24 09:36:54 +01001191 should_fail_request(rq->rq_disk->part0, blk_rq_bytes(rq)))
Christoph Hellwig2a842ac2017-06-03 09:38:04 +02001192 return BLK_STS_IOERR;
Kiyoshi Ueda82124d62008-09-18 10:45:38 -04001193
Satya Tangiralaa892c8d2020-05-14 00:37:18 +00001194 if (blk_crypto_insert_cloned_request(rq))
1195 return BLK_STS_IOERR;
1196
Jens Axboea1ce35f2018-10-29 10:23:51 -06001197 if (blk_queue_io_stat(q))
Konstantin Khlebnikovb5af37a2020-05-27 07:24:16 +02001198 blk_account_io_start(rq);
Kiyoshi Ueda82124d62008-09-18 10:45:38 -04001199
1200 /*
Jens Axboea1ce35f2018-10-29 10:23:51 -06001201 * Since we have a scheduler attached on the top device,
1202 * bypass a potential scheduler on the bottom device for
1203 * insert.
Kiyoshi Ueda82124d62008-09-18 10:45:38 -04001204 */
Bart Van Asschefd9c40f2019-04-04 10:08:43 -07001205 return blk_mq_request_issue_directly(rq, true);
Kiyoshi Ueda82124d62008-09-18 10:45:38 -04001206}
1207EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
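
/*
 * Illustrative sketch (placeholder names, not compiled here): a request
 * stacking driver such as request-based dm typically dispatches a clone it
 * set up with blk_rq_prep_clone() and reacts to the returned status, e.g.
 * requeueing on BLK_STS_RESOURCE/BLK_STS_DEV_RESOURCE and failing the
 * original request on other errors.
 *
 *	blk_status_t ret = blk_insert_cloned_request(clone->q, clone);
 *
 *	switch (ret) {
 *	case BLK_STS_OK:
 *		break;
 *	case BLK_STS_RESOURCE:
 *	case BLK_STS_DEV_RESOURCE:
 *		my_requeue_original(clone);
 *		break;
 *	default:
 *		my_fail_original(clone, ret);
 *		break;
 *	}
 *
 * my_requeue_original() and my_fail_original() stand in for whatever
 * requeue and failure handling the stacking driver implements.
 */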
1208
Tejun Heo80a761f2009-07-03 17:48:17 +09001209/**
1210 * blk_rq_err_bytes - determine number of bytes till the next failure boundary
1211 * @rq: request to examine
1212 *
1213 * Description:
1214 * A request could be a merge of IOs which require different failure
1215 * handling. This function determines the number of bytes which
1216 * can be failed from the beginning of the request without
1217 * crossing into an area which needs to be retried further.
1218 *
1219 * Return:
1220 * The number of bytes to fail.
Tejun Heo80a761f2009-07-03 17:48:17 +09001221 */
1222unsigned int blk_rq_err_bytes(const struct request *rq)
1223{
1224 unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
1225 unsigned int bytes = 0;
1226 struct bio *bio;
1227
Christoph Hellwige8064022016-10-20 15:12:13 +02001228 if (!(rq->rq_flags & RQF_MIXED_MERGE))
Tejun Heo80a761f2009-07-03 17:48:17 +09001229 return blk_rq_bytes(rq);
1230
1231 /*
1232 * Currently the only 'mixing' which can happen is between
1233 * different failfast types. We can safely fail portions
1234 * which have all the failfast bits that the first one has -
1235 * the ones which are at least as eager to fail as the first
1236 * one.
1237 */
1238 for (bio = rq->bio; bio; bio = bio->bi_next) {
Jens Axboe1eff9d32016-08-05 15:35:16 -06001239 if ((bio->bi_opf & ff) != ff)
Tejun Heo80a761f2009-07-03 17:48:17 +09001240 break;
Kent Overstreet4f024f32013-10-11 15:44:27 -07001241 bytes += bio->bi_iter.bi_size;
Tejun Heo80a761f2009-07-03 17:48:17 +09001242 }
1243
1244 /* this could lead to infinite loop */
1245 BUG_ON(blk_rq_bytes(rq) && !bytes);
1246 return bytes;
1247}
1248EXPORT_SYMBOL_GPL(blk_rq_err_bytes);
1249
Christoph Hellwig8446fe92020-11-24 09:36:54 +01001250static void update_io_ticks(struct block_device *part, unsigned long now,
1251 bool end)
Christoph Hellwig9123bf62020-05-27 07:24:13 +02001252{
1253 unsigned long stamp;
1254again:
Christoph Hellwig8446fe92020-11-24 09:36:54 +01001255 stamp = READ_ONCE(part->bd_stamp);
Christoph Hellwig9123bf62020-05-27 07:24:13 +02001256 if (unlikely(stamp != now)) {
Christoph Hellwig8446fe92020-11-24 09:36:54 +01001257 if (likely(cmpxchg(&part->bd_stamp, stamp, now) == stamp))
Christoph Hellwig9123bf62020-05-27 07:24:13 +02001258 __part_stat_add(part, io_ticks, end ? now - stamp : 1);
1259 }
Christoph Hellwig8446fe92020-11-24 09:36:54 +01001260 if (part->bd_partno) {
1261 part = bdev_whole(part);
Christoph Hellwig9123bf62020-05-27 07:24:13 +02001262 goto again;
1263 }
1264}
1265
Christoph Hellwigf1394b792020-05-13 12:49:32 +02001266static void blk_account_io_completion(struct request *req, unsigned int bytes)
Jens Axboebc58ba92009-01-23 10:54:44 +01001267{
Logan Gunthorpeecb61862019-12-10 11:47:04 -07001268 if (req->part && blk_do_io_stat(req)) {
Michael Callahanddcf35d2018-07-18 04:47:39 -07001269 const int sgrp = op_stat_group(req_op(req));
Jens Axboebc58ba92009-01-23 10:54:44 +01001270
Mike Snitzer112f1582018-12-06 11:41:18 -05001271 part_stat_lock();
Christoph Hellwig8446fe92020-11-24 09:36:54 +01001272 part_stat_add(req->part, sectors[sgrp], bytes >> 9);
Jens Axboebc58ba92009-01-23 10:54:44 +01001273 part_stat_unlock();
1274 }
1275}
1276
Omar Sandoval522a7772018-05-09 02:08:53 -07001277void blk_account_io_done(struct request *req, u64 now)
Jens Axboebc58ba92009-01-23 10:54:44 +01001278{
Jens Axboebc58ba92009-01-23 10:54:44 +01001279 /*
Tejun Heodd4c1332010-09-03 11:56:16 +02001280 * Account IO completion. flush_rq isn't accounted as a
1281 * normal IO on either queueing or completion. Accounting the
1282 * containing request is enough.
Jens Axboebc58ba92009-01-23 10:54:44 +01001283 */
Logan Gunthorpeecb61862019-12-10 11:47:04 -07001284 if (req->part && blk_do_io_stat(req) &&
1285 !(req->rq_flags & RQF_FLUSH_SEQ)) {
Michael Callahanddcf35d2018-07-18 04:47:39 -07001286 const int sgrp = op_stat_group(req_op(req));
Jens Axboebc58ba92009-01-23 10:54:44 +01001287
Mike Snitzer112f1582018-12-06 11:41:18 -05001288 part_stat_lock();
Christoph Hellwig8446fe92020-11-24 09:36:54 +01001289 update_io_ticks(req->part, jiffies, true);
1290 part_stat_inc(req->part, ios[sgrp]);
1291 part_stat_add(req->part, nsecs[sgrp], now - req->start_time_ns);
Christoph Hellwig524f9ff2020-05-27 07:24:19 +02001292 part_stat_unlock();
Jens Axboebc58ba92009-01-23 10:54:44 +01001293 }
1294}
1295
Konstantin Khlebnikovb5af37a2020-05-27 07:24:16 +02001296void blk_account_io_start(struct request *rq)
Jens Axboe320ae512013-10-24 09:20:05 +01001297{
Jens Axboe320ae512013-10-24 09:20:05 +01001298 if (!blk_do_io_stat(rq))
1299 return;
1300
Christoph Hellwig0b6e5222021-01-24 11:02:38 +01001301 /* passthrough requests can hold bios that do not have ->bi_bdev set */
1302 if (rq->bio && rq->bio->bi_bdev)
1303 rq->part = rq->bio->bi_bdev;
1304 else
1305 rq->part = rq->rq_disk->part0;
Christoph Hellwig524f9ff2020-05-27 07:24:19 +02001306
Mike Snitzer112f1582018-12-06 11:41:18 -05001307 part_stat_lock();
Christoph Hellwig76268f32020-05-13 12:49:34 +02001308 update_io_ticks(rq->part, jiffies, false);
Jens Axboe320ae512013-10-24 09:20:05 +01001309 part_stat_unlock();
1310}
1311
Christoph Hellwig8446fe92020-11-24 09:36:54 +01001312static unsigned long __part_start_io_acct(struct block_device *part,
Song Liu7b264102020-08-31 15:27:23 -07001313 unsigned int sectors, unsigned int op)
Christoph Hellwig956d5102020-05-27 07:24:04 +02001314{
Christoph Hellwig956d5102020-05-27 07:24:04 +02001315 const int sgrp = op_stat_group(op);
1316 unsigned long now = READ_ONCE(jiffies);
1317
1318 part_stat_lock();
1319 update_io_ticks(part, now, false);
1320 part_stat_inc(part, ios[sgrp]);
1321 part_stat_add(part, sectors[sgrp], sectors);
1322 part_stat_local_inc(part, in_flight[op_is_write(op)]);
1323 part_stat_unlock();
1324
1325 return now;
1326}
Song Liu7b264102020-08-31 15:27:23 -07001327
Christoph Hellwig99dfc432021-01-24 11:02:37 +01001328/**
1329 * bio_start_io_acct - start I/O accounting for bio based drivers
1330 * @bio: bio to start account for
1331 *
1332 * Returns the start time that should be passed back to bio_end_io_acct().
1333 */
1334unsigned long bio_start_io_acct(struct bio *bio)
Song Liu7b264102020-08-31 15:27:23 -07001335{
Christoph Hellwig99dfc432021-01-24 11:02:37 +01001336 return __part_start_io_acct(bio->bi_bdev, bio_sectors(bio), bio_op(bio));
Song Liu7b264102020-08-31 15:27:23 -07001337}
Christoph Hellwig99dfc432021-01-24 11:02:37 +01001338EXPORT_SYMBOL_GPL(bio_start_io_acct);
Song Liu7b264102020-08-31 15:27:23 -07001339
1340unsigned long disk_start_io_acct(struct gendisk *disk, unsigned int sectors,
1341 unsigned int op)
1342{
Christoph Hellwig8446fe92020-11-24 09:36:54 +01001343 return __part_start_io_acct(disk->part0, sectors, op);
Song Liu7b264102020-08-31 15:27:23 -07001344}
Christoph Hellwig956d5102020-05-27 07:24:04 +02001345EXPORT_SYMBOL(disk_start_io_acct);
1346
Christoph Hellwig8446fe92020-11-24 09:36:54 +01001347static void __part_end_io_acct(struct block_device *part, unsigned int op,
Song Liu7b264102020-08-31 15:27:23 -07001348 unsigned long start_time)
Christoph Hellwig956d5102020-05-27 07:24:04 +02001349{
Christoph Hellwig956d5102020-05-27 07:24:04 +02001350 const int sgrp = op_stat_group(op);
1351 unsigned long now = READ_ONCE(jiffies);
1352 unsigned long duration = now - start_time;
1353
1354 part_stat_lock();
1355 update_io_ticks(part, now, true);
1356 part_stat_add(part, nsecs[sgrp], jiffies_to_nsecs(duration));
1357 part_stat_local_dec(part, in_flight[op_is_write(op)]);
1358 part_stat_unlock();
1359}
Song Liu7b264102020-08-31 15:27:23 -07001360
Christoph Hellwig99dfc432021-01-24 11:02:37 +01001361void bio_end_io_acct_remapped(struct bio *bio, unsigned long start_time,
1362 struct block_device *orig_bdev)
Song Liu7b264102020-08-31 15:27:23 -07001363{
Christoph Hellwig99dfc432021-01-24 11:02:37 +01001364 __part_end_io_acct(orig_bdev, bio_op(bio), start_time);
Song Liu7b264102020-08-31 15:27:23 -07001365}
Christoph Hellwig99dfc432021-01-24 11:02:37 +01001366EXPORT_SYMBOL_GPL(bio_end_io_acct_remapped);
Song Liu7b264102020-08-31 15:27:23 -07001367
1368void disk_end_io_acct(struct gendisk *disk, unsigned int op,
1369 unsigned long start_time)
1370{
Christoph Hellwig8446fe92020-11-24 09:36:54 +01001371 __part_end_io_acct(disk->part0, op, start_time);
Song Liu7b264102020-08-31 15:27:23 -07001372}
Christoph Hellwig956d5102020-05-27 07:24:04 +02001373EXPORT_SYMBOL(disk_end_io_acct);
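
/*
 * Illustrative sketch (not compiled here): a bio based driver brackets its
 * handling of a bio with these helpers. bio_end_io_acct() is the inline
 * wrapper around bio_end_io_acct_remapped() for drivers that do not remap
 * the bio to a different block device; process_bio() is a placeholder for
 * the driver's own work.
 *
 *	unsigned long start = bio_start_io_acct(bio);
 *
 *	process_bio(bio);
 *
 *	bio_end_io_acct(bio, start);
 *	bio_endio(bio);
 */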
1374
Christoph Hellwigef71de82017-11-02 21:29:51 +03001375/*
1376 * Steal bios from a request and add them to a bio list.
1377 * The request must not have been partially completed before.
1378 */
1379void blk_steal_bios(struct bio_list *list, struct request *rq)
1380{
1381 if (rq->bio) {
1382 if (list->tail)
1383 list->tail->bi_next = rq->bio;
1384 else
1385 list->head = rq->bio;
1386 list->tail = rq->biotail;
1387
1388 rq->bio = NULL;
1389 rq->biotail = NULL;
1390 }
1391
1392 rq->__data_len = 0;
1393}
1394EXPORT_SYMBOL_GPL(blk_steal_bios);
1395
Tejun Heo9934c8c2009-05-08 11:54:16 +09001396/**
Tejun Heo2e60e022009-04-23 11:05:18 +09001397 * blk_update_request - Special helper function for request stacking drivers
Randy Dunlap8ebf9752009-06-11 20:00:41 -07001398 * @req: the request being processed
Christoph Hellwig2a842ac2017-06-03 09:38:04 +02001399 * @error: block status code
Randy Dunlap8ebf9752009-06-11 20:00:41 -07001400 * @nr_bytes: number of bytes to complete @req
Kiyoshi Ueda3bcddea2007-12-11 17:52:28 -05001401 *
1402 * Description:
Randy Dunlap8ebf9752009-06-11 20:00:41 -07001403 * Ends I/O on a number of bytes attached to @req, but doesn't complete
1404 * the request structure even if @req has no leftover bytes.
1405 * If @req has leftover bytes, sets it up for the next range of segments.
Tejun Heo2e60e022009-04-23 11:05:18 +09001406 *
1407 * This special helper function is only for request stacking drivers
1408 * (e.g. request-based dm) so that they can handle partial completion.
Pavel Begunkov3a211b72019-05-23 18:43:11 +03001409 * Actual device drivers should use blk_mq_end_request instead.
Tejun Heo2e60e022009-04-23 11:05:18 +09001410 *
1411 * Passing the result of blk_rq_bytes() as @nr_bytes guarantees
1412 * %false return from this function.
Kiyoshi Ueda3bcddea2007-12-11 17:52:28 -05001413 *
Bart Van Assche1954e9a2018-06-27 13:09:05 -07001414 * Note:
1415 * The RQF_SPECIAL_PAYLOAD flag is ignored on purpose in both
1416 * blk_rq_bytes() and in blk_update_request().
1417 *
Kiyoshi Ueda3bcddea2007-12-11 17:52:28 -05001418 * Return:
Tejun Heo2e60e022009-04-23 11:05:18 +09001419 * %false - this request doesn't have any more data
1420 * %true - this request has more data
Kiyoshi Ueda3bcddea2007-12-11 17:52:28 -05001421 **/
Christoph Hellwig2a842ac2017-06-03 09:38:04 +02001422bool blk_update_request(struct request *req, blk_status_t error,
1423 unsigned int nr_bytes)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001424{
Kent Overstreetf79ea412012-09-20 16:38:30 -07001425 int total_bytes;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001426
Christoph Hellwig2a842ac2017-06-03 09:38:04 +02001427 trace_block_rq_complete(req, blk_status_to_errno(error), nr_bytes);
Hannes Reinecke4a0efdc2014-10-01 14:32:31 +02001428
Tejun Heo2e60e022009-04-23 11:05:18 +09001429 if (!req->bio)
1430 return false;
1431
Max Gurtovoy54d4e6a2019-09-16 18:44:29 +03001432#ifdef CONFIG_BLK_DEV_INTEGRITY
1433 if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ &&
1434 error == BLK_STS_OK)
1435 req->q->integrity.profile->complete_fn(req, nr_bytes);
1436#endif
1437
Christoph Hellwig2a842ac2017-06-03 09:38:04 +02001438 if (unlikely(error && !blk_rq_is_passthrough(req) &&
1439 !(req->rq_flags & RQF_QUIET)))
Christoph Hellwig178cc592019-06-20 10:59:15 -07001440 print_req_error(req, error, __func__);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001441
Jens Axboebc58ba92009-01-23 10:54:44 +01001442 blk_account_io_completion(req, nr_bytes);
Jens Axboed72d9042005-11-01 08:35:42 +01001443
Kent Overstreetf79ea412012-09-20 16:38:30 -07001444 total_bytes = 0;
1445 while (req->bio) {
1446 struct bio *bio = req->bio;
Kent Overstreet4f024f32013-10-11 15:44:27 -07001447 unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001448
Bart Van Assche9c24c102018-06-19 10:26:40 -07001449 if (bio_bytes == bio->bi_iter.bi_size)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001450 req->bio = bio->bi_next;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001451
NeilBrownfbbaf702017-04-07 09:40:52 -06001452 /* Completion has already been traced */
1453 bio_clear_flag(bio, BIO_TRACE_COMPLETION);
Kent Overstreetf79ea412012-09-20 16:38:30 -07001454 req_bio_endio(req, bio, bio_bytes, error);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001455
Kent Overstreetf79ea412012-09-20 16:38:30 -07001456 total_bytes += bio_bytes;
1457 nr_bytes -= bio_bytes;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001458
Kent Overstreetf79ea412012-09-20 16:38:30 -07001459 if (!nr_bytes)
1460 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001461 }
1462
1463 /*
1464 * completely done
1465 */
Tejun Heo2e60e022009-04-23 11:05:18 +09001466 if (!req->bio) {
1467 /*
1468 * Reset counters so that the request stacking driver
1469 * can find how many bytes remain in the request
1470 * later.
1471 */
Tejun Heoa2dec7b2009-05-07 22:24:44 +09001472 req->__data_len = 0;
Tejun Heo2e60e022009-04-23 11:05:18 +09001473 return false;
1474 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001475
Tejun Heoa2dec7b2009-05-07 22:24:44 +09001476 req->__data_len -= total_bytes;
Tejun Heo2e46e8b2009-05-07 22:24:41 +09001477
1478 /* update sector only for requests with clear definition of sector */
Christoph Hellwig57292b52017-01-31 16:57:29 +01001479 if (!blk_rq_is_passthrough(req))
Tejun Heoa2dec7b2009-05-07 22:24:44 +09001480 req->__sector += total_bytes >> 9;
Tejun Heo2e46e8b2009-05-07 22:24:41 +09001481
Tejun Heo80a761f2009-07-03 17:48:17 +09001482 /* mixed attributes always follow the first bio */
Christoph Hellwige8064022016-10-20 15:12:13 +02001483 if (req->rq_flags & RQF_MIXED_MERGE) {
Tejun Heo80a761f2009-07-03 17:48:17 +09001484 req->cmd_flags &= ~REQ_FAILFAST_MASK;
Jens Axboe1eff9d32016-08-05 15:35:16 -06001485 req->cmd_flags |= req->bio->bi_opf & REQ_FAILFAST_MASK;
Tejun Heo80a761f2009-07-03 17:48:17 +09001486 }
1487
Christoph Hellwiged6565e2017-05-11 12:34:38 +02001488 if (!(req->rq_flags & RQF_SPECIAL_PAYLOAD)) {
1489 /*
1490 * If total number of sectors is less than the first segment
1491 * size, something has gone terribly wrong.
1492 */
1493 if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) {
1494 blk_dump_rq_flags(req, "request botched");
1495 req->__data_len = blk_rq_cur_bytes(req);
1496 }
Tejun Heo2e46e8b2009-05-07 22:24:41 +09001497
Christoph Hellwiged6565e2017-05-11 12:34:38 +02001498 /* recalculate the number of segments */
Christoph Hellwige9cd19c2019-06-06 12:29:02 +02001499 req->nr_phys_segments = blk_recalc_rq_segments(req);
Christoph Hellwiged6565e2017-05-11 12:34:38 +02001500 }
Tejun Heo2e46e8b2009-05-07 22:24:41 +09001501
Tejun Heo2e60e022009-04-23 11:05:18 +09001502 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001503}
Tejun Heo2e60e022009-04-23 11:05:18 +09001504EXPORT_SYMBOL_GPL(blk_update_request);
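
/*
 * Illustrative sketch (not compiled here) of partial completion in a request
 * stacking driver: rq is the original request and done is the number of
 * bytes the lower device finished.
 *
 *	if (blk_update_request(rq, error, done))
 *		return;
 *	blk_mq_end_request(rq, error);
 *
 * A %true return means bios remain attached to rq, so the driver keeps the
 * request around for the remaining data; only on %false is the request
 * fully completed, here via blk_mq_end_request().
 */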
Linus Torvalds1da177e2005-04-16 15:20:36 -07001505
Ilya Loginov2d4dc892009-11-26 09:16:19 +01001506#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
1507/**
1508 * rq_flush_dcache_pages - Helper function to flush all pages in a request
1509 * @rq: the request to be flushed
1510 *
1511 * Description:
1512 * Flush all pages in @rq.
1513 */
1514void rq_flush_dcache_pages(struct request *rq)
1515{
1516 struct req_iterator iter;
Kent Overstreet79886132013-11-23 17:19:00 -08001517 struct bio_vec bvec;
Ilya Loginov2d4dc892009-11-26 09:16:19 +01001518
1519 rq_for_each_segment(bvec, rq, iter)
Kent Overstreet79886132013-11-23 17:19:00 -08001520 flush_dcache_page(bvec.bv_page);
Ilya Loginov2d4dc892009-11-26 09:16:19 +01001521}
1522EXPORT_SYMBOL_GPL(rq_flush_dcache_pages);
1523#endif
1524
Kiyoshi Uedaef9e3fa2008-10-01 16:12:15 +02001525/**
1526 * blk_lld_busy - Check if underlying low-level drivers of a device are busy
1527 * @q : the queue of the device being checked
1528 *
1529 * Description:
1530 * Check if underlying low-level drivers of a device are busy.
1531 * If the drivers want to export their busy state, they must set their own
1532 * exporting function using blk_queue_lld_busy() first.
1533 *
1534 * Basically, this function is used only by request stacking drivers
1535 * to stop dispatching requests to underlying devices when underlying
1536 * devices are busy. This behavior allows more I/O merging on the queue
1537 * of the request stacking driver and prevents I/O throughput regression
1538 * on burst I/O load.
1539 *
1540 * Return:
1541 * 0 - Not busy (The request stacking driver should dispatch request)
1542 * 1 - Busy (The request stacking driver should stop dispatching request)
1543 */
1544int blk_lld_busy(struct request_queue *q)
1545{
Jens Axboe344e9ff2018-11-15 12:22:51 -07001546 if (queue_is_mq(q) && q->mq_ops->busy)
Jens Axboe9ba20522018-10-29 10:15:10 -06001547 return q->mq_ops->busy(q);
Kiyoshi Uedaef9e3fa2008-10-01 16:12:15 +02001548
1549 return 0;
1550}
1551EXPORT_SYMBOL_GPL(blk_lld_busy);
1552
Mike Snitzer78d8e582015-06-26 10:01:13 -04001553/**
1554 * blk_rq_unprep_clone - Helper function to free all bios in a cloned request
1555 * @rq: the clone request to be cleaned up
1556 *
1557 * Description:
1558 * Free all bios in @rq for a cloned request.
1559 */
1560void blk_rq_unprep_clone(struct request *rq)
1561{
1562 struct bio *bio;
1563
1564 while ((bio = rq->bio) != NULL) {
1565 rq->bio = bio->bi_next;
1566
1567 bio_put(bio);
1568 }
1569}
1570EXPORT_SYMBOL_GPL(blk_rq_unprep_clone);
1571
Mike Snitzer78d8e582015-06-26 10:01:13 -04001572/**
1573 * blk_rq_prep_clone - Helper function to setup clone request
1574 * @rq: the request to be setup
1575 * @rq_src: original request to be cloned
1576 * @bs: bio_set that bios for clone are allocated from
1577 * @gfp_mask: memory allocation mask for bio
1578 * @bio_ctr: setup function to be called for each clone bio.
1579 * Returns %0 for success, non %0 for failure.
1580 * @data: private data to be passed to @bio_ctr
1581 *
1582 * Description:
1583 * Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq.
Mike Snitzer78d8e582015-06-26 10:01:13 -04001584 * Also, pages which the original bios are pointing to are not copied
1585 * and the cloned bios just point to the same pages.
1586 * So cloned bios must be completed before the original bios, which means
1587 * the caller must complete @rq before @rq_src.
1588 */
1589int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
1590 struct bio_set *bs, gfp_t gfp_mask,
1591 int (*bio_ctr)(struct bio *, struct bio *, void *),
1592 void *data)
1593{
1594 struct bio *bio, *bio_src;
1595
1596 if (!bs)
Kent Overstreetf4f81542018-05-08 21:33:52 -04001597 bs = &fs_bio_set;
Mike Snitzer78d8e582015-06-26 10:01:13 -04001598
1599 __rq_for_each_bio(bio_src, rq_src) {
1600 bio = bio_clone_fast(bio_src, gfp_mask, bs);
1601 if (!bio)
1602 goto free_and_out;
1603
1604 if (bio_ctr && bio_ctr(bio, bio_src, data))
1605 goto free_and_out;
1606
1607 if (rq->bio) {
1608 rq->biotail->bi_next = bio;
1609 rq->biotail = bio;
Eric Biggers93f221a2020-09-15 20:53:14 -07001610 } else {
Mike Snitzer78d8e582015-06-26 10:01:13 -04001611 rq->bio = rq->biotail = bio;
Eric Biggers93f221a2020-09-15 20:53:14 -07001612 }
1613 bio = NULL;
Mike Snitzer78d8e582015-06-26 10:01:13 -04001614 }
1615
Guoqing Jiang361301a2020-03-09 22:41:36 +01001616 /* Copy attributes of the original request to the clone request. */
1617 rq->__sector = blk_rq_pos(rq_src);
1618 rq->__data_len = blk_rq_bytes(rq_src);
1619 if (rq_src->rq_flags & RQF_SPECIAL_PAYLOAD) {
1620 rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
1621 rq->special_vec = rq_src->special_vec;
1622 }
1623 rq->nr_phys_segments = rq_src->nr_phys_segments;
1624 rq->ioprio = rq_src->ioprio;
Mike Snitzer78d8e582015-06-26 10:01:13 -04001625
Eric Biggers93f221a2020-09-15 20:53:14 -07001626 if (rq->bio && blk_crypto_rq_bio_prep(rq, rq->bio, gfp_mask) < 0)
1627 goto free_and_out;
Mike Snitzer78d8e582015-06-26 10:01:13 -04001628
1629 return 0;
1630
1631free_and_out:
1632 if (bio)
1633 bio_put(bio);
1634 blk_rq_unprep_clone(rq);
1635
1636 return -ENOMEM;
Kiyoshi Uedab0fd2712009-06-11 13:10:16 +02001637}
1638EXPORT_SYMBOL_GPL(blk_rq_prep_clone);
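
/*
 * Illustrative sketch (placeholder names, not compiled here): a stacking
 * driver pairs blk_rq_prep_clone() with blk_rq_unprep_clone() and usually
 * dispatches the clone through blk_insert_cloned_request(); clone and bs are
 * the driver's own request and bio_set, rq is the original request.
 *
 *	if (blk_rq_prep_clone(clone, rq, bs, GFP_ATOMIC, NULL, NULL))
 *		goto requeue;
 *
 *	if (blk_insert_cloned_request(clone->q, clone) != BLK_STS_OK) {
 *		blk_rq_unprep_clone(clone);
 *		goto requeue;
 *	}
 *
 * As noted above, the clone must be completed before the original request,
 * since the cloned bios share the original bios' pages.
 */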
1639
Jens Axboe59c3d452014-04-08 09:15:35 -06001640int kblockd_schedule_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001641{
1642 return queue_work(kblockd_workqueue, work);
1643}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001644EXPORT_SYMBOL(kblockd_schedule_work);
1645
Jens Axboe818cd1c2017-04-10 09:54:55 -06001646int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork,
1647 unsigned long delay)
1648{
1649 return mod_delayed_work_on(cpu, kblockd_workqueue, dwork, delay);
1650}
1651EXPORT_SYMBOL(kblockd_mod_delayed_work_on);
1652
Suresh Jayaraman75df7132011-09-21 10:00:16 +02001653/**
1654 * blk_start_plug - initialize blk_plug and track it inside the task_struct
1655 * @plug: The &struct blk_plug that needs to be initialized
1656 *
1657 * Description:
Jeff Moyer40405852019-01-08 16:57:34 -05001658 * blk_start_plug() indicates to the block layer an intent by the caller
1659 * to submit multiple I/O requests in a batch. The block layer may use
1660 * this hint to defer submitting I/Os from the caller until blk_finish_plug()
1661 * is called. However, the block layer may choose to submit requests
1662 * before a call to blk_finish_plug() if the number of queued I/Os
1663 * exceeds %BLK_MAX_REQUEST_COUNT, or if the size of the I/O is larger than
1664 * %BLK_PLUG_FLUSH_SIZE. The queued I/Os may also be submitted early if
1665 * the task schedules (see below).
1666 *
Suresh Jayaraman75df7132011-09-21 10:00:16 +02001667 * Tracking blk_plug inside the task_struct will help with auto-flushing the
1668 * pending I/O should the task end up blocking between blk_start_plug() and
1669 * blk_finish_plug(). This is important from a performance perspective, but
1670 * also ensures that we don't deadlock. For instance, if the task is blocking
1671 * for a memory allocation, memory reclaim could end up wanting to free a
1672 * page belonging to that request that is currently residing in our private
1673 * plug. By flushing the pending I/O when the process goes to sleep, we avoid
1674 * this kind of deadlock.
1675 */
Jens Axboe73c10102011-03-08 13:19:51 +01001676void blk_start_plug(struct blk_plug *plug)
1677{
1678 struct task_struct *tsk = current;
1679
Shaohua Lidd6cf3e2015-05-08 10:51:28 -07001680 /*
1681 * If this is a nested plug, don't actually assign it.
1682 */
1683 if (tsk->plug)
1684 return;
1685
Jens Axboe320ae512013-10-24 09:20:05 +01001686 INIT_LIST_HEAD(&plug->mq_list);
NeilBrown048c9372011-04-18 09:52:22 +02001687 INIT_LIST_HEAD(&plug->cb_list);
Jens Axboe5f0ed772018-11-23 22:04:33 -07001688 plug->rq_count = 0;
Jens Axboece5b0092018-11-27 17:13:56 -07001689 plug->multiple_queues = false;
Jens Axboe5a473e82020-06-04 11:23:39 -06001690 plug->nowait = false;
Jens Axboe5f0ed772018-11-23 22:04:33 -07001691
Jens Axboe73c10102011-03-08 13:19:51 +01001692 /*
Shaohua Lidd6cf3e2015-05-08 10:51:28 -07001693 * Store ordering should not be needed here, since a potential
1694 * preempt will imply a full memory barrier
Jens Axboe73c10102011-03-08 13:19:51 +01001695 */
Shaohua Lidd6cf3e2015-05-08 10:51:28 -07001696 tsk->plug = plug;
Jens Axboe73c10102011-03-08 13:19:51 +01001697}
1698EXPORT_SYMBOL(blk_start_plug);
1699
NeilBrown74018dc2012-07-31 09:08:15 +02001700static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
NeilBrown048c9372011-04-18 09:52:22 +02001701{
1702 LIST_HEAD(callbacks);
1703
Shaohua Li2a7d5552012-07-31 09:08:15 +02001704 while (!list_empty(&plug->cb_list)) {
1705 list_splice_init(&plug->cb_list, &callbacks);
NeilBrown048c9372011-04-18 09:52:22 +02001706
Shaohua Li2a7d5552012-07-31 09:08:15 +02001707 while (!list_empty(&callbacks)) {
1708 struct blk_plug_cb *cb = list_first_entry(&callbacks,
NeilBrown048c9372011-04-18 09:52:22 +02001709 struct blk_plug_cb,
1710 list);
Shaohua Li2a7d5552012-07-31 09:08:15 +02001711 list_del(&cb->list);
NeilBrown74018dc2012-07-31 09:08:15 +02001712 cb->callback(cb, from_schedule);
Shaohua Li2a7d5552012-07-31 09:08:15 +02001713 }
NeilBrown048c9372011-04-18 09:52:22 +02001714 }
1715}
1716
NeilBrown9cbb1752012-07-31 09:08:14 +02001717struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, void *data,
1718 int size)
1719{
1720 struct blk_plug *plug = current->plug;
1721 struct blk_plug_cb *cb;
1722
1723 if (!plug)
1724 return NULL;
1725
1726 list_for_each_entry(cb, &plug->cb_list, list)
1727 if (cb->callback == unplug && cb->data == data)
1728 return cb;
1729
1730 /* Not currently on the callback list */
1731 BUG_ON(size < sizeof(*cb));
1732 cb = kzalloc(size, GFP_ATOMIC);
1733 if (cb) {
1734 cb->data = data;
1735 cb->callback = unplug;
1736 list_add(&cb->list, &plug->cb_list);
1737 }
1738 return cb;
1739}
1740EXPORT_SYMBOL(blk_check_plugged);
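
/*
 * Illustrative sketch (placeholder names, not compiled here): callers such
 * as md embed a struct blk_plug_cb at the start of their own structure and
 * let blk_check_plugged() allocate it the first time they see the current
 * plug; my_unplug() then runs when the plug is flushed. blk_check_plugged()
 * zero-allocates the structure, so the empty bio_list needs no extra init.
 *
 *	struct my_plug_cb {
 *		struct blk_plug_cb cb;
 *		struct bio_list pending;
 *	};
 *
 *	struct blk_plug_cb *cb;
 *
 *	cb = blk_check_plugged(my_unplug, data, sizeof(struct my_plug_cb));
 *	if (cb)
 *		bio_list_add(&container_of(cb, struct my_plug_cb, cb)->pending,
 *			     bio);
 *	else
 *		submit_bio(bio);
 */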
1741
Jens Axboe49cac012011-04-16 13:51:05 +02001742void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
Jens Axboe73c10102011-03-08 13:19:51 +01001743{
NeilBrown74018dc2012-07-31 09:08:15 +02001744 flush_plug_callbacks(plug, from_schedule);
Jens Axboe320ae512013-10-24 09:20:05 +01001745
1746 if (!list_empty(&plug->mq_list))
1747 blk_mq_flush_plug_list(plug, from_schedule);
Jens Axboe73c10102011-03-08 13:19:51 +01001748}
Jens Axboe73c10102011-03-08 13:19:51 +01001749
Jeff Moyer40405852019-01-08 16:57:34 -05001750/**
1751 * blk_finish_plug - mark the end of a batch of submitted I/O
1752 * @plug: The &struct blk_plug passed to blk_start_plug()
1753 *
1754 * Description:
1755 * Indicate that a batch of I/O submissions is complete. This function
1756 * must be paired with an initial call to blk_start_plug(). The intent
1757 * is to allow the block layer to optimize I/O submission. See the
1758 * documentation for blk_start_plug() for more information.
1759 */
Jens Axboe73c10102011-03-08 13:19:51 +01001760void blk_finish_plug(struct blk_plug *plug)
1761{
Shaohua Lidd6cf3e2015-05-08 10:51:28 -07001762 if (plug != current->plug)
1763 return;
Jens Axboef6603782011-04-15 15:49:07 +02001764 blk_flush_plug_list(plug, false);
Christoph Hellwig88b996c2011-04-15 15:20:10 +02001765
Shaohua Lidd6cf3e2015-05-08 10:51:28 -07001766 current->plug = NULL;
Jens Axboe73c10102011-03-08 13:19:51 +01001767}
1768EXPORT_SYMBOL(blk_finish_plug);
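
/*
 * Illustrative sketch (not compiled here): the usual on-stack plugging
 * pattern around a batch of submissions; next_bio() is a placeholder for
 * however the caller produces its bios.
 *
 *	struct blk_plug plug;
 *	struct bio *bio;
 *
 *	blk_start_plug(&plug);
 *	while ((bio = next_bio()) != NULL)
 *		submit_bio(bio);
 *	blk_finish_plug(&plug);
 *
 * Any I/O still held in the plug is flushed by blk_finish_plug(), or earlier
 * if the task schedules or the plug grows too large.
 */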
1769
Ming Lei71ac8602020-05-14 16:45:09 +08001770void blk_io_schedule(void)
1771{
1772 /* Prevent hang_check timer from firing at us during very long I/O */
1773 unsigned long timeout = sysctl_hung_task_timeout_secs * HZ / 2;
1774
1775 if (timeout)
1776 io_schedule_timeout(timeout);
1777 else
1778 io_schedule();
1779}
1780EXPORT_SYMBOL_GPL(blk_io_schedule);
1781
Linus Torvalds1da177e2005-04-16 15:20:36 -07001782int __init blk_dev_init(void)
1783{
Christoph Hellwigef295ec2016-10-28 08:48:16 -06001784 BUILD_BUG_ON(REQ_OP_LAST >= (1 << REQ_OP_BITS));
1785 BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
Pankaj Bharadiyac5936422019-12-09 10:31:43 -08001786 sizeof_field(struct request, cmd_flags));
Christoph Hellwigef295ec2016-10-28 08:48:16 -06001787 BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
Pankaj Bharadiyac5936422019-12-09 10:31:43 -08001788 sizeof_field(struct bio, bi_opf));
Nikanth Karthikesan9eb55b02009-04-27 14:53:54 +02001789
Tejun Heo89b90be2011-01-03 15:01:47 +01001790 /* used for unplugging and affects IO latency/throughput - HIGHPRI */
1791 kblockd_workqueue = alloc_workqueue("kblockd",
Matias Bjørling28747fc2014-06-11 23:43:54 +02001792 WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001793 if (!kblockd_workqueue)
1794 panic("Failed to create kblockd\n");
1795
Ilya Dryomovc2789bd2015-11-20 22:16:46 +01001796 blk_requestq_cachep = kmem_cache_create("request_queue",
Jens Axboe165125e2007-07-24 09:28:11 +02001797 sizeof(struct request_queue), 0, SLAB_PANIC, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001798
Omar Sandoval18fbda92017-01-31 14:53:20 -08001799 blk_debugfs_root = debugfs_create_dir("block", NULL);
Omar Sandoval18fbda92017-01-31 14:53:20 -08001800
Linus Torvalds1da177e2005-04-16 15:20:36 -07001801 return 0;
1802}