// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1994, Karl Keyte: Added support for disk statistics
 * Elevator latency, (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
 * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
 *	-  July2000
 * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - may 2001
 */

/*
 * This handles all read/write requests to block devices
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/blk-pm.h>
#include <linux/blk-integrity.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/kernel_stat.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/fault-inject.h>
#include <linux/list_sort.h>
#include <linux/delay.h>
#include <linux/ratelimit.h>
#include <linux/pm_runtime.h>
#include <linux/blk-cgroup.h>
#include <linux/t10-pi.h>
#include <linux/debugfs.h>
#include <linux/bpf.h>
#include <linux/psi.h>
#include <linux/sched/sysctl.h>
#include <linux/blk-crypto.h>

#define CREATE_TRACE_POINTS
#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-sched.h"
#include "blk-pm.h"
#include "blk-throttle.h"

struct dentry *blk_debugfs_root;

EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_split);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_insert);

DEFINE_IDA(blk_queue_ida);

/*
 * For queue allocation
 */
struct kmem_cache *blk_requestq_cachep;

/*
 * Controlling structure to kblockd
 */
static struct workqueue_struct *kblockd_workqueue;

/**
 * blk_queue_flag_set - atomically set a queue flag
 * @flag: flag to be set
 * @q: request queue
 */
void blk_queue_flag_set(unsigned int flag, struct request_queue *q)
{
	set_bit(flag, &q->queue_flags);
}
EXPORT_SYMBOL(blk_queue_flag_set);

/**
 * blk_queue_flag_clear - atomically clear a queue flag
 * @flag: flag to be cleared
 * @q: request queue
 */
void blk_queue_flag_clear(unsigned int flag, struct request_queue *q)
{
	clear_bit(flag, &q->queue_flags);
}
EXPORT_SYMBOL(blk_queue_flag_clear);

/**
 * blk_queue_flag_test_and_set - atomically test and set a queue flag
 * @flag: flag to be set
 * @q: request queue
 *
 * Returns the previous value of @flag - 0 if the flag was not set and 1 if
 * the flag was already set.
 */
bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q)
{
	return test_and_set_bit(flag, &q->queue_flags);
}
EXPORT_SYMBOL_GPL(blk_queue_flag_test_and_set);

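/*
 * Example (illustrative sketch, not part of the upstream file): a driver
 * would typically use these helpers right after setting up its queue to
 * describe the device.  "mydrv_config_queue" is a made-up name; the flags
 * shown (QUEUE_FLAG_NONROT, QUEUE_FLAG_ADD_RANDOM) are real queue flags.
 *
 *	static void mydrv_config_queue(struct request_queue *q)
 *	{
 *		blk_queue_flag_set(QUEUE_FLAG_NONROT, q);	// no seek penalty
 *		blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);	// don't feed entropy pool
 *	}
 */
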
void blk_rq_init(struct request_queue *q, struct request *rq)
{
	memset(rq, 0, sizeof(*rq));

	INIT_LIST_HEAD(&rq->queuelist);
	rq->q = q;
	rq->__sector = (sector_t) -1;
	INIT_HLIST_NODE(&rq->hash);
	RB_CLEAR_NODE(&rq->rb_node);
	rq->tag = BLK_MQ_NO_TAG;
	rq->internal_tag = BLK_MQ_NO_TAG;
	rq->start_time_ns = ktime_get_ns();
	rq->part = NULL;
	blk_crypto_rq_set_defaults(rq);
}
EXPORT_SYMBOL(blk_rq_init);

#define REQ_OP_NAME(name) [REQ_OP_##name] = #name
static const char *const blk_op_name[] = {
	REQ_OP_NAME(READ),
	REQ_OP_NAME(WRITE),
	REQ_OP_NAME(FLUSH),
	REQ_OP_NAME(DISCARD),
	REQ_OP_NAME(SECURE_ERASE),
	REQ_OP_NAME(ZONE_RESET),
	REQ_OP_NAME(ZONE_RESET_ALL),
	REQ_OP_NAME(ZONE_OPEN),
	REQ_OP_NAME(ZONE_CLOSE),
	REQ_OP_NAME(ZONE_FINISH),
	REQ_OP_NAME(ZONE_APPEND),
	REQ_OP_NAME(WRITE_SAME),
	REQ_OP_NAME(WRITE_ZEROES),
	REQ_OP_NAME(DRV_IN),
	REQ_OP_NAME(DRV_OUT),
};
#undef REQ_OP_NAME

/**
 * blk_op_str - Return the string name for a REQ_OP_XXX value.
 * @op: REQ_OP_XXX.
 *
 * Description: Centralized block layer function to convert a REQ_OP_XXX value
 * into its string representation. Useful when debugging and tracing a bio or
 * request. For an invalid REQ_OP_XXX it returns the string "UNKNOWN".
 */
inline const char *blk_op_str(unsigned int op)
{
	const char *op_str = "UNKNOWN";

	if (op < ARRAY_SIZE(blk_op_name) && blk_op_name[op])
		op_str = blk_op_name[op];

	return op_str;
}
EXPORT_SYMBOL_GPL(blk_op_str);

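/*
 * Example (illustrative, not part of the upstream file): blk_op_str() is
 * handy for ad-hoc debug output.  The "bio" variable here is hypothetical.
 *
 *	pr_debug("queuing %s of %u sectors at %llu\n",
 *		 blk_op_str(bio_op(bio)), bio_sectors(bio),
 *		 (unsigned long long)bio->bi_iter.bi_sector);
 */
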
static const struct {
	int		errno;
	const char	*name;
} blk_errors[] = {
	[BLK_STS_OK]		= { 0,		"" },
	[BLK_STS_NOTSUPP]	= { -EOPNOTSUPP, "operation not supported" },
	[BLK_STS_TIMEOUT]	= { -ETIMEDOUT,	"timeout" },
	[BLK_STS_NOSPC]		= { -ENOSPC,	"critical space allocation" },
	[BLK_STS_TRANSPORT]	= { -ENOLINK,	"recoverable transport" },
	[BLK_STS_TARGET]	= { -EREMOTEIO,	"critical target" },
	[BLK_STS_NEXUS]		= { -EBADE,	"critical nexus" },
	[BLK_STS_MEDIUM]	= { -ENODATA,	"critical medium" },
	[BLK_STS_PROTECTION]	= { -EILSEQ,	"protection" },
	[BLK_STS_RESOURCE]	= { -ENOMEM,	"kernel resource" },
	[BLK_STS_DEV_RESOURCE]	= { -EBUSY,	"device resource" },
	[BLK_STS_AGAIN]		= { -EAGAIN,	"nonblocking retry" },

	/* device mapper special case, should not leak out: */
	[BLK_STS_DM_REQUEUE]	= { -EREMCHG, "dm internal retry" },

	/* zone device specific errors */
	[BLK_STS_ZONE_OPEN_RESOURCE]	= { -ETOOMANYREFS, "open zones exceeded" },
	[BLK_STS_ZONE_ACTIVE_RESOURCE]	= { -EOVERFLOW, "active zones exceeded" },

	/* everything else not covered above: */
	[BLK_STS_IOERR]		= { -EIO,	"I/O" },
};

blk_status_t errno_to_blk_status(int errno)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(blk_errors); i++) {
		if (blk_errors[i].errno == errno)
			return (__force blk_status_t)i;
	}

	return BLK_STS_IOERR;
}
EXPORT_SYMBOL_GPL(errno_to_blk_status);

int blk_status_to_errno(blk_status_t status)
{
	int idx = (__force int)status;

	if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
		return -EIO;
	return blk_errors[idx].errno;
}
EXPORT_SYMBOL_GPL(blk_status_to_errno);

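/*
 * Example (illustrative, not part of the upstream file): a driver that gets a
 * plain errno back from a lower layer can translate it before completing a
 * request, and translate the other way for logging.  "ret" and "rq" are
 * hypothetical here.
 *
 *	blk_status_t sts = errno_to_blk_status(ret);	// e.g. -ENOMEM -> BLK_STS_RESOURCE
 *	int err = blk_status_to_errno(sts);		// back to -ENOMEM
 *	blk_mq_end_request(rq, sts);
 */
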
void blk_print_req_error(struct request *req, blk_status_t status)
{
	int idx = (__force int)status;

	if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
		return;

	printk_ratelimited(KERN_ERR
		"%s error, dev %s, sector %llu op 0x%x:(%s) flags 0x%x "
		"phys_seg %u prio class %u\n",
		blk_errors[idx].name,
		req->rq_disk ? req->rq_disk->disk_name : "?",
		blk_rq_pos(req), req_op(req), blk_op_str(req_op(req)),
		req->cmd_flags & ~REQ_OP_MASK,
		req->nr_phys_segments,
		IOPRIO_PRIO_CLASS(req->ioprio));
}

void blk_dump_rq_flags(struct request *rq, char *msg)
{
	printk(KERN_INFO "%s: dev %s: flags=%llx\n", msg,
		rq->rq_disk ? rq->rq_disk->disk_name : "?",
		(unsigned long long) rq->cmd_flags);

	printk(KERN_INFO "  sector %llu, nr/cnr %u/%u\n",
	       (unsigned long long)blk_rq_pos(rq),
	       blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
	printk(KERN_INFO "  bio %p, biotail %p, len %u\n",
	       rq->bio, rq->biotail, blk_rq_bytes(rq));
}
EXPORT_SYMBOL(blk_dump_rq_flags);

/**
 * blk_sync_queue - cancel any pending callbacks on a queue
 * @q: the queue
 *
 * Description:
 *     The block layer may perform asynchronous callback activity
 *     on a queue, such as calling the unplug function after a timeout.
 *     A block device may call blk_sync_queue to ensure that any
 *     such activity is cancelled, thus allowing it to release resources
 *     that the callbacks might use. The caller must already have made sure
 *     that its ->submit_bio will not re-add plugging prior to calling
 *     this function.
 *
 *     This function does not cancel any asynchronous activity arising
 *     out of elevator or throttling code. That would require elevator_exit()
 *     and blkcg_exit_queue() to be called with queue lock initialized.
 *
 */
void blk_sync_queue(struct request_queue *q)
{
	del_timer_sync(&q->timeout);
	cancel_work_sync(&q->timeout_work);
}
EXPORT_SYMBOL(blk_sync_queue);

/**
 * blk_set_pm_only - increment pm_only counter
 * @q: request queue pointer
 */
void blk_set_pm_only(struct request_queue *q)
{
	atomic_inc(&q->pm_only);
}
EXPORT_SYMBOL_GPL(blk_set_pm_only);

void blk_clear_pm_only(struct request_queue *q)
{
	int pm_only;

	pm_only = atomic_dec_return(&q->pm_only);
	WARN_ON_ONCE(pm_only < 0);
	if (pm_only == 0)
		wake_up_all(&q->mq_freeze_wq);
}
EXPORT_SYMBOL_GPL(blk_clear_pm_only);

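/*
 * Sketch (hedged, not part of the upstream file): the pm_only counter is
 * meant to let power-management code stop normal I/O while still admitting
 * requests marked BLK_MQ_REQ_PM (see blk_queue_enter() below).  A suspend or
 * resume path built on these helpers would pair them roughly like this; the
 * exact callers live in the SCSI / power-management code, this only shows
 * the pairing:
 *
 *	blk_set_pm_only(q);		// only PM-marked requests may enter
 *	... issue PM commands with BLK_MQ_REQ_PM ...
 *	blk_clear_pm_only(q);		// wake waiters, normal I/O resumes
 */
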
/**
 * blk_put_queue - decrement the request_queue refcount
 * @q: the request_queue structure to decrement the refcount for
 *
 * Decrements the refcount of the request_queue kobject. When this reaches 0
 * we'll have blk_release_queue() called.
 *
 * Context: Any context, but the last reference must not be dropped from
 * atomic context.
 */
void blk_put_queue(struct request_queue *q)
{
	kobject_put(&q->kobj);
}
EXPORT_SYMBOL(blk_put_queue);

void blk_queue_start_drain(struct request_queue *q)
{
	/*
	 * When the queue DYING flag is set, we need to block new requests
	 * from entering the queue, so we call blk_freeze_queue_start() to
	 * prevent I/O from crossing blk_queue_enter().
	 */
	blk_freeze_queue_start(q);
	if (queue_is_mq(q))
		blk_mq_wake_waiters(q);
	/* Make blk_queue_enter() reexamine the DYING flag. */
	wake_up_all(&q->mq_freeze_wq);
}

void blk_set_queue_dying(struct request_queue *q)
{
	blk_queue_flag_set(QUEUE_FLAG_DYING, q);
	blk_queue_start_drain(q);
}
EXPORT_SYMBOL_GPL(blk_set_queue_dying);

/**
 * blk_cleanup_queue - shutdown a request queue
 * @q: request queue to shutdown
 *
 * Mark @q DYING, drain all pending requests, mark @q DEAD, destroy and
 * put it.  All future requests will be failed immediately with -ENODEV.
 *
 * Context: can sleep
 */
void blk_cleanup_queue(struct request_queue *q)
{
	/* cannot be called from atomic context */
	might_sleep();

	WARN_ON_ONCE(blk_queue_registered(q));

	/* mark @q DYING, no new request or merges will be allowed afterwards */
	blk_set_queue_dying(q);

	blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);

	/*
	 * Drain all requests queued before DYING marking. Set the DEAD flag
	 * to prevent blk_mq_run_hw_queues() from accessing the hardware
	 * queues after draining has finished.
	 */
	blk_freeze_queue(q);

	blk_queue_flag_set(QUEUE_FLAG_DEAD, q);

	blk_sync_queue(q);
	if (queue_is_mq(q)) {
		blk_mq_cancel_work_sync(q);
		blk_mq_exit_queue(q);
	}

	/*
	 * In theory, the request pool of sched_tags belongs to the request
	 * queue.  However, the current implementation requires the tag_set
	 * for freeing requests, so free the pool now.
	 *
	 * The queue has become frozen, there can't be any in-queue requests,
	 * so it is safe to free requests now.
	 */
	mutex_lock(&q->sysfs_lock);
	if (q->elevator)
		blk_mq_sched_free_rqs(q);
	mutex_unlock(&q->sysfs_lock);

	percpu_ref_exit(&q->q_usage_counter);

	/* @q is and will stay empty, shutdown and put */
	blk_put_queue(q);
}
EXPORT_SYMBOL(blk_cleanup_queue);

/**
 * blk_queue_enter() - try to increase q->q_usage_counter
 * @q: request queue pointer
 * @flags: BLK_MQ_REQ_NOWAIT and/or BLK_MQ_REQ_PM
 */
int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
{
	const bool pm = flags & BLK_MQ_REQ_PM;

	while (!blk_try_enter_queue(q, pm)) {
		if (flags & BLK_MQ_REQ_NOWAIT)
			return -EBUSY;

		/*
		 * This is the read pair of the barrier in
		 * blk_freeze_queue_start(): we need to order reading the
		 * __PERCPU_REF_DEAD flag of .q_usage_counter against reading
		 * .mq_freeze_depth or the queue dying flag, otherwise the
		 * following wait may never return if the two reads are
		 * reordered.
		 */
		smp_rmb();
		wait_event(q->mq_freeze_wq,
			   (!q->mq_freeze_depth &&
			    blk_pm_resume_queue(pm, q)) ||
			   blk_queue_dying(q));
		if (blk_queue_dying(q))
			return -ENODEV;
	}

	return 0;
}

int __bio_queue_enter(struct request_queue *q, struct bio *bio)
{
	while (!blk_try_enter_queue(q, false)) {
		struct gendisk *disk = bio->bi_bdev->bd_disk;

		if (bio->bi_opf & REQ_NOWAIT) {
			if (test_bit(GD_DEAD, &disk->state))
				goto dead;
			bio_wouldblock_error(bio);
			return -EBUSY;
		}

		/*
		 * This is the read pair of the barrier in
		 * blk_freeze_queue_start(): we need to order reading the
		 * __PERCPU_REF_DEAD flag of .q_usage_counter against reading
		 * .mq_freeze_depth or the queue dying flag, otherwise the
		 * following wait may never return if the two reads are
		 * reordered.
		 */
		smp_rmb();
		wait_event(q->mq_freeze_wq,
			   (!q->mq_freeze_depth &&
			    blk_pm_resume_queue(false, q)) ||
			   test_bit(GD_DEAD, &disk->state));
		if (test_bit(GD_DEAD, &disk->state))
			goto dead;
	}

	return 0;
dead:
	bio_io_error(bio);
	return -ENODEV;
}

void blk_queue_exit(struct request_queue *q)
{
	percpu_ref_put(&q->q_usage_counter);
}

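/*
 * Example (illustrative, not part of the upstream file): code that needs to
 * touch a queue outside of the normal bio submission path brackets the
 * access with blk_queue_enter()/blk_queue_exit() so the queue cannot be
 * frozen or torn down underneath it.  "q" is a hypothetical queue pointer.
 *
 *	int ret = blk_queue_enter(q, 0);	// may sleep while the queue is frozen
 *	if (ret)
 *		return ret;			// -ENODEV once the queue is dying
 *	... safely use the queue ...
 *	blk_queue_exit(q);
 */
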
static void blk_queue_usage_counter_release(struct percpu_ref *ref)
{
	struct request_queue *q =
		container_of(ref, struct request_queue, q_usage_counter);

	wake_up_all(&q->mq_freeze_wq);
}

static void blk_rq_timed_out_timer(struct timer_list *t)
{
	struct request_queue *q = from_timer(q, t, timeout);

	kblockd_schedule_work(&q->timeout_work);
}

static void blk_timeout_work(struct work_struct *work)
{
}

struct request_queue *blk_alloc_queue(int node_id)
{
	struct request_queue *q;
	int ret;

	q = kmem_cache_alloc_node(blk_requestq_cachep,
				GFP_KERNEL | __GFP_ZERO, node_id);
	if (!q)
		return NULL;

	q->last_merge = NULL;

	q->id = ida_simple_get(&blk_queue_ida, 0, 0, GFP_KERNEL);
	if (q->id < 0)
		goto fail_q;

	ret = bioset_init(&q->bio_split, BIO_POOL_SIZE, 0, 0);
	if (ret)
		goto fail_id;

	q->stats = blk_alloc_queue_stats();
	if (!q->stats)
		goto fail_split;

	q->node = node_id;

	atomic_set(&q->nr_active_requests_shared_tags, 0);

	timer_setup(&q->timeout, blk_rq_timed_out_timer, 0);
	INIT_WORK(&q->timeout_work, blk_timeout_work);
	INIT_LIST_HEAD(&q->icq_list);
#ifdef CONFIG_BLK_CGROUP
	INIT_LIST_HEAD(&q->blkg_list);
#endif

	kobject_init(&q->kobj, &blk_queue_ktype);

	mutex_init(&q->debugfs_mutex);
	mutex_init(&q->sysfs_lock);
	mutex_init(&q->sysfs_dir_lock);
	spin_lock_init(&q->queue_lock);

	init_waitqueue_head(&q->mq_freeze_wq);
	mutex_init(&q->mq_freeze_lock);

	/*
	 * Init percpu_ref in atomic mode so that it's faster to shutdown.
	 * See blk_register_queue() for details.
	 */
	if (percpu_ref_init(&q->q_usage_counter,
				blk_queue_usage_counter_release,
				PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
		goto fail_stats;

	if (blkcg_init_queue(q))
		goto fail_ref;

	blk_queue_dma_alignment(q, 511);
	blk_set_default_limits(&q->limits);
	q->nr_requests = BLKDEV_DEFAULT_RQ;

	return q;

fail_ref:
	percpu_ref_exit(&q->q_usage_counter);
fail_stats:
	blk_free_queue_stats(q->stats);
fail_split:
	bioset_exit(&q->bio_split);
fail_id:
	ida_simple_remove(&blk_queue_ida, q->id);
fail_q:
	kmem_cache_free(blk_requestq_cachep, q);
	return NULL;
}

/**
 * blk_get_queue - increment the request_queue refcount
 * @q: the request_queue structure to increment the refcount for
 *
 * Increment the refcount of the request_queue kobject.
 *
 * Context: Any context.
 */
bool blk_get_queue(struct request_queue *q)
{
	if (likely(!blk_queue_dying(q))) {
		__blk_get_queue(q);
		return true;
	}

	return false;
}
EXPORT_SYMBOL(blk_get_queue);

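/*
 * Example (illustrative, not part of the upstream file): code that stashes a
 * request_queue pointer beyond the current call must pin it.  blk_get_queue()
 * fails once the queue is dying, so the result has to be checked.  "ctx" is a
 * hypothetical private structure.
 *
 *	if (!blk_get_queue(q))
 *		return -ENXIO;		// queue is going away, don't keep it
 *	ctx->q = q;
 *	...
 *	blk_put_queue(ctx->q);		// drop the reference when done
 */
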
static void handle_bad_sector(struct bio *bio, sector_t maxsector)
{
	char b[BDEVNAME_SIZE];

	pr_info_ratelimited("%s: attempt to access beyond end of device\n"
			    "%s: rw=%d, want=%llu, limit=%llu\n",
			    current->comm,
			    bio_devname(bio, b), bio->bi_opf,
			    bio_end_sector(bio), maxsector);
}

#ifdef CONFIG_FAIL_MAKE_REQUEST

static DECLARE_FAULT_ATTR(fail_make_request);

static int __init setup_fail_make_request(char *str)
{
	return setup_fault_attr(&fail_make_request, str);
}
__setup("fail_make_request=", setup_fail_make_request);

static bool should_fail_request(struct block_device *part, unsigned int bytes)
{
	return part->bd_make_it_fail && should_fail(&fail_make_request, bytes);
}

static int __init fail_make_request_debugfs(void)
{
	struct dentry *dir = fault_create_debugfs_attr("fail_make_request",
						NULL, &fail_make_request);

	return PTR_ERR_OR_ZERO(dir);
}

late_initcall(fail_make_request_debugfs);

#else /* CONFIG_FAIL_MAKE_REQUEST */

static inline bool should_fail_request(struct block_device *part,
					unsigned int bytes)
{
	return false;
}

#endif /* CONFIG_FAIL_MAKE_REQUEST */

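/*
 * Usage note (hedged sketch, not part of the upstream file): with
 * CONFIG_FAIL_MAKE_REQUEST enabled, the fault attribute above is typically
 * driven from the debugfs directory created by fail_make_request_debugfs()
 * together with the per-device "make-it-fail" sysfs attribute, roughly along
 * the lines documented in Documentation/fault-injection/fault-injection.rst,
 * e.g. (device name is an example):
 *
 *	echo 1   > /sys/block/vda/make-it-fail
 *	echo 10  > /sys/kernel/debug/fail_make_request/probability
 *	echo 100 > /sys/kernel/debug/fail_make_request/times
 *
 * A bio selected by should_fail_request() is then failed with -EIO via
 * should_fail_bio() below.
 */
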
static inline bool bio_check_ro(struct bio *bio)
{
	if (op_is_write(bio_op(bio)) && bdev_read_only(bio->bi_bdev)) {
		char b[BDEVNAME_SIZE];

		if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
			return false;

		WARN_ONCE(1,
		       "Trying to write to read-only block-device %s (partno %d)\n",
			bio_devname(bio, b), bio->bi_bdev->bd_partno);
		/* Older lvm-tools actually trigger this */
		return false;
	}

	return false;
}

static noinline int should_fail_bio(struct bio *bio)
{
	if (should_fail_request(bdev_whole(bio->bi_bdev), bio->bi_iter.bi_size))
		return -EIO;
	return 0;
}
ALLOW_ERROR_INJECTION(should_fail_bio, ERRNO);

/*
 * Check whether this bio extends beyond the end of the device or partition.
 * This may well happen - the kernel calls bread() without checking the size of
 * the device, e.g., when mounting a file system.
 */
static inline int bio_check_eod(struct bio *bio)
{
	sector_t maxsector = bdev_nr_sectors(bio->bi_bdev);
	unsigned int nr_sectors = bio_sectors(bio);

	if (nr_sectors && maxsector &&
	    (nr_sectors > maxsector ||
	     bio->bi_iter.bi_sector > maxsector - nr_sectors)) {
		handle_bad_sector(bio, maxsector);
		return -EIO;
	}
	return 0;
}

/*
 * Remap block n of partition p to block n+start(p) of the disk.
 */
static int blk_partition_remap(struct bio *bio)
{
	struct block_device *p = bio->bi_bdev;

	if (unlikely(should_fail_request(p, bio->bi_iter.bi_size)))
		return -EIO;
	if (bio_sectors(bio)) {
		bio->bi_iter.bi_sector += p->bd_start_sect;
		trace_block_bio_remap(bio, p->bd_dev,
				      bio->bi_iter.bi_sector -
				      p->bd_start_sect);
	}
	bio_set_flag(bio, BIO_REMAPPED);
	return 0;
}

/*
 * Check write append to a zoned block device.
 */
static inline blk_status_t blk_check_zone_append(struct request_queue *q,
						 struct bio *bio)
{
	sector_t pos = bio->bi_iter.bi_sector;
	int nr_sectors = bio_sectors(bio);

	/* Only applicable to zoned block devices */
	if (!blk_queue_is_zoned(q))
		return BLK_STS_NOTSUPP;

	/* The bio sector must point to the start of a sequential zone */
	if (pos & (blk_queue_zone_sectors(q) - 1) ||
	    !blk_queue_zone_is_seq(q, pos))
		return BLK_STS_IOERR;

	/*
	 * Not allowed to cross zone boundaries. Otherwise, the BIO will be
	 * split and could result in non-contiguous sectors being written in
	 * different zones.
	 */
	if (nr_sectors > q->limits.chunk_sectors)
		return BLK_STS_IOERR;

	/* Make sure the BIO is small enough and will not get split */
	if (nr_sectors > q->limits.max_zone_append_sectors)
		return BLK_STS_IOERR;

	bio->bi_opf |= REQ_NOMERGE;

	return BLK_STS_OK;
}

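/*
 * Worked example (illustrative, not part of the upstream file): the
 * alignment test above relies on the zone size being a power of two, so
 * "pos & (zone_sectors - 1)" gives the offset of pos within its zone.
 * With 256 MiB zones (zone_sectors = 524288 512-byte sectors):
 *
 *	pos = 1048576  ->  1048576 & 524287 = 0	(zone start, accepted)
 *	pos = 1048584  ->  1048584 & 524287 = 8	(mid-zone, rejected)
 */
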
Jens Axboe | 900e080 | 2021-11-03 05:47:09 -0600 | [diff] [blame] | 721 | noinline_for_stack bool submit_bio_checks(struct bio *bio) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 722 | { |
Christoph Hellwig | 309dca30 | 2021-01-24 11:02:34 +0100 | [diff] [blame] | 723 | struct block_device *bdev = bio->bi_bdev; |
Pavel Begunkov | eab4e02 | 2021-10-14 15:03:29 +0100 | [diff] [blame] | 724 | struct request_queue *q = bdev_get_queue(bdev); |
Christoph Hellwig | 4e4cbee | 2017-06-03 09:38:06 +0200 | [diff] [blame] | 725 | blk_status_t status = BLK_STS_IOERR; |
Jens Axboe | 5a473e8 | 2020-06-04 11:23:39 -0600 | [diff] [blame] | 726 | struct blk_plug *plug; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 727 | |
| 728 | might_sleep(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 729 | |
Jens Axboe | 5a473e8 | 2020-06-04 11:23:39 -0600 | [diff] [blame] | 730 | plug = blk_mq_plug(q, bio); |
| 731 | if (plug && plug->nowait) |
| 732 | bio->bi_opf |= REQ_NOWAIT; |
| 733 | |
Goldwyn Rodrigues | 03a07c9 | 2017-06-20 07:05:46 -0500 | [diff] [blame] | 734 | /* |
Jens Axboe | b0beb28 | 2020-05-28 13:19:29 -0600 | [diff] [blame] | 735 | * For a REQ_NOWAIT based request, return -EOPNOTSUPP |
Mike Snitzer | 021a244 | 2020-09-23 16:06:51 -0400 | [diff] [blame] | 736 | * if queue does not support NOWAIT. |
Goldwyn Rodrigues | 03a07c9 | 2017-06-20 07:05:46 -0500 | [diff] [blame] | 737 | */ |
Mike Snitzer | 021a244 | 2020-09-23 16:06:51 -0400 | [diff] [blame] | 738 | if ((bio->bi_opf & REQ_NOWAIT) && !blk_queue_nowait(q)) |
Jens Axboe | b0beb28 | 2020-05-28 13:19:29 -0600 | [diff] [blame] | 739 | goto not_supported; |
Goldwyn Rodrigues | 03a07c9 | 2017-06-20 07:05:46 -0500 | [diff] [blame] | 740 | |
Howard McLauchlan | 30abb3a | 2018-02-06 14:05:39 -0800 | [diff] [blame] | 741 | if (should_fail_bio(bio)) |
Christoph Hellwig | 5a7bbad | 2011-09-12 12:12:01 +0200 | [diff] [blame] | 742 | goto end_io; |
Christoph Hellwig | 2f9f622 | 2021-01-24 11:02:35 +0100 | [diff] [blame] | 743 | if (unlikely(bio_check_ro(bio))) |
| 744 | goto end_io; |
Christoph Hellwig | 3a905c3 | 2021-01-25 19:39:57 +0100 | [diff] [blame] | 745 | if (!bio_flagged(bio, BIO_REMAPPED)) { |
| 746 | if (unlikely(bio_check_eod(bio))) |
| 747 | goto end_io; |
| 748 | if (bdev->bd_partno && unlikely(blk_partition_remap(bio))) |
| 749 | goto end_io; |
| 750 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 751 | |
Christoph Hellwig | 5a7bbad | 2011-09-12 12:12:01 +0200 | [diff] [blame] | 752 | /* |
Christoph Hellwig | ed00aab | 2020-07-01 10:59:44 +0200 | [diff] [blame] | 753 | * Filter flush bio's early so that bio based drivers without flush |
| 754 | * support don't have to worry about them. |
Christoph Hellwig | 5a7bbad | 2011-09-12 12:12:01 +0200 | [diff] [blame] | 755 | */ |
Jens Axboe | f3a8ab7 | 2017-01-27 09:08:23 -0700 | [diff] [blame] | 756 | if (op_is_flush(bio->bi_opf) && |
Jens Axboe | c888a8f | 2016-04-13 13:33:19 -0600 | [diff] [blame] | 757 | !test_bit(QUEUE_FLAG_WC, &q->queue_flags)) { |
Jens Axboe | 1eff9d3 | 2016-08-05 15:35:16 -0600 | [diff] [blame] | 758 | bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA); |
Christoph Hellwig | e439ab7 | 2020-07-01 10:59:42 +0200 | [diff] [blame] | 759 | if (!bio_sectors(bio)) { |
Christoph Hellwig | 4e4cbee | 2017-06-03 09:38:06 +0200 | [diff] [blame] | 760 | status = BLK_STS_OK; |
Tejun Heo | a738467 | 2008-11-28 13:32:03 +0900 | [diff] [blame] | 761 | goto end_io; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 762 | } |
Christoph Hellwig | 5a7bbad | 2011-09-12 12:12:01 +0200 | [diff] [blame] | 763 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 764 | |
Christoph Hellwig | d04c406 | 2018-12-14 17:21:22 +0100 | [diff] [blame] | 765 | if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags)) |
Christoph Hellwig | 6ce913f | 2021-10-12 13:12:21 +0200 | [diff] [blame] | 766 | bio_clear_polled(bio); |
Christoph Hellwig | d04c406 | 2018-12-14 17:21:22 +0100 | [diff] [blame] | 767 | |
Christoph Hellwig | 288dab8 | 2016-06-09 16:00:36 +0200 | [diff] [blame] | 768 | switch (bio_op(bio)) { |
| 769 | case REQ_OP_DISCARD: |
| 770 | if (!blk_queue_discard(q)) |
| 771 | goto not_supported; |
| 772 | break; |
| 773 | case REQ_OP_SECURE_ERASE: |
| 774 | if (!blk_queue_secure_erase(q)) |
| 775 | goto not_supported; |
| 776 | break; |
| 777 | case REQ_OP_WRITE_SAME: |
Christoph Hellwig | 74d4699 | 2017-08-23 19:10:32 +0200 | [diff] [blame] | 778 | if (!q->limits.max_write_same_sectors) |
Christoph Hellwig | 288dab8 | 2016-06-09 16:00:36 +0200 | [diff] [blame] | 779 | goto not_supported; |
Nicolai Stange | 5888678 | 2016-12-04 14:56:39 +0100 | [diff] [blame] | 780 | break; |
Keith Busch | 0512a75 | 2020-05-12 17:55:47 +0900 | [diff] [blame] | 781 | case REQ_OP_ZONE_APPEND: |
| 782 | status = blk_check_zone_append(q, bio); |
| 783 | if (status != BLK_STS_OK) |
| 784 | goto end_io; |
| 785 | break; |
Shaun Tancheff | 2d25344 | 2016-10-18 15:40:32 +0900 | [diff] [blame] | 786 | case REQ_OP_ZONE_RESET: |
Ajay Joshi | 6c1b1da | 2019-10-27 23:05:45 +0900 | [diff] [blame] | 787 | case REQ_OP_ZONE_OPEN: |
| 788 | case REQ_OP_ZONE_CLOSE: |
| 789 | case REQ_OP_ZONE_FINISH: |
Christoph Hellwig | 74d4699 | 2017-08-23 19:10:32 +0200 | [diff] [blame] | 790 | if (!blk_queue_is_zoned(q)) |
Shaun Tancheff | 2d25344 | 2016-10-18 15:40:32 +0900 | [diff] [blame] | 791 | goto not_supported; |
Christoph Hellwig | 288dab8 | 2016-06-09 16:00:36 +0200 | [diff] [blame] | 792 | break; |
Chaitanya Kulkarni | 6e33dbf | 2019-08-01 10:26:36 -0700 | [diff] [blame] | 793 | case REQ_OP_ZONE_RESET_ALL: |
| 794 | if (!blk_queue_is_zoned(q) || !blk_queue_zone_resetall(q)) |
| 795 | goto not_supported; |
| 796 | break; |
Chaitanya Kulkarni | a6f0788 | 2016-11-30 12:28:59 -0800 | [diff] [blame] | 797 | case REQ_OP_WRITE_ZEROES: |
Christoph Hellwig | 74d4699 | 2017-08-23 19:10:32 +0200 | [diff] [blame] | 798 | if (!q->limits.max_write_zeroes_sectors) |
Chaitanya Kulkarni | a6f0788 | 2016-11-30 12:28:59 -0800 | [diff] [blame] | 799 | goto not_supported; |
| 800 | break; |
Christoph Hellwig | 288dab8 | 2016-06-09 16:00:36 +0200 | [diff] [blame] | 801 | default: |
| 802 | break; |
Christoph Hellwig | 5a7bbad | 2011-09-12 12:12:01 +0200 | [diff] [blame] | 803 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 804 | |
Tejun Heo | 7f4b35d | 2012-06-04 20:40:56 -0700 | [diff] [blame] | 805 | /* |
Christoph Hellwig | 3e82c34 | 2020-04-25 09:55:51 +0200 | [diff] [blame] | 806 | * Various block parts want %current->io_context, so allocate it up |
| 807 | * front rather than dealing with lots of pain to allocate it only |
| 808 | * where needed. This may fail and the block layer knows how to live |
| 809 | * with it. |
Tejun Heo | 7f4b35d | 2012-06-04 20:40:56 -0700 | [diff] [blame] | 810 | */ |
Christoph Hellwig | 3e82c34 | 2020-04-25 09:55:51 +0200 | [diff] [blame] | 811 | if (unlikely(!current->io_context)) |
| 812 | create_task_io_context(current, GFP_ATOMIC, q->node); |
Tejun Heo | 7f4b35d | 2012-06-04 20:40:56 -0700 | [diff] [blame] | 813 | |
Laibin Qiu | b781d8db5 | 2021-11-12 17:33:54 +0800 | [diff] [blame] | 814 | if (blk_throtl_bio(bio)) |
Tejun Heo | ae11889 | 2015-08-18 14:55:20 -0700 | [diff] [blame] | 815 | return false; |
Christoph Hellwig | db18a53 | 2020-06-27 09:31:58 +0200 | [diff] [blame] | 816 | |
| 817 | blk_cgroup_bio_start(bio); |
| 818 | blkcg_bio_issue_init(bio); |
Christoph Hellwig | 27a84d5 | 2011-09-15 14:01:40 +0200 | [diff] [blame] | 819 | |
NeilBrown | fbbaf70 | 2017-04-07 09:40:52 -0600 | [diff] [blame] | 820 | if (!bio_flagged(bio, BIO_TRACE_COMPLETION)) { |
Christoph Hellwig | e8a676d | 2020-12-03 17:21:36 +0100 | [diff] [blame] | 821 | trace_block_bio_queue(bio); |
NeilBrown | fbbaf70 | 2017-04-07 09:40:52 -0600 | [diff] [blame] | 822 | /* Now that enqueuing has been traced, we need to trace |
| 823 | * completion as well. |
| 824 | */ |
| 825 | bio_set_flag(bio, BIO_TRACE_COMPLETION); |
| 826 | } |
Christoph Hellwig | 27a84d5 | 2011-09-15 14:01:40 +0200 | [diff] [blame] | 827 | return true; |
Tejun Heo | a738467 | 2008-11-28 13:32:03 +0900 | [diff] [blame] | 828 | |
Christoph Hellwig | 288dab8 | 2016-06-09 16:00:36 +0200 | [diff] [blame] | 829 | not_supported: |
Christoph Hellwig | 4e4cbee | 2017-06-03 09:38:06 +0200 | [diff] [blame] | 830 | status = BLK_STS_NOTSUPP; |
Tejun Heo | a738467 | 2008-11-28 13:32:03 +0900 | [diff] [blame] | 831 | end_io: |
Christoph Hellwig | 4e4cbee | 2017-06-03 09:38:06 +0200 | [diff] [blame] | 832 | bio->bi_status = status; |
Christoph Hellwig | 4246a0b | 2015-07-20 15:29:37 +0200 | [diff] [blame] | 833 | bio_endio(bio); |
Christoph Hellwig | 27a84d5 | 2011-09-15 14:01:40 +0200 | [diff] [blame] | 834 | return false; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 835 | } |
| 836 | |
Jens Axboe | 900e080 | 2021-11-03 05:47:09 -0600 | [diff] [blame] | 837 | static void __submit_bio_fops(struct gendisk *disk, struct bio *bio) |
| 838 | { |
| 839 | if (unlikely(bio_queue_enter(bio) != 0)) |
| 840 | return; |
| 841 | if (submit_bio_checks(bio) && blk_crypto_bio_prep(&bio)) |
| 842 | disk->fops->submit_bio(bio); |
| 843 | blk_queue_exit(disk->queue); |
| 844 | } |
| 845 | |
Christoph Hellwig | 3e08773 | 2021-10-12 13:12:24 +0200 | [diff] [blame] | 846 | static void __submit_bio(struct bio *bio) |
Christoph Hellwig | ac7c567 | 2020-05-16 20:28:01 +0200 | [diff] [blame] | 847 | { |
Christoph Hellwig | 309dca30 | 2021-01-24 11:02:34 +0100 | [diff] [blame] | 848 | struct gendisk *disk = bio->bi_bdev->bd_disk; |
Christoph Hellwig | ac7c567 | 2020-05-16 20:28:01 +0200 | [diff] [blame] | 849 | |
Jens Axboe | 900e080 | 2021-11-03 05:47:09 -0600 | [diff] [blame] | 850 | if (!disk->fops->submit_bio) |
Christoph Hellwig | 3e08773 | 2021-10-12 13:12:24 +0200 | [diff] [blame] | 851 | blk_mq_submit_bio(bio); |
Jens Axboe | 900e080 | 2021-11-03 05:47:09 -0600 | [diff] [blame] | 852 | else |
| 853 | __submit_bio_fops(disk, bio); |
Christoph Hellwig | ac7c567 | 2020-05-16 20:28:01 +0200 | [diff] [blame] | 854 | } |
| 855 | |
Christoph Hellwig | 566acf2 | 2020-07-01 10:59:45 +0200 | [diff] [blame] | 856 | /* |
| 857 | * The loop in this function may be a bit non-obvious, and so deserves some |
| 858 | * explanation: |
| 859 | * |
| 860 | * - Before entering the loop, bio->bi_next is NULL (as all callers ensure |
| 861 | * that), so we have a list with a single bio. |
| 862 | * - We pretend that we have just taken it off a longer list, so we assign |
| 863 | * bio_list to a pointer to the bio_list_on_stack, thus initialising the |
| 864 | * bio_list of new bios to be added. ->submit_bio() may indeed add some more |
| 865 | * bios through a recursive call to submit_bio_noacct. If it did, we find a |
| 866 | * non-NULL value in bio_list and re-enter the loop from the top. |
| 867 | * - In this case we really did just take the bio off the top of the list (no
| 868 | * pretending) and so remove it from bio_list, and call into ->submit_bio() |
| 869 | * again. |
| 870 | * |
| 871 | * bio_list_on_stack[0] contains bios submitted by the current ->submit_bio. |
| 872 | * bio_list_on_stack[1] contains bios that were submitted before the current |
| 873 | * ->submit_bio, but that haven't been processed yet.
| 874 | */ |
Christoph Hellwig | 3e08773 | 2021-10-12 13:12:24 +0200 | [diff] [blame] | 875 | static void __submit_bio_noacct(struct bio *bio) |
Christoph Hellwig | 566acf2 | 2020-07-01 10:59:45 +0200 | [diff] [blame] | 876 | { |
| 877 | struct bio_list bio_list_on_stack[2]; |
Christoph Hellwig | 566acf2 | 2020-07-01 10:59:45 +0200 | [diff] [blame] | 878 | |
| 879 | BUG_ON(bio->bi_next); |
| 880 | |
| 881 | bio_list_init(&bio_list_on_stack[0]); |
| 882 | current->bio_list = bio_list_on_stack; |
| 883 | |
| 884 | do { |
Pavel Begunkov | eab4e02 | 2021-10-14 15:03:29 +0100 | [diff] [blame] | 885 | struct request_queue *q = bdev_get_queue(bio->bi_bdev); |
Christoph Hellwig | 566acf2 | 2020-07-01 10:59:45 +0200 | [diff] [blame] | 886 | struct bio_list lower, same; |
| 887 | |
Christoph Hellwig | 566acf2 | 2020-07-01 10:59:45 +0200 | [diff] [blame] | 888 | /* |
| 889 | * Create a fresh bio_list for all subordinate requests. |
| 890 | */ |
| 891 | bio_list_on_stack[1] = bio_list_on_stack[0]; |
| 892 | bio_list_init(&bio_list_on_stack[0]); |
| 893 | |
Christoph Hellwig | 3e08773 | 2021-10-12 13:12:24 +0200 | [diff] [blame] | 894 | __submit_bio(bio); |
Christoph Hellwig | 566acf2 | 2020-07-01 10:59:45 +0200 | [diff] [blame] | 895 | |
| 896 | /* |
| 897 | * Sort new bios into those for a lower level and those for the |
| 898 | * same level. |
| 899 | */ |
| 900 | bio_list_init(&lower); |
| 901 | bio_list_init(&same); |
| 902 | while ((bio = bio_list_pop(&bio_list_on_stack[0])) != NULL) |
Pavel Begunkov | eab4e02 | 2021-10-14 15:03:29 +0100 | [diff] [blame] | 903 | if (q == bdev_get_queue(bio->bi_bdev)) |
Christoph Hellwig | 566acf2 | 2020-07-01 10:59:45 +0200 | [diff] [blame] | 904 | bio_list_add(&same, bio); |
| 905 | else |
| 906 | bio_list_add(&lower, bio); |
| 907 | |
| 908 | /* |
| 909 | * Now assemble so we handle the lowest level first. |
| 910 | */ |
| 911 | bio_list_merge(&bio_list_on_stack[0], &lower); |
| 912 | bio_list_merge(&bio_list_on_stack[0], &same); |
| 913 | bio_list_merge(&bio_list_on_stack[0], &bio_list_on_stack[1]); |
| 914 | } while ((bio = bio_list_pop(&bio_list_on_stack[0]))); |
| 915 | |
| 916 | current->bio_list = NULL; |
Christoph Hellwig | 566acf2 | 2020-07-01 10:59:45 +0200 | [diff] [blame] | 917 | } |
| 918 | |
Christoph Hellwig | 3e08773 | 2021-10-12 13:12:24 +0200 | [diff] [blame] | 919 | static void __submit_bio_noacct_mq(struct bio *bio) |
Christoph Hellwig | ff93ea0 | 2020-07-01 10:59:46 +0200 | [diff] [blame] | 920 | { |
Christoph Hellwig | 7c792f3 | 2020-07-02 21:21:25 +0200 | [diff] [blame] | 921 | struct bio_list bio_list[2] = { }; |
Christoph Hellwig | ff93ea0 | 2020-07-01 10:59:46 +0200 | [diff] [blame] | 922 | |
Christoph Hellwig | 7c792f3 | 2020-07-02 21:21:25 +0200 | [diff] [blame] | 923 | current->bio_list = bio_list; |
Christoph Hellwig | ff93ea0 | 2020-07-01 10:59:46 +0200 | [diff] [blame] | 924 | |
| 925 | do { |
Christoph Hellwig | 3e08773 | 2021-10-12 13:12:24 +0200 | [diff] [blame] | 926 | __submit_bio(bio); |
Christoph Hellwig | 7c792f3 | 2020-07-02 21:21:25 +0200 | [diff] [blame] | 927 | } while ((bio = bio_list_pop(&bio_list[0]))); |
Christoph Hellwig | ff93ea0 | 2020-07-01 10:59:46 +0200 | [diff] [blame] | 928 | |
| 929 | current->bio_list = NULL; |
Christoph Hellwig | ff93ea0 | 2020-07-01 10:59:46 +0200 | [diff] [blame] | 930 | } |
| 931 | |
Christoph Hellwig | 27a84d5 | 2011-09-15 14:01:40 +0200 | [diff] [blame] | 932 | /** |
Christoph Hellwig | ed00aab | 2020-07-01 10:59:44 +0200 | [diff] [blame] | 933 | * submit_bio_noacct - re-submit a bio to the block device layer for I/O |
Christoph Hellwig | 27a84d5 | 2011-09-15 14:01:40 +0200 | [diff] [blame] | 934 | * @bio: The bio describing the location in memory and on the device. |
| 935 | * |
Christoph Hellwig | 3fdd408 | 2020-04-28 13:27:53 +0200 | [diff] [blame] | 936 | * This is a version of submit_bio() that shall only be used for I/O that is |
| 937 | * resubmitted to lower level drivers by stacking block drivers. All file |
| 938 | * systems and other upper level users of the block layer should use |
| 939 | * submit_bio() instead. |
Neil Brown | d89d879 | 2007-05-01 09:53:42 +0200 | [diff] [blame] | 940 | */ |
Christoph Hellwig | 3e08773 | 2021-10-12 13:12:24 +0200 | [diff] [blame] | 941 | void submit_bio_noacct(struct bio *bio) |
Neil Brown | d89d879 | 2007-05-01 09:53:42 +0200 | [diff] [blame] | 942 | { |
Christoph Hellwig | 27a84d5 | 2011-09-15 14:01:40 +0200 | [diff] [blame] | 943 | /* |
Christoph Hellwig | 566acf2 | 2020-07-01 10:59:45 +0200 | [diff] [blame] | 944 | * We only want one ->submit_bio to be active at a time, else stack |
| 945 | * usage with stacked devices could be a problem. Use current->bio_list |
| 946 | * to collect a list of requests submitted by a ->submit_bio method while
| 947 | * it is active, and then process them after it has returned.
Christoph Hellwig | 27a84d5 | 2011-09-15 14:01:40 +0200 | [diff] [blame] | 948 | */ |
Christoph Hellwig | 3e08773 | 2021-10-12 13:12:24 +0200 | [diff] [blame] | 949 | if (current->bio_list) |
NeilBrown | f5fe1b5 | 2017-03-10 17:00:47 +1100 | [diff] [blame] | 950 | bio_list_add(¤t->bio_list[0], bio); |
Christoph Hellwig | 3e08773 | 2021-10-12 13:12:24 +0200 | [diff] [blame] | 951 | else if (!bio->bi_bdev->bd_disk->fops->submit_bio) |
| 952 | __submit_bio_noacct_mq(bio); |
| 953 | else |
| 954 | __submit_bio_noacct(bio); |
Neil Brown | d89d879 | 2007-05-01 09:53:42 +0200 | [diff] [blame] | 955 | } |
Christoph Hellwig | ed00aab | 2020-07-01 10:59:44 +0200 | [diff] [blame] | 956 | EXPORT_SYMBOL(submit_bio_noacct); |
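/*
 * Usage sketch (illustrative only, not part of this file): how a bio-based
 * stacking driver typically remaps a bio to its lower device and re-submits
 * it.  my_stack_dev, ->lower_bdev and ->data_offset are hypothetical names.
 *
 *	static void my_remap_and_resubmit(struct my_stack_dev *sd, struct bio *bio)
 *	{
 *		bio_set_dev(bio, sd->lower_bdev);
 *		bio->bi_iter.bi_sector += sd->data_offset;
 *		submit_bio_noacct(bio);
 *	}
 */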
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 957 | |
| 958 | /** |
Randy Dunlap | 710027a | 2008-08-19 20:13:11 +0200 | [diff] [blame] | 959 | * submit_bio - submit a bio to the block device layer for I/O |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 960 | * @bio: The &struct bio which describes the I/O |
| 961 | * |
Christoph Hellwig | 3fdd408 | 2020-04-28 13:27:53 +0200 | [diff] [blame] | 962 | * submit_bio() is used to submit I/O requests to block devices. It is passed a |
| 963 | * fully set up &struct bio that describes the I/O that needs to be done. The |
Christoph Hellwig | 309dca30 | 2021-01-24 11:02:34 +0100 | [diff] [blame] | 964 | * bio will be sent to the device described by the bi_bdev field.
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 965 | * |
Christoph Hellwig | 3fdd408 | 2020-04-28 13:27:53 +0200 | [diff] [blame] | 966 | * The success/failure status of the request, along with notification of |
| 967 | * completion, is delivered asynchronously through the ->bi_end_io() callback |
| 968 | * in @bio. The bio must NOT be touched by the caller until ->bi_end_io() has
| 969 | * been called. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 970 | */ |
Christoph Hellwig | 3e08773 | 2021-10-12 13:12:24 +0200 | [diff] [blame] | 971 | void submit_bio(struct bio *bio) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 972 | { |
Tejun Heo | d3f77df | 2019-06-27 13:39:52 -0700 | [diff] [blame] | 973 | if (blkcg_punt_bio_submit(bio)) |
Christoph Hellwig | 3e08773 | 2021-10-12 13:12:24 +0200 | [diff] [blame] | 974 | return; |
Tejun Heo | d3f77df | 2019-06-27 13:39:52 -0700 | [diff] [blame] | 975 | |
Jens Axboe | bf2de6f | 2007-09-27 13:01:25 +0200 | [diff] [blame] | 976 | /* |
| 977 | * If it's a regular read/write or a barrier with data attached, |
| 978 | * go through the normal accounting stuff before submission. |
| 979 | */ |
Martin K. Petersen | e2a60da | 2012-09-18 12:19:25 -0400 | [diff] [blame] | 980 | if (bio_has_data(bio)) { |
Martin K. Petersen | 4363ac7 | 2012-09-18 12:19:27 -0400 | [diff] [blame] | 981 | unsigned int count; |
| 982 | |
Mike Christie | 95fe6c1 | 2016-06-05 14:31:48 -0500 | [diff] [blame] | 983 | if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME)) |
Christoph Hellwig | 309dca30 | 2021-01-24 11:02:34 +0100 | [diff] [blame] | 984 | count = queue_logical_block_size( |
Pavel Begunkov | eab4e02 | 2021-10-14 15:03:29 +0100 | [diff] [blame] | 985 | bdev_get_queue(bio->bi_bdev)) >> 9; |
Martin K. Petersen | 4363ac7 | 2012-09-18 12:19:27 -0400 | [diff] [blame] | 986 | else |
| 987 | count = bio_sectors(bio); |
| 988 | |
Mike Christie | a8ebb05 | 2016-06-05 14:31:45 -0500 | [diff] [blame] | 989 | if (op_is_write(bio_op(bio))) { |
Jens Axboe | bf2de6f | 2007-09-27 13:01:25 +0200 | [diff] [blame] | 990 | count_vm_events(PGPGOUT, count); |
| 991 | } else { |
Kent Overstreet | 4f024f3 | 2013-10-11 15:44:27 -0700 | [diff] [blame] | 992 | task_io_account_read(bio->bi_iter.bi_size); |
Jens Axboe | bf2de6f | 2007-09-27 13:01:25 +0200 | [diff] [blame] | 993 | count_vm_events(PGPGIN, count); |
| 994 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 995 | } |
| 996 | |
Johannes Weiner | b8e24a9 | 2019-08-08 15:03:00 -0400 | [diff] [blame] | 997 | /* |
Christoph Hellwig | 760f83e | 2020-04-28 13:27:54 +0200 | [diff] [blame] | 998 | * If we're reading data that is part of the userspace workingset, count |
| 999 | * submission time as memory stall. When the device is congested, or |
| 1000 | * the submitting cgroup is IO-throttled, submission can be a significant
| 1001 | * part of overall IO time. |
Johannes Weiner | b8e24a9 | 2019-08-08 15:03:00 -0400 | [diff] [blame] | 1002 | */ |
Christoph Hellwig | 760f83e | 2020-04-28 13:27:54 +0200 | [diff] [blame] | 1003 | if (unlikely(bio_op(bio) == REQ_OP_READ && |
| 1004 | bio_flagged(bio, BIO_WORKINGSET))) { |
| 1005 | unsigned long pflags; |
Christoph Hellwig | 760f83e | 2020-04-28 13:27:54 +0200 | [diff] [blame] | 1006 | |
Johannes Weiner | b8e24a9 | 2019-08-08 15:03:00 -0400 | [diff] [blame] | 1007 | psi_memstall_enter(&pflags); |
Christoph Hellwig | 3e08773 | 2021-10-12 13:12:24 +0200 | [diff] [blame] | 1008 | submit_bio_noacct(bio); |
Johannes Weiner | b8e24a9 | 2019-08-08 15:03:00 -0400 | [diff] [blame] | 1009 | psi_memstall_leave(&pflags); |
Christoph Hellwig | 3e08773 | 2021-10-12 13:12:24 +0200 | [diff] [blame] | 1010 | return; |
Christoph Hellwig | 760f83e | 2020-04-28 13:27:54 +0200 | [diff] [blame] | 1011 | } |
| 1012 | |
Christoph Hellwig | 3e08773 | 2021-10-12 13:12:24 +0200 | [diff] [blame] | 1013 | submit_bio_noacct(bio); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1014 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1015 | EXPORT_SYMBOL(submit_bio); |
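/*
 * Usage sketch (illustrative only, not part of this file): a caller that owns
 * a fully set up bio submits it and waits for ->bi_end_io() to run.  The
 * my_end_io() helper and the on-stack completion are assumptions made for the
 * sketch.
 *
 *	static void my_end_io(struct bio *bio)
 *	{
 *		complete(bio->bi_private);
 *	}
 *
 *	DECLARE_COMPLETION_ONSTACK(done);
 *
 *	bio->bi_private = &done;
 *	bio->bi_end_io = my_end_io;
 *	submit_bio(bio);
 *	wait_for_completion_io(&done);
 */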
| 1016 | |
Jens Axboe | 1052b8a | 2018-11-26 08:21:49 -0700 | [diff] [blame] | 1017 | /** |
Christoph Hellwig | 3e08773 | 2021-10-12 13:12:24 +0200 | [diff] [blame] | 1018 | * bio_poll - poll for BIO completions |
| 1019 | * @bio: bio to poll for |
Yang Guang | e30028a | 2021-11-26 00:20:55 +0800 | [diff] [blame] | 1020 | * @iob: batches of IO |
Christoph Hellwig | 3e08773 | 2021-10-12 13:12:24 +0200 | [diff] [blame] | 1021 | * @flags: BLK_POLL_* flags that control the behavior |
| 1022 | * |
| 1023 | * Poll for completions on queue associated with the bio. Returns number of |
| 1024 | * completed entries found. |
| 1025 | * |
| 1026 | * Note: the caller must either be the context that submitted @bio, or |
| 1027 | * be in an RCU critical section to prevent freeing of @bio.
| 1028 | */ |
Jens Axboe | 5a72e89 | 2021-10-12 09:24:29 -0600 | [diff] [blame] | 1029 | int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags) |
Christoph Hellwig | 3e08773 | 2021-10-12 13:12:24 +0200 | [diff] [blame] | 1030 | { |
Pavel Begunkov | 859897c | 2021-10-19 22:24:11 +0100 | [diff] [blame] | 1031 | struct request_queue *q = bdev_get_queue(bio->bi_bdev); |
Christoph Hellwig | 3e08773 | 2021-10-12 13:12:24 +0200 | [diff] [blame] | 1032 | blk_qc_t cookie = READ_ONCE(bio->bi_cookie); |
| 1033 | int ret; |
| 1034 | |
| 1035 | if (cookie == BLK_QC_T_NONE || |
| 1036 | !test_bit(QUEUE_FLAG_POLL, &q->queue_flags)) |
| 1037 | return 0; |
| 1038 | |
| 1039 | if (current->plug) |
Christoph Hellwig | 008f75a | 2021-10-20 16:41:19 +0200 | [diff] [blame] | 1040 | blk_flush_plug(current->plug, false); |
Christoph Hellwig | 3e08773 | 2021-10-12 13:12:24 +0200 | [diff] [blame] | 1041 | |
| 1042 | if (blk_queue_enter(q, BLK_MQ_REQ_NOWAIT)) |
| 1043 | return 0; |
| 1044 | if (WARN_ON_ONCE(!queue_is_mq(q))) |
| 1045 | ret = 0; /* not yet implemented, should not happen */ |
| 1046 | else |
Jens Axboe | 5a72e89 | 2021-10-12 09:24:29 -0600 | [diff] [blame] | 1047 | ret = blk_mq_poll(q, cookie, iob, flags); |
Christoph Hellwig | 3e08773 | 2021-10-12 13:12:24 +0200 | [diff] [blame] | 1048 | blk_queue_exit(q); |
| 1049 | return ret; |
| 1050 | } |
| 1051 | EXPORT_SYMBOL_GPL(bio_poll); |
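/*
 * Usage sketch (illustrative only, not part of this file): synchronous polled
 * I/O.  The submitter marks the bio with REQ_POLLED and then spins on
 * bio_poll() until its completion handler flips a flag; "done" is assumed to
 * be set by the bio's ->bi_end_io().
 *
 *	bio->bi_opf |= REQ_POLLED;
 *	submit_bio(bio);
 *	while (!READ_ONCE(done))
 *		bio_poll(bio, NULL, 0);
 */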
| 1052 | |
| 1053 | /* |
| 1054 | * Helper to implement file_operations.iopoll. Requires the bio to be stored |
| 1055 | * in iocb->private, and cleared before freeing the bio. |
| 1056 | */ |
Jens Axboe | 5a72e89 | 2021-10-12 09:24:29 -0600 | [diff] [blame] | 1057 | int iocb_bio_iopoll(struct kiocb *kiocb, struct io_comp_batch *iob, |
| 1058 | unsigned int flags) |
Christoph Hellwig | 3e08773 | 2021-10-12 13:12:24 +0200 | [diff] [blame] | 1059 | { |
| 1060 | struct bio *bio; |
| 1061 | int ret = 0; |
| 1062 | |
| 1063 | /* |
| 1064 | * Note: the bio cache only uses SLAB_TYPESAFE_BY_RCU, so bio can |
| 1065 | * point to a freshly allocated bio at this point. If that happens |
| 1066 | * we have a few cases to consider: |
| 1067 | * |
| 1068 | * 1) the bio is being initialized and bi_bdev is NULL. We can
| 1069 | * simply do nothing in this case
| 1070 | * 2) the bio points to a device that is not poll enabled. bio_poll will catch
| 1071 | * this and return 0 |
| 1072 | * 3) the bio points to a poll capable device, including but not |
| 1073 | * limited to the one that the original bio pointed to. In this |
| 1074 | * case we will call into the actual poll method and poll for I/O, |
| 1075 | * even if we don't need to, but it won't cause harm either. |
| 1076 | * |
| 1077 | * For cases 2) and 3) above the RCU grace period ensures that bi_bdev |
| 1078 | * is still allocated. Because partitions hold a reference to the whole |
| 1079 | * device bdev and thus disk, the disk is also still valid. Grabbing |
| 1080 | * a reference to the queue in bio_poll() ensures the hctxs and requests |
| 1081 | * are still valid as well. |
| 1082 | */ |
| 1083 | rcu_read_lock(); |
| 1084 | bio = READ_ONCE(kiocb->private); |
| 1085 | if (bio && bio->bi_bdev) |
Jens Axboe | 5a72e89 | 2021-10-12 09:24:29 -0600 | [diff] [blame] | 1086 | ret = bio_poll(bio, iob, flags); |
Christoph Hellwig | 3e08773 | 2021-10-12 13:12:24 +0200 | [diff] [blame] | 1087 | rcu_read_unlock(); |
| 1088 | |
| 1089 | return ret; |
| 1090 | } |
| 1091 | EXPORT_SYMBOL_GPL(iocb_bio_iopoll); |
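/*
 * Usage sketch (illustrative only, not part of this file): a driver that
 * stores the in-flight bio in kiocb->private from its ->read_iter() or
 * ->write_iter() path can wire up polling by pointing ->iopoll at this
 * helper.  my_read_iter and my_write_iter are hypothetical.
 *
 *	WRITE_ONCE(iocb->private, bio);
 *
 *	static const struct file_operations my_fops = {
 *		.read_iter	= my_read_iter,
 *		.write_iter	= my_write_iter,
 *		.iopoll		= iocb_bio_iopoll,
 *	};
 */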
| 1092 | |
| 1093 | /** |
Hannes Reinecke | bf4e6b4 | 2015-11-26 08:46:57 +0100 | [diff] [blame] | 1094 | * blk_cloned_rq_check_limits - Helper function to check a cloned request |
Guoqing Jiang | 0d72031 | 2020-03-09 22:41:33 +0100 | [diff] [blame] | 1095 | * for the new queue limits |
Kiyoshi Ueda | 82124d6 | 2008-09-18 10:45:38 -0400 | [diff] [blame] | 1096 | * @q: the queue |
| 1097 | * @rq: the request being checked |
| 1098 | * |
| 1099 | * Description: |
| 1100 | * @rq may have been made based on weaker limitations of upper-level queues |
| 1101 | * in request stacking drivers, and it may violate the limitation of @q. |
| 1102 | * Since the block layer and the underlying device driver trust @rq |
| 1103 | * after it is inserted to @q, it should be checked against @q before |
| 1104 | * the insertion using this generic function. |
| 1105 | * |
Kiyoshi Ueda | 82124d6 | 2008-09-18 10:45:38 -0400 | [diff] [blame] | 1106 | * Request stacking drivers like request-based dm may change the queue |
Hannes Reinecke | bf4e6b4 | 2015-11-26 08:46:57 +0100 | [diff] [blame] | 1107 | * limits when retrying requests on other queues. Those requests need |
| 1108 | * to be checked against the new queue limits again during dispatch. |
Kiyoshi Ueda | 82124d6 | 2008-09-18 10:45:38 -0400 | [diff] [blame] | 1109 | */ |
Ritika Srivastava | 143d260 | 2020-09-01 13:17:30 -0700 | [diff] [blame] | 1110 | static blk_status_t blk_cloned_rq_check_limits(struct request_queue *q, |
Hannes Reinecke | bf4e6b4 | 2015-11-26 08:46:57 +0100 | [diff] [blame] | 1111 | struct request *rq) |
Kiyoshi Ueda | 82124d6 | 2008-09-18 10:45:38 -0400 | [diff] [blame] | 1112 | { |
Ritika Srivastava | 8327cce5 | 2020-09-01 13:17:31 -0700 | [diff] [blame] | 1113 | unsigned int max_sectors = blk_queue_get_max_sectors(q, req_op(rq)); |
| 1114 | |
| 1115 | if (blk_rq_sectors(rq) > max_sectors) { |
| 1116 | /* |
| 1117 | * SCSI device does not have a good way to return if |
| 1118 | * Write Same/Zero is actually supported. If a device rejects |
| 1119 | * a non-read/write command (discard, write same, etc.) the
| 1120 | * low-level device driver will set the relevant queue limit to |
| 1121 | * 0 to prevent blk-lib from issuing more of the offending |
| 1122 | * operations. Commands queued prior to the queue limit being |
| 1123 | * reset need to be completed with BLK_STS_NOTSUPP to avoid I/O |
| 1124 | * errors being propagated to upper layers. |
| 1125 | */ |
| 1126 | if (max_sectors == 0) |
| 1127 | return BLK_STS_NOTSUPP; |
| 1128 | |
John Pittman | 61939b1 | 2019-05-23 17:49:39 -0400 | [diff] [blame] | 1129 | printk(KERN_ERR "%s: over max size limit. (%u > %u)\n", |
Ritika Srivastava | 8327cce5 | 2020-09-01 13:17:31 -0700 | [diff] [blame] | 1130 | __func__, blk_rq_sectors(rq), max_sectors); |
Ritika Srivastava | 143d260 | 2020-09-01 13:17:30 -0700 | [diff] [blame] | 1131 | return BLK_STS_IOERR; |
Kiyoshi Ueda | 82124d6 | 2008-09-18 10:45:38 -0400 | [diff] [blame] | 1132 | } |
| 1133 | |
| 1134 | /* |
Christoph Hellwig | 9bb33f2 | 2021-03-31 09:30:00 +0200 | [diff] [blame] | 1135 | * The queue settings related to segment counting may differ from the |
| 1136 | * original queue. |
Kiyoshi Ueda | 82124d6 | 2008-09-18 10:45:38 -0400 | [diff] [blame] | 1137 | */ |
Christoph Hellwig | e9cd19c | 2019-06-06 12:29:02 +0200 | [diff] [blame] | 1138 | rq->nr_phys_segments = blk_recalc_rq_segments(rq); |
Martin K. Petersen | 8a78362 | 2010-02-26 00:20:39 -0500 | [diff] [blame] | 1139 | if (rq->nr_phys_segments > queue_max_segments(q)) { |
John Pittman | 61939b1 | 2019-05-23 17:49:39 -0400 | [diff] [blame] | 1140 | printk(KERN_ERR "%s: over max segments limit. (%hu > %hu)\n", |
| 1141 | __func__, rq->nr_phys_segments, queue_max_segments(q)); |
Ritika Srivastava | 143d260 | 2020-09-01 13:17:30 -0700 | [diff] [blame] | 1142 | return BLK_STS_IOERR; |
Kiyoshi Ueda | 82124d6 | 2008-09-18 10:45:38 -0400 | [diff] [blame] | 1143 | } |
| 1144 | |
Ritika Srivastava | 143d260 | 2020-09-01 13:17:30 -0700 | [diff] [blame] | 1145 | return BLK_STS_OK; |
Kiyoshi Ueda | 82124d6 | 2008-09-18 10:45:38 -0400 | [diff] [blame] | 1146 | } |
Kiyoshi Ueda | 82124d6 | 2008-09-18 10:45:38 -0400 | [diff] [blame] | 1147 | |
| 1148 | /** |
| 1149 | * blk_insert_cloned_request - Helper for stacking drivers to submit a request |
| 1150 | * @q: the queue to submit the request |
| 1151 | * @rq: the request being queued |
| 1152 | */ |
Christoph Hellwig | 2a842ac | 2017-06-03 09:38:04 +0200 | [diff] [blame] | 1153 | blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *rq) |
Kiyoshi Ueda | 82124d6 | 2008-09-18 10:45:38 -0400 | [diff] [blame] | 1154 | { |
Ritika Srivastava | 8327cce5 | 2020-09-01 13:17:31 -0700 | [diff] [blame] | 1155 | blk_status_t ret; |
| 1156 | |
| 1157 | ret = blk_cloned_rq_check_limits(q, rq); |
| 1158 | if (ret != BLK_STS_OK) |
| 1159 | return ret; |
Kiyoshi Ueda | 82124d6 | 2008-09-18 10:45:38 -0400 | [diff] [blame] | 1160 | |
Akinobu Mita | b2c9cd3 | 2011-07-26 16:09:03 -0700 | [diff] [blame] | 1161 | if (rq->rq_disk && |
Christoph Hellwig | 8446fe9 | 2020-11-24 09:36:54 +0100 | [diff] [blame] | 1162 | should_fail_request(rq->rq_disk->part0, blk_rq_bytes(rq))) |
Christoph Hellwig | 2a842ac | 2017-06-03 09:38:04 +0200 | [diff] [blame] | 1163 | return BLK_STS_IOERR; |
Kiyoshi Ueda | 82124d6 | 2008-09-18 10:45:38 -0400 | [diff] [blame] | 1164 | |
Satya Tangirala | a892c8d | 2020-05-14 00:37:18 +0000 | [diff] [blame] | 1165 | if (blk_crypto_insert_cloned_request(rq)) |
| 1166 | return BLK_STS_IOERR; |
| 1167 | |
Pavel Begunkov | be6bfe3 | 2021-10-09 13:25:41 +0100 | [diff] [blame] | 1168 | blk_account_io_start(rq); |
Kiyoshi Ueda | 82124d6 | 2008-09-18 10:45:38 -0400 | [diff] [blame] | 1169 | |
| 1170 | /* |
Jens Axboe | a1ce35f | 2018-10-29 10:23:51 -0600 | [diff] [blame] | 1171 | * Since we have a scheduler attached on the top device, |
| 1172 | * bypass a potential scheduler on the bottom device for |
| 1173 | * insert. |
Kiyoshi Ueda | 82124d6 | 2008-09-18 10:45:38 -0400 | [diff] [blame] | 1174 | */ |
Bart Van Assche | fd9c40f | 2019-04-04 10:08:43 -0700 | [diff] [blame] | 1175 | return blk_mq_request_issue_directly(rq, true); |
Kiyoshi Ueda | 82124d6 | 2008-09-18 10:45:38 -0400 | [diff] [blame] | 1176 | } |
| 1177 | EXPORT_SYMBOL_GPL(blk_insert_cloned_request); |
| 1178 | |
Tejun Heo | 80a761f | 2009-07-03 17:48:17 +0900 | [diff] [blame] | 1179 | /** |
| 1180 | * blk_rq_err_bytes - determine number of bytes till the next failure boundary |
| 1181 | * @rq: request to examine |
| 1182 | * |
| 1183 | * Description: |
| 1184 | * A request could be a merge of IOs which require different failure
| 1185 | * handling. This function determines the number of bytes which
| 1186 | * can be failed from the beginning of the request without
| 1187 | * crossing into the area which needs to be retried further.
| 1188 | * |
| 1189 | * Return: |
| 1190 | * The number of bytes to fail. |
Tejun Heo | 80a761f | 2009-07-03 17:48:17 +0900 | [diff] [blame] | 1191 | */ |
| 1192 | unsigned int blk_rq_err_bytes(const struct request *rq) |
| 1193 | { |
| 1194 | unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK; |
| 1195 | unsigned int bytes = 0; |
| 1196 | struct bio *bio; |
| 1197 | |
Christoph Hellwig | e806402 | 2016-10-20 15:12:13 +0200 | [diff] [blame] | 1198 | if (!(rq->rq_flags & RQF_MIXED_MERGE)) |
Tejun Heo | 80a761f | 2009-07-03 17:48:17 +0900 | [diff] [blame] | 1199 | return blk_rq_bytes(rq); |
| 1200 | |
| 1201 | /* |
| 1202 | * Currently the only 'mixing' which can happen is between |
| 1203 | * different fastfail types. We can safely fail portions |
| 1204 | * which have all the failfast bits that the first one has - |
| 1205 | * the ones which are at least as eager to fail as the first |
| 1206 | * one. |
| 1207 | */ |
| 1208 | for (bio = rq->bio; bio; bio = bio->bi_next) { |
Jens Axboe | 1eff9d3 | 2016-08-05 15:35:16 -0600 | [diff] [blame] | 1209 | if ((bio->bi_opf & ff) != ff) |
Tejun Heo | 80a761f | 2009-07-03 17:48:17 +0900 | [diff] [blame] | 1210 | break; |
Kent Overstreet | 4f024f3 | 2013-10-11 15:44:27 -0700 | [diff] [blame] | 1211 | bytes += bio->bi_iter.bi_size; |
Tejun Heo | 80a761f | 2009-07-03 17:48:17 +0900 | [diff] [blame] | 1212 | } |
| 1213 | |
| 1214 | /* this could lead to an infinite loop */
| 1215 | BUG_ON(blk_rq_bytes(rq) && !bytes); |
| 1216 | return bytes; |
| 1217 | } |
| 1218 | EXPORT_SYMBOL_GPL(blk_rq_err_bytes); |
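/*
 * Usage sketch (illustrative only, not part of this file): an error path that
 * fails only the leading bytes sharing the failfast policy and requeues
 * whatever remains, loosely modelled on how SCSI completes requests.
 * blk_update_request() returns true while the request still has bytes left.
 *
 *	unsigned int bytes = blk_rq_err_bytes(rq);
 *
 *	if (blk_update_request(rq, BLK_STS_IOERR, bytes))
 *		blk_mq_requeue_request(rq, true);
 *	else
 *		__blk_mq_end_request(rq, BLK_STS_IOERR);
 */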
| 1219 | |
Christoph Hellwig | 8446fe9 | 2020-11-24 09:36:54 +0100 | [diff] [blame] | 1220 | static void update_io_ticks(struct block_device *part, unsigned long now, |
| 1221 | bool end) |
Christoph Hellwig | 9123bf6 | 2020-05-27 07:24:13 +0200 | [diff] [blame] | 1222 | { |
| 1223 | unsigned long stamp; |
| 1224 | again: |
Christoph Hellwig | 8446fe9 | 2020-11-24 09:36:54 +0100 | [diff] [blame] | 1225 | stamp = READ_ONCE(part->bd_stamp); |
Chunguang Xu | d80c228 | 2021-07-06 05:47:26 +0800 | [diff] [blame] | 1226 | if (unlikely(time_after(now, stamp))) { |
Christoph Hellwig | 8446fe9 | 2020-11-24 09:36:54 +0100 | [diff] [blame] | 1227 | if (likely(cmpxchg(&part->bd_stamp, stamp, now) == stamp)) |
Christoph Hellwig | 9123bf6 | 2020-05-27 07:24:13 +0200 | [diff] [blame] | 1228 | __part_stat_add(part, io_ticks, end ? now - stamp : 1); |
| 1229 | } |
Christoph Hellwig | 8446fe9 | 2020-11-24 09:36:54 +0100 | [diff] [blame] | 1230 | if (part->bd_partno) { |
| 1231 | part = bdev_whole(part); |
Christoph Hellwig | 9123bf6 | 2020-05-27 07:24:13 +0200 | [diff] [blame] | 1232 | goto again; |
| 1233 | } |
| 1234 | } |
| 1235 | |
Pavel Begunkov | be6bfe3 | 2021-10-09 13:25:41 +0100 | [diff] [blame] | 1236 | void __blk_account_io_done(struct request *req, u64 now) |
Jens Axboe | bc58ba9 | 2009-01-23 10:54:44 +0100 | [diff] [blame] | 1237 | { |
Pavel Begunkov | be6bfe3 | 2021-10-09 13:25:41 +0100 | [diff] [blame] | 1238 | const int sgrp = op_stat_group(req_op(req)); |
Jens Axboe | bc58ba9 | 2009-01-23 10:54:44 +0100 | [diff] [blame] | 1239 | |
Pavel Begunkov | be6bfe3 | 2021-10-09 13:25:41 +0100 | [diff] [blame] | 1240 | part_stat_lock(); |
| 1241 | update_io_ticks(req->part, jiffies, true); |
| 1242 | part_stat_inc(req->part, ios[sgrp]); |
| 1243 | part_stat_add(req->part, nsecs[sgrp], now - req->start_time_ns); |
| 1244 | part_stat_unlock(); |
Jens Axboe | bc58ba9 | 2009-01-23 10:54:44 +0100 | [diff] [blame] | 1245 | } |
| 1246 | |
Pavel Begunkov | be6bfe3 | 2021-10-09 13:25:41 +0100 | [diff] [blame] | 1247 | void __blk_account_io_start(struct request *rq) |
Jens Axboe | 320ae51 | 2013-10-24 09:20:05 +0100 | [diff] [blame] | 1248 | { |
Christoph Hellwig | 0b6e522 | 2021-01-24 11:02:38 +0100 | [diff] [blame] | 1249 | /* passthrough requests can hold bios that do not have ->bi_bdev set */ |
| 1250 | if (rq->bio && rq->bio->bi_bdev) |
| 1251 | rq->part = rq->bio->bi_bdev; |
| 1252 | else |
| 1253 | rq->part = rq->rq_disk->part0; |
Christoph Hellwig | 524f9ff | 2020-05-27 07:24:19 +0200 | [diff] [blame] | 1254 | |
Mike Snitzer | 112f158 | 2018-12-06 11:41:18 -0500 | [diff] [blame] | 1255 | part_stat_lock(); |
Christoph Hellwig | 76268f3 | 2020-05-13 12:49:34 +0200 | [diff] [blame] | 1256 | update_io_ticks(rq->part, jiffies, false); |
Jens Axboe | 320ae51 | 2013-10-24 09:20:05 +0100 | [diff] [blame] | 1257 | part_stat_unlock(); |
| 1258 | } |
| 1259 | |
Christoph Hellwig | 8446fe9 | 2020-11-24 09:36:54 +0100 | [diff] [blame] | 1260 | static unsigned long __part_start_io_acct(struct block_device *part, |
Song Liu | 7b26410 | 2020-08-31 15:27:23 -0700 | [diff] [blame] | 1261 | unsigned int sectors, unsigned int op) |
Christoph Hellwig | 956d510 | 2020-05-27 07:24:04 +0200 | [diff] [blame] | 1262 | { |
Christoph Hellwig | 956d510 | 2020-05-27 07:24:04 +0200 | [diff] [blame] | 1263 | const int sgrp = op_stat_group(op); |
| 1264 | unsigned long now = READ_ONCE(jiffies); |
| 1265 | |
| 1266 | part_stat_lock(); |
| 1267 | update_io_ticks(part, now, false); |
| 1268 | part_stat_inc(part, ios[sgrp]); |
| 1269 | part_stat_add(part, sectors[sgrp], sectors); |
| 1270 | part_stat_local_inc(part, in_flight[op_is_write(op)]); |
| 1271 | part_stat_unlock(); |
| 1272 | |
| 1273 | return now; |
| 1274 | } |
Song Liu | 7b26410 | 2020-08-31 15:27:23 -0700 | [diff] [blame] | 1275 | |
Christoph Hellwig | 99dfc43 | 2021-01-24 11:02:37 +0100 | [diff] [blame] | 1276 | /** |
| 1277 | * bio_start_io_acct - start I/O accounting for bio based drivers |
| 1278 | * @bio: bio to start account for |
| 1279 | * |
| 1280 | * Returns the start time that should be passed back to bio_end_io_acct(). |
| 1281 | */ |
| 1282 | unsigned long bio_start_io_acct(struct bio *bio) |
Song Liu | 7b26410 | 2020-08-31 15:27:23 -0700 | [diff] [blame] | 1283 | { |
Christoph Hellwig | 99dfc43 | 2021-01-24 11:02:37 +0100 | [diff] [blame] | 1284 | return __part_start_io_acct(bio->bi_bdev, bio_sectors(bio), bio_op(bio)); |
Song Liu | 7b26410 | 2020-08-31 15:27:23 -0700 | [diff] [blame] | 1285 | } |
Christoph Hellwig | 99dfc43 | 2021-01-24 11:02:37 +0100 | [diff] [blame] | 1286 | EXPORT_SYMBOL_GPL(bio_start_io_acct); |
Song Liu | 7b26410 | 2020-08-31 15:27:23 -0700 | [diff] [blame] | 1287 | |
| 1288 | unsigned long disk_start_io_acct(struct gendisk *disk, unsigned int sectors, |
| 1289 | unsigned int op) |
| 1290 | { |
Christoph Hellwig | 8446fe9 | 2020-11-24 09:36:54 +0100 | [diff] [blame] | 1291 | return __part_start_io_acct(disk->part0, sectors, op); |
Song Liu | 7b26410 | 2020-08-31 15:27:23 -0700 | [diff] [blame] | 1292 | } |
Christoph Hellwig | 956d510 | 2020-05-27 07:24:04 +0200 | [diff] [blame] | 1293 | EXPORT_SYMBOL(disk_start_io_acct); |
| 1294 | |
Christoph Hellwig | 8446fe9 | 2020-11-24 09:36:54 +0100 | [diff] [blame] | 1295 | static void __part_end_io_acct(struct block_device *part, unsigned int op, |
Song Liu | 7b26410 | 2020-08-31 15:27:23 -0700 | [diff] [blame] | 1296 | unsigned long start_time) |
Christoph Hellwig | 956d510 | 2020-05-27 07:24:04 +0200 | [diff] [blame] | 1297 | { |
Christoph Hellwig | 956d510 | 2020-05-27 07:24:04 +0200 | [diff] [blame] | 1298 | const int sgrp = op_stat_group(op); |
| 1299 | unsigned long now = READ_ONCE(jiffies); |
| 1300 | unsigned long duration = now - start_time; |
| 1301 | |
| 1302 | part_stat_lock(); |
| 1303 | update_io_ticks(part, now, true); |
| 1304 | part_stat_add(part, nsecs[sgrp], jiffies_to_nsecs(duration)); |
| 1305 | part_stat_local_dec(part, in_flight[op_is_write(op)]); |
| 1306 | part_stat_unlock(); |
| 1307 | } |
Song Liu | 7b26410 | 2020-08-31 15:27:23 -0700 | [diff] [blame] | 1308 | |
Christoph Hellwig | 99dfc43 | 2021-01-24 11:02:37 +0100 | [diff] [blame] | 1309 | void bio_end_io_acct_remapped(struct bio *bio, unsigned long start_time, |
| 1310 | struct block_device *orig_bdev) |
Song Liu | 7b26410 | 2020-08-31 15:27:23 -0700 | [diff] [blame] | 1311 | { |
Christoph Hellwig | 99dfc43 | 2021-01-24 11:02:37 +0100 | [diff] [blame] | 1312 | __part_end_io_acct(orig_bdev, bio_op(bio), start_time); |
Song Liu | 7b26410 | 2020-08-31 15:27:23 -0700 | [diff] [blame] | 1313 | } |
Christoph Hellwig | 99dfc43 | 2021-01-24 11:02:37 +0100 | [diff] [blame] | 1314 | EXPORT_SYMBOL_GPL(bio_end_io_acct_remapped); |
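/*
 * Usage sketch (illustrative only, not part of this file): a bio-based
 * remapping driver charging I/O time to the device the bio was originally
 * submitted to.  The per-bio "io" context, its ->orig_bdev/->start_time
 * fields and the lower_bdev name are assumptions made for the sketch.
 *
 *	In the submission path, before remapping:
 *
 *		io->orig_bdev = bio->bi_bdev;
 *		io->start_time = bio_start_io_acct(bio);
 *		bio_set_dev(bio, lower_bdev);
 *		submit_bio_noacct(bio);
 *
 *	In the completion handler:
 *
 *		bio_end_io_acct_remapped(bio, io->start_time, io->orig_bdev);
 */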
Song Liu | 7b26410 | 2020-08-31 15:27:23 -0700 | [diff] [blame] | 1315 | |
| 1316 | void disk_end_io_acct(struct gendisk *disk, unsigned int op, |
| 1317 | unsigned long start_time) |
| 1318 | { |
Christoph Hellwig | 8446fe9 | 2020-11-24 09:36:54 +0100 | [diff] [blame] | 1319 | __part_end_io_acct(disk->part0, op, start_time); |
Song Liu | 7b26410 | 2020-08-31 15:27:23 -0700 | [diff] [blame] | 1320 | } |
Christoph Hellwig | 956d510 | 2020-05-27 07:24:04 +0200 | [diff] [blame] | 1321 | EXPORT_SYMBOL(disk_end_io_acct); |
| 1322 | |
Christoph Hellwig | ef71de8 | 2017-11-02 21:29:51 +0300 | [diff] [blame] | 1323 | /* |
| 1324 | * Steal bios from a request and add them to a bio list. |
| 1325 | * The request must not have been partially completed before. |
| 1326 | */ |
| 1327 | void blk_steal_bios(struct bio_list *list, struct request *rq) |
| 1328 | { |
| 1329 | if (rq->bio) { |
| 1330 | if (list->tail) |
| 1331 | list->tail->bi_next = rq->bio; |
| 1332 | else |
| 1333 | list->head = rq->bio; |
| 1334 | list->tail = rq->biotail; |
| 1335 | |
| 1336 | rq->bio = NULL; |
| 1337 | rq->biotail = NULL; |
| 1338 | } |
| 1339 | |
| 1340 | rq->__data_len = 0; |
| 1341 | } |
| 1342 | EXPORT_SYMBOL_GPL(blk_steal_bios); |
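/*
 * Usage sketch (illustrative only, not part of this file): a multipath-style
 * failover that steals the bios off a failed request, completes the request,
 * and re-submits the bios against another path.  new_path_bdev and the
 * surrounding context are assumptions.
 *
 *	struct bio_list requeue_list = BIO_EMPTY_LIST;
 *	struct bio *bio;
 *
 *	blk_steal_bios(&requeue_list, rq);
 *	blk_mq_end_request(rq, BLK_STS_OK);
 *
 *	while ((bio = bio_list_pop(&requeue_list))) {
 *		bio_set_dev(bio, new_path_bdev);
 *		submit_bio_noacct(bio);
 *	}
 */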
| 1343 | |
Ilya Loginov | 2d4dc89 | 2009-11-26 09:16:19 +0100 | [diff] [blame] | 1344 | #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE |
| 1345 | /** |
| 1346 | * rq_flush_dcache_pages - Helper function to flush all pages in a request |
| 1347 | * @rq: the request to be flushed |
| 1348 | * |
| 1349 | * Description: |
| 1350 | * Flush all pages in @rq. |
| 1351 | */ |
| 1352 | void rq_flush_dcache_pages(struct request *rq) |
| 1353 | { |
| 1354 | struct req_iterator iter; |
Kent Overstreet | 7988613 | 2013-11-23 17:19:00 -0800 | [diff] [blame] | 1355 | struct bio_vec bvec; |
Ilya Loginov | 2d4dc89 | 2009-11-26 09:16:19 +0100 | [diff] [blame] | 1356 | |
| 1357 | rq_for_each_segment(bvec, rq, iter) |
Kent Overstreet | 7988613 | 2013-11-23 17:19:00 -0800 | [diff] [blame] | 1358 | flush_dcache_page(bvec.bv_page); |
Ilya Loginov | 2d4dc89 | 2009-11-26 09:16:19 +0100 | [diff] [blame] | 1359 | } |
| 1360 | EXPORT_SYMBOL_GPL(rq_flush_dcache_pages); |
| 1361 | #endif |
| 1362 | |
Kiyoshi Ueda | ef9e3fa | 2008-10-01 16:12:15 +0200 | [diff] [blame] | 1363 | /** |
| 1364 | * blk_lld_busy - Check if underlying low-level drivers of a device are busy |
| 1365 | * @q : the queue of the device being checked |
| 1366 | * |
| 1367 | * Description: |
| 1368 | * Check if underlying low-level drivers of a device are busy. |
| 1369 | * If the drivers want to export their busy state, they must set own |
| 1370 | * exporting function using blk_queue_lld_busy() first. |
| 1371 | * |
| 1372 | * Basically, this function is used only by request stacking drivers |
| 1373 | * to stop dispatching requests to underlying devices when underlying |
| 1374 | * devices are busy. This behavior helps more I/O merging on the queue |
| 1375 | * of the request stacking driver and prevents I/O throughput regression |
| 1376 | * on burst I/O load. |
| 1377 | * |
| 1378 | * Return: |
| 1379 | * 0 - Not busy (The request stacking driver should dispatch request) |
| 1380 | * 1 - Busy (The request stacking driver should stop dispatching request) |
| 1381 | */ |
| 1382 | int blk_lld_busy(struct request_queue *q) |
| 1383 | { |
Jens Axboe | 344e9ff | 2018-11-15 12:22:51 -0700 | [diff] [blame] | 1384 | if (queue_is_mq(q) && q->mq_ops->busy) |
Jens Axboe | 9ba2052 | 2018-10-29 10:15:10 -0600 | [diff] [blame] | 1385 | return q->mq_ops->busy(q); |
Kiyoshi Ueda | ef9e3fa | 2008-10-01 16:12:15 +0200 | [diff] [blame] | 1386 | |
| 1387 | return 0; |
| 1388 | } |
| 1389 | EXPORT_SYMBOL_GPL(blk_lld_busy); |
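/*
 * Usage sketch (illustrative only, not part of this file): the bottom driver
 * exports its busy state with blk_queue_lld_busy(), and the stacking driver's
 * dispatch path backs off while the lower queue reports busy.  my_lld_busy()
 * and bottom_q are hypothetical.
 *
 *	Bottom driver setup:
 *		blk_queue_lld_busy(q, my_lld_busy);
 *
 *	In the stacking driver's dispatch path:
 *		if (blk_lld_busy(bottom_q))
 *			return BLK_STS_RESOURCE;
 */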
| 1390 | |
Mike Snitzer | 78d8e58 | 2015-06-26 10:01:13 -0400 | [diff] [blame] | 1391 | /** |
| 1392 | * blk_rq_unprep_clone - Helper function to free all bios in a cloned request |
| 1393 | * @rq: the clone request to be cleaned up |
| 1394 | * |
| 1395 | * Description: |
| 1396 | * Free all bios in @rq for a cloned request. |
| 1397 | */ |
| 1398 | void blk_rq_unprep_clone(struct request *rq) |
| 1399 | { |
| 1400 | struct bio *bio; |
| 1401 | |
| 1402 | while ((bio = rq->bio) != NULL) { |
| 1403 | rq->bio = bio->bi_next; |
| 1404 | |
| 1405 | bio_put(bio); |
| 1406 | } |
| 1407 | } |
| 1408 | EXPORT_SYMBOL_GPL(blk_rq_unprep_clone); |
| 1409 | |
Mike Snitzer | 78d8e58 | 2015-06-26 10:01:13 -0400 | [diff] [blame] | 1410 | /** |
| 1411 | * blk_rq_prep_clone - Helper function to setup clone request |
| 1412 | * @rq: the request to be setup |
| 1413 | * @rq_src: original request to be cloned |
| 1414 | * @bs: bio_set that bios for clone are allocated from |
| 1415 | * @gfp_mask: memory allocation mask for bio |
| 1416 | * @bio_ctr: setup function to be called for each clone bio. |
| 1417 | * Returns %0 for success, non %0 for failure. |
| 1418 | * @data: private data to be passed to @bio_ctr |
| 1419 | * |
| 1420 | * Description: |
| 1421 | * Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq. |
Mike Snitzer | 78d8e58 | 2015-06-26 10:01:13 -0400 | [diff] [blame] | 1422 | * Also, pages which the original bios are pointing to are not copied |
| 1423 | * and the cloned bios just point to the same pages.
| 1424 | * So cloned bios must be completed before original bios, which means |
| 1425 | * the caller must complete @rq before @rq_src. |
| 1426 | */ |
| 1427 | int blk_rq_prep_clone(struct request *rq, struct request *rq_src, |
| 1428 | struct bio_set *bs, gfp_t gfp_mask, |
| 1429 | int (*bio_ctr)(struct bio *, struct bio *, void *), |
| 1430 | void *data) |
| 1431 | { |
| 1432 | struct bio *bio, *bio_src; |
| 1433 | |
| 1434 | if (!bs) |
Kent Overstreet | f4f8154 | 2018-05-08 21:33:52 -0400 | [diff] [blame] | 1435 | bs = &fs_bio_set; |
Mike Snitzer | 78d8e58 | 2015-06-26 10:01:13 -0400 | [diff] [blame] | 1436 | |
| 1437 | __rq_for_each_bio(bio_src, rq_src) { |
| 1438 | bio = bio_clone_fast(bio_src, gfp_mask, bs); |
| 1439 | if (!bio) |
| 1440 | goto free_and_out; |
| 1441 | |
| 1442 | if (bio_ctr && bio_ctr(bio, bio_src, data)) |
| 1443 | goto free_and_out; |
| 1444 | |
| 1445 | if (rq->bio) { |
| 1446 | rq->biotail->bi_next = bio; |
| 1447 | rq->biotail = bio; |
Eric Biggers | 93f221a | 2020-09-15 20:53:14 -0700 | [diff] [blame] | 1448 | } else { |
Mike Snitzer | 78d8e58 | 2015-06-26 10:01:13 -0400 | [diff] [blame] | 1449 | rq->bio = rq->biotail = bio; |
Eric Biggers | 93f221a | 2020-09-15 20:53:14 -0700 | [diff] [blame] | 1450 | } |
| 1451 | bio = NULL; |
Mike Snitzer | 78d8e58 | 2015-06-26 10:01:13 -0400 | [diff] [blame] | 1452 | } |
| 1453 | |
Guoqing Jiang | 361301a | 2020-03-09 22:41:36 +0100 | [diff] [blame] | 1454 | /* Copy attributes of the original request to the clone request. */ |
| 1455 | rq->__sector = blk_rq_pos(rq_src); |
| 1456 | rq->__data_len = blk_rq_bytes(rq_src); |
| 1457 | if (rq_src->rq_flags & RQF_SPECIAL_PAYLOAD) { |
| 1458 | rq->rq_flags |= RQF_SPECIAL_PAYLOAD; |
| 1459 | rq->special_vec = rq_src->special_vec; |
| 1460 | } |
| 1461 | rq->nr_phys_segments = rq_src->nr_phys_segments; |
| 1462 | rq->ioprio = rq_src->ioprio; |
Mike Snitzer | 78d8e58 | 2015-06-26 10:01:13 -0400 | [diff] [blame] | 1463 | |
Eric Biggers | 93f221a | 2020-09-15 20:53:14 -0700 | [diff] [blame] | 1464 | if (rq->bio && blk_crypto_rq_bio_prep(rq, rq->bio, gfp_mask) < 0) |
| 1465 | goto free_and_out; |
Mike Snitzer | 78d8e58 | 2015-06-26 10:01:13 -0400 | [diff] [blame] | 1466 | |
| 1467 | return 0; |
| 1468 | |
| 1469 | free_and_out: |
| 1470 | if (bio) |
| 1471 | bio_put(bio); |
| 1472 | blk_rq_unprep_clone(rq); |
| 1473 | |
| 1474 | return -ENOMEM; |
Kiyoshi Ueda | b0fd271 | 2009-06-11 13:10:16 +0200 | [diff] [blame] | 1475 | } |
| 1476 | EXPORT_SYMBOL_GPL(blk_rq_prep_clone); |
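/*
 * Usage sketch (illustrative only, not part of this file): cloning an
 * original request and dispatching the clone to a lower queue, roughly the
 * request-based device-mapper pattern.  Clone allocation, my_bio_set and
 * my_clone_end_io() are assumptions, and error handling is simplified.
 *
 *	if (blk_rq_prep_clone(clone, rq, &my_bio_set, GFP_ATOMIC, NULL, NULL))
 *		return BLK_STS_RESOURCE;
 *
 *	clone->end_io = my_clone_end_io;
 *	clone->end_io_data = rq;
 *
 *	ret = blk_insert_cloned_request(clone->q, clone);
 *	if (ret != BLK_STS_OK)
 *		blk_rq_unprep_clone(clone);
 */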
| 1477 | |
Jens Axboe | 59c3d45 | 2014-04-08 09:15:35 -0600 | [diff] [blame] | 1478 | int kblockd_schedule_work(struct work_struct *work) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1479 | { |
| 1480 | return queue_work(kblockd_workqueue, work); |
| 1481 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1482 | EXPORT_SYMBOL(kblockd_schedule_work); |
| 1483 | |
Jens Axboe | 818cd1c | 2017-04-10 09:54:55 -0600 | [diff] [blame] | 1484 | int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, |
| 1485 | unsigned long delay) |
| 1486 | { |
| 1487 | return mod_delayed_work_on(cpu, kblockd_workqueue, dwork, delay); |
| 1488 | } |
| 1489 | EXPORT_SYMBOL(kblockd_mod_delayed_work_on); |
| 1490 | |
Jens Axboe | 47c122e | 2021-10-06 06:34:11 -0600 | [diff] [blame] | 1491 | void blk_start_plug_nr_ios(struct blk_plug *plug, unsigned short nr_ios) |
| 1492 | { |
| 1493 | struct task_struct *tsk = current; |
| 1494 | |
| 1495 | /* |
| 1496 | * If this is a nested plug, don't actually assign it. |
| 1497 | */ |
| 1498 | if (tsk->plug) |
| 1499 | return; |
| 1500 | |
Jens Axboe | bc490f8 | 2021-10-18 10:12:12 -0600 | [diff] [blame] | 1501 | plug->mq_list = NULL; |
Jens Axboe | 47c122e | 2021-10-06 06:34:11 -0600 | [diff] [blame] | 1502 | plug->cached_rq = NULL; |
| 1503 | plug->nr_ios = min_t(unsigned short, nr_ios, BLK_MAX_REQUEST_COUNT); |
| 1504 | plug->rq_count = 0; |
| 1505 | plug->multiple_queues = false; |
Jens Axboe | dc5fc361 | 2021-10-19 06:02:30 -0600 | [diff] [blame] | 1506 | plug->has_elevator = false; |
Jens Axboe | 47c122e | 2021-10-06 06:34:11 -0600 | [diff] [blame] | 1507 | plug->nowait = false; |
| 1508 | INIT_LIST_HEAD(&plug->cb_list); |
| 1509 | |
| 1510 | /* |
| 1511 | * Store ordering should not be needed here, since a potential |
| 1512 | * preempt will imply a full memory barrier |
| 1513 | */ |
| 1514 | tsk->plug = plug; |
| 1515 | } |
| 1516 | |
Suresh Jayaraman | 75df713 | 2011-09-21 10:00:16 +0200 | [diff] [blame] | 1517 | /** |
| 1518 | * blk_start_plug - initialize blk_plug and track it inside the task_struct |
| 1519 | * @plug: The &struct blk_plug that needs to be initialized |
| 1520 | * |
| 1521 | * Description: |
Jeff Moyer | 4040585 | 2019-01-08 16:57:34 -0500 | [diff] [blame] | 1522 | * blk_start_plug() indicates to the block layer an intent by the caller |
| 1523 | * to submit multiple I/O requests in a batch. The block layer may use |
| 1524 | * this hint to defer submitting I/Os from the caller until blk_finish_plug() |
| 1525 | * is called. However, the block layer may choose to submit requests |
| 1526 | * before a call to blk_finish_plug() if the number of queued I/Os |
| 1527 | * exceeds %BLK_MAX_REQUEST_COUNT, or if the size of the I/O is larger than |
| 1528 | * %BLK_PLUG_FLUSH_SIZE. The queued I/Os may also be submitted early if |
| 1529 | * the task schedules (see below). |
| 1530 | * |
Suresh Jayaraman | 75df713 | 2011-09-21 10:00:16 +0200 | [diff] [blame] | 1531 | * Tracking blk_plug inside the task_struct will help with auto-flushing the |
| 1532 | * pending I/O should the task end up blocking between blk_start_plug() and |
| 1533 | * blk_finish_plug(). This is important from a performance perspective, but |
| 1534 | * also ensures that we don't deadlock. For instance, if the task is blocking |
| 1535 | * for a memory allocation, memory reclaim could end up wanting to free a |
| 1536 | * page belonging to that request that is currently residing in our private |
| 1537 | * plug. By flushing the pending I/O when the process goes to sleep, we avoid |
| 1538 | * this kind of deadlock. |
| 1539 | */ |
Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 1540 | void blk_start_plug(struct blk_plug *plug) |
| 1541 | { |
Jens Axboe | 47c122e | 2021-10-06 06:34:11 -0600 | [diff] [blame] | 1542 | blk_start_plug_nr_ios(plug, 1); |
Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 1543 | } |
| 1544 | EXPORT_SYMBOL(blk_start_plug); |
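/*
 * Usage sketch (illustrative only, not part of this file): batching a series
 * of submissions under one plug so the block layer can merge and dispatch
 * them together.  The bios[] array stands in for whatever I/O the caller has
 * prepared.
 *
 *	struct blk_plug plug;
 *	int i;
 *
 *	blk_start_plug(&plug);
 *	for (i = 0; i < nr_bios; i++)
 *		submit_bio(bios[i]);
 *	blk_finish_plug(&plug);
 */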
| 1545 | |
NeilBrown | 74018dc | 2012-07-31 09:08:15 +0200 | [diff] [blame] | 1546 | static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule) |
NeilBrown | 048c937 | 2011-04-18 09:52:22 +0200 | [diff] [blame] | 1547 | { |
| 1548 | LIST_HEAD(callbacks); |
| 1549 | |
Shaohua Li | 2a7d555 | 2012-07-31 09:08:15 +0200 | [diff] [blame] | 1550 | while (!list_empty(&plug->cb_list)) { |
| 1551 | list_splice_init(&plug->cb_list, &callbacks); |
NeilBrown | 048c937 | 2011-04-18 09:52:22 +0200 | [diff] [blame] | 1552 | |
Shaohua Li | 2a7d555 | 2012-07-31 09:08:15 +0200 | [diff] [blame] | 1553 | while (!list_empty(&callbacks)) { |
| 1554 | struct blk_plug_cb *cb = list_first_entry(&callbacks, |
NeilBrown | 048c937 | 2011-04-18 09:52:22 +0200 | [diff] [blame] | 1555 | struct blk_plug_cb, |
| 1556 | list); |
Shaohua Li | 2a7d555 | 2012-07-31 09:08:15 +0200 | [diff] [blame] | 1557 | list_del(&cb->list); |
NeilBrown | 74018dc | 2012-07-31 09:08:15 +0200 | [diff] [blame] | 1558 | cb->callback(cb, from_schedule); |
Shaohua Li | 2a7d555 | 2012-07-31 09:08:15 +0200 | [diff] [blame] | 1559 | } |
NeilBrown | 048c937 | 2011-04-18 09:52:22 +0200 | [diff] [blame] | 1560 | } |
| 1561 | } |
| 1562 | |
NeilBrown | 9cbb175 | 2012-07-31 09:08:14 +0200 | [diff] [blame] | 1563 | struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, void *data, |
| 1564 | int size) |
| 1565 | { |
| 1566 | struct blk_plug *plug = current->plug; |
| 1567 | struct blk_plug_cb *cb; |
| 1568 | |
| 1569 | if (!plug) |
| 1570 | return NULL; |
| 1571 | |
| 1572 | list_for_each_entry(cb, &plug->cb_list, list) |
| 1573 | if (cb->callback == unplug && cb->data == data) |
| 1574 | return cb; |
| 1575 | |
| 1576 | /* Not currently on the callback list */ |
| 1577 | BUG_ON(size < sizeof(*cb)); |
| 1578 | cb = kzalloc(size, GFP_ATOMIC); |
| 1579 | if (cb) { |
| 1580 | cb->data = data; |
| 1581 | cb->callback = unplug; |
| 1582 | list_add(&cb->list, &plug->cb_list); |
| 1583 | } |
| 1584 | return cb; |
| 1585 | } |
| 1586 | EXPORT_SYMBOL(blk_check_plugged); |
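/*
 * Usage sketch (illustrative only, not part of this file): per-plug batching
 * of driver work, similar in spirit to how md/raid uses plug callbacks.
 * my_plug_cb, my_dev and my_unplug() are hypothetical; the callback runs when
 * the plug is flushed.
 *
 *	struct my_plug_cb {
 *		struct blk_plug_cb cb;
 *		struct bio_list pending;
 *	};
 *
 *	static void my_unplug(struct blk_plug_cb *cb, bool from_schedule)
 *	{
 *		struct my_plug_cb *mcb = container_of(cb, struct my_plug_cb, cb);
 *		struct bio *bio;
 *
 *		while ((bio = bio_list_pop(&mcb->pending)))
 *			submit_bio_noacct(bio);
 *	}
 *
 *	In the submission path:
 *
 *		struct blk_plug_cb *cb;
 *
 *		cb = blk_check_plugged(my_unplug, my_dev, sizeof(struct my_plug_cb));
 *		if (cb)
 *			bio_list_add(&container_of(cb, struct my_plug_cb, cb)->pending, bio);
 *		else
 *			submit_bio_noacct(bio);
 */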
| 1587 | |
Christoph Hellwig | 008f75a | 2021-10-20 16:41:19 +0200 | [diff] [blame] | 1588 | void blk_flush_plug(struct blk_plug *plug, bool from_schedule) |
Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 1589 | { |
Pavel Begunkov | b600455 | 2021-10-20 16:41:18 +0200 | [diff] [blame] | 1590 | if (!list_empty(&plug->cb_list)) |
| 1591 | flush_plug_callbacks(plug, from_schedule); |
Jens Axboe | bc490f8 | 2021-10-18 10:12:12 -0600 | [diff] [blame] | 1592 | if (!rq_list_empty(plug->mq_list)) |
Jens Axboe | 320ae51 | 2013-10-24 09:20:05 +0100 | [diff] [blame] | 1593 | blk_mq_flush_plug_list(plug, from_schedule); |
Jens Axboe | c5fc7b9 | 2021-11-03 05:49:07 -0600 | [diff] [blame] | 1594 | /* |
| 1595 | * Unconditionally flush out cached requests, even if the unplug |
| 1596 | * event came from schedule. Since we hold references to the
| 1597 | * queue for cached requests, we don't want a blocked task holding |
| 1598 | * up a queue freeze/quiesce event. |
| 1599 | */ |
| 1600 | if (unlikely(!rq_list_empty(plug->cached_rq))) |
Jens Axboe | 47c122e | 2021-10-06 06:34:11 -0600 | [diff] [blame] | 1601 | blk_mq_free_plug_rqs(plug); |
Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 1602 | } |
Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 1603 | |
Jeff Moyer | 4040585 | 2019-01-08 16:57:34 -0500 | [diff] [blame] | 1604 | /** |
| 1605 | * blk_finish_plug - mark the end of a batch of submitted I/O |
| 1606 | * @plug: The &struct blk_plug passed to blk_start_plug() |
| 1607 | * |
| 1608 | * Description: |
| 1609 | * Indicate that a batch of I/O submissions is complete. This function |
| 1610 | * must be paired with an initial call to blk_start_plug(). The intent |
| 1611 | * is to allow the block layer to optimize I/O submission. See the |
| 1612 | * documentation for blk_start_plug() for more information. |
| 1613 | */ |
Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 1614 | void blk_finish_plug(struct blk_plug *plug) |
| 1615 | { |
Christoph Hellwig | 008f75a | 2021-10-20 16:41:19 +0200 | [diff] [blame] | 1616 | if (plug == current->plug) { |
| 1617 | blk_flush_plug(plug, false); |
| 1618 | current->plug = NULL; |
| 1619 | } |
Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 1620 | } |
| 1621 | EXPORT_SYMBOL(blk_finish_plug); |
| 1622 | |
Ming Lei | 71ac860 | 2020-05-14 16:45:09 +0800 | [diff] [blame] | 1623 | void blk_io_schedule(void) |
| 1624 | { |
| 1625 | /* Prevent hang_check timer from firing at us during very long I/O */ |
| 1626 | unsigned long timeout = sysctl_hung_task_timeout_secs * HZ / 2; |
| 1627 | |
| 1628 | if (timeout) |
| 1629 | io_schedule_timeout(timeout); |
| 1630 | else |
| 1631 | io_schedule(); |
| 1632 | } |
| 1633 | EXPORT_SYMBOL_GPL(blk_io_schedule); |
| 1634 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1635 | int __init blk_dev_init(void) |
| 1636 | { |
Christoph Hellwig | ef295ec | 2016-10-28 08:48:16 -0600 | [diff] [blame] | 1637 | BUILD_BUG_ON(REQ_OP_LAST >= (1 << REQ_OP_BITS)); |
| 1638 | BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 * |
Pankaj Bharadiya | c593642 | 2019-12-09 10:31:43 -0800 | [diff] [blame] | 1639 | sizeof_field(struct request, cmd_flags)); |
Christoph Hellwig | ef295ec | 2016-10-28 08:48:16 -0600 | [diff] [blame] | 1640 | BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 * |
Pankaj Bharadiya | c593642 | 2019-12-09 10:31:43 -0800 | [diff] [blame] | 1641 | sizeof_field(struct bio, bi_opf)); |
Nikanth Karthikesan | 9eb55b0 | 2009-04-27 14:53:54 +0200 | [diff] [blame] | 1642 | |
Tejun Heo | 89b90be | 2011-01-03 15:01:47 +0100 | [diff] [blame] | 1643 | /* used for unplugging and affects IO latency/throughput - HIGHPRI */ |
| 1644 | kblockd_workqueue = alloc_workqueue("kblockd", |
Matias Bjørling | 28747fc | 2014-06-11 23:43:54 +0200 | [diff] [blame] | 1645 | WQ_MEM_RECLAIM | WQ_HIGHPRI, 0); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1646 | if (!kblockd_workqueue) |
| 1647 | panic("Failed to create kblockd\n"); |
| 1648 | |
Ilya Dryomov | c2789bd | 2015-11-20 22:16:46 +0100 | [diff] [blame] | 1649 | blk_requestq_cachep = kmem_cache_create("request_queue", |
Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 1650 | sizeof(struct request_queue), 0, SLAB_PANIC, NULL); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1651 | |
Omar Sandoval | 18fbda9 | 2017-01-31 14:53:20 -0800 | [diff] [blame] | 1652 | blk_debugfs_root = debugfs_create_dir("block", NULL); |
Omar Sandoval | 18fbda9 | 2017-01-31 14:53:20 -0800 | [diff] [blame] | 1653 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1654 | return 0; |
| 1655 | } |