/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_BLKDEV_H
#define _LINUX_BLKDEV_H

#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/major.h>
#include <linux/genhd.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/pagemap.h>
#include <linux/backing-dev-defs.h>
#include <linux/wait.h>
#include <linux/mempool.h>
#include <linux/pfn.h>
#include <linux/bio.h>
#include <linux/stringify.h>
#include <linux/gfp.h>
#include <linux/bsg.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/percpu-refcount.h>
#include <linux/scatterlist.h>
#include <linux/blkzoned.h>
#include <linux/pm.h>

struct module;
struct scsi_ioctl_command;

struct request_queue;
struct elevator_queue;
struct blk_trace;
struct request;
struct sg_io_hdr;
struct bsg_job;
struct blkcg_gq;
struct blk_flush_queue;
struct pr_ops;
struct rq_qos;
struct blk_queue_stats;
struct blk_stat_callback;
struct blk_keyslot_manager;

#define BLKDEV_MIN_RQ 4
#define BLKDEV_MAX_RQ 128 /* Default maximum */

/* Must be consistent with blk_mq_poll_stats_bkt() */
#define BLK_MQ_POLL_STATS_BKTS 16

/* Doing classic polling */
#define BLK_MQ_POLL_CLASSIC -1

/*
 * Maximum number of blkcg policies allowed to be registered concurrently.
 * Defined here to simplify include dependency.
 */
#define BLKCG_MAX_POLS 5

typedef void (rq_end_io_fn)(struct request *, blk_status_t);

/* request flags */
typedef __u32 __bitwise req_flags_t;

/* elevator knows about this request */
#define RQF_SORTED ((__force req_flags_t)(1 << 0))
/* drive already may have started this one */
#define RQF_STARTED ((__force req_flags_t)(1 << 1))
/* may not be passed by ioscheduler */
#define RQF_SOFTBARRIER ((__force req_flags_t)(1 << 3))
/* request for flush sequence */
#define RQF_FLUSH_SEQ ((__force req_flags_t)(1 << 4))
/* merge of different types, fail separately */
#define RQF_MIXED_MERGE ((__force req_flags_t)(1 << 5))
/* track inflight for MQ */
#define RQF_MQ_INFLIGHT ((__force req_flags_t)(1 << 6))
/* don't call prep for this one */
#define RQF_DONTPREP ((__force req_flags_t)(1 << 7))
/* set for "ide_preempt" requests and also for requests for which the SCSI
   "quiesce" state must be ignored. */
#define RQF_PREEMPT ((__force req_flags_t)(1 << 8))
/* vaguely specified driver internal error. Ignored by the block layer */
#define RQF_FAILED ((__force req_flags_t)(1 << 10))
/* don't warn about errors */
#define RQF_QUIET ((__force req_flags_t)(1 << 11))
/* elevator private data attached */
#define RQF_ELVPRIV ((__force req_flags_t)(1 << 12))
/* account into disk and partition IO statistics */
#define RQF_IO_STAT ((__force req_flags_t)(1 << 13))
/* request came from our alloc pool */
#define RQF_ALLOCED ((__force req_flags_t)(1 << 14))
/* runtime pm request */
#define RQF_PM ((__force req_flags_t)(1 << 15))
/* on IO scheduler merge hash */
#define RQF_HASHED ((__force req_flags_t)(1 << 16))
/* track IO completion time */
#define RQF_STATS ((__force req_flags_t)(1 << 17))
/* Look at ->special_vec for the actual data payload instead of the
   bio chain. */
#define RQF_SPECIAL_PAYLOAD ((__force req_flags_t)(1 << 18))
/* The per-zone write lock is held for this request */
#define RQF_ZONE_WRITE_LOCKED ((__force req_flags_t)(1 << 19))
/* already slept for hybrid poll */
#define RQF_MQ_POLL_SLEPT ((__force req_flags_t)(1 << 20))
/* ->timeout has been called, don't expire again */
#define RQF_TIMED_OUT ((__force req_flags_t)(1 << 21))

/* flags that prevent us from merging requests: */
#define RQF_NOMERGE_FLAGS \
        (RQF_STARTED | RQF_SOFTBARRIER | RQF_FLUSH_SEQ | RQF_SPECIAL_PAYLOAD)

/*
 * Request state for blk-mq.
 */
enum mq_rq_state {
        MQ_RQ_IDLE = 0,
        MQ_RQ_IN_FLIGHT = 1,
        MQ_RQ_COMPLETE = 2,
};

/*
 * Try to put the fields that are referenced together in the same cacheline.
 *
 * If you modify this structure, make sure to update blk_rq_init() and
 * especially blk_mq_rq_ctx_init() to take care of the added fields.
 */
struct request {
        struct request_queue *q;
        struct blk_mq_ctx *mq_ctx;
        struct blk_mq_hw_ctx *mq_hctx;

        unsigned int cmd_flags; /* op and common flags */
        req_flags_t rq_flags;

        int tag;
        int internal_tag;

        /* the following two fields are internal, NEVER access directly */
        unsigned int __data_len; /* total data len */
        sector_t __sector; /* sector cursor */

        struct bio *bio;
        struct bio *biotail;

        struct list_head queuelist;

        /*
         * The hash is used inside the scheduler, and killed once the
         * request reaches the dispatch list. The ipi_list is only used
         * to queue the request for softirq completion, which is long
         * after the request has been unhashed (and even removed from
         * the dispatch list).
         */
        union {
                struct hlist_node hash; /* merge hash */
                struct list_head ipi_list;
        };

        /*
         * The rb_node is only used inside the io scheduler, requests
         * are pruned when moved to the dispatch queue. So let the
         * completion_data share space with the rb_node.
         */
        union {
                struct rb_node rb_node; /* sort/lookup */
                struct bio_vec special_vec;
                void *completion_data;
                int error_count; /* for legacy drivers, don't use */
        };

        /*
         * Three pointers are available for the IO schedulers, if they need
         * more they have to dynamically allocate it. Flush requests are
         * never put on the IO scheduler. So let the flush fields share
         * space with the elevator data.
         */
        union {
                struct {
                        struct io_cq *icq;
                        void *priv[2];
                } elv;

                struct {
                        unsigned int seq;
                        struct list_head list;
                        rq_end_io_fn *saved_end_io;
                } flush;
        };

        struct gendisk *rq_disk;
        struct hd_struct *part;
#ifdef CONFIG_BLK_RQ_ALLOC_TIME
        /* Time that the first bio started allocating this request. */
        u64 alloc_time_ns;
#endif
        /* Time that this request was allocated for this IO. */
        u64 start_time_ns;
        /* Time that I/O was submitted to the device. */
        u64 io_start_time_ns;

#ifdef CONFIG_BLK_WBT
        unsigned short wbt_flags;
#endif
        /*
         * rq sectors used for blk stats. It has the same value
         * as blk_rq_sectors(rq), except that it is never zeroed
         * by completion.
         */
        unsigned short stats_sectors;

        /*
         * Number of scatter-gather DMA addr+len pairs after
         * physical address coalescing is performed.
         */
        unsigned short nr_phys_segments;

#if defined(CONFIG_BLK_DEV_INTEGRITY)
        unsigned short nr_integrity_segments;
#endif

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
        struct bio_crypt_ctx *crypt_ctx;
        struct blk_ksm_keyslot *crypt_keyslot;
#endif

        unsigned short write_hint;
        unsigned short ioprio;

        enum mq_rq_state state;
        refcount_t ref;

        unsigned int timeout;
        unsigned long deadline;

        union {
                struct __call_single_data csd;
                u64 fifo_time;
        };

        /*
         * completion callback.
         */
        rq_end_io_fn *end_io;
        void *end_io_data;
};
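
/*
 * Illustrative note (not part of the original header): driver code should not
 * read the internal __sector/__data_len fields above directly; the accessors
 * defined later in this file are the supported interface, e.g.:
 *
 *      sector_t pos = blk_rq_pos(rq);
 *      unsigned int bytes = blk_rq_bytes(rq);
 */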

static inline bool blk_op_is_scsi(unsigned int op)
{
        return op == REQ_OP_SCSI_IN || op == REQ_OP_SCSI_OUT;
}

static inline bool blk_op_is_private(unsigned int op)
{
        return op == REQ_OP_DRV_IN || op == REQ_OP_DRV_OUT;
}

static inline bool blk_rq_is_scsi(struct request *rq)
{
        return blk_op_is_scsi(req_op(rq));
}

static inline bool blk_rq_is_private(struct request *rq)
{
        return blk_op_is_private(req_op(rq));
}

static inline bool blk_rq_is_passthrough(struct request *rq)
{
        return blk_rq_is_scsi(rq) || blk_rq_is_private(rq);
}

static inline bool bio_is_passthrough(struct bio *bio)
{
        unsigned op = bio_op(bio);

        return blk_op_is_scsi(op) || blk_op_is_private(op);
}

static inline unsigned short req_get_ioprio(struct request *req)
{
        return req->ioprio;
}

#include <linux/elevator.h>

struct blk_queue_ctx;

struct bio_vec;

enum blk_eh_timer_return {
        BLK_EH_DONE, /* driver has completed the command */
        BLK_EH_RESET_TIMER, /* reset timer and try again */
};

enum blk_queue_state {
        Queue_down,
        Queue_up,
};

#define BLK_TAG_ALLOC_FIFO 0 /* allocate starting from 0 */
#define BLK_TAG_ALLOC_RR 1 /* allocate starting from last allocated tag */

#define BLK_SCSI_MAX_CMDS (256)
#define BLK_SCSI_CMD_PER_LONG (BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))

/*
 * Zoned block device models (zoned limit).
 *
 * Note: This needs to be ordered from the least to the most severe
 * restrictions for the inheritance in blk_stack_limits() to work.
 */
enum blk_zoned_model {
        BLK_ZONED_NONE = 0, /* Regular block device */
        BLK_ZONED_HA, /* Host-aware zoned block device */
        BLK_ZONED_HM, /* Host-managed zoned block device */
};

struct queue_limits {
        unsigned long bounce_pfn;
        unsigned long seg_boundary_mask;
        unsigned long virt_boundary_mask;

        unsigned int max_hw_sectors;
        unsigned int max_dev_sectors;
        unsigned int chunk_sectors;
        unsigned int max_sectors;
        unsigned int max_segment_size;
        unsigned int physical_block_size;
        unsigned int logical_block_size;
        unsigned int alignment_offset;
        unsigned int io_min;
        unsigned int io_opt;
        unsigned int max_discard_sectors;
        unsigned int max_hw_discard_sectors;
        unsigned int max_write_same_sectors;
        unsigned int max_write_zeroes_sectors;
        unsigned int max_zone_append_sectors;
        unsigned int discard_granularity;
        unsigned int discard_alignment;

        unsigned short max_segments;
        unsigned short max_integrity_segments;
        unsigned short max_discard_segments;

        unsigned char misaligned;
        unsigned char discard_misaligned;
        unsigned char raid_partial_stripes_expensive;
        enum blk_zoned_model zoned;
};

typedef int (*report_zones_cb)(struct blk_zone *zone, unsigned int idx,
                               void *data);

#ifdef CONFIG_BLK_DEV_ZONED

#define BLK_ALL_ZONES ((unsigned int)-1)
int blkdev_report_zones(struct block_device *bdev, sector_t sector,
                        unsigned int nr_zones, report_zones_cb cb, void *data);
unsigned int blkdev_nr_zones(struct gendisk *disk);
extern int blkdev_zone_mgmt(struct block_device *bdev, enum req_opf op,
                            sector_t sectors, sector_t nr_sectors,
                            gfp_t gfp_mask);
int blk_revalidate_disk_zones(struct gendisk *disk,
                              void (*update_driver_data)(struct gendisk *disk));

extern int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
                                     unsigned int cmd, unsigned long arg);
extern int blkdev_zone_mgmt_ioctl(struct block_device *bdev, fmode_t mode,
                                  unsigned int cmd, unsigned long arg);

#else /* CONFIG_BLK_DEV_ZONED */

static inline unsigned int blkdev_nr_zones(struct gendisk *disk)
{
        return 0;
}

static inline int blkdev_report_zones_ioctl(struct block_device *bdev,
                                            fmode_t mode, unsigned int cmd,
                                            unsigned long arg)
{
        return -ENOTTY;
}

static inline int blkdev_zone_mgmt_ioctl(struct block_device *bdev,
                                         fmode_t mode, unsigned int cmd,
                                         unsigned long arg)
{
        return -ENOTTY;
}

#endif /* CONFIG_BLK_DEV_ZONED */

struct request_queue {
        struct request *last_merge;
        struct elevator_queue *elevator;

        struct blk_queue_stats *stats;
        struct rq_qos *rq_qos;

        const struct blk_mq_ops *mq_ops;

        /* sw queues */
        struct blk_mq_ctx __percpu *queue_ctx;

        unsigned int queue_depth;

        /* hw dispatch queues */
        struct blk_mq_hw_ctx **queue_hw_ctx;
        unsigned int nr_hw_queues;

        struct backing_dev_info *backing_dev_info;

        /*
         * The queue owner gets to use this for whatever they like.
         * ll_rw_blk doesn't touch it.
         */
        void *queuedata;

        /*
         * various queue flags, see QUEUE_* below
         */
        unsigned long queue_flags;
        /*
         * Number of contexts that have called blk_set_pm_only(). If this
         * counter is above zero then only RQF_PM and RQF_PREEMPT requests are
         * processed.
         */
        atomic_t pm_only;

        /*
         * ida allocated id for this queue. Used to index queues from
         * ioctx.
         */
        int id;

        /*
         * queue needs bounce pages for pages above this limit
         */
        gfp_t bounce_gfp;

        spinlock_t queue_lock;

        /*
         * queue kobject
         */
        struct kobject kobj;

        /*
         * mq queue kobject
         */
        struct kobject *mq_kobj;

#ifdef CONFIG_BLK_DEV_INTEGRITY
        struct blk_integrity integrity;
#endif /* CONFIG_BLK_DEV_INTEGRITY */

#ifdef CONFIG_PM
        struct device *dev;
        enum rpm_status rpm_status;
        unsigned int nr_pending;
#endif

        /*
         * queue settings
         */
        unsigned long nr_requests; /* Max # of requests */

        unsigned int dma_pad_mask;
        unsigned int dma_alignment;

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
        /* Inline crypto capabilities */
        struct blk_keyslot_manager *ksm;
#endif

        unsigned int rq_timeout;
        int poll_nsec;

        struct blk_stat_callback *poll_cb;
        struct blk_rq_stat poll_stat[BLK_MQ_POLL_STATS_BKTS];

        struct timer_list timeout;
        struct work_struct timeout_work;

        atomic_t nr_active_requests_shared_sbitmap;

        struct list_head icq_list;
#ifdef CONFIG_BLK_CGROUP
        DECLARE_BITMAP (blkcg_pols, BLKCG_MAX_POLS);
        struct blkcg_gq *root_blkg;
        struct list_head blkg_list;
#endif

        struct queue_limits limits;

        unsigned int required_elevator_features;

#ifdef CONFIG_BLK_DEV_ZONED
        /*
         * Zoned block device information for request dispatch control.
         * nr_zones is the total number of zones of the device. This is always
         * 0 for regular block devices. conv_zones_bitmap is a bitmap of nr_zones
         * bits which indicates if a zone is conventional (bit set) or
         * sequential (bit clear). seq_zones_wlock is a bitmap of nr_zones
         * bits which indicates if a zone is write locked, that is, if a write
         * request targeting the zone was dispatched. All three fields are
         * initialized by the low level device driver (e.g. scsi/sd.c).
         * Stacking drivers (device mappers) may or may not initialize
         * these fields.
         *
         * Reads of this information must be protected with blk_queue_enter() /
         * blk_queue_exit(). Modifying this information is only allowed while
         * no requests are being processed. See also blk_mq_freeze_queue() and
         * blk_mq_unfreeze_queue().
         */
        unsigned int nr_zones;
        unsigned long *conv_zones_bitmap;
        unsigned long *seq_zones_wlock;
        unsigned int max_open_zones;
        unsigned int max_active_zones;
#endif /* CONFIG_BLK_DEV_ZONED */

        /*
         * sg stuff
         */
        unsigned int sg_timeout;
        unsigned int sg_reserved_size;
        int node;
        struct mutex debugfs_mutex;
#ifdef CONFIG_BLK_DEV_IO_TRACE
        struct blk_trace __rcu *blk_trace;
#endif
        /*
         * for flush operations
         */
        struct blk_flush_queue *fq;

        struct list_head requeue_list;
        spinlock_t requeue_lock;
        struct delayed_work requeue_work;

        struct mutex sysfs_lock;
        struct mutex sysfs_dir_lock;

        /*
         * for reusing dead hctx instance in case of updating
         * nr_hw_queues
         */
        struct list_head unused_hctx_list;
        spinlock_t unused_hctx_lock;

        int mq_freeze_depth;

#if defined(CONFIG_BLK_DEV_BSG)
        struct bsg_class_device bsg_dev;
#endif

#ifdef CONFIG_BLK_DEV_THROTTLING
        /* Throttle data */
        struct throtl_data *td;
#endif
        struct rcu_head rcu_head;
        wait_queue_head_t mq_freeze_wq;
        /*
         * Protect concurrent access to q_usage_counter by
         * percpu_ref_kill() and percpu_ref_reinit().
         */
        struct mutex mq_freeze_lock;
        struct percpu_ref q_usage_counter;

        struct blk_mq_tag_set *tag_set;
        struct list_head tag_set_list;
        struct bio_set bio_split;

        struct dentry *debugfs_dir;

#ifdef CONFIG_BLK_DEBUG_FS
        struct dentry *sched_debugfs_dir;
        struct dentry *rqos_debugfs_dir;
#endif

        bool mq_sysfs_init_done;

        size_t cmd_size;

#define BLK_MAX_WRITE_HINTS 5
        u64 write_hints[BLK_MAX_WRITE_HINTS];
};

/* Keep blk_queue_flag_name[] in sync with the definitions below */
#define QUEUE_FLAG_STOPPED 0 /* queue is stopped */
#define QUEUE_FLAG_DYING 1 /* queue being torn down */
#define QUEUE_FLAG_NOMERGES 3 /* disable merge attempts */
#define QUEUE_FLAG_SAME_COMP 4 /* complete on same CPU-group */
#define QUEUE_FLAG_FAIL_IO 5 /* fake timeout */
#define QUEUE_FLAG_NONROT 6 /* non-rotational device (SSD) */
#define QUEUE_FLAG_VIRT QUEUE_FLAG_NONROT /* paravirt device */
#define QUEUE_FLAG_IO_STAT 7 /* do disk/partitions IO accounting */
#define QUEUE_FLAG_DISCARD 8 /* supports DISCARD */
#define QUEUE_FLAG_NOXMERGES 9 /* No extended merges */
#define QUEUE_FLAG_ADD_RANDOM 10 /* Contributes to random pool */
#define QUEUE_FLAG_SECERASE 11 /* supports secure erase */
#define QUEUE_FLAG_SAME_FORCE 12 /* force complete on same CPU */
#define QUEUE_FLAG_DEAD 13 /* queue tear-down finished */
#define QUEUE_FLAG_INIT_DONE 14 /* queue is initialized */
#define QUEUE_FLAG_STABLE_WRITES 15 /* don't modify blks until WB is done */
#define QUEUE_FLAG_POLL 16 /* IO polling enabled if set */
#define QUEUE_FLAG_WC 17 /* Write back caching */
#define QUEUE_FLAG_FUA 18 /* device supports FUA writes */
#define QUEUE_FLAG_DAX 19 /* device supports DAX */
#define QUEUE_FLAG_STATS 20 /* track IO start and completion times */
#define QUEUE_FLAG_POLL_STATS 21 /* collecting stats for hybrid polling */
#define QUEUE_FLAG_REGISTERED 22 /* queue has been registered to a disk */
#define QUEUE_FLAG_SCSI_PASSTHROUGH 23 /* queue supports SCSI commands */
#define QUEUE_FLAG_QUIESCED 24 /* queue has been quiesced */
#define QUEUE_FLAG_PCI_P2PDMA 25 /* device supports PCI p2p requests */
#define QUEUE_FLAG_ZONE_RESETALL 26 /* supports Zone Reset All */
#define QUEUE_FLAG_RQ_ALLOC_TIME 27 /* record rq->alloc_time_ns */
#define QUEUE_FLAG_HCTX_ACTIVE 28 /* at least one blk-mq hctx is active */
#define QUEUE_FLAG_NOWAIT 29 /* device supports NOWAIT */

#define QUEUE_FLAG_MQ_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
                               (1 << QUEUE_FLAG_SAME_COMP) | \
                               (1 << QUEUE_FLAG_NOWAIT))

void blk_queue_flag_set(unsigned int flag, struct request_queue *q);
void blk_queue_flag_clear(unsigned int flag, struct request_queue *q);
bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q);
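
/*
 * Illustrative sketch (not part of the original header): a driver that knows
 * its device is non-rotational and supports discard could advertise that via
 * the helpers declared above, e.g.:
 *
 *      blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
 *      blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
 *      blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
 */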

#define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
#define blk_queue_dying(q) test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
#define blk_queue_dead(q) test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
#define blk_queue_init_done(q) test_bit(QUEUE_FLAG_INIT_DONE, &(q)->queue_flags)
#define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
#define blk_queue_noxmerges(q) \
        test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
#define blk_queue_nonrot(q) test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
#define blk_queue_stable_writes(q) \
        test_bit(QUEUE_FLAG_STABLE_WRITES, &(q)->queue_flags)
#define blk_queue_io_stat(q) test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
#define blk_queue_add_random(q) test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
#define blk_queue_discard(q) test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
#define blk_queue_zone_resetall(q) \
        test_bit(QUEUE_FLAG_ZONE_RESETALL, &(q)->queue_flags)
#define blk_queue_secure_erase(q) \
        (test_bit(QUEUE_FLAG_SECERASE, &(q)->queue_flags))
#define blk_queue_dax(q) test_bit(QUEUE_FLAG_DAX, &(q)->queue_flags)
#define blk_queue_scsi_passthrough(q) \
        test_bit(QUEUE_FLAG_SCSI_PASSTHROUGH, &(q)->queue_flags)
#define blk_queue_pci_p2pdma(q) \
        test_bit(QUEUE_FLAG_PCI_P2PDMA, &(q)->queue_flags)
#ifdef CONFIG_BLK_RQ_ALLOC_TIME
#define blk_queue_rq_alloc_time(q) \
        test_bit(QUEUE_FLAG_RQ_ALLOC_TIME, &(q)->queue_flags)
#else
#define blk_queue_rq_alloc_time(q) false
#endif

#define blk_noretry_request(rq) \
        ((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
                            REQ_FAILFAST_DRIVER))
#define blk_queue_quiesced(q) test_bit(QUEUE_FLAG_QUIESCED, &(q)->queue_flags)
#define blk_queue_pm_only(q) atomic_read(&(q)->pm_only)
#define blk_queue_fua(q) test_bit(QUEUE_FLAG_FUA, &(q)->queue_flags)
#define blk_queue_registered(q) test_bit(QUEUE_FLAG_REGISTERED, &(q)->queue_flags)
#define blk_queue_nowait(q) test_bit(QUEUE_FLAG_NOWAIT, &(q)->queue_flags)

extern void blk_set_pm_only(struct request_queue *q);
extern void blk_clear_pm_only(struct request_queue *q);

static inline bool blk_account_rq(struct request *rq)
{
        return (rq->rq_flags & RQF_STARTED) && !blk_rq_is_passthrough(rq);
}

#define list_entry_rq(ptr) list_entry((ptr), struct request, queuelist)

#define rq_data_dir(rq) (op_is_write(req_op(rq)) ? WRITE : READ)

#define rq_dma_dir(rq) \
        (op_is_write(req_op(rq)) ? DMA_TO_DEVICE : DMA_FROM_DEVICE)

#define dma_map_bvec(dev, bv, dir, attrs) \
        dma_map_page_attrs(dev, (bv)->bv_page, (bv)->bv_offset, (bv)->bv_len, \
                           (dir), (attrs))

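/*
 * Illustrative sketch (not part of the original header): rq_dma_dir() and
 * dma_map_bvec() are typically combined when a driver maps a request's
 * segments for DMA, e.g.:
 *
 *      dma_addr_t addr = dma_map_bvec(dev, &bv, rq_dma_dir(rq), 0);
 */
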
static inline bool queue_is_mq(struct request_queue *q)
{
        return q->mq_ops;
}

static inline enum blk_zoned_model
blk_queue_zoned_model(struct request_queue *q)
{
        return q->limits.zoned;
}

static inline bool blk_queue_is_zoned(struct request_queue *q)
{
        switch (blk_queue_zoned_model(q)) {
        case BLK_ZONED_HA:
        case BLK_ZONED_HM:
                return true;
        default:
                return false;
        }
}

static inline sector_t blk_queue_zone_sectors(struct request_queue *q)
{
        return blk_queue_is_zoned(q) ? q->limits.chunk_sectors : 0;
}

#ifdef CONFIG_BLK_DEV_ZONED
static inline unsigned int blk_queue_nr_zones(struct request_queue *q)
{
        return blk_queue_is_zoned(q) ? q->nr_zones : 0;
}

static inline unsigned int blk_queue_zone_no(struct request_queue *q,
                                             sector_t sector)
{
        if (!blk_queue_is_zoned(q))
                return 0;
        return sector >> ilog2(q->limits.chunk_sectors);
}

static inline bool blk_queue_zone_is_seq(struct request_queue *q,
                                         sector_t sector)
{
        if (!blk_queue_is_zoned(q))
                return false;
        if (!q->conv_zones_bitmap)
                return true;
        return !test_bit(blk_queue_zone_no(q, sector), q->conv_zones_bitmap);
}
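
/*
 * Worked example (illustrative, not part of the original header): for a zoned
 * device with 256 MiB zones, q->limits.chunk_sectors is 524288 == 2^19, so
 * blk_queue_zone_no() reduces to sector >> 19 and sector 1048576 falls in
 * zone 2.
 */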

static inline void blk_queue_max_open_zones(struct request_queue *q,
                                            unsigned int max_open_zones)
{
        q->max_open_zones = max_open_zones;
}

static inline unsigned int queue_max_open_zones(const struct request_queue *q)
{
        return q->max_open_zones;
}

static inline void blk_queue_max_active_zones(struct request_queue *q,
                                              unsigned int max_active_zones)
{
        q->max_active_zones = max_active_zones;
}

static inline unsigned int queue_max_active_zones(const struct request_queue *q)
{
        return q->max_active_zones;
}
#else /* CONFIG_BLK_DEV_ZONED */
static inline unsigned int blk_queue_nr_zones(struct request_queue *q)
{
        return 0;
}
static inline bool blk_queue_zone_is_seq(struct request_queue *q,
                                         sector_t sector)
{
        return false;
}
static inline unsigned int blk_queue_zone_no(struct request_queue *q,
                                             sector_t sector)
{
        return 0;
}
static inline unsigned int queue_max_open_zones(const struct request_queue *q)
{
        return 0;
}
static inline unsigned int queue_max_active_zones(const struct request_queue *q)
{
        return 0;
}
#endif /* CONFIG_BLK_DEV_ZONED */

static inline bool rq_is_sync(struct request *rq)
{
        return op_is_sync(rq->cmd_flags);
}

static inline bool rq_mergeable(struct request *rq)
{
        if (blk_rq_is_passthrough(rq))
                return false;

        if (req_op(rq) == REQ_OP_FLUSH)
                return false;

        if (req_op(rq) == REQ_OP_WRITE_ZEROES)
                return false;

        if (req_op(rq) == REQ_OP_ZONE_APPEND)
                return false;

        if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
                return false;
        if (rq->rq_flags & RQF_NOMERGE_FLAGS)
                return false;

        return true;
}

static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b)
{
        if (bio_page(a) == bio_page(b) &&
            bio_offset(a) == bio_offset(b))
                return true;

        return false;
}

static inline unsigned int blk_queue_depth(struct request_queue *q)
{
        if (q->queue_depth)
                return q->queue_depth;

        return q->nr_requests;
}

extern unsigned long blk_max_low_pfn, blk_max_pfn;

/*
 * standard bounce addresses:
 *
 * BLK_BOUNCE_HIGH : bounce all highmem pages
 * BLK_BOUNCE_ANY  : don't bounce anything
 * BLK_BOUNCE_ISA  : bounce pages above ISA DMA boundary
 */

#if BITS_PER_LONG == 32
#define BLK_BOUNCE_HIGH ((u64)blk_max_low_pfn << PAGE_SHIFT)
#else
#define BLK_BOUNCE_HIGH -1ULL
#endif
#define BLK_BOUNCE_ANY (-1ULL)
#define BLK_BOUNCE_ISA (DMA_BIT_MASK(24))

/*
 * default timeout for SG_IO if none specified
 */
#define BLK_DEFAULT_SG_TIMEOUT (60 * HZ)
#define BLK_MIN_SG_TIMEOUT (7 * HZ)

struct rq_map_data {
        struct page **pages;
        int page_order;
        int nr_entries;
        unsigned long offset;
        int null_mapped;
        int from_user;
};

struct req_iterator {
        struct bvec_iter iter;
        struct bio *bio;
};

/* This should not be used directly - use rq_for_each_segment */
#define for_each_bio(_bio) \
        for (; _bio; _bio = _bio->bi_next)
#define __rq_for_each_bio(_bio, rq) \
        if ((rq->bio)) \
                for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)

#define rq_for_each_segment(bvl, _rq, _iter) \
        __rq_for_each_bio(_iter.bio, _rq) \
                bio_for_each_segment(bvl, _iter.bio, _iter.iter)

#define rq_for_each_bvec(bvl, _rq, _iter) \
        __rq_for_each_bio(_iter.bio, _rq) \
                bio_for_each_bvec(bvl, _iter.bio, _iter.iter)

#define rq_iter_last(bvec, _iter) \
        (_iter.bio->bi_next == NULL && \
         bio_iter_last(bvec, _iter.iter))

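/*
 * Illustrative sketch (not part of the original header): walking every
 * segment of a request with the iterator macros above, e.g. to add up the
 * data length:
 *
 *      struct req_iterator iter;
 *      struct bio_vec bv;
 *      unsigned int len = 0;
 *
 *      rq_for_each_segment(bv, rq, iter)
 *              len += bv.bv_len;
 */
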
#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
# error "You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
#endif
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
extern void rq_flush_dcache_pages(struct request *rq);
#else
static inline void rq_flush_dcache_pages(struct request *rq)
{
}
#endif

extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
blk_qc_t submit_bio_noacct(struct bio *bio);
extern void blk_rq_init(struct request_queue *q, struct request *rq);
extern void blk_put_request(struct request *);
extern struct request *blk_get_request(struct request_queue *, unsigned int op,
                                       blk_mq_req_flags_t flags);
extern int blk_lld_busy(struct request_queue *q);
extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
                             struct bio_set *bs, gfp_t gfp_mask,
                             int (*bio_ctr)(struct bio *, struct bio *, void *),
                             void *data);
extern void blk_rq_unprep_clone(struct request *rq);
extern blk_status_t blk_insert_cloned_request(struct request_queue *q,
                                              struct request *rq);
extern int blk_rq_append_bio(struct request *rq, struct bio **bio);
extern void blk_queue_split(struct bio **);
extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int);
extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t,
                              unsigned int, void __user *);
extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
                          unsigned int, void __user *);
extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
                         struct scsi_ioctl_command __user *);
extern int get_sg_io_hdr(struct sg_io_hdr *hdr, const void __user *argp);
extern int put_sg_io_hdr(const struct sg_io_hdr *hdr, void __user *argp);

extern int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags);
extern void blk_queue_exit(struct request_queue *q);
extern void blk_sync_queue(struct request_queue *q);
extern int blk_rq_map_user(struct request_queue *, struct request *,
                           struct rq_map_data *, void __user *, unsigned long,
                           gfp_t);
extern int blk_rq_unmap_user(struct bio *);
extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t);
extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
                               struct rq_map_data *, const struct iov_iter *,
                               gfp_t);
extern void blk_execute_rq(struct request_queue *, struct gendisk *,
                           struct request *, int);
extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
                                  struct request *, int, rq_end_io_fn *);

/* Helper to convert REQ_OP_XXX to its string format XXX */
extern const char *blk_op_str(unsigned int op);

int blk_status_to_errno(blk_status_t status);
blk_status_t errno_to_blk_status(int errno);

int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin);

static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{
        return bdev->bd_disk->queue; /* this is never NULL */
}

/*
 * The basic unit of block I/O is a sector. It is used in a number of contexts
 * in Linux (blk, bio, genhd). The size of one sector is 512 = 2**9
 * bytes. Variables of type sector_t represent an offset or size that is a
 * multiple of 512 bytes. Hence these two constants.
 */
#ifndef SECTOR_SHIFT
#define SECTOR_SHIFT 9
#endif
#ifndef SECTOR_SIZE
#define SECTOR_SIZE (1 << SECTOR_SHIFT)
#endif
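
/*
 * Worked example (illustrative, not part of the original header): a 4096-byte
 * buffer spans 4096 >> SECTOR_SHIFT = 8 sectors, and 8 << SECTOR_SHIFT is
 * 4096 bytes again.
 */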

/*
 * blk_rq_pos()           : the current sector
 * blk_rq_bytes()         : bytes left in the entire request
 * blk_rq_cur_bytes()     : bytes left in the current segment
 * blk_rq_err_bytes()     : bytes left till the next error boundary
 * blk_rq_sectors()       : sectors left in the entire request
 * blk_rq_cur_sectors()   : sectors left in the current segment
 * blk_rq_stats_sectors() : sectors of the entire request used for stats
 */
static inline sector_t blk_rq_pos(const struct request *rq)
{
        return rq->__sector;
}

static inline unsigned int blk_rq_bytes(const struct request *rq)
{
        return rq->__data_len;
}

static inline int blk_rq_cur_bytes(const struct request *rq)
{
        return rq->bio ? bio_cur_bytes(rq->bio) : 0;
}

extern unsigned int blk_rq_err_bytes(const struct request *rq);

static inline unsigned int blk_rq_sectors(const struct request *rq)
{
        return blk_rq_bytes(rq) >> SECTOR_SHIFT;
}

static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
{
        return blk_rq_cur_bytes(rq) >> SECTOR_SHIFT;
}

static inline unsigned int blk_rq_stats_sectors(const struct request *rq)
{
        return rq->stats_sectors;
}
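
/*
 * Worked example (illustrative, not part of the original header): a
 * filesystem request starting at sector 2048 with 32 KiB left to transfer
 * reports blk_rq_pos() == 2048, blk_rq_bytes() == 32768 and
 * blk_rq_sectors() == 64.
 */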

#ifdef CONFIG_BLK_DEV_ZONED

/* Helper to convert BLK_ZONE_COND_XXX to its string format XXX */
const char *blk_zone_cond_str(enum blk_zone_cond zone_cond);

static inline unsigned int blk_rq_zone_no(struct request *rq)
{
        return blk_queue_zone_no(rq->q, blk_rq_pos(rq));
}

static inline unsigned int blk_rq_zone_is_seq(struct request *rq)
{
        return blk_queue_zone_is_seq(rq->q, blk_rq_pos(rq));
}
#endif /* CONFIG_BLK_DEV_ZONED */

/*
 * Some commands like WRITE SAME have a payload or data transfer size which
 * is different from the size of the request. Any driver that supports such
 * commands using the RQF_SPECIAL_PAYLOAD flag needs to use this helper to
 * calculate the data transfer size.
 */
static inline unsigned int blk_rq_payload_bytes(struct request *rq)
{
        if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
                return rq->special_vec.bv_len;
        return blk_rq_bytes(rq);
}
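
/*
 * Illustrative note (not part of the original header): for a WRITE SAME
 * request set up with RQF_SPECIAL_PAYLOAD, blk_rq_bytes() covers the whole
 * range written on disk while blk_rq_payload_bytes() returns only
 * special_vec.bv_len, i.e. the size of the single payload buffer that is
 * actually transferred.
 */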

/*
 * Return the first full biovec in the request. The caller needs to check that
 * there are any bvecs before calling this helper.
 */
static inline struct bio_vec req_bvec(struct request *rq)
{
        if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
                return rq->special_vec;
        return mp_bvec_iter_bvec(rq->bio->bi_io_vec, rq->bio->bi_iter);
}

static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
                                                     int op)
{
        if (unlikely(op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE))
                return min(q->limits.max_discard_sectors,
                           UINT_MAX >> SECTOR_SHIFT);

        if (unlikely(op == REQ_OP_WRITE_SAME))
                return q->limits.max_write_same_sectors;

        if (unlikely(op == REQ_OP_WRITE_ZEROES))
                return q->limits.max_write_zeroes_sectors;

        return q->limits.max_sectors;
}

/*
 * Return maximum size of a request at given offset. Only valid for
 * file system requests.
 */
static inline unsigned int blk_max_size_offset(struct request_queue *q,
                                               sector_t offset)
{
        unsigned int chunk_sectors = q->limits.chunk_sectors;

        if (!chunk_sectors)
                return q->limits.max_sectors;

        if (likely(is_power_of_2(chunk_sectors)))
                chunk_sectors -= offset & (chunk_sectors - 1);
        else
                chunk_sectors -= sector_div(offset, chunk_sectors);

        return min(q->limits.max_sectors, chunk_sectors);
}
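
/*
 * Worked example (illustrative, not part of the original header): with
 * chunk_sectors == 128 (a power of two) and offset == 100,
 * 128 - (100 & 127) = 28 sectors remain before the chunk boundary, so the
 * function returns min(q->limits.max_sectors, 28).
 */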
1084
Damien Le Moal17007f32016-07-20 21:40:47 -06001085static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
1086 sector_t offset)
Martin K. Petersenf31dc1c2012-09-18 12:19:26 -04001087{
1088 struct request_queue *q = rq->q;
1089
Christoph Hellwig57292b52017-01-31 16:57:29 +01001090 if (blk_rq_is_passthrough(rq))
Martin K. Petersenf31dc1c2012-09-18 12:19:26 -04001091 return q->limits.max_hw_sectors;
1092
Adrian Hunter7afafc82016-08-16 10:59:35 +03001093 if (!q->limits.chunk_sectors ||
1094 req_op(rq) == REQ_OP_DISCARD ||
1095 req_op(rq) == REQ_OP_SECURE_ERASE)
Mike Christie8fe0d472016-06-05 14:32:15 -05001096 return blk_queue_get_max_sectors(q, req_op(rq));
Jens Axboe762380a2014-06-05 13:38:39 -06001097
Damien Le Moal17007f32016-07-20 21:40:47 -06001098 return min(blk_max_size_offset(q, offset),
Mike Christie8fe0d472016-06-05 14:32:15 -05001099 blk_queue_get_max_sectors(q, req_op(rq)));
Martin K. Petersenf31dc1c2012-09-18 12:19:26 -04001100}
1101
Jun'ichi Nomura75afb352013-09-21 13:57:47 -06001102static inline unsigned int blk_rq_count_bios(struct request *rq)
1103{
1104 unsigned int nr_bios = 0;
1105 struct bio *bio;
1106
1107 __rq_for_each_bio(bio, rq)
1108 nr_bios++;
1109
1110 return nr_bios;
1111}
1112
Christoph Hellwigef71de82017-11-02 21:29:51 +03001113void blk_steal_bios(struct bio_list *list, struct request *rq);
1114
Tejun Heo9934c8c2009-05-08 11:54:16 +09001115/*
Tejun Heo2e60e022009-04-23 11:05:18 +09001116 * Request completion related functions.
1117 *
1118 * blk_update_request() completes the given number of bytes and updates
1119 * the request without completing it.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001120 */
Christoph Hellwig2a842ac2017-06-03 09:38:04 +02001121extern bool blk_update_request(struct request *rq, blk_status_t error,
Tejun Heo2e60e022009-04-23 11:05:18 +09001122 unsigned int nr_bytes);
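
/*
 * Illustrative sketch of partial completion from a driver's completion
 * handler ("done" is the number of bytes known to have completed; names are
 * hypothetical).  blk_update_request() returns true while the request still
 * has bytes pending; once it returns false the driver finishes the request,
 * here with __blk_mq_end_request() from <linux/blk-mq.h>:
 *
 *	if (!blk_update_request(rq, BLK_STS_OK, done))
 *		__blk_mq_end_request(rq, BLK_STS_OK);
 */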
Tejun Heo2e60e022009-04-23 11:05:18 +09001123
Jens Axboe242f9dc2008-09-14 05:55:09 -07001124extern void blk_abort_request(struct request *);
Jens Axboeff856ba2006-01-09 16:02:34 +01001125
Linus Torvalds1da177e2005-04-16 15:20:36 -07001126/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001127 * Access functions for manipulating queue properties
1128 */
Jens Axboe165125e2007-07-24 09:28:11 +02001129extern void blk_cleanup_queue(struct request_queue *);
Jens Axboe165125e2007-07-24 09:28:11 +02001130extern void blk_queue_bounce_limit(struct request_queue *, u64);
Martin K. Petersen086fa5f2010-02-26 00:20:38 -05001131extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
Jens Axboe762380a2014-06-05 13:38:39 -06001132extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int);
Martin K. Petersen8a783622010-02-26 00:20:39 -05001133extern void blk_queue_max_segments(struct request_queue *, unsigned short);
Christoph Hellwig1e739732017-02-08 14:46:49 +01001134extern void blk_queue_max_discard_segments(struct request_queue *,
1135 unsigned short);
Jens Axboe165125e2007-07-24 09:28:11 +02001136extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
Christoph Hellwig67efc922009-09-30 13:54:20 +02001137extern void blk_queue_max_discard_sectors(struct request_queue *q,
1138 unsigned int max_discard_sectors);
Martin K. Petersen4363ac72012-09-18 12:19:27 -04001139extern void blk_queue_max_write_same_sectors(struct request_queue *q,
1140 unsigned int max_write_same_sectors);
Chaitanya Kulkarnia6f07882016-11-30 12:28:59 -08001141extern void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
1142 unsigned int max_write_zeroes_sectors);
Mikulas Patockaad6bf882020-01-15 08:35:25 -05001143extern void blk_queue_logical_block_size(struct request_queue *, unsigned int);
Keith Busch0512a752020-05-12 17:55:47 +09001144extern void blk_queue_max_zone_append_sectors(struct request_queue *q,
1145 unsigned int max_zone_append_sectors);
Martin K. Petersen892b6f92010-10-13 21:18:03 +02001146extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
Martin K. Petersenc72758f2009-05-22 17:17:53 -04001147extern void blk_queue_alignment_offset(struct request_queue *q,
1148 unsigned int alignment);
Christoph Hellwigc2e4cd52020-09-24 08:51:34 +02001149void blk_queue_update_readahead(struct request_queue *q);
Martin K. Petersen7c958e32009-07-31 11:49:11 -04001150extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
Martin K. Petersenc72758f2009-05-22 17:17:53 -04001151extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
Martin K. Petersen3c5820c2009-09-11 21:54:52 +02001152extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
Martin K. Petersenc72758f2009-05-22 17:17:53 -04001153extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
Jens Axboed278d4a2016-03-30 10:21:08 -06001154extern void blk_set_queue_depth(struct request_queue *q, unsigned int depth);
Martin K. Petersene475bba2009-06-16 08:23:52 +02001155extern void blk_set_default_limits(struct queue_limits *lim);
Martin K. Petersenb1bd0552012-01-11 16:27:11 +01001156extern void blk_set_stacking_limits(struct queue_limits *lim);
Martin K. Petersenc72758f2009-05-22 17:17:53 -04001157extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
1158 sector_t offset);
1159extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
1160 sector_t offset);
FUJITA Tomonori27f82212008-07-04 09:30:03 +02001161extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
Jens Axboe165125e2007-07-24 09:28:11 +02001162extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
Keith Busch03100aa2015-08-19 14:24:05 -07001163extern void blk_queue_virt_boundary(struct request_queue *, unsigned long);
Jens Axboe165125e2007-07-24 09:28:11 +02001164extern void blk_queue_dma_alignment(struct request_queue *, int);
James Bottomley11c3e682007-12-31 16:37:00 -06001165extern void blk_queue_update_dma_alignment(struct request_queue *, int);
Jens Axboe242f9dc2008-09-14 05:55:09 -07001166extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
Jens Axboe93e9d8e2016-04-12 12:32:46 -06001167extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua);
Damien Le Moal68c43f12019-09-05 18:51:31 +09001168extern void blk_queue_required_elevator_features(struct request_queue *q,
1169 unsigned int features);
Yoshihiro Shimoda45147fb2019-08-28 21:35:42 +09001170extern bool blk_queue_can_use_dma_map_merging(struct request_queue *q,
1171 struct device *dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001172
Christoph Hellwig1e739732017-02-08 14:46:49 +01001173/*
1174 * Number of physical segments as sent to the device.
1175 *
1176 * Normally this is the number of discontiguous data segments sent by the
1177 * submitter. But for data-less commands like discard we might have no
1178 * actual data segments submitted, yet the driver might have to add its
1179 * own special payload. In that case we still return 1 here so that this
1180 * special payload will be mapped.
1181 */
Christoph Hellwigf9d03f92016-12-08 15:20:32 -07001182static inline unsigned short blk_rq_nr_phys_segments(struct request *rq)
1183{
1184 if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
1185 return 1;
1186 return rq->nr_phys_segments;
1187}
1188
Christoph Hellwig1e739732017-02-08 14:46:49 +01001189/*
1190 * Number of discard segments (or ranges) the driver needs to fill in.
1191 * Each discard bio merged into a request is counted as one segment.
1192 */
1193static inline unsigned short blk_rq_nr_discard_segments(struct request *rq)
1194{
1195 return max_t(unsigned short, rq->nr_phys_segments, 1);
1196}
1197
Christoph Hellwig89de1502020-04-14 09:42:22 +02001198int __blk_rq_map_sg(struct request_queue *q, struct request *rq,
1199 struct scatterlist *sglist, struct scatterlist **last_sg);
1200static inline int blk_rq_map_sg(struct request_queue *q, struct request *rq,
1201 struct scatterlist *sglist)
1202{
1203 struct scatterlist *last_sg = NULL;
1204
1205 return __blk_rq_map_sg(q, rq, sglist, &last_sg);
1206}
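
/*
 * Illustrative sketch of the usual mapping sequence in a blk-mq driver's
 * ->queue_rq() handler ("sgl" is a driver-owned scatterlist array sized for
 * at least blk_rq_nr_phys_segments(rq) entries):
 *
 *	int nents;
 *
 *	sg_init_table(sgl, blk_rq_nr_phys_segments(rq));
 *	nents = blk_rq_map_sg(rq->q, rq, sgl);
 *
 * At most blk_rq_nr_phys_segments(rq) entries are filled in, which is why
 * the table is sized from that helper.
 */
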
Linus Torvalds1da177e2005-04-16 15:20:36 -07001207extern void blk_dump_rq_flags(struct request *, char *);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001208
Tejun Heo09ac46c2011-12-14 00:33:38 +01001209bool __must_check blk_get_queue(struct request_queue *);
Christoph Hellwigc62b37d2020-07-01 10:59:43 +02001210struct request_queue *blk_alloc_queue(int node_id);
Jens Axboe165125e2007-07-24 09:28:11 +02001211extern void blk_put_queue(struct request_queue *);
Jens Axboe3f21c262015-06-05 10:57:37 -06001212extern void blk_set_queue_dying(struct request_queue *);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001213
Christoph Hellwig1a4dcfa2020-06-20 09:16:43 +02001214#ifdef CONFIG_BLOCK
Shaohua Li316cc672011-07-08 08:19:21 +02001215/*
Suresh Jayaraman75df7132011-09-21 10:00:16 +02001216 * blk_plug permits building a queue of related requests by holding the I/O
1217 * fragments for a short period. This allows merging of sequential requests
1218 * into a single larger request. As the requests are moved from a per-task list to
1219 * the device's request_queue in a batch, this results in improved scalability
1220 * as contention on the request_queue lock is reduced.
1221 *
1222 * It is ok not to disable preemption when adding the request to the plug list
1223 * or when attempting a merge, because blk_schedule_flush_plug() will only flush
1224 * the plug list when the task sleeps by itself; see schedule(), which calls
1225 * blk_schedule_flush_plug(). A usage sketch follows blk_finish_plug() below.
Shaohua Li316cc672011-07-08 08:19:21 +02001226 */
Jens Axboe73c10102011-03-08 13:19:51 +01001227struct blk_plug {
Jens Axboe320ae512013-10-24 09:20:05 +01001228 struct list_head mq_list; /* blk-mq requests */
Suresh Jayaraman75df7132011-09-21 10:00:16 +02001229 struct list_head cb_list; /* md requires an unplug callback */
Jens Axboe5f0ed772018-11-23 22:04:33 -07001230 unsigned short rq_count;
Jens Axboece5b0092018-11-27 17:13:56 -07001231 bool multiple_queues;
Jens Axboe5a473e82020-06-04 11:23:39 -06001232 bool nowait;
Jens Axboe73c10102011-03-08 13:19:51 +01001233};
Shaohua Li55c022b2011-07-08 08:19:20 +02001234#define BLK_MAX_REQUEST_COUNT 16
Shaohua Li50d24c32016-11-03 17:03:53 -07001235#define BLK_PLUG_FLUSH_SIZE (128 * 1024)
Shaohua Li55c022b2011-07-08 08:19:20 +02001236
NeilBrown9cbb1752012-07-31 09:08:14 +02001237struct blk_plug_cb;
NeilBrown74018dc2012-07-31 09:08:15 +02001238typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool);
NeilBrown048c9372011-04-18 09:52:22 +02001239struct blk_plug_cb {
1240 struct list_head list;
NeilBrown9cbb1752012-07-31 09:08:14 +02001241 blk_plug_cb_fn callback;
1242 void *data;
NeilBrown048c9372011-04-18 09:52:22 +02001243};
NeilBrown9cbb1752012-07-31 09:08:14 +02001244extern struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug,
1245 void *data, int size);
Jens Axboe73c10102011-03-08 13:19:51 +01001246extern void blk_start_plug(struct blk_plug *);
1247extern void blk_finish_plug(struct blk_plug *);
Jens Axboef6603782011-04-15 15:49:07 +02001248extern void blk_flush_plug_list(struct blk_plug *, bool);
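
/*
 * Typical plugging pattern for a task submitting a batch of bios
 * (illustrative sketch; "bios" and "nr" are hypothetical locals):
 *
 *	struct blk_plug plug;
 *	int i;
 *
 *	blk_start_plug(&plug);
 *	for (i = 0; i < nr; i++)
 *		submit_bio(bios[i]);
 *	blk_finish_plug(&plug);
 *
 * Requests generated while the plug is active are held on the per-task list
 * and handed to the driver in one batch by blk_finish_plug().
 */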
Jens Axboe73c10102011-03-08 13:19:51 +01001249
1250static inline void blk_flush_plug(struct task_struct *tsk)
1251{
1252 struct blk_plug *plug = tsk->plug;
1253
Christoph Hellwig88b996c2011-04-15 15:20:10 +02001254 if (plug)
Jens Axboea237c1c2011-04-16 13:27:55 +02001255 blk_flush_plug_list(plug, false);
1256}
1257
1258static inline void blk_schedule_flush_plug(struct task_struct *tsk)
1259{
1260 struct blk_plug *plug = tsk->plug;
1261
1262 if (plug)
Jens Axboef6603782011-04-15 15:49:07 +02001263 blk_flush_plug_list(plug, true);
Jens Axboe73c10102011-03-08 13:19:51 +01001264}
1265
1266static inline bool blk_needs_flush_plug(struct task_struct *tsk)
1267{
1268 struct blk_plug *plug = tsk->plug;
1269
Jens Axboe320ae512013-10-24 09:20:05 +01001270 return plug &&
Jens Axboea1ce35f2018-10-29 10:23:51 -06001271 (!list_empty(&plug->mq_list) ||
Jens Axboe320ae512013-10-24 09:20:05 +01001272 !list_empty(&plug->cb_list));
Jens Axboe73c10102011-03-08 13:19:51 +01001273}
1274
Christoph Hellwig1a4dcfa2020-06-20 09:16:43 +02001275int blkdev_issue_flush(struct block_device *, gfp_t);
1276long nr_blockdev_pages(void);
1277#else /* CONFIG_BLOCK */
1278struct blk_plug {
1279};
1280
1281static inline void blk_start_plug(struct blk_plug *plug)
1282{
1283}
1284
1285static inline void blk_finish_plug(struct blk_plug *plug)
1286{
1287}
1288
1289static inline void blk_flush_plug(struct task_struct *task)
1290{
1291}
1292
1293static inline void blk_schedule_flush_plug(struct task_struct *task)
1294{
1295}
1296
1297
1298static inline bool blk_needs_flush_plug(struct task_struct *tsk)
1299{
1300 return false;
1301}
1302
1303static inline int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask)
1304{
1305 return 0;
1306}
1307
1308static inline long nr_blockdev_pages(void)
1309{
1310 return 0;
1311}
1312#endif /* CONFIG_BLOCK */
1313
Ming Lei71ac8602020-05-14 16:45:09 +08001314extern void blk_io_schedule(void);
1315
Christoph Hellwigee472d82017-04-05 19:21:08 +02001316extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
1317 sector_t nr_sects, gfp_t gfp_mask, struct page *page);
Christoph Hellwige950fdf2016-07-19 11:23:33 +02001318
1319#define BLKDEV_DISCARD_SECURE (1 << 0) /* issue a secure erase */
Christoph Hellwigdd3932e2010-09-16 20:51:46 +02001320
Dmitry Monakhovfbd9b092010-04-28 17:55:06 +04001321extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
1322 sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
Christoph Hellwig38f25252016-04-16 14:55:28 -04001323extern int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
Christoph Hellwig288dab82016-06-09 16:00:36 +02001324 sector_t nr_sects, gfp_t gfp_mask, int flags,
Mike Christie469e3212016-06-05 14:31:49 -05001325 struct bio **biop);
Christoph Hellwigee472d82017-04-05 19:21:08 +02001326
1327#define BLKDEV_ZERO_NOUNMAP (1 << 0) /* do not free blocks */
Christoph Hellwigcb365b92017-04-05 19:21:10 +02001328#define BLKDEV_ZERO_NOFALLBACK (1 << 1) /* don't write explicit zeroes */
Christoph Hellwigee472d82017-04-05 19:21:08 +02001329
Chaitanya Kulkarnie73c23f2016-11-30 12:28:58 -08001330extern int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
1331 sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
Christoph Hellwigee472d82017-04-05 19:21:08 +02001332 unsigned flags);
Dmitry Monakhov3f14d792010-04-28 17:55:09 +04001333extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
Christoph Hellwigee472d82017-04-05 19:21:08 +02001334 sector_t nr_sects, gfp_t gfp_mask, unsigned flags);
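
/*
 * Illustrative sketch: zero a 1 MiB range of an already opened block device
 * and wait for completion ("bdev" is assumed to be valid):
 *
 *	sector_t nr = (1024 * 1024) >> SECTOR_SHIFT;
 *	int err;
 *
 *	err = blkdev_issue_zeroout(bdev, 0, nr, GFP_KERNEL,
 *				   BLKDEV_ZERO_NOFALLBACK);
 *
 * With BLKDEV_ZERO_NOFALLBACK the call fails with -EOPNOTSUPP instead of
 * falling back to writing explicit zero pages when the device cannot
 * offload the operation.
 */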
1335
Christoph Hellwig2cf6d262010-08-18 05:29:10 -04001336static inline int sb_issue_discard(struct super_block *sb, sector_t block,
1337 sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags)
David Woodhousefb2dce82008-08-05 18:01:53 +01001338{
Bart Van Assche233bde22018-03-14 15:48:06 -07001339 return blkdev_issue_discard(sb->s_bdev,
1340 block << (sb->s_blocksize_bits -
1341 SECTOR_SHIFT),
1342 nr_blocks << (sb->s_blocksize_bits -
1343 SECTOR_SHIFT),
Christoph Hellwig2cf6d262010-08-18 05:29:10 -04001344 gfp_mask, flags);
David Woodhousefb2dce82008-08-05 18:01:53 +01001345}
Lukas Czernere6fa0be2010-10-27 21:30:04 -04001346static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
Theodore Ts'oa107e5a2010-10-27 23:44:47 -04001347 sector_t nr_blocks, gfp_t gfp_mask)
Lukas Czernere6fa0be2010-10-27 21:30:04 -04001348{
1349 return blkdev_issue_zeroout(sb->s_bdev,
Bart Van Assche233bde22018-03-14 15:48:06 -07001350 block << (sb->s_blocksize_bits -
1351 SECTOR_SHIFT),
1352 nr_blocks << (sb->s_blocksize_bits -
1353 SECTOR_SHIFT),
Christoph Hellwigee472d82017-04-05 19:21:08 +02001354 gfp_mask, 0);
Lukas Czernere6fa0be2010-10-27 21:30:04 -04001355}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001356
Christoph Hellwigf00c4d82017-11-05 10:36:31 +03001357extern int blk_verify_command(unsigned char *cmd, fmode_t mode);
Adel Gadllah0b07de82008-06-26 13:48:27 +02001358
Christoph Hellwigfa01b1e2020-09-03 07:40:57 +02001359static inline bool bdev_is_partition(struct block_device *bdev)
1360{
1361 return bdev->bd_partno;
1362}
1363
Martin K. Peterseneb28d312010-02-26 00:20:37 -05001364enum blk_default_limits {
1365 BLK_MAX_SEGMENTS = 128,
1366 BLK_SAFE_MAX_SECTORS = 255,
Jeff Moyerd2be5372015-08-13 14:57:57 -04001367 BLK_DEF_MAX_SECTORS = 2560,
Martin K. Peterseneb28d312010-02-26 00:20:37 -05001368 BLK_MAX_SEGMENT_SIZE = 65536,
1369 BLK_SEG_BOUNDARY_MASK = 0xFFFFFFFFUL,
1370};
Milan Broz0e435ac2008-12-03 12:55:08 +01001371
Bart Van Asscheaf2c68f2019-08-01 15:50:40 -07001372static inline unsigned long queue_segment_boundary(const struct request_queue *q)
Martin K. Petersenae03bf62009-05-22 17:17:50 -04001373{
Martin K. Petersen025146e2009-05-22 17:17:51 -04001374 return q->limits.seg_boundary_mask;
Martin K. Petersenae03bf62009-05-22 17:17:50 -04001375}
1376
Bart Van Asscheaf2c68f2019-08-01 15:50:40 -07001377static inline unsigned long queue_virt_boundary(const struct request_queue *q)
Keith Busch03100aa2015-08-19 14:24:05 -07001378{
1379 return q->limits.virt_boundary_mask;
1380}
1381
Bart Van Asscheaf2c68f2019-08-01 15:50:40 -07001382static inline unsigned int queue_max_sectors(const struct request_queue *q)
Martin K. Petersenae03bf62009-05-22 17:17:50 -04001383{
Martin K. Petersen025146e2009-05-22 17:17:51 -04001384 return q->limits.max_sectors;
Martin K. Petersenae03bf62009-05-22 17:17:50 -04001385}
1386
Bart Van Asscheaf2c68f2019-08-01 15:50:40 -07001387static inline unsigned int queue_max_hw_sectors(const struct request_queue *q)
Martin K. Petersenae03bf62009-05-22 17:17:50 -04001388{
Martin K. Petersen025146e2009-05-22 17:17:51 -04001389 return q->limits.max_hw_sectors;
Martin K. Petersenae03bf62009-05-22 17:17:50 -04001390}
1391
Bart Van Asscheaf2c68f2019-08-01 15:50:40 -07001392static inline unsigned short queue_max_segments(const struct request_queue *q)
Martin K. Petersenae03bf62009-05-22 17:17:50 -04001393{
Martin K. Petersen8a783622010-02-26 00:20:39 -05001394 return q->limits.max_segments;
Martin K. Petersenae03bf62009-05-22 17:17:50 -04001395}
1396
Bart Van Asscheaf2c68f2019-08-01 15:50:40 -07001397static inline unsigned short queue_max_discard_segments(const struct request_queue *q)
Christoph Hellwig1e739732017-02-08 14:46:49 +01001398{
1399 return q->limits.max_discard_segments;
1400}
1401
Bart Van Asscheaf2c68f2019-08-01 15:50:40 -07001402static inline unsigned int queue_max_segment_size(const struct request_queue *q)
Martin K. Petersenae03bf62009-05-22 17:17:50 -04001403{
Martin K. Petersen025146e2009-05-22 17:17:51 -04001404 return q->limits.max_segment_size;
Martin K. Petersenae03bf62009-05-22 17:17:50 -04001405}
1406
Keith Busch0512a752020-05-12 17:55:47 +09001407static inline unsigned int queue_max_zone_append_sectors(const struct request_queue *q)
1408{
1409 return q->limits.max_zone_append_sectors;
1410}
1411
Mikulas Patockaad6bf882020-01-15 08:35:25 -05001412static inline unsigned queue_logical_block_size(const struct request_queue *q)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001413{
1414 int retval = 512;
1415
Martin K. Petersen025146e2009-05-22 17:17:51 -04001416 if (q && q->limits.logical_block_size)
1417 retval = q->limits.logical_block_size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001418
1419 return retval;
1420}
1421
Mikulas Patockaad6bf882020-01-15 08:35:25 -05001422static inline unsigned int bdev_logical_block_size(struct block_device *bdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001423{
Martin K. Petersene1defc42009-05-22 17:17:49 -04001424 return queue_logical_block_size(bdev_get_queue(bdev));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001425}
1426
Bart Van Asscheaf2c68f2019-08-01 15:50:40 -07001427static inline unsigned int queue_physical_block_size(const struct request_queue *q)
Martin K. Petersenc72758f2009-05-22 17:17:53 -04001428{
1429 return q->limits.physical_block_size;
1430}
1431
Martin K. Petersen892b6f92010-10-13 21:18:03 +02001432static inline unsigned int bdev_physical_block_size(struct block_device *bdev)
Martin K. Petersenac481c22009-10-03 20:52:01 +02001433{
1434 return queue_physical_block_size(bdev_get_queue(bdev));
1435}
1436
Bart Van Asscheaf2c68f2019-08-01 15:50:40 -07001437static inline unsigned int queue_io_min(const struct request_queue *q)
Martin K. Petersenc72758f2009-05-22 17:17:53 -04001438{
1439 return q->limits.io_min;
1440}
1441
Martin K. Petersenac481c22009-10-03 20:52:01 +02001442static inline int bdev_io_min(struct block_device *bdev)
1443{
1444 return queue_io_min(bdev_get_queue(bdev));
1445}
1446
Bart Van Asscheaf2c68f2019-08-01 15:50:40 -07001447static inline unsigned int queue_io_opt(const struct request_queue *q)
Martin K. Petersenc72758f2009-05-22 17:17:53 -04001448{
1449 return q->limits.io_opt;
1450}
1451
Martin K. Petersenac481c22009-10-03 20:52:01 +02001452static inline int bdev_io_opt(struct block_device *bdev)
1453{
1454 return queue_io_opt(bdev_get_queue(bdev));
1455}
1456
Bart Van Asscheaf2c68f2019-08-01 15:50:40 -07001457static inline int queue_alignment_offset(const struct request_queue *q)
Martin K. Petersenc72758f2009-05-22 17:17:53 -04001458{
Martin K. Petersenac481c22009-10-03 20:52:01 +02001459 if (q->limits.misaligned)
Martin K. Petersenc72758f2009-05-22 17:17:53 -04001460 return -1;
1461
Martin K. Petersenac481c22009-10-03 20:52:01 +02001462 return q->limits.alignment_offset;
Martin K. Petersenc72758f2009-05-22 17:17:53 -04001463}
1464
Martin K. Petersene03a72e2010-01-11 03:21:51 -05001465static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector)
Martin K. Petersen81744ee2009-12-29 08:35:35 +01001466{
1467 unsigned int granularity = max(lim->physical_block_size, lim->io_min);
Bart Van Assche233bde22018-03-14 15:48:06 -07001468 unsigned int alignment = sector_div(sector, granularity >> SECTOR_SHIFT)
1469 << SECTOR_SHIFT;
Martin K. Petersen81744ee2009-12-29 08:35:35 +01001470
Mike Snitzerb8839b82014-10-08 18:26:13 -04001471 return (granularity + lim->alignment_offset - alignment) % granularity;
Martin K. Petersenc72758f2009-05-22 17:17:53 -04001472}
1473
Martin K. Petersenac481c22009-10-03 20:52:01 +02001474static inline int bdev_alignment_offset(struct block_device *bdev)
1475{
1476 struct request_queue *q = bdev_get_queue(bdev);
1477
1478 if (q->limits.misaligned)
1479 return -1;
Christoph Hellwigfa01b1e2020-09-03 07:40:57 +02001480 if (bdev_is_partition(bdev))
Christoph Hellwig7b8917f2020-08-31 20:02:33 +02001481 return queue_limit_alignment_offset(&q->limits,
1482 bdev->bd_part->start_sect);
Martin K. Petersenac481c22009-10-03 20:52:01 +02001483 return q->limits.alignment_offset;
1484}
1485
Bart Van Asscheaf2c68f2019-08-01 15:50:40 -07001486static inline int queue_discard_alignment(const struct request_queue *q)
Martin K. Petersen86b37282009-11-10 11:50:21 +01001487{
1488 if (q->limits.discard_misaligned)
1489 return -1;
1490
1491 return q->limits.discard_alignment;
1492}
1493
Martin K. Petersene03a72e2010-01-11 03:21:51 -05001494static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector_t sector)
Martin K. Petersen86b37282009-11-10 11:50:21 +01001495{
Linus Torvalds59771072012-12-19 07:18:35 -08001496 unsigned int alignment, granularity, offset;
Martin K. Petersendd3d1452010-01-11 03:21:48 -05001497
Martin K. Petersena934a002011-05-18 10:37:35 +02001498 if (!lim->max_discard_sectors)
1499 return 0;
1500
Linus Torvalds59771072012-12-19 07:18:35 -08001501 /* Why are these in bytes, not sectors? */
Bart Van Assche233bde22018-03-14 15:48:06 -07001502 alignment = lim->discard_alignment >> SECTOR_SHIFT;
1503 granularity = lim->discard_granularity >> SECTOR_SHIFT;
Linus Torvalds59771072012-12-19 07:18:35 -08001504 if (!granularity)
1505 return 0;
1506
1507 /* Offset of the partition start in 'granularity' sectors */
1508 offset = sector_div(sector, granularity);
1509
1510 /* And why do we do this modulus *again* in blkdev_issue_discard()? */
1511 offset = (granularity + alignment - offset) % granularity;
1512
1513 /* Turn it back into bytes, gaah */
Bart Van Assche233bde22018-03-14 15:48:06 -07001514 return offset << SECTOR_SHIFT;
Martin K. Petersen86b37282009-11-10 11:50:21 +01001515}
1516
Paolo Bonzinic6e66632012-08-02 09:48:50 +02001517static inline int bdev_discard_alignment(struct block_device *bdev)
1518{
1519 struct request_queue *q = bdev_get_queue(bdev);
1520
Christoph Hellwigfa01b1e2020-09-03 07:40:57 +02001521 if (bdev_is_partition(bdev))
Christoph Hellwig7cf34d92020-08-31 20:02:34 +02001522 return queue_limit_discard_alignment(&q->limits,
1523 bdev->bd_part->start_sect);
Paolo Bonzinic6e66632012-08-02 09:48:50 +02001524 return q->limits.discard_alignment;
1525}
1526
Martin K. Petersen4363ac72012-09-18 12:19:27 -04001527static inline unsigned int bdev_write_same(struct block_device *bdev)
1528{
1529 struct request_queue *q = bdev_get_queue(bdev);
1530
1531 if (q)
1532 return q->limits.max_write_same_sectors;
1533
1534 return 0;
1535}
1536
Chaitanya Kulkarnia6f07882016-11-30 12:28:59 -08001537static inline unsigned int bdev_write_zeroes_sectors(struct block_device *bdev)
1538{
1539 struct request_queue *q = bdev_get_queue(bdev);
1540
1541 if (q)
1542 return q->limits.max_write_zeroes_sectors;
1543
1544 return 0;
1545}
1546
Damien Le Moal797476b2016-10-18 15:40:29 +09001547static inline enum blk_zoned_model bdev_zoned_model(struct block_device *bdev)
1548{
1549 struct request_queue *q = bdev_get_queue(bdev);
1550
1551 if (q)
1552 return blk_queue_zoned_model(q);
1553
1554 return BLK_ZONED_NONE;
1555}
1556
1557static inline bool bdev_is_zoned(struct block_device *bdev)
1558{
1559 struct request_queue *q = bdev_get_queue(bdev);
1560
1561 if (q)
1562 return blk_queue_is_zoned(q);
1563
1564 return false;
1565}
1566
Damien Le Moal113ab722019-07-10 13:53:10 +09001567static inline sector_t bdev_zone_sectors(struct block_device *bdev)
Hannes Reinecke6a0cb1b2016-10-18 15:40:33 +09001568{
1569 struct request_queue *q = bdev_get_queue(bdev);
1570
1571 if (q)
Damien Le Moalf99e8642017-01-12 07:58:32 -07001572 return blk_queue_zone_sectors(q);
Christoph Hellwig6cc77e92017-12-21 15:43:38 +09001573 return 0;
1574}
Hannes Reinecke6a0cb1b2016-10-18 15:40:33 +09001575
Niklas Cassele15864f2020-07-14 23:18:23 +02001576static inline unsigned int bdev_max_open_zones(struct block_device *bdev)
1577{
1578 struct request_queue *q = bdev_get_queue(bdev);
1579
1580 if (q)
1581 return queue_max_open_zones(q);
1582 return 0;
1583}
1584
Niklas Cassel659bf822020-07-14 23:18:24 +02001585static inline unsigned int bdev_max_active_zones(struct block_device *bdev)
1586{
1587 struct request_queue *q = bdev_get_queue(bdev);
1588
1589 if (q)
1590 return queue_max_active_zones(q);
1591 return 0;
1592}
1593
Bart Van Asscheaf2c68f2019-08-01 15:50:40 -07001594static inline int queue_dma_alignment(const struct request_queue *q)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001595{
Pete Wyckoff482eb682008-01-01 10:23:02 -05001596 return q ? q->dma_alignment : 511;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001597}
1598
Namhyung Kim14417792010-09-15 13:08:27 +02001599static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr,
FUJITA Tomonori87904072008-08-28 15:05:58 +09001600 unsigned int len)
1601{
1602 unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask;
Namhyung Kim14417792010-09-15 13:08:27 +02001603 return !(addr & alignment) && !(len & alignment);
FUJITA Tomonori87904072008-08-28 15:05:58 +09001604}
1605
Linus Torvalds1da177e2005-04-16 15:20:36 -07001606/* assumes size > 256 */
1607static inline unsigned int blksize_bits(unsigned int size)
1608{
1609 unsigned int bits = 8;
1610 do {
1611 bits++;
1612 size >>= 1;
1613 } while (size > 256);
1614 return bits;
1615}
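
/* e.g. blksize_bits(512) == 9 and blksize_bits(4096) == 12 */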
1616
Adrian Bunk2befb9e2005-09-10 00:27:17 -07001617static inline unsigned int block_size(struct block_device *bdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001618{
Christoph Hellwig6b7b1812020-06-26 10:01:55 +02001619 return 1 << bdev->bd_inode->i_blkbits;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001620}
1621
Jens Axboe59c3d452014-04-08 09:15:35 -06001622int kblockd_schedule_work(struct work_struct *work);
Jens Axboe818cd1c2017-04-10 09:54:55 -06001623int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001624
Linus Torvalds1da177e2005-04-16 15:20:36 -07001625#define MODULE_ALIAS_BLOCKDEV(major,minor) \
1626 MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
1627#define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
1628 MODULE_ALIAS("block-major-" __stringify(major) "-*")
1629
Martin K. Petersen7ba1ba12008-06-30 20:04:41 +02001630#if defined(CONFIG_BLK_DEV_INTEGRITY)
1631
Martin K. Petersen8288f492014-09-26 19:20:02 -04001632enum blk_integrity_flags {
1633 BLK_INTEGRITY_VERIFY = 1 << 0,
1634 BLK_INTEGRITY_GENERATE = 1 << 1,
Martin K. Petersen3aec2f42014-09-26 19:20:03 -04001635 BLK_INTEGRITY_DEVICE_CAPABLE = 1 << 2,
Martin K. Petersenaae7df52014-09-26 19:20:05 -04001636 BLK_INTEGRITY_IP_CHECKSUM = 1 << 3,
Martin K. Petersen8288f492014-09-26 19:20:02 -04001637};
Martin K. Petersen7ba1ba12008-06-30 20:04:41 +02001638
Martin K. Petersen18593082014-09-26 19:20:01 -04001639struct blk_integrity_iter {
Martin K. Petersen7ba1ba12008-06-30 20:04:41 +02001640 void *prot_buf;
1641 void *data_buf;
Martin K. Petersen3be91c42014-09-26 19:19:59 -04001642 sector_t seed;
Martin K. Petersen7ba1ba12008-06-30 20:04:41 +02001643 unsigned int data_size;
Martin K. Petersen3be91c42014-09-26 19:19:59 -04001644 unsigned short interval;
Martin K. Petersen7ba1ba12008-06-30 20:04:41 +02001645 const char *disk_name;
1646};
1647
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +02001648typedef blk_status_t (integrity_processing_fn) (struct blk_integrity_iter *);
Max Gurtovoy54d4e6a2019-09-16 18:44:29 +03001649typedef void (integrity_prepare_fn) (struct request *);
1650typedef void (integrity_complete_fn) (struct request *, unsigned int);
Martin K. Petersen7ba1ba12008-06-30 20:04:41 +02001651
Martin K. Petersen0f8087e2015-10-21 13:19:33 -04001652struct blk_integrity_profile {
1653 integrity_processing_fn *generate_fn;
1654 integrity_processing_fn *verify_fn;
Max Gurtovoy54d4e6a2019-09-16 18:44:29 +03001655 integrity_prepare_fn *prepare_fn;
1656 integrity_complete_fn *complete_fn;
Martin K. Petersen0f8087e2015-10-21 13:19:33 -04001657 const char *name;
1658};
1659
Martin K. Petersen25520d52015-10-21 13:19:49 -04001660extern void blk_integrity_register(struct gendisk *, struct blk_integrity *);
Martin K. Petersen7ba1ba12008-06-30 20:04:41 +02001661extern void blk_integrity_unregister(struct gendisk *);
Martin K. Petersenad7fce92008-10-01 03:38:39 -04001662extern int blk_integrity_compare(struct gendisk *, struct gendisk *);
Martin K. Petersen13f05c82010-09-10 20:50:10 +02001663extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *,
1664 struct scatterlist *);
1665extern int blk_rq_count_integrity_sg(struct request_queue *, struct bio *);
Martin K. Petersen4eaf99b2014-09-26 19:20:06 -04001666extern bool blk_integrity_merge_rq(struct request_queue *, struct request *,
1667 struct request *);
1668extern bool blk_integrity_merge_bio(struct request_queue *, struct request *,
1669 struct bio *);
Martin K. Petersen7ba1ba12008-06-30 20:04:41 +02001670
Martin K. Petersen25520d52015-10-21 13:19:49 -04001671static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
1672{
Dan Williamsac6fc482015-10-21 13:20:18 -04001673 struct blk_integrity *bi = &disk->queue->integrity;
Martin K. Petersen25520d52015-10-21 13:19:49 -04001674
1675 if (!bi->profile)
1676 return NULL;
1677
1678 return bi;
1679}
1680
Jens Axboeb04accc2008-10-02 12:53:22 +02001681static inline
1682struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
1683{
Martin K. Petersen25520d52015-10-21 13:19:49 -04001684 return blk_get_integrity(bdev->bd_disk);
Martin K. Petersenb02739b2008-10-02 18:47:49 +02001685}
1686
Satya Tangiralad145dc22020-05-14 00:37:19 +00001687static inline bool
1688blk_integrity_queue_supports_integrity(struct request_queue *q)
1689{
1690 return q->integrity.profile;
1691}
1692
Martin K. Petersen180b2f92014-09-26 19:19:56 -04001693static inline bool blk_integrity_rq(struct request *rq)
Martin K. Petersen7ba1ba12008-06-30 20:04:41 +02001694{
Martin K. Petersen180b2f92014-09-26 19:19:56 -04001695 return rq->cmd_flags & REQ_INTEGRITY;
Martin K. Petersen7ba1ba12008-06-30 20:04:41 +02001696}
1697
Martin K. Petersen13f05c82010-09-10 20:50:10 +02001698static inline void blk_queue_max_integrity_segments(struct request_queue *q,
1699 unsigned int segs)
1700{
1701 q->limits.max_integrity_segments = segs;
1702}
1703
1704static inline unsigned short
Bart Van Asscheaf2c68f2019-08-01 15:50:40 -07001705queue_max_integrity_segments(const struct request_queue *q)
Martin K. Petersen13f05c82010-09-10 20:50:10 +02001706{
1707 return q->limits.max_integrity_segments;
1708}
1709
Greg Edwards359f6422018-07-25 10:22:58 -04001710/**
1711 * bio_integrity_intervals - Return number of integrity intervals for a bio
1712 * @bi: blk_integrity profile for device
1713 * @sectors: Size of the bio in 512-byte sectors
1714 *
1715 * Description: The block layer calculates everything in 512 byte
1716 * sectors but integrity metadata is done in terms of the data integrity
1717 * interval size of the storage device. Convert the block layer sectors
1718 * to the appropriate number of integrity intervals.
1719 */
1720static inline unsigned int bio_integrity_intervals(struct blk_integrity *bi,
1721 unsigned int sectors)
1722{
1723 return sectors >> (bi->interval_exp - 9);
1724}
1725
1726static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi,
1727 unsigned int sectors)
1728{
1729 return bio_integrity_intervals(bi, sectors) * bi->tuple_size;
1730}
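
/*
 * Worked example: a device with a 4096-byte integrity interval has
 * interval_exp == 12, so a 4 KiB bio (8 sectors of 512 bytes) spans
 * 8 >> (12 - 9) == 1 interval and carries bio_integrity_bytes() ==
 * 1 * tuple_size bytes of protection information.
 */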
1731
Christoph Hellwig2a876f52019-03-03 08:38:29 -07001732/*
1733 * Return the first bvec that contains integrity data. Only drivers that are
1734 * limited to a single integrity segment should use this helper.
1735 */
1736static inline struct bio_vec *rq_integrity_vec(struct request *rq)
1737{
1738 if (WARN_ON_ONCE(queue_max_integrity_segments(rq->q) > 1))
1739 return NULL;
1740 return rq->bio->bi_integrity->bip_vec;
1741}
1742
Martin K. Petersen7ba1ba12008-06-30 20:04:41 +02001743#else /* CONFIG_BLK_DEV_INTEGRITY */
1744
Stephen Rothwellfd832402012-01-12 09:17:30 +01001745struct bio;
1746struct block_device;
1747struct gendisk;
1748struct blk_integrity;
1749
1750static inline int blk_integrity_rq(struct request *rq)
1751{
1752 return 0;
1753}
1754static inline int blk_rq_count_integrity_sg(struct request_queue *q,
1755 struct bio *b)
1756{
1757 return 0;
1758}
1759static inline int blk_rq_map_integrity_sg(struct request_queue *q,
1760 struct bio *b,
1761 struct scatterlist *s)
1762{
1763 return 0;
1764}
1765static inline struct blk_integrity *bdev_get_integrity(struct block_device *b)
1766{
Michele Curti61a04e52014-10-09 15:30:17 -07001767 return NULL;
Stephen Rothwellfd832402012-01-12 09:17:30 +01001768}
1769static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
1770{
1771 return NULL;
1772}
Satya Tangiralad145dc22020-05-14 00:37:19 +00001773static inline bool
1774blk_integrity_queue_supports_integrity(struct request_queue *q)
1775{
1776 return false;
1777}
Stephen Rothwellfd832402012-01-12 09:17:30 +01001778static inline int blk_integrity_compare(struct gendisk *a, struct gendisk *b)
1779{
1780 return 0;
1781}
Martin K. Petersen25520d52015-10-21 13:19:49 -04001782static inline void blk_integrity_register(struct gendisk *d,
Stephen Rothwellfd832402012-01-12 09:17:30 +01001783 struct blk_integrity *b)
1784{
Stephen Rothwellfd832402012-01-12 09:17:30 +01001785}
1786static inline void blk_integrity_unregister(struct gendisk *d)
1787{
1788}
1789static inline void blk_queue_max_integrity_segments(struct request_queue *q,
1790 unsigned int segs)
1791{
1792}
Bart Van Asscheaf2c68f2019-08-01 15:50:40 -07001793static inline unsigned short queue_max_integrity_segments(const struct request_queue *q)
Stephen Rothwellfd832402012-01-12 09:17:30 +01001794{
1795 return 0;
1796}
Martin K. Petersen4eaf99b2014-09-26 19:20:06 -04001797static inline bool blk_integrity_merge_rq(struct request_queue *rq,
1798 struct request *r1,
1799 struct request *r2)
Stephen Rothwellfd832402012-01-12 09:17:30 +01001800{
Martin K. Petersencb1a5ab2014-10-28 20:27:43 -06001801 return true;
Stephen Rothwellfd832402012-01-12 09:17:30 +01001802}
Martin K. Petersen4eaf99b2014-09-26 19:20:06 -04001803static inline bool blk_integrity_merge_bio(struct request_queue *rq,
1804 struct request *r,
1805 struct bio *b)
Stephen Rothwellfd832402012-01-12 09:17:30 +01001806{
Martin K. Petersencb1a5ab2014-10-28 20:27:43 -06001807 return true;
Stephen Rothwellfd832402012-01-12 09:17:30 +01001808}
Martin K. Petersen25520d52015-10-21 13:19:49 -04001809
Greg Edwards359f6422018-07-25 10:22:58 -04001810static inline unsigned int bio_integrity_intervals(struct blk_integrity *bi,
1811 unsigned int sectors)
1812{
1813 return 0;
1814}
1815
1816static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi,
1817 unsigned int sectors)
1818{
1819 return 0;
1820}
1821
Christoph Hellwig2a876f52019-03-03 08:38:29 -07001822static inline struct bio_vec *rq_integrity_vec(struct request *rq)
1823{
1824 return NULL;
1825}
1826
Martin K. Petersen7ba1ba12008-06-30 20:04:41 +02001827#endif /* CONFIG_BLK_DEV_INTEGRITY */
1828
Satya Tangiralad145dc22020-05-14 00:37:19 +00001829#ifdef CONFIG_BLK_INLINE_ENCRYPTION
1830
1831bool blk_ksm_register(struct blk_keyslot_manager *ksm, struct request_queue *q);
1832
1833void blk_ksm_unregister(struct request_queue *q);
1834
1835#else /* CONFIG_BLK_INLINE_ENCRYPTION */
1836
1837static inline bool blk_ksm_register(struct blk_keyslot_manager *ksm,
1838 struct request_queue *q)
1839{
1840 return true;
1841}
1842
1843static inline void blk_ksm_unregister(struct request_queue *q) { }
1844
1845#endif /* CONFIG_BLK_INLINE_ENCRYPTION */
1846
1847
Al Viro08f85852007-10-08 13:26:20 -04001848struct block_device_operations {
Christoph Hellwigc62b37d2020-07-01 10:59:43 +02001849 blk_qc_t (*submit_bio) (struct bio *bio);
Al Virod4430d62008-03-02 09:09:22 -05001850 int (*open) (struct block_device *, fmode_t);
Al Virodb2a1442013-05-05 21:52:57 -04001851 void (*release) (struct gendisk *, fmode_t);
Tejun Heo3f289dc2018-07-18 04:47:36 -07001852 int (*rw_page)(struct block_device *, sector_t, struct page *, unsigned int);
Al Virod4430d62008-03-02 09:09:22 -05001853 int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
1854 int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
Tejun Heo77ea8872010-12-08 20:57:37 +01001855 unsigned int (*check_events) (struct gendisk *disk,
1856 unsigned int clearing);
Tejun Heoc3e33e02010-05-15 20:09:29 +02001857 void (*unlock_native_capacity) (struct gendisk *);
Al Viro08f85852007-10-08 13:26:20 -04001858 int (*revalidate_disk) (struct gendisk *);
1859 int (*getgeo)(struct block_device *, struct hd_geometry *);
Nitin Guptab3a27d02010-05-17 11:02:43 +05301860 /* this callback is with swap_lock and sometimes page table lock held */
1861 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
Christoph Hellwige76239a2018-10-12 19:08:49 +09001862 int (*report_zones)(struct gendisk *, sector_t sector,
Christoph Hellwigd4100352019-11-11 11:39:30 +09001863 unsigned int nr_zones, report_zones_cb cb, void *data);
Christoph Hellwig348e1142020-03-27 09:07:17 +01001864 char *(*devnode)(struct gendisk *disk, umode_t *mode);
Al Viro08f85852007-10-08 13:26:20 -04001865 struct module *owner;
Christoph Hellwigbbd3e062015-10-15 14:10:48 +02001866 const struct pr_ops *pr_ops;
Al Viro08f85852007-10-08 13:26:20 -04001867};
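
/*
 * Illustrative sketch of a minimal bio based driver's operations table
 * ("my_submit_bio" and "my_fops" are hypothetical names):
 *
 *	static const struct block_device_operations my_fops = {
 *		.owner		= THIS_MODULE,
 *		.submit_bio	= my_submit_bio,
 *	};
 */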
1868
Arnd Bergmannee6a1292019-11-28 15:48:10 +01001869#ifdef CONFIG_COMPAT
1870extern int blkdev_compat_ptr_ioctl(struct block_device *, fmode_t,
1871 unsigned int, unsigned long);
1872#else
1873#define blkdev_compat_ptr_ioctl NULL
1874#endif
1875
Al Viro633a08b2007-08-29 20:34:12 -04001876extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
1877 unsigned long);
Matthew Wilcox47a191f2014-06-04 16:07:46 -07001878extern int bdev_read_page(struct block_device *, sector_t, struct page *);
1879extern int bdev_write_page(struct block_device *, sector_t, struct page *,
1880 struct writeback_control *);
Christoph Hellwig6cc77e92017-12-21 15:43:38 +09001881
1882#ifdef CONFIG_BLK_DEV_ZONED
1883bool blk_req_needs_zone_write_lock(struct request *rq);
Johannes Thumshirn1392d372020-05-12 17:55:48 +09001884bool blk_req_zone_write_trylock(struct request *rq);
Christoph Hellwig6cc77e92017-12-21 15:43:38 +09001885void __blk_req_zone_write_lock(struct request *rq);
1886void __blk_req_zone_write_unlock(struct request *rq);
1887
1888static inline void blk_req_zone_write_lock(struct request *rq)
1889{
1890 if (blk_req_needs_zone_write_lock(rq))
1891 __blk_req_zone_write_lock(rq);
1892}
1893
1894static inline void blk_req_zone_write_unlock(struct request *rq)
1895{
1896 if (rq->rq_flags & RQF_ZONE_WRITE_LOCKED)
1897 __blk_req_zone_write_unlock(rq);
1898}
1899
1900static inline bool blk_req_zone_is_write_locked(struct request *rq)
1901{
1902 return rq->q->seq_zones_wlock &&
1903 test_bit(blk_rq_zone_no(rq), rq->q->seq_zones_wlock);
1904}
1905
1906static inline bool blk_req_can_dispatch_to_zone(struct request *rq)
1907{
1908 if (!blk_req_needs_zone_write_lock(rq))
1909 return true;
1910 return !blk_req_zone_is_write_locked(rq);
1911}
1912#else
1913static inline bool blk_req_needs_zone_write_lock(struct request *rq)
1914{
1915 return false;
1916}
1917
1918static inline void blk_req_zone_write_lock(struct request *rq)
1919{
1920}
1921
1922static inline void blk_req_zone_write_unlock(struct request *rq)
1923{
1924}
1925static inline bool blk_req_zone_is_write_locked(struct request *rq)
1926{
1927 return false;
1928}
1929
1930static inline bool blk_req_can_dispatch_to_zone(struct request *rq)
1931{
1932 return true;
1933}
1934#endif /* CONFIG_BLK_DEV_ZONED */
1935
Jens Axboe06193172018-11-13 21:16:54 -07001936static inline void blk_wake_io_task(struct task_struct *waiter)
1937{
1938 /*
1939 * If we're polling, the task itself is doing the completions. For
1940 * that case, we don't need to signal a wakeup, it's enough to just
1941 * mark us as RUNNING.
1942 */
1943 if (waiter == current)
1944 __set_current_state(TASK_RUNNING);
1945 else
1946 wake_up_process(waiter);
1947}
1948
Christoph Hellwig956d5102020-05-27 07:24:04 +02001949unsigned long disk_start_io_acct(struct gendisk *disk, unsigned int sectors,
1950 unsigned int op);
1951void disk_end_io_acct(struct gendisk *disk, unsigned int op,
1952 unsigned long start_time);
1953
Song Liu7b264102020-08-31 15:27:23 -07001954unsigned long part_start_io_acct(struct gendisk *disk, struct hd_struct **part,
1955 struct bio *bio);
1956void part_end_io_acct(struct hd_struct *part, struct bio *bio,
1957 unsigned long start_time);
1958
Christoph Hellwig956d5102020-05-27 07:24:04 +02001959/**
1960 * bio_start_io_acct - start I/O accounting for bio based drivers
1961 * @bio: bio to start account for
1962 *
1963 * Returns the start time that should be passed back to bio_end_io_acct().
1964 */
1965static inline unsigned long bio_start_io_acct(struct bio *bio)
1966{
1967 return disk_start_io_acct(bio->bi_disk, bio_sectors(bio), bio_op(bio));
1968}
1969
1970/**
1971 * bio_end_io_acct - end I/O accounting for bio based drivers
1972 * @bio: bio to end account for
1973 * @start: start time returned by bio_start_io_acct()
1974 */
1975static inline void bio_end_io_acct(struct bio *bio, unsigned long start_time)
1976{
1977 return disk_end_io_acct(bio->bi_disk, bio_op(bio), start_time);
1978}
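
/*
 * Illustrative sketch for a bio based driver ("my_submit_bio" is a
 * hypothetical ->submit_bio implementation that completes synchronously):
 *
 *	static blk_qc_t my_submit_bio(struct bio *bio)
 *	{
 *		unsigned long start = bio_start_io_acct(bio);
 *
 *		... perform the actual I/O ...
 *
 *		bio_end_io_acct(bio, start);
 *		bio_endio(bio);
 *		return BLK_QC_T_NONE;
 *	}
 */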
Christoph Hellwig956d5102020-05-27 07:24:04 +02001979
Christoph Hellwig3f1266f2020-06-20 09:16:41 +02001980int bdev_read_only(struct block_device *bdev);
1981int set_blocksize(struct block_device *bdev, int size);
1982
1983const char *bdevname(struct block_device *bdev, char *buffer);
1984struct block_device *lookup_bdev(const char *);
1985
1986void blkdev_show(struct seq_file *seqf, off_t offset);
1987
1988#define BDEVNAME_SIZE 32 /* Largest string for a blockdev identifier */
1989#define BDEVT_SIZE 10 /* Largest string for MAJ:MIN for blkdev */
1990#ifdef CONFIG_BLOCK
1991#define BLKDEV_MAJOR_MAX 512
1992#else
1993#define BLKDEV_MAJOR_MAX 0
Linus Torvalds1da177e2005-04-16 15:20:36 -07001994#endif
Christoph Hellwig3f1266f2020-06-20 09:16:41 +02001995
Christoph Hellwig3f1266f2020-06-20 09:16:41 +02001996struct block_device *blkdev_get_by_path(const char *path, fmode_t mode,
1997 void *holder);
1998struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder);
Christoph Hellwigecbe6bc2020-07-16 16:33:09 +02001999int bd_prepare_to_claim(struct block_device *bdev, struct block_device *whole,
2000 void *holder);
Christoph Hellwig3f1266f2020-06-20 09:16:41 +02002001void bd_abort_claiming(struct block_device *bdev, struct block_device *whole,
2002 void *holder);
2003void blkdev_put(struct block_device *bdev, fmode_t mode);
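
/*
 * Illustrative sketch: open a block device by path for reading and release
 * it again.  The holder cookie is only meaningful for FMODE_EXCL opens, so
 * NULL is fine here; "/dev/vdb" is just an example path.
 *
 *	struct block_device *bdev;
 *
 *	bdev = blkdev_get_by_path("/dev/vdb", FMODE_READ, NULL);
 *	if (IS_ERR(bdev))
 *		return PTR_ERR(bdev);
 *	...
 *	blkdev_put(bdev, FMODE_READ);
 */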
2004
Christoph Hellwig621c1f42020-06-20 09:16:44 +02002005struct block_device *I_BDEV(struct inode *inode);
Christoph Hellwig3f1266f2020-06-20 09:16:41 +02002006struct block_device *bdget(dev_t);
2007struct block_device *bdgrab(struct block_device *bdev);
2008void bdput(struct block_device *);
2009
2010#ifdef CONFIG_BLOCK
2011void invalidate_bdev(struct block_device *bdev);
Jan Kara384d87e2020-09-04 10:58:52 +02002012int truncate_bdev_range(struct block_device *bdev, fmode_t mode, loff_t lstart,
2013 loff_t lend);
Christoph Hellwig3f1266f2020-06-20 09:16:41 +02002014int sync_blockdev(struct block_device *bdev);
2015#else
2016static inline void invalidate_bdev(struct block_device *bdev)
2017{
2018}
Jan Kara384d87e2020-09-04 10:58:52 +02002019static inline int truncate_bdev_range(struct block_device *bdev, fmode_t mode,
2020 loff_t lstart, loff_t lend)
2021{
2022 return 0;
2023}
Christoph Hellwig3f1266f2020-06-20 09:16:41 +02002024static inline int sync_blockdev(struct block_device *bdev)
2025{
2026 return 0;
2027}
2028#endif
2029int fsync_bdev(struct block_device *bdev);
2030
2031struct super_block *freeze_bdev(struct block_device *bdev);
2032int thaw_bdev(struct block_device *bdev, struct super_block *sb);
2033
2034#endif /* _LINUX_BLKDEV_H */