/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_BLKDEV_H
#define _LINUX_BLKDEV_H

#include <linux/sched.h>
#include <linux/sched/clock.h>

#ifdef CONFIG_BLOCK

#include <linux/major.h>
#include <linux/genhd.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/pagemap.h>
#include <linux/backing-dev-defs.h>
#include <linux/wait.h>
#include <linux/mempool.h>
#include <linux/pfn.h>
#include <linux/bio.h>
#include <linux/stringify.h>
#include <linux/gfp.h>
#include <linux/bsg.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/percpu-refcount.h>
#include <linux/scatterlist.h>
#include <linux/blkzoned.h>
#include <linux/seqlock.h>
#include <linux/u64_stats_sync.h>

struct module;
struct scsi_ioctl_command;

struct request_queue;
struct elevator_queue;
struct blk_trace;
struct request;
struct sg_io_hdr;
struct bsg_job;
struct blkcg_gq;
struct blk_flush_queue;
struct pr_ops;
struct rq_wb;
struct blk_queue_stats;
struct blk_stat_callback;

#define BLKDEV_MIN_RQ	4
#define BLKDEV_MAX_RQ	128	/* Default maximum */

/* Must be consistent with blk_mq_poll_stats_bkt() */
#define BLK_MQ_POLL_STATS_BKTS 16

/*
 * Maximum number of blkcg policies allowed to be registered concurrently.
 * Defined here to simplify include dependency.
 */
#define BLKCG_MAX_POLS		3

typedef void (rq_end_io_fn)(struct request *, blk_status_t);

#define BLK_RL_SYNCFULL		(1U << 0)
#define BLK_RL_ASYNCFULL	(1U << 1)

struct request_list {
	struct request_queue	*q;	/* the queue this rl belongs to */
#ifdef CONFIG_BLK_CGROUP
	struct blkcg_gq		*blkg;	/* blkg this request pool belongs to */
#endif
	/*
	 * count[], starved[], and wait[] are indexed by
	 * BLK_RW_SYNC/BLK_RW_ASYNC
	 */
	int			count[2];
	int			starved[2];
	mempool_t		*rq_pool;
	wait_queue_head_t	wait[2];
	unsigned int		flags;
};

/*
 * request flags
 */
typedef __u32 __bitwise req_flags_t;

/* elevator knows about this request */
#define RQF_SORTED		((__force req_flags_t)(1 << 0))
/* drive already may have started this one */
#define RQF_STARTED		((__force req_flags_t)(1 << 1))
/* uses tagged queueing */
#define RQF_QUEUED		((__force req_flags_t)(1 << 2))
/* may not be passed by ioscheduler */
#define RQF_SOFTBARRIER		((__force req_flags_t)(1 << 3))
/* request for flush sequence */
#define RQF_FLUSH_SEQ		((__force req_flags_t)(1 << 4))
/* merge of different types, fail separately */
#define RQF_MIXED_MERGE		((__force req_flags_t)(1 << 5))
/* track inflight for MQ */
#define RQF_MQ_INFLIGHT		((__force req_flags_t)(1 << 6))
/* don't call prep for this one */
#define RQF_DONTPREP		((__force req_flags_t)(1 << 7))
/* set for "ide_preempt" requests and also for requests for which the SCSI
   "quiesce" state must be ignored. */
#define RQF_PREEMPT		((__force req_flags_t)(1 << 8))
/* contains copies of user pages */
#define RQF_COPY_USER		((__force req_flags_t)(1 << 9))
/* vaguely specified driver internal error. Ignored by the block layer */
#define RQF_FAILED		((__force req_flags_t)(1 << 10))
/* don't warn about errors */
#define RQF_QUIET		((__force req_flags_t)(1 << 11))
/* elevator private data attached */
#define RQF_ELVPRIV		((__force req_flags_t)(1 << 12))
/* account I/O stat */
#define RQF_IO_STAT		((__force req_flags_t)(1 << 13))
/* request came from our alloc pool */
#define RQF_ALLOCED		((__force req_flags_t)(1 << 14))
/* runtime pm request */
#define RQF_PM			((__force req_flags_t)(1 << 15))
/* on IO scheduler merge hash */
#define RQF_HASHED		((__force req_flags_t)(1 << 16))
/* IO stats tracking on */
#define RQF_STATS		((__force req_flags_t)(1 << 17))
/* Look at ->special_vec for the actual data payload instead of the
   bio chain. */
#define RQF_SPECIAL_PAYLOAD	((__force req_flags_t)(1 << 18))
/* The per-zone write lock is held for this request */
#define RQF_ZONE_WRITE_LOCKED	((__force req_flags_t)(1 << 19))
/* timeout is expired */
#define RQF_MQ_TIMEOUT_EXPIRED	((__force req_flags_t)(1 << 20))
/* already slept for hybrid poll */
#define RQF_MQ_POLL_SLEPT	((__force req_flags_t)(1 << 21))

/* flags that prevent us from merging requests: */
#define RQF_NOMERGE_FLAGS \
	(RQF_STARTED | RQF_SOFTBARRIER | RQF_FLUSH_SEQ | RQF_SPECIAL_PAYLOAD)
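/*
 * Illustrative sketch (not part of the original header): a merge check
 * uses these bits in the way rq_mergeable() further down does, refusing
 * to grow a request once it has started or belongs to a flush sequence:
 *
 *	if (rq->rq_flags & RQF_NOMERGE_FLAGS)
 *		return false;
 */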

/*
 * Try to put the fields that are referenced together in the same cacheline.
 *
 * If you modify this structure, make sure to update blk_rq_init() and
 * especially blk_mq_rq_ctx_init() to take care of the added fields.
 */
struct request {
	struct request_queue *q;
	struct blk_mq_ctx *mq_ctx;

	int cpu;
	unsigned int cmd_flags;		/* op and common flags */
	req_flags_t rq_flags;

	int internal_tag;

	/* the following two fields are internal, NEVER access directly */
	unsigned int __data_len;	/* total data len */
	int tag;
	sector_t __sector;		/* sector cursor */

	struct bio *bio;
	struct bio *biotail;

	struct list_head queuelist;

	/*
	 * The hash is used inside the scheduler, and killed once the
	 * request reaches the dispatch list. The ipi_list is only used
	 * to queue the request for softirq completion, which is long
	 * after the request has been unhashed (and even removed from
	 * the dispatch list).
	 */
	union {
		struct hlist_node hash;	/* merge hash */
		struct list_head ipi_list;
	};

	/*
	 * The rb_node is only used inside the io scheduler, requests
	 * are pruned when moved to the dispatch queue. So let the
	 * completion_data share space with the rb_node.
	 */
	union {
		struct rb_node rb_node;	/* sort/lookup */
		struct bio_vec special_vec;
		void *completion_data;
		int error_count;	/* for legacy drivers, don't use */
	};

	/*
	 * Three pointers are available for the IO schedulers, if they need
	 * more they have to dynamically allocate it. Flush requests are
	 * never put on the IO scheduler. So let the flush fields share
	 * space with the elevator data.
	 */
	union {
		struct {
			struct io_cq		*icq;
			void			*priv[2];
		} elv;

		struct {
			unsigned int		seq;
			struct list_head	list;
			rq_end_io_fn		*saved_end_io;
		} flush;
	};

	struct gendisk *rq_disk;
	struct hd_struct *part;
	/* Time that I/O was submitted to the kernel. */
	u64 start_time_ns;
	/* Time that I/O was submitted to the device. */
	u64 io_start_time_ns;

#ifdef CONFIG_BLK_WBT
	unsigned short wbt_flags;
#endif
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
	unsigned short throtl_size;
#endif

	/*
	 * Number of scatter-gather DMA addr+len pairs after
	 * physical address coalescing is performed.
	 */
	unsigned short nr_phys_segments;

#if defined(CONFIG_BLK_DEV_INTEGRITY)
	unsigned short nr_integrity_segments;
#endif

	unsigned short write_hint;
	unsigned short ioprio;

	unsigned int timeout;

	void *special;		/* opaque pointer available for LLD use */

	unsigned int extra_len;	/* length of alignment and padding */

	/*
	 * On blk-mq, the lower bits of ->gstate (generation number and
	 * state) carry the MQ_RQ_* state value and the upper bits the
	 * generation number which is monotonically incremented and used to
	 * distinguish the reuse instances.
	 *
	 * ->gstate_seq allows updates to ->gstate and other fields
	 * (currently ->deadline) during request start to be read
	 * atomically from the timeout path, so that it can operate on a
	 * coherent set of information.
	 */
	seqcount_t gstate_seq;
	u64 gstate;
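
	/*
	 * Illustrative sketch (not part of the original header; roughly
	 * what the blk-mq timeout path does): readers sample ->gstate and
	 * the deadline together under the seqcount so both values belong
	 * to the same recycle instance:
	 *
	 *	do {
	 *		start = read_seqcount_begin(&rq->gstate_seq);
	 *		gstate = READ_ONCE(rq->gstate);
	 *		deadline = blk_rq_deadline(rq);
	 *	} while (read_seqcount_retry(&rq->gstate_seq, start));
	 */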

	/*
	 * ->aborted_gstate is used by the timeout to claim a specific
	 * recycle instance of this request. See blk_mq_timeout_work().
	 */
	struct u64_stats_sync aborted_gstate_sync;
	u64 aborted_gstate;

	/* access through blk_rq_set_deadline, blk_rq_deadline */
	unsigned long __deadline;

	struct list_head timeout_list;

	union {
		struct __call_single_data csd;
		u64 fifo_time;
	};

	/*
	 * completion callback.
	 */
	rq_end_io_fn *end_io;
	void *end_io_data;

	/* for bidi */
	struct request *next_rq;

#ifdef CONFIG_BLK_CGROUP
	struct request_list *rl;	/* rl this rq is alloced from */
#endif
};

static inline bool blk_op_is_scsi(unsigned int op)
{
	return op == REQ_OP_SCSI_IN || op == REQ_OP_SCSI_OUT;
}

static inline bool blk_op_is_private(unsigned int op)
{
	return op == REQ_OP_DRV_IN || op == REQ_OP_DRV_OUT;
}

static inline bool blk_rq_is_scsi(struct request *rq)
{
	return blk_op_is_scsi(req_op(rq));
}

static inline bool blk_rq_is_private(struct request *rq)
{
	return blk_op_is_private(req_op(rq));
}

static inline bool blk_rq_is_passthrough(struct request *rq)
{
	return blk_rq_is_scsi(rq) || blk_rq_is_private(rq);
}

static inline bool bio_is_passthrough(struct bio *bio)
{
	unsigned op = bio_op(bio);

	return blk_op_is_scsi(op) || blk_op_is_private(op);
}
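
/*
 * Illustrative sketch (not part of the original header): accounting and
 * merging code typically bails out early on passthrough requests, since
 * they carry raw SCSI/driver commands rather than filesystem data:
 *
 *	if (blk_rq_is_passthrough(rq))
 *		return;
 */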

static inline unsigned short req_get_ioprio(struct request *req)
{
	return req->ioprio;
}

#include <linux/elevator.h>

struct blk_queue_ctx;

typedef void (request_fn_proc) (struct request_queue *q);
typedef blk_qc_t (make_request_fn) (struct request_queue *q, struct bio *bio);
typedef bool (poll_q_fn) (struct request_queue *q, blk_qc_t);
typedef int (prep_rq_fn) (struct request_queue *, struct request *);
typedef void (unprep_rq_fn) (struct request_queue *, struct request *);

struct bio_vec;
typedef void (softirq_done_fn)(struct request *);
typedef int (dma_drain_needed_fn)(struct request *);
typedef int (lld_busy_fn) (struct request_queue *q);
typedef int (bsg_job_fn) (struct bsg_job *);
typedef int (init_rq_fn)(struct request_queue *, struct request *, gfp_t);
typedef void (exit_rq_fn)(struct request_queue *, struct request *);

enum blk_eh_timer_return {
	BLK_EH_NOT_HANDLED,
	BLK_EH_HANDLED,
	BLK_EH_RESET_TIMER,
};

typedef enum blk_eh_timer_return (rq_timed_out_fn)(struct request *);
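
/*
 * Illustrative sketch (hypothetical driver callback, not from this
 * header): a timeout handler that re-arms the timer while the command
 * is still in flight in hardware; my_hw_cmd_active() is a made-up
 * helper standing in for whatever the driver uses to query its device:
 *
 *	static enum blk_eh_timer_return my_timed_out(struct request *rq)
 *	{
 *		return my_hw_cmd_active(rq) ? BLK_EH_RESET_TIMER
 *					    : BLK_EH_HANDLED;
 *	}
 */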

enum blk_queue_state {
	Queue_down,
	Queue_up,
};

struct blk_queue_tag {
	struct request **tag_index;	/* map of busy tags */
	unsigned long *tag_map;		/* bit map of free/busy tags */
	int max_depth;			/* what we will send to device */
	int real_max_depth;		/* what the array can hold */
	atomic_t refcnt;		/* map can be shared */
	int alloc_policy;		/* tag allocation policy */
	int next_tag;			/* next tag */
};
#define BLK_TAG_ALLOC_FIFO 0 /* allocate starting from 0 */
#define BLK_TAG_ALLOC_RR 1 /* allocate starting from last allocated tag */

#define BLK_SCSI_MAX_CMDS	(256)
#define BLK_SCSI_CMD_PER_LONG	(BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))

/*
 * Zoned block device models (zoned limit).
 */
enum blk_zoned_model {
	BLK_ZONED_NONE,	/* Regular block device */
	BLK_ZONED_HA,	/* Host-aware zoned block device */
	BLK_ZONED_HM,	/* Host-managed zoned block device */
};

struct queue_limits {
	unsigned long bounce_pfn;
	unsigned long seg_boundary_mask;
	unsigned long virt_boundary_mask;

	unsigned int max_hw_sectors;
	unsigned int max_dev_sectors;
	unsigned int chunk_sectors;
	unsigned int max_sectors;
	unsigned int max_segment_size;
	unsigned int physical_block_size;
	unsigned int alignment_offset;
	unsigned int io_min;
	unsigned int io_opt;
	unsigned int max_discard_sectors;
	unsigned int max_hw_discard_sectors;
	unsigned int max_write_same_sectors;
	unsigned int max_write_zeroes_sectors;
	unsigned int discard_granularity;
	unsigned int discard_alignment;

	unsigned short logical_block_size;
	unsigned short max_segments;
	unsigned short max_integrity_segments;
	unsigned short max_discard_segments;

	unsigned char misaligned;
	unsigned char discard_misaligned;
	unsigned char cluster;
	unsigned char raid_partial_stripes_expensive;
	enum blk_zoned_model zoned;
};

#ifdef CONFIG_BLK_DEV_ZONED

struct blk_zone_report_hdr {
	unsigned int nr_zones;
	u8 padding[60];
};

extern int blkdev_report_zones(struct block_device *bdev,
			       sector_t sector, struct blk_zone *zones,
			       unsigned int *nr_zones, gfp_t gfp_mask);
extern int blkdev_reset_zones(struct block_device *bdev, sector_t sectors,
			      sector_t nr_sectors, gfp_t gfp_mask);

extern int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
				     unsigned int cmd, unsigned long arg);
extern int blkdev_reset_zones_ioctl(struct block_device *bdev, fmode_t mode,
				    unsigned int cmd, unsigned long arg);
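
/*
 * Illustrative sketch (not part of the original header; the fixed-size
 * buffer is an assumption for the example): reporting the first zones
 * of a device, starting at sector 0. On success, nr_zones is updated to
 * the number of zone descriptors actually filled in:
 *
 *	struct blk_zone zones[16];
 *	unsigned int nr_zones = 16;
 *
 *	if (!blkdev_report_zones(bdev, 0, zones, &nr_zones, GFP_KERNEL))
 *		... zones[0..nr_zones-1] are now valid ...
 */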

#else /* CONFIG_BLK_DEV_ZONED */

static inline int blkdev_report_zones_ioctl(struct block_device *bdev,
					    fmode_t mode, unsigned int cmd,
					    unsigned long arg)
{
	return -ENOTTY;
}

static inline int blkdev_reset_zones_ioctl(struct block_device *bdev,
					   fmode_t mode, unsigned int cmd,
					   unsigned long arg)
{
	return -ENOTTY;
}

#endif /* CONFIG_BLK_DEV_ZONED */

struct request_queue {
	/*
	 * Together with queue_head for cacheline sharing
	 */
	struct list_head queue_head;
	struct request *last_merge;
	struct elevator_queue *elevator;
	int nr_rqs[2];		/* # allocated [a]sync rqs */
	int nr_rqs_elvpriv;	/* # allocated rqs w/ elvpriv */

	atomic_t shared_hctx_restart;

	struct blk_queue_stats *stats;
	struct rq_wb *rq_wb;

	/*
	 * If blkcg is not used, @q->root_rl serves all requests. If blkcg
	 * is used, root blkg allocates from @q->root_rl and all other
	 * blkgs from their own blkg->rl. Which one to use should be
	 * determined using bio_request_list().
	 */
	struct request_list root_rl;

	request_fn_proc *request_fn;
	make_request_fn *make_request_fn;
	poll_q_fn *poll_fn;
	prep_rq_fn *prep_rq_fn;
	unprep_rq_fn *unprep_rq_fn;
	softirq_done_fn *softirq_done_fn;
	rq_timed_out_fn *rq_timed_out_fn;
	dma_drain_needed_fn *dma_drain_needed;
	lld_busy_fn *lld_busy_fn;
	/* Called just after a request is allocated */
	init_rq_fn *init_rq_fn;
	/* Called just before a request is freed */
	exit_rq_fn *exit_rq_fn;
	/* Called from inside blk_get_request() */
	void (*initialize_rq_fn)(struct request *rq);

	const struct blk_mq_ops *mq_ops;

	unsigned int *mq_map;

	/* sw queues */
	struct blk_mq_ctx __percpu *queue_ctx;
	unsigned int nr_queues;

	unsigned int queue_depth;

	/* hw dispatch queues */
	struct blk_mq_hw_ctx **queue_hw_ctx;
	unsigned int nr_hw_queues;

	/*
	 * Dispatch queue sorting
	 */
	sector_t end_sector;
	struct request *boundary_rq;

	/*
	 * Delayed queue handling
	 */
	struct delayed_work delay_work;

	struct backing_dev_info *backing_dev_info;

	/*
	 * The queue owner gets to use this for whatever they like.
	 * ll_rw_blk doesn't touch it.
	 */
	void *queuedata;

	/*
	 * various queue flags, see QUEUE_* below
	 */
	unsigned long queue_flags;

	/*
	 * ida allocated id for this queue. Used to index queues from
	 * ioctx.
	 */
	int id;

	/*
	 * queue needs bounce pages for pages above this limit
	 */
	gfp_t bounce_gfp;

	/*
	 * protects queue structures from reentrancy. ->__queue_lock should
	 * _never_ be used directly, it is queue private. always use
	 * ->queue_lock.
	 */
	spinlock_t __queue_lock;
	spinlock_t *queue_lock;

	/*
	 * queue kobject
	 */
	struct kobject kobj;

	/*
	 * mq queue kobject
	 */
	struct kobject mq_kobj;

#ifdef CONFIG_BLK_DEV_INTEGRITY
	struct blk_integrity integrity;
#endif	/* CONFIG_BLK_DEV_INTEGRITY */

#ifdef CONFIG_PM
	struct device *dev;
	int rpm_status;
	unsigned int nr_pending;
#endif

	/*
	 * queue settings
	 */
	unsigned long nr_requests;	/* Max # of requests */
	unsigned int nr_congestion_on;
	unsigned int nr_congestion_off;
	unsigned int nr_batching;

	unsigned int dma_drain_size;
	void *dma_drain_buffer;
	unsigned int dma_pad_mask;
	unsigned int dma_alignment;

	struct blk_queue_tag *queue_tags;
	struct list_head tag_busy_list;

	unsigned int nr_sorted;
	unsigned int in_flight[2];

	/*
	 * Number of active block driver functions for which blk_drain_queue()
	 * must wait. Must be incremented around functions that unlock the
	 * queue_lock internally, e.g. scsi_request_fn().
	 */
	unsigned int request_fn_active;

	unsigned int rq_timeout;
	int poll_nsec;

	struct blk_stat_callback *poll_cb;
	struct blk_rq_stat poll_stat[BLK_MQ_POLL_STATS_BKTS];

	struct timer_list timeout;
	struct work_struct timeout_work;
	struct list_head timeout_list;

	struct list_head icq_list;
#ifdef CONFIG_BLK_CGROUP
	DECLARE_BITMAP (blkcg_pols, BLKCG_MAX_POLS);
	struct blkcg_gq *root_blkg;
	struct list_head blkg_list;
#endif

	struct queue_limits limits;

	/*
	 * Zoned block device information for request dispatch control.
	 * nr_zones is the total number of zones of the device. This is always
	 * 0 for regular block devices. seq_zones_bitmap is a bitmap of nr_zones
	 * bits which indicates if a zone is conventional (bit clear) or
	 * sequential (bit set). seq_zones_wlock is a bitmap of nr_zones
	 * bits which indicates if a zone is write locked, that is, if a write
	 * request targeting the zone was dispatched. All three fields are
	 * initialized by the low level device driver (e.g. scsi/sd.c).
	 * Stacking drivers (device mappers) may or may not initialize
	 * these fields.
	 *
	 * Reads of this information must be protected with blk_queue_enter() /
	 * blk_queue_exit(). Modifying this information is only allowed while
	 * no requests are being processed. See also blk_mq_freeze_queue() and
	 * blk_mq_unfreeze_queue().
	 */
	unsigned int nr_zones;
	unsigned long *seq_zones_bitmap;
	unsigned long *seq_zones_wlock;

	/*
	 * sg stuff
	 */
	unsigned int sg_timeout;
	unsigned int sg_reserved_size;
	int node;
#ifdef CONFIG_BLK_DEV_IO_TRACE
	struct blk_trace *blk_trace;
	struct mutex blk_trace_mutex;
#endif
	/*
	 * for flush operations
	 */
	struct blk_flush_queue *fq;

	struct list_head requeue_list;
	spinlock_t requeue_lock;
	struct delayed_work requeue_work;

	struct mutex sysfs_lock;

	int bypass_depth;
	atomic_t mq_freeze_depth;

#if defined(CONFIG_BLK_DEV_BSG)
	bsg_job_fn *bsg_job_fn;
	struct bsg_class_device bsg_dev;
#endif

#ifdef CONFIG_BLK_DEV_THROTTLING
	/* Throttle data */
	struct throtl_data *td;
#endif
	struct rcu_head rcu_head;
	wait_queue_head_t mq_freeze_wq;
	struct percpu_ref q_usage_counter;
	struct list_head all_q_node;

	struct blk_mq_tag_set *tag_set;
	struct list_head tag_set_list;
	struct bio_set *bio_split;

#ifdef CONFIG_BLK_DEBUG_FS
	struct dentry *debugfs_dir;
	struct dentry *sched_debugfs_dir;
#endif

	bool mq_sysfs_init_done;

	size_t cmd_size;
	void *rq_alloc_data;

	struct work_struct release_work;

#define BLK_MAX_WRITE_HINTS	5
	u64 write_hints[BLK_MAX_WRITE_HINTS];
};

#define QUEUE_FLAG_QUEUED	0	/* uses generic tag queueing */
#define QUEUE_FLAG_STOPPED	1	/* queue is stopped */
#define QUEUE_FLAG_DYING	2	/* queue being torn down */
#define QUEUE_FLAG_BYPASS	3	/* act as dumb FIFO queue */
#define QUEUE_FLAG_BIDI		4	/* queue supports bidi requests */
#define QUEUE_FLAG_NOMERGES	5	/* disable merge attempts */
#define QUEUE_FLAG_SAME_COMP	6	/* complete on same CPU-group */
#define QUEUE_FLAG_FAIL_IO	7	/* fake timeout */
#define QUEUE_FLAG_NONROT	9	/* non-rotational device (SSD) */
#define QUEUE_FLAG_VIRT		QUEUE_FLAG_NONROT /* paravirt device */
#define QUEUE_FLAG_IO_STAT	10	/* do IO stats */
#define QUEUE_FLAG_DISCARD	11	/* supports DISCARD */
#define QUEUE_FLAG_NOXMERGES	12	/* No extended merges */
#define QUEUE_FLAG_ADD_RANDOM	13	/* Contributes to random pool */
#define QUEUE_FLAG_SECERASE	14	/* supports secure erase */
#define QUEUE_FLAG_SAME_FORCE	15	/* force complete on same CPU */
#define QUEUE_FLAG_DEAD		16	/* queue tear-down finished */
#define QUEUE_FLAG_INIT_DONE	17	/* queue is initialized */
#define QUEUE_FLAG_NO_SG_MERGE	18	/* don't attempt to merge SG segments */
#define QUEUE_FLAG_POLL		19	/* IO polling enabled if set */
#define QUEUE_FLAG_WC		20	/* Write back caching */
#define QUEUE_FLAG_FUA		21	/* device supports FUA writes */
#define QUEUE_FLAG_FLUSH_NQ	22	/* flush not queueable */
#define QUEUE_FLAG_DAX		23	/* device supports DAX */
#define QUEUE_FLAG_STATS	24	/* track rq completion times */
#define QUEUE_FLAG_POLL_STATS	25	/* collecting stats for hybrid polling */
#define QUEUE_FLAG_REGISTERED	26	/* queue has been registered to a disk */
#define QUEUE_FLAG_SCSI_PASSTHROUGH 27	/* queue supports SCSI commands */
#define QUEUE_FLAG_QUIESCED	28	/* queue has been quiesced */
#define QUEUE_FLAG_PREEMPT_ONLY	29	/* only process REQ_PREEMPT requests */

#define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
				 (1 << QUEUE_FLAG_SAME_COMP) |		\
				 (1 << QUEUE_FLAG_ADD_RANDOM))

#define QUEUE_FLAG_MQ_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
				 (1 << QUEUE_FLAG_SAME_COMP) |		\
				 (1 << QUEUE_FLAG_POLL))

void blk_queue_flag_set(unsigned int flag, struct request_queue *q);
void blk_queue_flag_clear(unsigned int flag, struct request_queue *q);
bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q);
bool blk_queue_flag_test_and_clear(unsigned int flag, struct request_queue *q);
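
/*
 * Illustrative sketch (not part of the original header): a driver that
 * knows its device is a discard-capable SSD would typically configure
 * the queue at setup time with:
 *
 *	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
 *	blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
 *	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
 */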

#define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
#define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
#define blk_queue_dying(q)	test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
#define blk_queue_dead(q)	test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
#define blk_queue_bypass(q)	test_bit(QUEUE_FLAG_BYPASS, &(q)->queue_flags)
#define blk_queue_init_done(q)	test_bit(QUEUE_FLAG_INIT_DONE, &(q)->queue_flags)
#define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
#define blk_queue_noxmerges(q)	\
	test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
#define blk_queue_nonrot(q)	test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
#define blk_queue_io_stat(q)	test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
#define blk_queue_add_random(q)	test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
#define blk_queue_discard(q)	test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
#define blk_queue_secure_erase(q) \
	(test_bit(QUEUE_FLAG_SECERASE, &(q)->queue_flags))
#define blk_queue_dax(q)	test_bit(QUEUE_FLAG_DAX, &(q)->queue_flags)
#define blk_queue_scsi_passthrough(q)	\
	test_bit(QUEUE_FLAG_SCSI_PASSTHROUGH, &(q)->queue_flags)

#define blk_noretry_request(rq) \
	((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
			     REQ_FAILFAST_DRIVER))
#define blk_queue_quiesced(q)	test_bit(QUEUE_FLAG_QUIESCED, &(q)->queue_flags)
#define blk_queue_preempt_only(q)				\
	test_bit(QUEUE_FLAG_PREEMPT_ONLY, &(q)->queue_flags)
#define blk_queue_fua(q)	test_bit(QUEUE_FLAG_FUA, &(q)->queue_flags)

extern int blk_set_preempt_only(struct request_queue *q);
extern void blk_clear_preempt_only(struct request_queue *q);

static inline int queue_in_flight(struct request_queue *q)
{
	return q->in_flight[0] + q->in_flight[1];
}

static inline bool blk_account_rq(struct request *rq)
{
	return (rq->rq_flags & RQF_STARTED) && !blk_rq_is_passthrough(rq);
}

#define blk_rq_cpu_valid(rq)	((rq)->cpu != -1)
#define blk_bidi_rq(rq)		((rq)->next_rq != NULL)
/* rq->queuelist of dequeued request must be list_empty() */
#define blk_queued_rq(rq)	(!list_empty(&(rq)->queuelist))

#define list_entry_rq(ptr)	list_entry((ptr), struct request, queuelist)

#define rq_data_dir(rq)		(op_is_write(req_op(rq)) ? WRITE : READ)

/*
 * Driver can handle struct request, if it either has an old style
 * request_fn defined, or is blk-mq based.
 */
static inline bool queue_is_rq_based(struct request_queue *q)
{
	return q->request_fn || q->mq_ops;
}
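
/*
 * Illustrative sketch (assumed caller, not from this header): a
 * stacking driver probing whether a bottom device can accept struct
 * requests, in the same spirit as device-mapper's table checks:
 *
 *	if (!queue_is_rq_based(bdev_get_queue(bdev)))
 *		... device is bio-based, fall back to bio submission ...
 */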

static inline unsigned int blk_queue_cluster(struct request_queue *q)
{
	return q->limits.cluster;
}

static inline enum blk_zoned_model
blk_queue_zoned_model(struct request_queue *q)
{
	return q->limits.zoned;
}

static inline bool blk_queue_is_zoned(struct request_queue *q)
{
	switch (blk_queue_zoned_model(q)) {
	case BLK_ZONED_HA:
	case BLK_ZONED_HM:
		return true;
	default:
		return false;
	}
}

static inline unsigned int blk_queue_zone_sectors(struct request_queue *q)
{
	return blk_queue_is_zoned(q) ? q->limits.chunk_sectors : 0;
}

static inline unsigned int blk_queue_nr_zones(struct request_queue *q)
{
	return q->nr_zones;
}

static inline unsigned int blk_queue_zone_no(struct request_queue *q,
					     sector_t sector)
{
	if (!blk_queue_is_zoned(q))
		return 0;
	return sector >> ilog2(q->limits.chunk_sectors);
}
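
/*
 * Worked example (numbers are illustrative, not from this header): with
 * 256 MiB zones, chunk_sectors is 524288 512-byte sectors and
 * ilog2(524288) == 19, so sector 1572864 maps to zone
 * 1572864 >> 19 == 3. The shift only works because the zone size of a
 * zoned device is required to be a power of two in sectors.
 */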

static inline bool blk_queue_zone_is_seq(struct request_queue *q,
					 sector_t sector)
{
	if (!blk_queue_is_zoned(q) || !q->seq_zones_bitmap)
		return false;
	return test_bit(blk_queue_zone_no(q, sector), q->seq_zones_bitmap);
}

static inline bool rq_is_sync(struct request *rq)
{
	return op_is_sync(rq->cmd_flags);
}

static inline bool blk_rl_full(struct request_list *rl, bool sync)
{
	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;

	return rl->flags & flag;
}

static inline void blk_set_rl_full(struct request_list *rl, bool sync)
{
	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;

	rl->flags |= flag;
}

static inline void blk_clear_rl_full(struct request_list *rl, bool sync)
{
	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;

	rl->flags &= ~flag;
}

static inline bool rq_mergeable(struct request *rq)
{
	if (blk_rq_is_passthrough(rq))
		return false;

	if (req_op(rq) == REQ_OP_FLUSH)
		return false;

	if (req_op(rq) == REQ_OP_WRITE_ZEROES)
		return false;

	if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
		return false;
	if (rq->rq_flags & RQF_NOMERGE_FLAGS)
		return false;

	return true;
}

static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b)
{
	if (bio_page(a) == bio_page(b) &&
	    bio_offset(a) == bio_offset(b))
		return true;

	return false;
}

static inline unsigned int blk_queue_depth(struct request_queue *q)
{
	if (q->queue_depth)
		return q->queue_depth;

	return q->nr_requests;
}

/*
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 898 | * q->prep_rq_fn return values |
| 899 | */ |
Martin K. Petersen | 0fb5b1f | 2016-02-04 00:52:12 -0500 | [diff] [blame] | 900 | enum { |
| 901 | BLKPREP_OK, /* serve it */ |
| 902 | BLKPREP_KILL, /* fatal error, kill, return -EIO */ |
| 903 | BLKPREP_DEFER, /* leave on queue */ |
| 904 | BLKPREP_INVALID, /* invalid command, kill, return -EREMOTEIO */ |
| 905 | }; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 906 | |
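/*
 * Usage sketch (illustrative only): a minimal prep_rq_fn; the readiness
 * test via queuedata is assumed for the example. Returning BLKPREP_DEFER
 * leaves the request on the queue so it is retried later.
 */
static inline int example_prep_rq(struct request_queue *q, struct request *rq)
{
	if (blk_rq_is_passthrough(rq))
		return BLKPREP_KILL;	/* this driver serves fs requests only */
	if (!q->queuedata)
		return BLKPREP_DEFER;	/* device not ready yet */
	return BLKPREP_OK;
}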
| 907 | extern unsigned long blk_max_low_pfn, blk_max_pfn; |
| 908 | |
| 909 | /* |
| 910 | * standard bounce addresses: |
| 911 | * |
| 912 | * BLK_BOUNCE_HIGH : bounce all highmem pages |
| 913 | * BLK_BOUNCE_ANY : don't bounce anything |
| 914 | * BLK_BOUNCE_ISA : bounce pages above ISA DMA boundary |
| 915 | */ |
Andi Kleen | 2472892 | 2008-04-21 09:51:05 +0200 | [diff] [blame] | 916 | |
| 917 | #if BITS_PER_LONG == 32 |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 918 | #define BLK_BOUNCE_HIGH ((u64)blk_max_low_pfn << PAGE_SHIFT) |
Andi Kleen | 2472892 | 2008-04-21 09:51:05 +0200 | [diff] [blame] | 919 | #else |
| 920 | #define BLK_BOUNCE_HIGH -1ULL |
| 921 | #endif |
| 922 | #define BLK_BOUNCE_ANY (-1ULL) |
FUJITA Tomonori | bfe1723 | 2010-05-31 15:59:03 +0900 | [diff] [blame] | 923 | #define BLK_BOUNCE_ISA (DMA_BIT_MASK(24)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 924 | |
Jens Axboe | 3d6392c | 2007-07-09 12:38:05 +0200 | [diff] [blame] | 925 | /* |
| 926 | * default timeout for SG_IO if none specified |
| 927 | */ |
| 928 | #define BLK_DEFAULT_SG_TIMEOUT (60 * HZ) |
Linus Torvalds | f2f1fa7 | 2008-12-05 14:49:18 -0800 | [diff] [blame] | 929 | #define BLK_MIN_SG_TIMEOUT (7 * HZ) |
Jens Axboe | 3d6392c | 2007-07-09 12:38:05 +0200 | [diff] [blame] | 930 | |
FUJITA Tomonori | 152e283 | 2008-08-28 16:17:06 +0900 | [diff] [blame] | 931 | struct rq_map_data { |
| 932 | struct page **pages; |
| 933 | int page_order; |
| 934 | int nr_entries; |
FUJITA Tomonori | 56c451f | 2008-12-18 14:49:37 +0900 | [diff] [blame] | 935 | unsigned long offset; |
FUJITA Tomonori | 97ae77a | 2008-12-18 14:49:38 +0900 | [diff] [blame] | 936 | int null_mapped; |
FUJITA Tomonori | ecb554a | 2009-07-09 14:46:53 +0200 | [diff] [blame] | 937 | int from_user; |
FUJITA Tomonori | 152e283 | 2008-08-28 16:17:06 +0900 | [diff] [blame] | 938 | }; |
| 939 | |
NeilBrown | 5705f70 | 2007-09-25 12:35:59 +0200 | [diff] [blame] | 940 | struct req_iterator { |
Kent Overstreet | 7988613 | 2013-11-23 17:19:00 -0800 | [diff] [blame] | 941 | struct bvec_iter iter; |
NeilBrown | 5705f70 | 2007-09-25 12:35:59 +0200 | [diff] [blame] | 942 | struct bio *bio; |
| 943 | }; |
| 944 | |
| 945 | /* This should not be used directly - use rq_for_each_segment */ |
Jens Axboe | 1e42807 | 2009-02-23 09:03:10 +0100 | [diff] [blame] | 946 | #define for_each_bio(_bio) \ |
| 947 | for (; _bio; _bio = _bio->bi_next) |
NeilBrown | 5705f70 | 2007-09-25 12:35:59 +0200 | [diff] [blame] | 948 | #define __rq_for_each_bio(_bio, rq) \ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 949 | if ((rq->bio)) \ |
| 950 | for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next) |
| 951 | |
NeilBrown | 5705f70 | 2007-09-25 12:35:59 +0200 | [diff] [blame] | 952 | #define rq_for_each_segment(bvl, _rq, _iter) \ |
| 953 | __rq_for_each_bio(_iter.bio, _rq) \ |
Kent Overstreet | 7988613 | 2013-11-23 17:19:00 -0800 | [diff] [blame] | 954 | bio_for_each_segment(bvl, _iter.bio, _iter.iter) |
NeilBrown | 5705f70 | 2007-09-25 12:35:59 +0200 | [diff] [blame] | 955 | |
Kent Overstreet | 4550dd6 | 2013-08-07 14:26:21 -0700 | [diff] [blame] | 956 | #define rq_iter_last(bvec, _iter) \ |
Kent Overstreet | 7988613 | 2013-11-23 17:19:00 -0800 | [diff] [blame] | 957 | (_iter.bio->bi_next == NULL && \ |
Kent Overstreet | 4550dd6 | 2013-08-07 14:26:21 -0700 | [diff] [blame] | 958 | bio_iter_last(bvec, _iter.iter)) |
NeilBrown | 5705f70 | 2007-09-25 12:35:59 +0200 | [diff] [blame] | 959 | |
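/*
 * Usage sketch (illustrative only): summing the bytes in a request by
 * walking its segments with rq_for_each_segment(). A real driver would
 * typically map each bio_vec for DMA instead; the helper name is assumed
 * for the example.
 */
static inline unsigned int example_rq_total_bytes(struct request *rq)
{
	struct req_iterator iter;
	struct bio_vec bvec;
	unsigned int bytes = 0;

	rq_for_each_segment(bvec, rq, iter)
		bytes += bvec.bv_len;

	return bytes;
}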
Ilya Loginov | 2d4dc89 | 2009-11-26 09:16:19 +0100 | [diff] [blame] | 960 | #ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE |
| 961 | # error "You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform" |
| 962 | #endif |
| 963 | #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE |
| 964 | extern void rq_flush_dcache_pages(struct request *rq); |
| 965 | #else |
| 966 | static inline void rq_flush_dcache_pages(struct request *rq) |
| 967 | { |
| 968 | } |
| 969 | #endif |
| 970 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 971 | extern int blk_register_queue(struct gendisk *disk); |
| 972 | extern void blk_unregister_queue(struct gendisk *disk); |
Jens Axboe | dece163 | 2015-11-05 10:41:16 -0700 | [diff] [blame] | 973 | extern blk_qc_t generic_make_request(struct bio *bio); |
Christoph Hellwig | f421e1d | 2017-11-02 21:29:50 +0300 | [diff] [blame] | 974 | extern blk_qc_t direct_make_request(struct bio *bio); |
FUJITA Tomonori | 2a4aa30 | 2008-04-29 09:54:36 +0200 | [diff] [blame] | 975 | extern void blk_rq_init(struct request_queue *q, struct request *rq); |
Bart Van Assche | da8d7f0 | 2017-04-19 14:01:24 -0700 | [diff] [blame] | 976 | extern void blk_init_request_from_bio(struct request *req, struct bio *bio); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 977 | extern void blk_put_request(struct request *); |
Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 978 | extern void __blk_put_request(struct request_queue *, struct request *); |
Bart Van Assche | 6a15674 | 2017-11-09 10:49:54 -0800 | [diff] [blame] | 979 | extern struct request *blk_get_request_flags(struct request_queue *, |
| 980 | unsigned int op, |
Bart Van Assche | 9a95e4e | 2017-11-09 10:49:59 -0800 | [diff] [blame] | 981 | blk_mq_req_flags_t flags); |
Bart Van Assche | cd6ce14 | 2017-06-20 11:15:39 -0700 | [diff] [blame] | 982 | extern struct request *blk_get_request(struct request_queue *, unsigned int op, |
| 983 | gfp_t gfp_mask); |
Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 984 | extern void blk_requeue_request(struct request_queue *, struct request *); |
Kiyoshi Ueda | ef9e3fa | 2008-10-01 16:12:15 +0200 | [diff] [blame] | 985 | extern int blk_lld_busy(struct request_queue *q); |
Mike Snitzer | 78d8e58 | 2015-06-26 10:01:13 -0400 | [diff] [blame] | 986 | extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src, |
| 987 | struct bio_set *bs, gfp_t gfp_mask, |
| 988 | int (*bio_ctr)(struct bio *, struct bio *, void *), |
| 989 | void *data); |
| 990 | extern void blk_rq_unprep_clone(struct request *rq); |
Christoph Hellwig | 2a842ac | 2017-06-03 09:38:04 +0200 | [diff] [blame] | 991 | extern blk_status_t blk_insert_cloned_request(struct request_queue *q, |
Kiyoshi Ueda | 82124d6 | 2008-09-18 10:45:38 -0400 | [diff] [blame] | 992 | struct request *rq); |
Jens Axboe | 0abc2a1 | 2017-12-18 15:40:44 +0800 | [diff] [blame] | 993 | extern int blk_rq_append_bio(struct request *rq, struct bio **bio); |
Jens Axboe | 3cca6dc | 2011-03-02 11:08:00 -0500 | [diff] [blame] | 994 | extern void blk_delay_queue(struct request_queue *, unsigned long); |
NeilBrown | af67c31 | 2017-06-18 14:38:57 +1000 | [diff] [blame] | 995 | extern void blk_queue_split(struct request_queue *, struct bio **); |
Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 996 | extern void blk_recount_segments(struct request_queue *, struct bio *); |
Paolo Bonzini | 0bfc96c | 2012-01-12 16:01:28 +0100 | [diff] [blame] | 997 | extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int); |
Paolo Bonzini | 577ebb3 | 2012-01-12 16:01:27 +0100 | [diff] [blame] | 998 | extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t, |
| 999 | unsigned int, void __user *); |
Al Viro | 74f3c8a | 2007-08-27 15:38:10 -0400 | [diff] [blame] | 1000 | extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t, |
| 1001 | unsigned int, void __user *); |
Al Viro | e915e87 | 2008-09-02 17:16:41 -0400 | [diff] [blame] | 1002 | extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t, |
| 1003 | struct scsi_ioctl_command __user *); |
Andrew Morton | 3fcfab1 | 2006-10-19 23:28:16 -0700 | [diff] [blame] | 1004 | |
Bart Van Assche | 9a95e4e | 2017-11-09 10:49:59 -0800 | [diff] [blame] | 1005 | extern int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags); |
Dan Williams | 2e6edc95 | 2015-11-19 13:29:28 -0800 | [diff] [blame] | 1006 | extern void blk_queue_exit(struct request_queue *q); |
Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 1007 | extern void blk_start_queue(struct request_queue *q); |
Jens Axboe | 2149141 | 2015-12-28 13:01:22 -0700 | [diff] [blame] | 1008 | extern void blk_start_queue_async(struct request_queue *q); |
Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 1009 | extern void blk_stop_queue(struct request_queue *q); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1010 | extern void blk_sync_queue(struct request_queue *q); |
Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 1011 | extern void __blk_stop_queue(struct request_queue *q); |
Christoph Hellwig | 24ecfbe | 2011-04-18 11:41:33 +0200 | [diff] [blame] | 1012 | extern void __blk_run_queue(struct request_queue *q); |
Christoph Hellwig | a7928c1 | 2015-04-17 22:37:20 +0200 | [diff] [blame] | 1013 | extern void __blk_run_queue_uncond(struct request_queue *q); |
Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 1014 | extern void blk_run_queue(struct request_queue *); |
Jens Axboe | c21e6be | 2011-04-19 13:32:46 +0200 | [diff] [blame] | 1015 | extern void blk_run_queue_async(struct request_queue *q); |
FUJITA Tomonori | a3bce90 | 2008-08-28 16:17:05 +0900 | [diff] [blame] | 1016 | extern int blk_rq_map_user(struct request_queue *, struct request *, |
FUJITA Tomonori | 152e283 | 2008-08-28 16:17:06 +0900 | [diff] [blame] | 1017 | struct rq_map_data *, void __user *, unsigned long, |
| 1018 | gfp_t); |
Jens Axboe | 8e5cfc4 | 2006-12-19 11:12:46 +0100 | [diff] [blame] | 1019 | extern int blk_rq_unmap_user(struct bio *); |
Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 1020 | extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t); |
| 1021 | extern int blk_rq_map_user_iov(struct request_queue *, struct request *, |
Kent Overstreet | 26e49cf | 2015-01-18 16:16:31 +0100 | [diff] [blame] | 1022 | struct rq_map_data *, const struct iov_iter *, |
| 1023 | gfp_t); |
Christoph Hellwig | b7819b9 | 2017-04-20 16:02:55 +0200 | [diff] [blame] | 1024 | extern void blk_execute_rq(struct request_queue *, struct gendisk *, |
James Bottomley | 994ca9a | 2005-06-20 14:11:09 +0200 | [diff] [blame] | 1025 | struct request *, int); |
Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 1026 | extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *, |
Jens Axboe | 15fc858 | 2006-01-06 10:00:50 +0100 | [diff] [blame] | 1027 | struct request *, int, rq_end_io_fn *); |
Mike Christie | 6e39b69 | 2005-11-11 05:30:24 -0600 | [diff] [blame] | 1028 | |
Christoph Hellwig | 2a842ac | 2017-06-03 09:38:04 +0200 | [diff] [blame] | 1029 | int blk_status_to_errno(blk_status_t status); |
| 1030 | blk_status_t errno_to_blk_status(int errno); |
| 1031 | |
Christoph Hellwig | ea435e1 | 2017-11-02 21:29:54 +0300 | [diff] [blame] | 1032 | bool blk_poll(struct request_queue *q, blk_qc_t cookie); |
Jens Axboe | 05229beed | 2015-11-05 10:44:55 -0700 | [diff] [blame] | 1033 | |
Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 1034 | static inline struct request_queue *bdev_get_queue(struct block_device *bdev) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1035 | { |
Tejun Heo | ff9ea32 | 2014-09-08 08:03:56 +0900 | [diff] [blame] | 1036 | return bdev->bd_disk->queue; /* this is never NULL */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1037 | } |
| 1038 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1039 | /* |
Bart Van Assche | 233bde2 | 2018-03-14 15:48:06 -0700 | [diff] [blame] | 1040 | * The basic unit of block I/O is a sector. It is used in a number of contexts |
| 1041 | * in Linux (blk, bio, genhd). The size of one sector is 512 = 2**9 |
| 1042 | * bytes. Variables of type sector_t represent an offset or size that is a |
| 1043 | * multiple of 512 bytes. Hence these two constants. |
| 1044 | */ |
| 1045 | #ifndef SECTOR_SHIFT |
| 1046 | #define SECTOR_SHIFT 9 |
| 1047 | #endif |
| 1048 | #ifndef SECTOR_SIZE |
| 1049 | #define SECTOR_SIZE (1 << SECTOR_SHIFT) |
| 1050 | #endif |
| 1051 | |
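/*
 * Usage sketch (illustrative only): byte/sector conversions with the
 * constants above; the helper names are assumed for the example.
 */
static inline sector_t example_bytes_to_sectors(u64 bytes)
{
	return bytes >> SECTOR_SHIFT;
}

static inline u64 example_sectors_to_bytes(sector_t sectors)
{
	return (u64)sectors << SECTOR_SHIFT;
}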
| 1052 | /* |
Tejun Heo | 80a761f | 2009-07-03 17:48:17 +0900 | [diff] [blame] | 1053 | * blk_rq_pos() : the current sector |
| 1054 | * blk_rq_bytes() : bytes left in the entire request |
| 1055 | * blk_rq_cur_bytes() : bytes left in the current segment |
| 1056 | * blk_rq_err_bytes() : bytes left till the next error boundary |
| 1057 | * blk_rq_sectors() : sectors left in the entire request |
| 1058 | * blk_rq_cur_sectors() : sectors left in the current segment |
Tejun Heo | 5efccd1 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 1059 | */ |
Tejun Heo | 5b93629 | 2009-05-07 22:24:38 +0900 | [diff] [blame] | 1060 | static inline sector_t blk_rq_pos(const struct request *rq) |
| 1061 | { |
Tejun Heo | a2dec7b | 2009-05-07 22:24:44 +0900 | [diff] [blame] | 1062 | return rq->__sector; |
Tejun Heo | 5b93629 | 2009-05-07 22:24:38 +0900 | [diff] [blame] | 1063 | } |
| 1064 | |
Tejun Heo | 2e46e8b | 2009-05-07 22:24:41 +0900 | [diff] [blame] | 1065 | static inline unsigned int blk_rq_bytes(const struct request *rq) |
| 1066 | { |
Tejun Heo | a2dec7b | 2009-05-07 22:24:44 +0900 | [diff] [blame] | 1067 | return rq->__data_len; |
Tejun Heo | 2e46e8b | 2009-05-07 22:24:41 +0900 | [diff] [blame] | 1068 | } |
| 1069 | |
| 1070 | static inline int blk_rq_cur_bytes(const struct request *rq) |
| 1071 | { |
| 1072 | return rq->bio ? bio_cur_bytes(rq->bio) : 0; |
| 1073 | } |
Tejun Heo | 5efccd1 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 1074 | |
Tejun Heo | 80a761f | 2009-07-03 17:48:17 +0900 | [diff] [blame] | 1075 | extern unsigned int blk_rq_err_bytes(const struct request *rq); |
| 1076 | |
Tejun Heo | 5b93629 | 2009-05-07 22:24:38 +0900 | [diff] [blame] | 1077 | static inline unsigned int blk_rq_sectors(const struct request *rq) |
| 1078 | { |
Bart Van Assche | 233bde2 | 2018-03-14 15:48:06 -0700 | [diff] [blame] | 1079 | return blk_rq_bytes(rq) >> SECTOR_SHIFT; |
Tejun Heo | 5b93629 | 2009-05-07 22:24:38 +0900 | [diff] [blame] | 1080 | } |
| 1081 | |
| 1082 | static inline unsigned int blk_rq_cur_sectors(const struct request *rq) |
| 1083 | { |
Bart Van Assche | 233bde2 | 2018-03-14 15:48:06 -0700 | [diff] [blame] | 1084 | return blk_rq_cur_bytes(rq) >> SECTOR_SHIFT; |
Tejun Heo | 5b93629 | 2009-05-07 22:24:38 +0900 | [diff] [blame] | 1085 | } |
| 1086 | |
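/*
 * Usage sketch (illustrative only): the sector just past the end of a
 * request follows directly from the accessors above; the helper name is
 * assumed for the example.
 */
static inline sector_t example_blk_rq_end_sector(const struct request *rq)
{
	return blk_rq_pos(rq) + blk_rq_sectors(rq);
}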
Christoph Hellwig | 6cc77e9 | 2017-12-21 15:43:38 +0900 | [diff] [blame] | 1087 | static inline unsigned int blk_rq_zone_no(struct request *rq) |
| 1088 | { |
| 1089 | return blk_queue_zone_no(rq->q, blk_rq_pos(rq)); |
| 1090 | } |
| 1091 | |
| 1092 | static inline unsigned int blk_rq_zone_is_seq(struct request *rq) |
| 1093 | { |
| 1094 | return blk_queue_zone_is_seq(rq->q, blk_rq_pos(rq)); |
| 1095 | } |
| 1096 | |
Christoph Hellwig | 2e3258e | 2017-01-13 12:29:10 +0100 | [diff] [blame] | 1097 | /* |
| 1098 | * Some commands like WRITE SAME have a payload or data transfer size which |
| 1099 | * is different from the size of the request. Any driver that supports such |
| 1100 | * commands using the RQF_SPECIAL_PAYLOAD flag needs to use this helper to |
| 1101 | * calculate the data transfer size. |
| 1102 | */ |
| 1103 | static inline unsigned int blk_rq_payload_bytes(struct request *rq) |
| 1104 | { |
| 1105 | if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) |
| 1106 | return rq->special_vec.bv_len; |
| 1107 | return blk_rq_bytes(rq); |
| 1108 | } |
| 1109 | |
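/*
 * Worked example (illustrative): a WRITE SAME request that replicates one
 * 512-byte block across 1024 sectors has blk_rq_bytes() == 512 KiB, but
 * blk_rq_payload_bytes() == 512, the length of special_vec and the only
 * data actually transferred from host memory.
 */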
Martin K. Petersen | f31dc1c | 2012-09-18 12:19:26 -0400 | [diff] [blame] | 1110 | static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q, |
Mike Christie | 8fe0d47 | 2016-06-05 14:32:15 -0500 | [diff] [blame] | 1111 | int op) |
Martin K. Petersen | f31dc1c | 2012-09-18 12:19:26 -0400 | [diff] [blame] | 1112 | { |
Adrian Hunter | 7afafc8 | 2016-08-16 10:59:35 +0300 | [diff] [blame] | 1113 | if (unlikely(op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE)) |
Bart Van Assche | 233bde2 | 2018-03-14 15:48:06 -0700 | [diff] [blame] | 1114 | return min(q->limits.max_discard_sectors, |
| 1115 | UINT_MAX >> SECTOR_SHIFT); |
Martin K. Petersen | f31dc1c | 2012-09-18 12:19:26 -0400 | [diff] [blame] | 1116 | |
Mike Christie | 8fe0d47 | 2016-06-05 14:32:15 -0500 | [diff] [blame] | 1117 | if (unlikely(op == REQ_OP_WRITE_SAME)) |
Martin K. Petersen | 4363ac7 | 2012-09-18 12:19:27 -0400 | [diff] [blame] | 1118 | return q->limits.max_write_same_sectors; |
| 1119 | |
Chaitanya Kulkarni | a6f0788 | 2016-11-30 12:28:59 -0800 | [diff] [blame] | 1120 | if (unlikely(op == REQ_OP_WRITE_ZEROES)) |
| 1121 | return q->limits.max_write_zeroes_sectors; |
| 1122 | |
Martin K. Petersen | f31dc1c | 2012-09-18 12:19:26 -0400 | [diff] [blame] | 1123 | return q->limits.max_sectors; |
| 1124 | } |
| 1125 | |
Jens Axboe | 762380a | 2014-06-05 13:38:39 -0600 | [diff] [blame] | 1126 | /* |
| 1127 | * Return the maximum size of a request at a given offset. Only valid for |
| 1128 | * file system requests. |
| 1129 | */ |
| 1130 | static inline unsigned int blk_max_size_offset(struct request_queue *q, |
| 1131 | sector_t offset) |
| 1132 | { |
| 1133 | if (!q->limits.chunk_sectors) |
Jens Axboe | 736ed4d | 2014-06-17 22:09:29 -0700 | [diff] [blame] | 1134 | return q->limits.max_sectors; |
Jens Axboe | 762380a | 2014-06-05 13:38:39 -0600 | [diff] [blame] | 1135 | |
| 1136 | return q->limits.chunk_sectors - |
| 1137 | (offset & (q->limits.chunk_sectors - 1)); |
| 1138 | } |
| 1139 | |
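/*
 * Worked example (illustrative): with chunk_sectors == 256 and
 * offset == 200, the largest request is 256 - (200 & 255) = 56 sectors,
 * i.e. exactly up to the next chunk boundary. chunk_sectors must be a
 * power of two for the mask arithmetic above to hold.
 */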
Damien Le Moal | 17007f3 | 2016-07-20 21:40:47 -0600 | [diff] [blame] | 1140 | static inline unsigned int blk_rq_get_max_sectors(struct request *rq, |
| 1141 | sector_t offset) |
Martin K. Petersen | f31dc1c | 2012-09-18 12:19:26 -0400 | [diff] [blame] | 1142 | { |
| 1143 | struct request_queue *q = rq->q; |
| 1144 | |
Christoph Hellwig | 57292b5 | 2017-01-31 16:57:29 +0100 | [diff] [blame] | 1145 | if (blk_rq_is_passthrough(rq)) |
Martin K. Petersen | f31dc1c | 2012-09-18 12:19:26 -0400 | [diff] [blame] | 1146 | return q->limits.max_hw_sectors; |
| 1147 | |
Adrian Hunter | 7afafc8 | 2016-08-16 10:59:35 +0300 | [diff] [blame] | 1148 | if (!q->limits.chunk_sectors || |
| 1149 | req_op(rq) == REQ_OP_DISCARD || |
| 1150 | req_op(rq) == REQ_OP_SECURE_ERASE) |
Mike Christie | 8fe0d47 | 2016-06-05 14:32:15 -0500 | [diff] [blame] | 1151 | return blk_queue_get_max_sectors(q, req_op(rq)); |
Jens Axboe | 762380a | 2014-06-05 13:38:39 -0600 | [diff] [blame] | 1152 | |
Damien Le Moal | 17007f3 | 2016-07-20 21:40:47 -0600 | [diff] [blame] | 1153 | return min(blk_max_size_offset(q, offset), |
Mike Christie | 8fe0d47 | 2016-06-05 14:32:15 -0500 | [diff] [blame] | 1154 | blk_queue_get_max_sectors(q, req_op(rq))); |
Martin K. Petersen | f31dc1c | 2012-09-18 12:19:26 -0400 | [diff] [blame] | 1155 | } |
| 1156 | |
Jun'ichi Nomura | 75afb35 | 2013-09-21 13:57:47 -0600 | [diff] [blame] | 1157 | static inline unsigned int blk_rq_count_bios(struct request *rq) |
| 1158 | { |
| 1159 | unsigned int nr_bios = 0; |
| 1160 | struct bio *bio; |
| 1161 | |
| 1162 | __rq_for_each_bio(bio, rq) |
| 1163 | nr_bios++; |
| 1164 | |
| 1165 | return nr_bios; |
| 1166 | } |
| 1167 | |
Tejun Heo | 5efccd1 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 1168 | /* |
Tejun Heo | 9934c8c | 2009-05-08 11:54:16 +0900 | [diff] [blame] | 1169 | * Request issue related functions. |
| 1170 | */ |
| 1171 | extern struct request *blk_peek_request(struct request_queue *q); |
| 1172 | extern void blk_start_request(struct request *rq); |
| 1173 | extern struct request *blk_fetch_request(struct request_queue *q); |
| 1174 | |
Christoph Hellwig | ef71de8 | 2017-11-02 21:29:51 +0300 | [diff] [blame] | 1175 | void blk_steal_bios(struct bio_list *list, struct request *rq); |
| 1176 | |
Tejun Heo | 9934c8c | 2009-05-08 11:54:16 +0900 | [diff] [blame] | 1177 | /* |
Tejun Heo | 2e60e02 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 1178 | * Request completion related functions. |
| 1179 | * |
| 1180 | * blk_update_request() completes given number of bytes and updates |
| 1181 | * the request without completing it. |
| 1182 | * |
Tejun Heo | f06d9a2 | 2009-04-23 11:05:19 +0900 | [diff] [blame] | 1183 | * blk_end_request() and friends. __blk_end_request() must be called |
| 1184 | * with the request queue spinlock acquired. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1185 | * |
| 1186 | * Several drivers define their own end_request and call |
Kiyoshi Ueda | 3bcddea | 2007-12-11 17:52:28 -0500 | [diff] [blame] | 1187 | * blk_end_request() for parts of the original function. |
| 1188 | * This prevents code duplication in drivers. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1189 | */ |
Christoph Hellwig | 2a842ac | 2017-06-03 09:38:04 +0200 | [diff] [blame] | 1190 | extern bool blk_update_request(struct request *rq, blk_status_t error, |
Tejun Heo | 2e60e02 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 1191 | unsigned int nr_bytes); |
Christoph Hellwig | 2a842ac | 2017-06-03 09:38:04 +0200 | [diff] [blame] | 1192 | extern void blk_finish_request(struct request *rq, blk_status_t error); |
| 1193 | extern bool blk_end_request(struct request *rq, blk_status_t error, |
FUJITA Tomonori | b1f7449 | 2009-05-11 17:56:09 +0900 | [diff] [blame] | 1194 | unsigned int nr_bytes); |
Christoph Hellwig | 2a842ac | 2017-06-03 09:38:04 +0200 | [diff] [blame] | 1195 | extern void blk_end_request_all(struct request *rq, blk_status_t error); |
| 1196 | extern bool __blk_end_request(struct request *rq, blk_status_t error, |
FUJITA Tomonori | b1f7449 | 2009-05-11 17:56:09 +0900 | [diff] [blame] | 1197 | unsigned int nr_bytes); |
Christoph Hellwig | 2a842ac | 2017-06-03 09:38:04 +0200 | [diff] [blame] | 1198 | extern void __blk_end_request_all(struct request *rq, blk_status_t error); |
| 1199 | extern bool __blk_end_request_cur(struct request *rq, blk_status_t error); |
Tejun Heo | 2e60e02 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 1200 | |
Jens Axboe | ff856ba | 2006-01-09 16:02:34 +0100 | [diff] [blame] | 1201 | extern void blk_complete_request(struct request *); |
Jens Axboe | 242f9dc | 2008-09-14 05:55:09 -0700 | [diff] [blame] | 1202 | extern void __blk_complete_request(struct request *); |
| 1203 | extern void blk_abort_request(struct request *); |
James Bottomley | 28018c2 | 2010-07-01 19:49:17 +0900 | [diff] [blame] | 1204 | extern void blk_unprep_request(struct request *); |
Jens Axboe | ff856ba | 2006-01-09 16:02:34 +0100 | [diff] [blame] | 1205 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1206 | /* |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1207 | * Access functions for manipulating queue properties |
| 1208 | */ |
Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 1209 | extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn, |
Christoph Lameter | 1946089 | 2005-06-23 00:08:19 -0700 | [diff] [blame] | 1210 | spinlock_t *lock, int node_id); |
Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 1211 | extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *); |
Christoph Hellwig | 5ea708d | 2017-01-03 14:52:44 +0300 | [diff] [blame] | 1212 | extern int blk_init_allocated_queue(struct request_queue *); |
Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 1213 | extern void blk_cleanup_queue(struct request_queue *); |
| 1214 | extern void blk_queue_make_request(struct request_queue *, make_request_fn *); |
| 1215 | extern void blk_queue_bounce_limit(struct request_queue *, u64); |
Martin K. Petersen | 086fa5f | 2010-02-26 00:20:38 -0500 | [diff] [blame] | 1216 | extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int); |
Jens Axboe | 762380a | 2014-06-05 13:38:39 -0600 | [diff] [blame] | 1217 | extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int); |
Martin K. Petersen | 8a78362 | 2010-02-26 00:20:39 -0500 | [diff] [blame] | 1218 | extern void blk_queue_max_segments(struct request_queue *, unsigned short); |
Christoph Hellwig | 1e73973 | 2017-02-08 14:46:49 +0100 | [diff] [blame] | 1219 | extern void blk_queue_max_discard_segments(struct request_queue *, |
| 1220 | unsigned short); |
Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 1221 | extern void blk_queue_max_segment_size(struct request_queue *, unsigned int); |
Christoph Hellwig | 67efc92 | 2009-09-30 13:54:20 +0200 | [diff] [blame] | 1222 | extern void blk_queue_max_discard_sectors(struct request_queue *q, |
| 1223 | unsigned int max_discard_sectors); |
Martin K. Petersen | 4363ac7 | 2012-09-18 12:19:27 -0400 | [diff] [blame] | 1224 | extern void blk_queue_max_write_same_sectors(struct request_queue *q, |
| 1225 | unsigned int max_write_same_sectors); |
Chaitanya Kulkarni | a6f0788 | 2016-11-30 12:28:59 -0800 | [diff] [blame] | 1226 | extern void blk_queue_max_write_zeroes_sectors(struct request_queue *q, |
| 1227 | unsigned int max_write_zeroes_sectors); |
Martin K. Petersen | e1defc4 | 2009-05-22 17:17:49 -0400 | [diff] [blame] | 1228 | extern void blk_queue_logical_block_size(struct request_queue *, unsigned short); |
Martin K. Petersen | 892b6f9 | 2010-10-13 21:18:03 +0200 | [diff] [blame] | 1229 | extern void blk_queue_physical_block_size(struct request_queue *, unsigned int); |
Martin K. Petersen | c72758f | 2009-05-22 17:17:53 -0400 | [diff] [blame] | 1230 | extern void blk_queue_alignment_offset(struct request_queue *q, |
| 1231 | unsigned int alignment); |
Martin K. Petersen | 7c958e3 | 2009-07-31 11:49:11 -0400 | [diff] [blame] | 1232 | extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min); |
Martin K. Petersen | c72758f | 2009-05-22 17:17:53 -0400 | [diff] [blame] | 1233 | extern void blk_queue_io_min(struct request_queue *q, unsigned int min); |
Martin K. Petersen | 3c5820c | 2009-09-11 21:54:52 +0200 | [diff] [blame] | 1234 | extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt); |
Martin K. Petersen | c72758f | 2009-05-22 17:17:53 -0400 | [diff] [blame] | 1235 | extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt); |
Jens Axboe | d278d4a | 2016-03-30 10:21:08 -0600 | [diff] [blame] | 1236 | extern void blk_set_queue_depth(struct request_queue *q, unsigned int depth); |
Martin K. Petersen | e475bba | 2009-06-16 08:23:52 +0200 | [diff] [blame] | 1237 | extern void blk_set_default_limits(struct queue_limits *lim); |
Martin K. Petersen | b1bd055 | 2012-01-11 16:27:11 +0100 | [diff] [blame] | 1238 | extern void blk_set_stacking_limits(struct queue_limits *lim); |
Martin K. Petersen | c72758f | 2009-05-22 17:17:53 -0400 | [diff] [blame] | 1239 | extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, |
| 1240 | sector_t offset); |
Martin K. Petersen | 17be8c2 | 2010-01-11 03:21:49 -0500 | [diff] [blame] | 1241 | extern int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev, |
| 1242 | sector_t offset); |
Martin K. Petersen | c72758f | 2009-05-22 17:17:53 -0400 | [diff] [blame] | 1243 | extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev, |
| 1244 | sector_t offset); |
Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 1245 | extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b); |
Tejun Heo | e3790c7 | 2008-03-04 11:18:17 +0100 | [diff] [blame] | 1246 | extern void blk_queue_dma_pad(struct request_queue *, unsigned int); |
FUJITA Tomonori | 27f8221 | 2008-07-04 09:30:03 +0200 | [diff] [blame] | 1247 | extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int); |
Tejun Heo | 2fb98e8 | 2008-02-19 11:36:53 +0100 | [diff] [blame] | 1248 | extern int blk_queue_dma_drain(struct request_queue *q, |
| 1249 | dma_drain_needed_fn *dma_drain_needed, |
| 1250 | void *buf, unsigned int size); |
Kiyoshi Ueda | ef9e3fa | 2008-10-01 16:12:15 +0200 | [diff] [blame] | 1251 | extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn); |
Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 1252 | extern void blk_queue_segment_boundary(struct request_queue *, unsigned long); |
Keith Busch | 03100aa | 2015-08-19 14:24:05 -0700 | [diff] [blame] | 1253 | extern void blk_queue_virt_boundary(struct request_queue *, unsigned long); |
Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 1254 | extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn); |
James Bottomley | 28018c2 | 2010-07-01 19:49:17 +0900 | [diff] [blame] | 1255 | extern void blk_queue_unprep_rq(struct request_queue *, unprep_rq_fn *ufn); |
Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 1256 | extern void blk_queue_dma_alignment(struct request_queue *, int); |
James Bottomley | 11c3e68 | 2007-12-31 16:37:00 -0600 | [diff] [blame] | 1257 | extern void blk_queue_update_dma_alignment(struct request_queue *, int); |
Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 1258 | extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *); |
Jens Axboe | 242f9dc | 2008-09-14 05:55:09 -0700 | [diff] [blame] | 1259 | extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *); |
| 1260 | extern void blk_queue_rq_timeout(struct request_queue *, unsigned int); |
shaohua.li@intel.com | f387693 | 2011-05-06 11:34:32 -0600 | [diff] [blame] | 1261 | extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable); |
Jens Axboe | 93e9d8e | 2016-04-12 12:32:46 -0600 | [diff] [blame] | 1262 | extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1263 | |
Christoph Hellwig | 1e73973 | 2017-02-08 14:46:49 +0100 | [diff] [blame] | 1264 | /* |
| 1265 | * Number of physical segments as sent to the device. |
| 1266 | * |
| 1267 | * Normally this is the number of discontiguous data segments sent by the |
| 1268 | * submitter. But for data-less commands like discard we might have no |
| 1269 | * actual data segments submitted, but the driver might have to add its |
| 1270 | * own special payload. In that case we still return 1 here so that this |
| 1271 | * special payload will be mapped. |
| 1272 | */ |
Christoph Hellwig | f9d03f9 | 2016-12-08 15:20:32 -0700 | [diff] [blame] | 1273 | static inline unsigned short blk_rq_nr_phys_segments(struct request *rq) |
| 1274 | { |
| 1275 | if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) |
| 1276 | return 1; |
| 1277 | return rq->nr_phys_segments; |
| 1278 | } |
| 1279 | |
Christoph Hellwig | 1e73973 | 2017-02-08 14:46:49 +0100 | [diff] [blame] | 1280 | /* |
| 1281 | * Number of discard segments (or ranges) the driver needs to fill in. |
| 1282 | * Each discard bio merged into a request is counted as one segment. |
| 1283 | */ |
| 1284 | static inline unsigned short blk_rq_nr_discard_segments(struct request *rq) |
| 1285 | { |
| 1286 | return max_t(unsigned short, rq->nr_phys_segments, 1); |
| 1287 | } |
| 1288 | |
Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 1289 | extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1290 | extern void blk_dump_rq_flags(struct request *, char *); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1291 | extern long nr_blockdev_pages(void); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1292 | |
Tejun Heo | 09ac46c | 2011-12-14 00:33:38 +0100 | [diff] [blame] | 1293 | bool __must_check blk_get_queue(struct request_queue *); |
Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 1294 | struct request_queue *blk_alloc_queue(gfp_t); |
Bart Van Assche | 5ee0524 | 2018-02-28 10:15:31 -0800 | [diff] [blame] | 1295 | struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id, |
| 1296 | spinlock_t *lock); |
Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 1297 | extern void blk_put_queue(struct request_queue *); |
Jens Axboe | 3f21c26 | 2015-06-05 10:57:37 -0600 | [diff] [blame] | 1298 | extern void blk_set_queue_dying(struct request_queue *); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1299 | |
Shaohua Li | 316cc67 | 2011-07-08 08:19:21 +0200 | [diff] [blame] | 1300 | /* |
Lin Ming | 6c95466 | 2013-03-23 11:42:26 +0800 | [diff] [blame] | 1301 | * Block layer runtime PM functions |
| 1302 | */ |
Rafael J. Wysocki | 47fafbc | 2014-12-04 01:00:23 +0100 | [diff] [blame] | 1303 | #ifdef CONFIG_PM |
Lin Ming | 6c95466 | 2013-03-23 11:42:26 +0800 | [diff] [blame] | 1304 | extern void blk_pm_runtime_init(struct request_queue *q, struct device *dev); |
| 1305 | extern int blk_pre_runtime_suspend(struct request_queue *q); |
| 1306 | extern void blk_post_runtime_suspend(struct request_queue *q, int err); |
| 1307 | extern void blk_pre_runtime_resume(struct request_queue *q); |
| 1308 | extern void blk_post_runtime_resume(struct request_queue *q, int err); |
Mika Westerberg | d07ab6d | 2016-02-18 10:54:11 +0200 | [diff] [blame] | 1309 | extern void blk_set_runtime_active(struct request_queue *q); |
Lin Ming | 6c95466 | 2013-03-23 11:42:26 +0800 | [diff] [blame] | 1310 | #else |
| 1311 | static inline void blk_pm_runtime_init(struct request_queue *q, |
| 1312 | struct device *dev) {} |
| 1313 | static inline int blk_pre_runtime_suspend(struct request_queue *q) |
| 1314 | { |
| 1315 | return -ENOSYS; |
| 1316 | } |
| 1317 | static inline void blk_post_runtime_suspend(struct request_queue *q, int err) {} |
| 1318 | static inline void blk_pre_runtime_resume(struct request_queue *q) {} |
| 1319 | static inline void blk_post_runtime_resume(struct request_queue *q, int err) {} |
Tobias Klauser | 9a05e75 | 2016-11-18 15:16:06 +0100 | [diff] [blame] | 1320 | static inline void blk_set_runtime_active(struct request_queue *q) {} |
Lin Ming | 6c95466 | 2013-03-23 11:42:26 +0800 | [diff] [blame] | 1321 | #endif |
| 1322 | |
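/*
 * Usage sketch (illustrative only): the expected call order from a
 * driver's runtime-PM suspend callback; the device-specific suspend step
 * is assumed for the example.
 */
static inline int example_runtime_suspend(struct request_queue *q)
{
	int err;

	err = blk_pre_runtime_suspend(q);
	if (err)
		return err;		/* queue still busy, stay active */

	err = 0;			/* device-specific suspend goes here */
	blk_post_runtime_suspend(q, err);
	return err;
}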
| 1323 | /* |
Suresh Jayaraman | 75df713 | 2011-09-21 10:00:16 +0200 | [diff] [blame] | 1324 | * blk_plug permits building a queue of related requests by holding the I/O |
| 1325 | * fragments for a short period. This allows merging of sequential requests |
| 1326 | * into a single larger request. As the requests are moved from a per-task list to |
| 1327 | * the device's request_queue in a batch, this results in improved scalability |
| 1328 | * as contention on the request_queue lock is reduced. |
| 1329 | * |
| 1330 | * It is OK not to disable preemption when adding the request to the plug list |
| 1331 | * or when attempting a merge, because blk_schedule_flush_plug() will only flush |
| 1332 | * the plug list when the task sleeps by itself. For details, please see |
| 1333 | * schedule() where blk_schedule_flush_plug() is called. |
Shaohua Li | 316cc67 | 2011-07-08 08:19:21 +0200 | [diff] [blame] | 1334 | */ |
Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 1335 | struct blk_plug { |
Suresh Jayaraman | 75df713 | 2011-09-21 10:00:16 +0200 | [diff] [blame] | 1336 | struct list_head list; /* requests */ |
Jens Axboe | 320ae51 | 2013-10-24 09:20:05 +0100 | [diff] [blame] | 1337 | struct list_head mq_list; /* blk-mq requests */ |
Suresh Jayaraman | 75df713 | 2011-09-21 10:00:16 +0200 | [diff] [blame] | 1338 | struct list_head cb_list; /* md requires an unplug callback */ |
Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 1339 | }; |
Shaohua Li | 55c022b | 2011-07-08 08:19:20 +0200 | [diff] [blame] | 1340 | #define BLK_MAX_REQUEST_COUNT 16 |
Shaohua Li | 50d24c3 | 2016-11-03 17:03:53 -0700 | [diff] [blame] | 1341 | #define BLK_PLUG_FLUSH_SIZE (128 * 1024) |
Shaohua Li | 55c022b | 2011-07-08 08:19:20 +0200 | [diff] [blame] | 1342 | |
NeilBrown | 9cbb175 | 2012-07-31 09:08:14 +0200 | [diff] [blame] | 1343 | struct blk_plug_cb; |
NeilBrown | 74018dc | 2012-07-31 09:08:15 +0200 | [diff] [blame] | 1344 | typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool); |
NeilBrown | 048c937 | 2011-04-18 09:52:22 +0200 | [diff] [blame] | 1345 | struct blk_plug_cb { |
| 1346 | struct list_head list; |
NeilBrown | 9cbb175 | 2012-07-31 09:08:14 +0200 | [diff] [blame] | 1347 | blk_plug_cb_fn callback; |
| 1348 | void *data; |
NeilBrown | 048c937 | 2011-04-18 09:52:22 +0200 | [diff] [blame] | 1349 | }; |
NeilBrown | 9cbb175 | 2012-07-31 09:08:14 +0200 | [diff] [blame] | 1350 | extern struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, |
| 1351 | void *data, int size); |
Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 1352 | extern void blk_start_plug(struct blk_plug *); |
| 1353 | extern void blk_finish_plug(struct blk_plug *); |
Jens Axboe | f660378 | 2011-04-15 15:49:07 +0200 | [diff] [blame] | 1354 | extern void blk_flush_plug_list(struct blk_plug *, bool); |
Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 1355 | |
| 1356 | static inline void blk_flush_plug(struct task_struct *tsk) |
| 1357 | { |
| 1358 | struct blk_plug *plug = tsk->plug; |
| 1359 | |
Christoph Hellwig | 88b996c | 2011-04-15 15:20:10 +0200 | [diff] [blame] | 1360 | if (plug) |
Jens Axboe | a237c1c | 2011-04-16 13:27:55 +0200 | [diff] [blame] | 1361 | blk_flush_plug_list(plug, false); |
| 1362 | } |
| 1363 | |
| 1364 | static inline void blk_schedule_flush_plug(struct task_struct *tsk) |
| 1365 | { |
| 1366 | struct blk_plug *plug = tsk->plug; |
| 1367 | |
| 1368 | if (plug) |
Jens Axboe | f660378 | 2011-04-15 15:49:07 +0200 | [diff] [blame] | 1369 | blk_flush_plug_list(plug, true); |
Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 1370 | } |
| 1371 | |
| 1372 | static inline bool blk_needs_flush_plug(struct task_struct *tsk) |
| 1373 | { |
| 1374 | struct blk_plug *plug = tsk->plug; |
| 1375 | |
Jens Axboe | 320ae51 | 2013-10-24 09:20:05 +0100 | [diff] [blame] | 1376 | return plug && |
| 1377 | (!list_empty(&plug->list) || |
| 1378 | !list_empty(&plug->mq_list) || |
| 1379 | !list_empty(&plug->cb_list)); |
Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 1380 | } |
| 1381 | |
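/*
 * Usage sketch (illustrative only): batching several bios under a plug so
 * adjacent requests can be merged before reaching the device. The
 * singly-linked bio list is assumed for the example.
 */
static inline void example_submit_batch(struct bio *bios)
{
	struct blk_plug plug;

	blk_start_plug(&plug);
	while (bios) {
		struct bio *next = bios->bi_next;

		bios->bi_next = NULL;
		generic_make_request(bios);
		bios = next;
	}
	blk_finish_plug(&plug);		/* flush the plugged requests */
}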
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1382 | /* |
| 1383 | * tag stuff |
| 1384 | */ |
Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 1385 | extern int blk_queue_start_tag(struct request_queue *, struct request *); |
| 1386 | extern struct request *blk_queue_find_tag(struct request_queue *, int); |
| 1387 | extern void blk_queue_end_tag(struct request_queue *, struct request *); |
Shaohua Li | ee1b6f7 | 2015-01-15 17:32:25 -0800 | [diff] [blame] | 1388 | extern int blk_queue_init_tags(struct request_queue *, int, struct blk_queue_tag *, int); |
Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 1389 | extern void blk_queue_free_tags(struct request_queue *); |
| 1390 | extern int blk_queue_resize_tags(struct request_queue *, int); |
| 1391 | extern void blk_queue_invalidate_tags(struct request_queue *); |
Shaohua Li | ee1b6f7 | 2015-01-15 17:32:25 -0800 | [diff] [blame] | 1392 | extern struct blk_queue_tag *blk_init_tags(int, int); |
James Bottomley | 492dfb4 | 2006-08-30 15:48:45 -0400 | [diff] [blame] | 1393 | extern void blk_free_tags(struct blk_queue_tag *); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1394 | |
David C Somayajulu | f583f49 | 2006-10-04 08:27:25 +0200 | [diff] [blame] | 1395 | static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt, |
| 1396 | int tag) |
| 1397 | { |
| 1398 | if (unlikely(bqt == NULL || tag >= bqt->real_max_depth)) |
| 1399 | return NULL; |
| 1400 | return bqt->tag_index[tag]; |
| 1401 | } |
Christoph Hellwig | dd3932e | 2010-09-16 20:51:46 +0200 | [diff] [blame] | 1402 | |
Christoph Hellwig | ee472d8 | 2017-04-05 19:21:08 +0200 | [diff] [blame] | 1403 | extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *); |
| 1404 | extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector, |
| 1405 | sector_t nr_sects, gfp_t gfp_mask, struct page *page); |
Christoph Hellwig | e950fdf | 2016-07-19 11:23:33 +0200 | [diff] [blame] | 1406 | |
| 1407 | #define BLKDEV_DISCARD_SECURE (1 << 0) /* issue a secure erase */ |
Christoph Hellwig | dd3932e | 2010-09-16 20:51:46 +0200 | [diff] [blame] | 1408 | |
Dmitry Monakhov | fbd9b09 | 2010-04-28 17:55:06 +0400 | [diff] [blame] | 1409 | extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector, |
| 1410 | sector_t nr_sects, gfp_t gfp_mask, unsigned long flags); |
Christoph Hellwig | 38f2525 | 2016-04-16 14:55:28 -0400 | [diff] [blame] | 1411 | extern int __blkdev_issue_discard(struct block_device *bdev, sector_t sector, |
Christoph Hellwig | 288dab8 | 2016-06-09 16:00:36 +0200 | [diff] [blame] | 1412 | sector_t nr_sects, gfp_t gfp_mask, int flags, |
Mike Christie | 469e321 | 2016-06-05 14:31:49 -0500 | [diff] [blame] | 1413 | struct bio **biop); |
Christoph Hellwig | ee472d8 | 2017-04-05 19:21:08 +0200 | [diff] [blame] | 1414 | |
| 1415 | #define BLKDEV_ZERO_NOUNMAP (1 << 0) /* do not free blocks */ |
Christoph Hellwig | cb365b9 | 2017-04-05 19:21:10 +0200 | [diff] [blame] | 1416 | #define BLKDEV_ZERO_NOFALLBACK (1 << 1) /* don't write explicit zeroes */ |
Christoph Hellwig | ee472d8 | 2017-04-05 19:21:08 +0200 | [diff] [blame] | 1417 | |
Chaitanya Kulkarni | e73c23f | 2016-11-30 12:28:58 -0800 | [diff] [blame] | 1418 | extern int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, |
| 1419 | sector_t nr_sects, gfp_t gfp_mask, struct bio **biop, |
Christoph Hellwig | ee472d8 | 2017-04-05 19:21:08 +0200 | [diff] [blame] | 1420 | unsigned flags); |
Dmitry Monakhov | 3f14d79 | 2010-04-28 17:55:09 +0400 | [diff] [blame] | 1421 | extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, |
Christoph Hellwig | ee472d8 | 2017-04-05 19:21:08 +0200 | [diff] [blame] | 1422 | sector_t nr_sects, gfp_t gfp_mask, unsigned flags); |
| 1423 | |
Christoph Hellwig | 2cf6d26 | 2010-08-18 05:29:10 -0400 | [diff] [blame] | 1424 | static inline int sb_issue_discard(struct super_block *sb, sector_t block, |
| 1425 | sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags) |
David Woodhouse | fb2dce8 | 2008-08-05 18:01:53 +0100 | [diff] [blame] | 1426 | { |
Bart Van Assche | 233bde2 | 2018-03-14 15:48:06 -0700 | [diff] [blame] | 1427 | return blkdev_issue_discard(sb->s_bdev, |
| 1428 | block << (sb->s_blocksize_bits - |
| 1429 | SECTOR_SHIFT), |
| 1430 | nr_blocks << (sb->s_blocksize_bits - |
| 1431 | SECTOR_SHIFT), |
Christoph Hellwig | 2cf6d26 | 2010-08-18 05:29:10 -0400 | [diff] [blame] | 1432 | gfp_mask, flags); |
David Woodhouse | fb2dce8 | 2008-08-05 18:01:53 +0100 | [diff] [blame] | 1433 | } |
Lukas Czerner | e6fa0be | 2010-10-27 21:30:04 -0400 | [diff] [blame] | 1434 | static inline int sb_issue_zeroout(struct super_block *sb, sector_t block, |
Theodore Ts'o | a107e5a | 2010-10-27 23:44:47 -0400 | [diff] [blame] | 1435 | sector_t nr_blocks, gfp_t gfp_mask) |
Lukas Czerner | e6fa0be | 2010-10-27 21:30:04 -0400 | [diff] [blame] | 1436 | { |
| 1437 | return blkdev_issue_zeroout(sb->s_bdev, |
Bart Van Assche | 233bde2 | 2018-03-14 15:48:06 -0700 | [diff] [blame] | 1438 | block << (sb->s_blocksize_bits - |
| 1439 | SECTOR_SHIFT), |
| 1440 | nr_blocks << (sb->s_blocksize_bits - |
| 1441 | SECTOR_SHIFT), |
Christoph Hellwig | ee472d8 | 2017-04-05 19:21:08 +0200 | [diff] [blame] | 1442 | gfp_mask, 0); |
Lukas Czerner | e6fa0be | 2010-10-27 21:30:04 -0400 | [diff] [blame] | 1443 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1444 | |
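/*
 * Worked example (illustrative): with a 4096-byte filesystem block,
 * s_blocksize_bits is 12, so each block covers 1 << (12 - SECTOR_SHIFT)
 * = 8 sectors and block 100 starts at sector 800 in the helpers above.
 */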
Christoph Hellwig | f00c4d8 | 2017-11-05 10:36:31 +0300 | [diff] [blame] | 1445 | extern int blk_verify_command(unsigned char *cmd, fmode_t mode); |
Adel Gadllah | 0b07de8 | 2008-06-26 13:48:27 +0200 | [diff] [blame] | 1446 | |
Martin K. Petersen | eb28d31 | 2010-02-26 00:20:37 -0500 | [diff] [blame] | 1447 | enum blk_default_limits { |
| 1448 | BLK_MAX_SEGMENTS = 128, |
| 1449 | BLK_SAFE_MAX_SECTORS = 255, |
Jeff Moyer | d2be537 | 2015-08-13 14:57:57 -0400 | [diff] [blame] | 1450 | BLK_DEF_MAX_SECTORS = 2560, |
Martin K. Petersen | eb28d31 | 2010-02-26 00:20:37 -0500 | [diff] [blame] | 1451 | BLK_MAX_SEGMENT_SIZE = 65536, |
| 1452 | BLK_SEG_BOUNDARY_MASK = 0xFFFFFFFFUL, |
| 1453 | }; |
Milan Broz | 0e435ac | 2008-12-03 12:55:08 +0100 | [diff] [blame] | 1454 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1455 | #define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist) |
| 1456 | |
Martin K. Petersen | ae03bf6 | 2009-05-22 17:17:50 -0400 | [diff] [blame] | 1457 | static inline unsigned long queue_segment_boundary(struct request_queue *q) |
| 1458 | { |
Martin K. Petersen | 025146e | 2009-05-22 17:17:51 -0400 | [diff] [blame] | 1459 | return q->limits.seg_boundary_mask; |
Martin K. Petersen | ae03bf6 | 2009-05-22 17:17:50 -0400 | [diff] [blame] | 1460 | } |
| 1461 | |
Keith Busch | 03100aa | 2015-08-19 14:24:05 -0700 | [diff] [blame] | 1462 | static inline unsigned long queue_virt_boundary(struct request_queue *q) |
| 1463 | { |
| 1464 | return q->limits.virt_boundary_mask; |
| 1465 | } |
| 1466 | |
Martin K. Petersen | ae03bf6 | 2009-05-22 17:17:50 -0400 | [diff] [blame] | 1467 | static inline unsigned int queue_max_sectors(struct request_queue *q) |
| 1468 | { |
Martin K. Petersen | 025146e | 2009-05-22 17:17:51 -0400 | [diff] [blame] | 1469 | return q->limits.max_sectors; |
Martin K. Petersen | ae03bf6 | 2009-05-22 17:17:50 -0400 | [diff] [blame] | 1470 | } |
| 1471 | |
| 1472 | static inline unsigned int queue_max_hw_sectors(struct request_queue *q) |
| 1473 | { |
Martin K. Petersen | 025146e | 2009-05-22 17:17:51 -0400 | [diff] [blame] | 1474 | return q->limits.max_hw_sectors; |
Martin K. Petersen | ae03bf6 | 2009-05-22 17:17:50 -0400 | [diff] [blame] | 1475 | } |
| 1476 | |
Martin K. Petersen | 8a78362 | 2010-02-26 00:20:39 -0500 | [diff] [blame] | 1477 | static inline unsigned short queue_max_segments(struct request_queue *q) |
Martin K. Petersen | ae03bf6 | 2009-05-22 17:17:50 -0400 | [diff] [blame] | 1478 | { |
Martin K. Petersen | 8a78362 | 2010-02-26 00:20:39 -0500 | [diff] [blame] | 1479 | return q->limits.max_segments; |
Martin K. Petersen | ae03bf6 | 2009-05-22 17:17:50 -0400 | [diff] [blame] | 1480 | } |
| 1481 | |
Christoph Hellwig | 1e73973 | 2017-02-08 14:46:49 +0100 | [diff] [blame] | 1482 | static inline unsigned short queue_max_discard_segments(struct request_queue *q) |
| 1483 | { |
| 1484 | return q->limits.max_discard_segments; |
| 1485 | } |
| 1486 | |
Martin K. Petersen | ae03bf6 | 2009-05-22 17:17:50 -0400 | [diff] [blame] | 1487 | static inline unsigned int queue_max_segment_size(struct request_queue *q) |
| 1488 | { |
Martin K. Petersen | 025146e | 2009-05-22 17:17:51 -0400 | [diff] [blame] | 1489 | return q->limits.max_segment_size; |
Martin K. Petersen | ae03bf6 | 2009-05-22 17:17:50 -0400 | [diff] [blame] | 1490 | } |
| 1491 | |
Martin K. Petersen | e1defc4 | 2009-05-22 17:17:49 -0400 | [diff] [blame] | 1492 | static inline unsigned short queue_logical_block_size(struct request_queue *q) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1493 | { |
| 1494 | int retval = 512; |
| 1495 | |
Martin K. Petersen | 025146e | 2009-05-22 17:17:51 -0400 | [diff] [blame] | 1496 | if (q && q->limits.logical_block_size) |
| 1497 | retval = q->limits.logical_block_size; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1498 | |
| 1499 | return retval; |
| 1500 | } |
| 1501 | |
Martin K. Petersen | e1defc4 | 2009-05-22 17:17:49 -0400 | [diff] [blame] | 1502 | static inline unsigned short bdev_logical_block_size(struct block_device *bdev) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1503 | { |
Martin K. Petersen | e1defc4 | 2009-05-22 17:17:49 -0400 | [diff] [blame] | 1504 | return queue_logical_block_size(bdev_get_queue(bdev)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1505 | } |
| 1506 | |
Martin K. Petersen | c72758f | 2009-05-22 17:17:53 -0400 | [diff] [blame] | 1507 | static inline unsigned int queue_physical_block_size(struct request_queue *q) |
| 1508 | { |
| 1509 | return q->limits.physical_block_size; |
| 1510 | } |
| 1511 | |
Martin K. Petersen | 892b6f9 | 2010-10-13 21:18:03 +0200 | [diff] [blame] | 1512 | static inline unsigned int bdev_physical_block_size(struct block_device *bdev) |
Martin K. Petersen | ac481c2 | 2009-10-03 20:52:01 +0200 | [diff] [blame] | 1513 | { |
| 1514 | return queue_physical_block_size(bdev_get_queue(bdev)); |
| 1515 | } |
| 1516 | |
Martin K. Petersen | c72758f | 2009-05-22 17:17:53 -0400 | [diff] [blame] | 1517 | static inline unsigned int queue_io_min(struct request_queue *q) |
| 1518 | { |
| 1519 | return q->limits.io_min; |
| 1520 | } |
| 1521 | |
Martin K. Petersen | ac481c2 | 2009-10-03 20:52:01 +0200 | [diff] [blame] | 1522 | static inline int bdev_io_min(struct block_device *bdev) |
| 1523 | { |
| 1524 | return queue_io_min(bdev_get_queue(bdev)); |
| 1525 | } |
| 1526 | |
Martin K. Petersen | c72758f | 2009-05-22 17:17:53 -0400 | [diff] [blame] | 1527 | static inline unsigned int queue_io_opt(struct request_queue *q) |
| 1528 | { |
| 1529 | return q->limits.io_opt; |
| 1530 | } |
| 1531 | |
Martin K. Petersen | ac481c2 | 2009-10-03 20:52:01 +0200 | [diff] [blame] | 1532 | static inline int bdev_io_opt(struct block_device *bdev) |
| 1533 | { |
| 1534 | return queue_io_opt(bdev_get_queue(bdev)); |
| 1535 | } |
| 1536 | |
Martin K. Petersen | c72758f | 2009-05-22 17:17:53 -0400 | [diff] [blame] | 1537 | static inline int queue_alignment_offset(struct request_queue *q) |
| 1538 | { |
Martin K. Petersen | ac481c2 | 2009-10-03 20:52:01 +0200 | [diff] [blame] | 1539 | if (q->limits.misaligned) |
Martin K. Petersen | c72758f | 2009-05-22 17:17:53 -0400 | [diff] [blame] | 1540 | return -1; |
| 1541 | |
Martin K. Petersen | ac481c2 | 2009-10-03 20:52:01 +0200 | [diff] [blame] | 1542 | return q->limits.alignment_offset; |
Martin K. Petersen | c72758f | 2009-05-22 17:17:53 -0400 | [diff] [blame] | 1543 | } |
| 1544 | |
Martin K. Petersen | e03a72e | 2010-01-11 03:21:51 -0500 | [diff] [blame] | 1545 | static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector) |
Martin K. Petersen | 81744ee | 2009-12-29 08:35:35 +0100 | [diff] [blame] | 1546 | { |
| 1547 | unsigned int granularity = max(lim->physical_block_size, lim->io_min); |
Bart Van Assche | 233bde2 | 2018-03-14 15:48:06 -0700 | [diff] [blame] | 1548 | unsigned int alignment = sector_div(sector, granularity >> SECTOR_SHIFT) |
| 1549 | << SECTOR_SHIFT; |
Martin K. Petersen | 81744ee | 2009-12-29 08:35:35 +0100 | [diff] [blame] | 1550 | |
Mike Snitzer | b8839b8 | 2014-10-08 18:26:13 -0400 | [diff] [blame] | 1551 | return (granularity + lim->alignment_offset - alignment) % granularity; |
Martin K. Petersen | c72758f | 2009-05-22 17:17:53 -0400 | [diff] [blame] | 1552 | } |
| 1553 | |
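/*
 * Worked example (illustrative): with granularity == 4096 bytes (8
 * sectors) and alignment_offset == 512, sector 9 gives
 * alignment = (9 % 8) << SECTOR_SHIFT = 512, so the result is
 * (4096 + 512 - 512) % 4096 == 0: that sector is correctly aligned.
 */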
Martin K. Petersen | ac481c2 | 2009-10-03 20:52:01 +0200 | [diff] [blame] | 1554 | static inline int bdev_alignment_offset(struct block_device *bdev) |
| 1555 | { |
| 1556 | struct request_queue *q = bdev_get_queue(bdev); |
| 1557 | |
| 1558 | if (q->limits.misaligned) |
| 1559 | return -1; |
| 1560 | |
| 1561 | if (bdev != bdev->bd_contains) |
| 1562 | return bdev->bd_part->alignment_offset; |
| 1563 | |
| 1564 | return q->limits.alignment_offset; |
| 1565 | } |
| 1566 | |
Martin K. Petersen | 86b3728 | 2009-11-10 11:50:21 +0100 | [diff] [blame] | 1567 | static inline int queue_discard_alignment(struct request_queue *q) |
| 1568 | { |
| 1569 | if (q->limits.discard_misaligned) |
| 1570 | return -1; |
| 1571 | |
| 1572 | return q->limits.discard_alignment; |
| 1573 | } |
| 1574 | |
Martin K. Petersen | e03a72e | 2010-01-11 03:21:51 -0500 | [diff] [blame] | 1575 | static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector_t sector) |
Martin K. Petersen | 86b3728 | 2009-11-10 11:50:21 +0100 | [diff] [blame] | 1576 | { |
Linus Torvalds | 5977107 | 2012-12-19 07:18:35 -0800 | [diff] [blame] | 1577 | unsigned int alignment, granularity, offset; |
Martin K. Petersen | dd3d145 | 2010-01-11 03:21:48 -0500 | [diff] [blame] | 1578 | |
Martin K. Petersen | a934a00 | 2011-05-18 10:37:35 +0200 | [diff] [blame] | 1579 | if (!lim->max_discard_sectors) |
| 1580 | return 0; |
| 1581 | |
Linus Torvalds | 5977107 | 2012-12-19 07:18:35 -0800 | [diff] [blame] | 1582 | /* Why are these in bytes, not sectors? */ |
Bart Van Assche | 233bde2 | 2018-03-14 15:48:06 -0700 | [diff] [blame] | 1583 | alignment = lim->discard_alignment >> SECTOR_SHIFT; |
| 1584 | granularity = lim->discard_granularity >> SECTOR_SHIFT; |
Linus Torvalds | 5977107 | 2012-12-19 07:18:35 -0800 | [diff] [blame] | 1585 | if (!granularity) |
| 1586 | return 0; |
| 1587 | |
| 1588 | /* Offset of the partition start in 'granularity' sectors */ |
| 1589 | offset = sector_div(sector, granularity); |
| 1590 | |
| 1591 | /* Note: blkdev_issue_discard() currently repeats this modulus. */
| 1592 | offset = (granularity + alignment - offset) % granularity; |
| 1593 | |
| 1594 | /* Convert the result back into bytes. */
Bart Van Assche | 233bde2 | 2018-03-14 15:48:06 -0700 | [diff] [blame] | 1595 | return offset << SECTOR_SHIFT; |
Martin K. Petersen | 86b3728 | 2009-11-10 11:50:21 +0100 | [diff] [blame] | 1596 | } |
| 1597 | |
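/*
 * Worked example (illustrative numbers): with discard_granularity = 1 MiB,
 * discard_alignment = 0 and a partition starting at sector 2080:
 *
 *	granularity = 2048 sectors, offset = 2080 % 2048 = 32 sectors
 *	(2048 + 0 - 32) % 2048 = 2016 sectors = 1008 KiB
 *
 * i.e. the first granularity-aligned boundary lies 1008 KiB into the
 * partition.
 */
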
Paolo Bonzini | c6e6663 | 2012-08-02 09:48:50 +0200 | [diff] [blame] | 1598 | static inline int bdev_discard_alignment(struct block_device *bdev) |
| 1599 | { |
| 1600 | struct request_queue *q = bdev_get_queue(bdev); |
| 1601 | |
| 1602 | if (bdev != bdev->bd_contains) |
| 1603 | return bdev->bd_part->discard_alignment; |
| 1604 | |
| 1605 | return q->limits.discard_alignment; |
| 1606 | } |
| 1607 | |
Martin K. Petersen | 4363ac7 | 2012-09-18 12:19:27 -0400 | [diff] [blame] | 1608 | static inline unsigned int bdev_write_same(struct block_device *bdev) |
| 1609 | { |
| 1610 | struct request_queue *q = bdev_get_queue(bdev); |
| 1611 | |
| 1612 | if (q) |
| 1613 | return q->limits.max_write_same_sectors; |
| 1614 | |
| 1615 | return 0; |
| 1616 | } |
| 1617 | |
Chaitanya Kulkarni | a6f0788 | 2016-11-30 12:28:59 -0800 | [diff] [blame] | 1618 | static inline unsigned int bdev_write_zeroes_sectors(struct block_device *bdev) |
| 1619 | { |
| 1620 | struct request_queue *q = bdev_get_queue(bdev); |
| 1621 | |
| 1622 | if (q) |
| 1623 | return q->limits.max_write_zeroes_sectors; |
| 1624 | |
| 1625 | return 0; |
| 1626 | } |
| 1627 | |
Damien Le Moal | 797476b | 2016-10-18 15:40:29 +0900 | [diff] [blame] | 1628 | static inline enum blk_zoned_model bdev_zoned_model(struct block_device *bdev) |
| 1629 | { |
| 1630 | struct request_queue *q = bdev_get_queue(bdev); |
| 1631 | |
| 1632 | if (q) |
| 1633 | return blk_queue_zoned_model(q); |
| 1634 | |
| 1635 | return BLK_ZONED_NONE; |
| 1636 | } |
| 1637 | |
| 1638 | static inline bool bdev_is_zoned(struct block_device *bdev) |
| 1639 | { |
| 1640 | struct request_queue *q = bdev_get_queue(bdev); |
| 1641 | |
| 1642 | if (q) |
| 1643 | return blk_queue_is_zoned(q); |
| 1644 | |
| 1645 | return false; |
| 1646 | } |
| 1647 | |
Damien Le Moal | f99e864 | 2017-01-12 07:58:32 -0700 | [diff] [blame] | 1648 | static inline unsigned int bdev_zone_sectors(struct block_device *bdev) |
Hannes Reinecke | 6a0cb1b | 2016-10-18 15:40:33 +0900 | [diff] [blame] | 1649 | { |
| 1650 | struct request_queue *q = bdev_get_queue(bdev); |
| 1651 | |
| 1652 | if (q) |
Damien Le Moal | f99e864 | 2017-01-12 07:58:32 -0700 | [diff] [blame] | 1653 | return blk_queue_zone_sectors(q); |
Christoph Hellwig | 6cc77e9 | 2017-12-21 15:43:38 +0900 | [diff] [blame] | 1654 | return 0; |
| 1655 | } |
Hannes Reinecke | 6a0cb1b | 2016-10-18 15:40:33 +0900 | [diff] [blame] | 1656 | |
Christoph Hellwig | 6cc77e9 | 2017-12-21 15:43:38 +0900 | [diff] [blame] | 1657 | static inline unsigned int bdev_nr_zones(struct block_device *bdev) |
| 1658 | { |
| 1659 | struct request_queue *q = bdev_get_queue(bdev); |
| 1660 | |
| 1661 | if (q) |
| 1662 | return blk_queue_nr_zones(q); |
Hannes Reinecke | 6a0cb1b | 2016-10-18 15:40:33 +0900 | [diff] [blame] | 1663 | return 0; |
| 1664 | } |
| 1665 | |
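/*
 * Minimal sketch (hypothetical helper, not a kernel API): zone geometry
 * is only meaningful on zoned devices, so callers normally gate on
 * bdev_is_zoned() before using the zone helpers above.
 */
static inline sector_t example_zone_start_sector(struct block_device *bdev,
						 unsigned int zone_no)
{
	if (!bdev_is_zoned(bdev))
		return 0;

	/* Zones are uniformly sized, a power-of-2 number of sectors. */
	return (sector_t)zone_no * bdev_zone_sectors(bdev);
}
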
Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 1666 | static inline int queue_dma_alignment(struct request_queue *q) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1667 | { |
Pete Wyckoff | 482eb68 | 2008-01-01 10:23:02 -0500 | [diff] [blame] | 1668 | return q ? q->dma_alignment : 511; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1669 | } |
| 1670 | |
Namhyung Kim | 1441779 | 2010-09-15 13:08:27 +0200 | [diff] [blame] | 1671 | static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr, |
FUJITA Tomonori | 8790407 | 2008-08-28 15:05:58 +0900 | [diff] [blame] | 1672 | unsigned int len) |
| 1673 | { |
| 1674 | unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask; |
Namhyung Kim | 1441779 | 2010-09-15 13:08:27 +0200 | [diff] [blame] | 1675 | return !(addr & alignment) && !(len & alignment); |
FUJITA Tomonori | 8790407 | 2008-08-28 15:05:58 +0900 | [diff] [blame] | 1676 | } |
| 1677 | |
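/*
 * Worked example (illustrative): with the default dma_alignment of 511
 * and no dma_pad_mask, the combined mask is 511, so a buffer at address
 * 0x1000 with len 1024 passes, while one at 0x1100 (0x1100 & 511 == 256)
 * fails and callers fall back to a bounce copy.
 */
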
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1678 | /* assumes size > 256 */ |
| 1679 | static inline unsigned int blksize_bits(unsigned int size) |
| 1680 | { |
| 1681 | unsigned int bits = 8; |
| 1682 | do { |
| 1683 | bits++; |
| 1684 | size >>= 1; |
| 1685 | } while (size > 256); |
| 1686 | return bits; |
| 1687 | } |
| 1688 | |
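/*
 * Illustrative trace: each iteration halves 'size' while counting one
 * more bit, so blksize_bits(512) == 9 and blksize_bits(4096) == 12;
 * sizes of 256 or less are outside the stated contract.
 */
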
Adrian Bunk | 2befb9e | 2005-09-10 00:27:17 -0700 | [diff] [blame] | 1689 | static inline unsigned int block_size(struct block_device *bdev) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1690 | { |
| 1691 | return bdev->bd_block_size; |
| 1692 | } |
| 1693 | |
shaohua.li@intel.com | f387693 | 2011-05-06 11:34:32 -0600 | [diff] [blame] | 1694 | static inline bool queue_flush_queueable(struct request_queue *q) |
| 1695 | { |
Jens Axboe | c888a8f | 2016-04-13 13:33:19 -0600 | [diff] [blame] | 1696 | return !test_bit(QUEUE_FLAG_FLUSH_NQ, &q->queue_flags); |
shaohua.li@intel.com | f387693 | 2011-05-06 11:34:32 -0600 | [diff] [blame] | 1697 | } |
| 1698 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1699 | typedef struct {struct page *v;} Sector; |
| 1700 | |
| 1701 | unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *); |
| 1702 | |
| 1703 | static inline void put_dev_sector(Sector p) |
| 1704 | { |
Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 1705 | put_page(p.v); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1706 | } |
| 1707 | |
Ming Lei | e0af291 | 2016-02-26 23:40:51 +0800 | [diff] [blame] | 1708 | static inline bool __bvec_gap_to_prev(struct request_queue *q, |
| 1709 | struct bio_vec *bprv, unsigned int offset) |
| 1710 | { |
| 1711 | return offset || |
| 1712 | ((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q)); |
| 1713 | } |
| 1714 | |
Keith Busch | 03100aa | 2015-08-19 14:24:05 -0700 | [diff] [blame] | 1715 | /*
| 1716 |  * Check whether adding a bio_vec at the given offset after bprv would
| 1717 |  * create a gap in the SG list. Most drivers don't care about this, but some do.
| 1718 |  */
| 1719 | static inline bool bvec_gap_to_prev(struct request_queue *q, |
| 1720 | struct bio_vec *bprv, unsigned int offset) |
| 1721 | { |
| 1722 | if (!queue_virt_boundary(q)) |
| 1723 | return false; |
Ming Lei | e0af291 | 2016-02-26 23:40:51 +0800 | [diff] [blame] | 1724 | return __bvec_gap_to_prev(q, bprv, offset); |
Keith Busch | 03100aa | 2015-08-19 14:24:05 -0700 | [diff] [blame] | 1725 | } |
| 1726 | |
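/*
 * Worked example (illustrative): on a queue with a 4 KiB virt boundary
 * (queue_virt_boundary(q) == 4095, as NVMe sets for its PRP lists), a
 * previous bvec ending at bv_offset + bv_len == 4096 leaves no gap, so
 * a following bvec is mergeable only if its own offset is 0; any other
 * combination makes bvec_gap_to_prev() return true.
 */
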
Ming Lei | 729204e | 2016-12-17 18:49:09 +0800 | [diff] [blame] | 1727 | /*
| 1728 |  * Check whether two bvecs from two bios can be merged into one segment.
| 1729 |  * If so, there is no need to check the gap between the two bios, since the
| 1730 |  * last bvec of the 1st bio and the 1st bvec of the 2nd bio fit in one segment.
| 1731 |  */
| 1732 | static inline bool bios_segs_mergeable(struct request_queue *q, |
| 1733 | struct bio *prev, struct bio_vec *prev_last_bv, |
| 1734 | struct bio_vec *next_first_bv) |
| 1735 | { |
| 1736 | if (!BIOVEC_PHYS_MERGEABLE(prev_last_bv, next_first_bv)) |
| 1737 | return false; |
| 1738 | if (!BIOVEC_SEG_BOUNDARY(q, prev_last_bv, next_first_bv)) |
| 1739 | return false; |
| 1740 | if (prev->bi_seg_back_size + next_first_bv->bv_len > |
| 1741 | queue_max_segment_size(q)) |
| 1742 | return false; |
| 1743 | return true; |
| 1744 | } |
| 1745 | |
Ming Lei | 5a8d75a | 2017-04-14 13:58:29 -0600 | [diff] [blame] | 1746 | static inline bool bio_will_gap(struct request_queue *q, |
| 1747 | struct request *prev_rq, |
| 1748 | struct bio *prev, |
| 1749 | struct bio *next) |
Jens Axboe | 5e7c427 | 2015-09-03 19:28:20 +0300 | [diff] [blame] | 1750 | { |
Ming Lei | 25e71a9 | 2016-02-26 23:40:52 +0800 | [diff] [blame] | 1751 | if (bio_has_data(prev) && queue_virt_boundary(q)) { |
| 1752 | struct bio_vec pb, nb; |
Jens Axboe | 5e7c427 | 2015-09-03 19:28:20 +0300 | [diff] [blame] | 1753 | |
Ming Lei | 5a8d75a | 2017-04-14 13:58:29 -0600 | [diff] [blame] | 1754 | /*
| 1755 |  * Don't merge if the 1st bio starts at a non-zero offset;
| 1756 |  * otherwise it is quite difficult to respect the SG gap
| 1757 |  * limit. This matters because we work hard to merge huge
| 1758 |  * numbers of small bios, as generated by mkfs.
| 1759 |  */
| 1760 | if (prev_rq) |
| 1761 | bio_get_first_bvec(prev_rq->bio, &pb); |
| 1762 | else |
| 1763 | bio_get_first_bvec(prev, &pb); |
| 1764 | if (pb.bv_offset) |
| 1765 | return true; |
| 1766 | |
| 1767 | /*
| 1768 |  * We don't need to worry about the merged segment ending on an
| 1769 |  * unaligned virt boundary:
| 1770 |  *
| 1771 |  * - if 'pb' ends aligned, the merged segment ends aligned
| 1772 |  * - if 'pb' ends unaligned, the next bio must include
| 1773 |  *   a single bvec 'nb', otherwise 'nb' can't merge
| 1774 |  *   with 'pb'
| 1775 |  */
Ming Lei | 25e71a9 | 2016-02-26 23:40:52 +0800 | [diff] [blame] | 1776 | bio_get_last_bvec(prev, &pb); |
| 1777 | bio_get_first_bvec(next, &nb); |
| 1778 | |
Ming Lei | 729204e | 2016-12-17 18:49:09 +0800 | [diff] [blame] | 1779 | if (!bios_segs_mergeable(q, prev, &pb, &nb)) |
| 1780 | return __bvec_gap_to_prev(q, &pb, nb.bv_offset); |
Ming Lei | 25e71a9 | 2016-02-26 23:40:52 +0800 | [diff] [blame] | 1781 | } |
| 1782 | |
| 1783 | return false; |
Jens Axboe | 5e7c427 | 2015-09-03 19:28:20 +0300 | [diff] [blame] | 1784 | } |
| 1785 | |
| 1786 | static inline bool req_gap_back_merge(struct request *req, struct bio *bio) |
| 1787 | { |
Ming Lei | 5a8d75a | 2017-04-14 13:58:29 -0600 | [diff] [blame] | 1788 | return bio_will_gap(req->q, req, req->biotail, bio); |
Jens Axboe | 5e7c427 | 2015-09-03 19:28:20 +0300 | [diff] [blame] | 1789 | } |
| 1790 | |
| 1791 | static inline bool req_gap_front_merge(struct request *req, struct bio *bio) |
| 1792 | { |
Ming Lei | 5a8d75a | 2017-04-14 13:58:29 -0600 | [diff] [blame] | 1793 | return bio_will_gap(req->q, NULL, bio, req->bio); |
Jens Axboe | 5e7c427 | 2015-09-03 19:28:20 +0300 | [diff] [blame] | 1794 | } |
| 1795 | |
Jens Axboe | 59c3d45 | 2014-04-08 09:15:35 -0600 | [diff] [blame] | 1796 | int kblockd_schedule_work(struct work_struct *work); |
Jens Axboe | ee63cfa | 2016-08-24 15:52:48 -0600 | [diff] [blame] | 1797 | int kblockd_schedule_work_on(int cpu, struct work_struct *work); |
Jens Axboe | 818cd1c | 2017-04-10 09:54:55 -0600 | [diff] [blame] | 1798 | int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1799 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1800 | #define MODULE_ALIAS_BLOCKDEV(major,minor) \ |
| 1801 | MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor)) |
| 1802 | #define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \ |
| 1803 | MODULE_ALIAS("block-major-" __stringify(major) "-*") |
| 1804 | |
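/*
 * Example usage (FOO_MAJOR is a placeholder): a driver owning an entire
 * block major declares an alias so that opening a matching device node
 * can trigger on-demand module loading:
 *
 *	MODULE_ALIAS_BLOCKDEV_MAJOR(FOO_MAJOR);
 */
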
Martin K. Petersen | 7ba1ba1 | 2008-06-30 20:04:41 +0200 | [diff] [blame] | 1805 | #if defined(CONFIG_BLK_DEV_INTEGRITY) |
| 1806 | |
Martin K. Petersen | 8288f49 | 2014-09-26 19:20:02 -0400 | [diff] [blame] | 1807 | enum blk_integrity_flags { |
| 1808 | BLK_INTEGRITY_VERIFY = 1 << 0, |
| 1809 | BLK_INTEGRITY_GENERATE = 1 << 1, |
Martin K. Petersen | 3aec2f4 | 2014-09-26 19:20:03 -0400 | [diff] [blame] | 1810 | BLK_INTEGRITY_DEVICE_CAPABLE = 1 << 2, |
Martin K. Petersen | aae7df5 | 2014-09-26 19:20:05 -0400 | [diff] [blame] | 1811 | BLK_INTEGRITY_IP_CHECKSUM = 1 << 3, |
Martin K. Petersen | 8288f49 | 2014-09-26 19:20:02 -0400 | [diff] [blame] | 1812 | }; |
Martin K. Petersen | 7ba1ba1 | 2008-06-30 20:04:41 +0200 | [diff] [blame] | 1813 | |
Martin K. Petersen | 1859308 | 2014-09-26 19:20:01 -0400 | [diff] [blame] | 1814 | struct blk_integrity_iter { |
Martin K. Petersen | 7ba1ba1 | 2008-06-30 20:04:41 +0200 | [diff] [blame] | 1815 | void *prot_buf; |
| 1816 | void *data_buf; |
Martin K. Petersen | 3be91c4 | 2014-09-26 19:19:59 -0400 | [diff] [blame] | 1817 | sector_t seed; |
Martin K. Petersen | 7ba1ba1 | 2008-06-30 20:04:41 +0200 | [diff] [blame] | 1818 | unsigned int data_size; |
Martin K. Petersen | 3be91c4 | 2014-09-26 19:19:59 -0400 | [diff] [blame] | 1819 | unsigned short interval; |
Martin K. Petersen | 7ba1ba1 | 2008-06-30 20:04:41 +0200 | [diff] [blame] | 1820 | const char *disk_name; |
| 1821 | }; |
| 1822 | |
Christoph Hellwig | 4e4cbee | 2017-06-03 09:38:06 +0200 | [diff] [blame] | 1823 | typedef blk_status_t (integrity_processing_fn) (struct blk_integrity_iter *); |
Martin K. Petersen | 7ba1ba1 | 2008-06-30 20:04:41 +0200 | [diff] [blame] | 1824 | |
Martin K. Petersen | 0f8087e | 2015-10-21 13:19:33 -0400 | [diff] [blame] | 1825 | struct blk_integrity_profile { |
| 1826 | integrity_processing_fn *generate_fn; |
| 1827 | integrity_processing_fn *verify_fn; |
| 1828 | const char *name; |
| 1829 | }; |
| 1830 | |
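/*
 * Sketch of a provider (all names hypothetical): the generate/verify
 * callbacks walk a blk_integrity_iter over the data buffer one
 * 'interval' (typically the logical block size) at a time, filling or
 * checking the protection buffer and returning a blk_status_t:
 *
 *	static const struct blk_integrity_profile example_profile = {
 *		.name		= "EXAMPLE-CSUM",
 *		.generate_fn	= example_generate,
 *		.verify_fn	= example_verify,
 *	};
 */
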
Martin K. Petersen | 25520d5 | 2015-10-21 13:19:49 -0400 | [diff] [blame] | 1831 | extern void blk_integrity_register(struct gendisk *, struct blk_integrity *); |
Martin K. Petersen | 7ba1ba1 | 2008-06-30 20:04:41 +0200 | [diff] [blame] | 1832 | extern void blk_integrity_unregister(struct gendisk *); |
Martin K. Petersen | ad7fce9 | 2008-10-01 03:38:39 -0400 | [diff] [blame] | 1833 | extern int blk_integrity_compare(struct gendisk *, struct gendisk *); |
Martin K. Petersen | 13f05c8 | 2010-09-10 20:50:10 +0200 | [diff] [blame] | 1834 | extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *, |
| 1835 | struct scatterlist *); |
| 1836 | extern int blk_rq_count_integrity_sg(struct request_queue *, struct bio *); |
Martin K. Petersen | 4eaf99b | 2014-09-26 19:20:06 -0400 | [diff] [blame] | 1837 | extern bool blk_integrity_merge_rq(struct request_queue *, struct request *, |
| 1838 | struct request *); |
| 1839 | extern bool blk_integrity_merge_bio(struct request_queue *, struct request *, |
| 1840 | struct bio *); |
Martin K. Petersen | 7ba1ba1 | 2008-06-30 20:04:41 +0200 | [diff] [blame] | 1841 | |
Martin K. Petersen | 25520d5 | 2015-10-21 13:19:49 -0400 | [diff] [blame] | 1842 | static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk) |
| 1843 | { |
Dan Williams | ac6fc48 | 2015-10-21 13:20:18 -0400 | [diff] [blame] | 1844 | struct blk_integrity *bi = &disk->queue->integrity; |
Martin K. Petersen | 25520d5 | 2015-10-21 13:19:49 -0400 | [diff] [blame] | 1845 | |
| 1846 | if (!bi->profile) |
| 1847 | return NULL; |
| 1848 | |
| 1849 | return bi; |
| 1850 | } |
| 1851 | |
Jens Axboe | b04accc | 2008-10-02 12:53:22 +0200 | [diff] [blame] | 1852 | static inline |
| 1853 | struct blk_integrity *bdev_get_integrity(struct block_device *bdev) |
| 1854 | { |
Martin K. Petersen | 25520d5 | 2015-10-21 13:19:49 -0400 | [diff] [blame] | 1855 | return blk_get_integrity(bdev->bd_disk); |
Martin K. Petersen | b02739b | 2008-10-02 18:47:49 +0200 | [diff] [blame] | 1856 | } |
| 1857 | |
Martin K. Petersen | 180b2f9 | 2014-09-26 19:19:56 -0400 | [diff] [blame] | 1858 | static inline bool blk_integrity_rq(struct request *rq) |
Martin K. Petersen | 7ba1ba1 | 2008-06-30 20:04:41 +0200 | [diff] [blame] | 1859 | { |
Martin K. Petersen | 180b2f9 | 2014-09-26 19:19:56 -0400 | [diff] [blame] | 1860 | return rq->cmd_flags & REQ_INTEGRITY; |
Martin K. Petersen | 7ba1ba1 | 2008-06-30 20:04:41 +0200 | [diff] [blame] | 1861 | } |
| 1862 | |
Martin K. Petersen | 13f05c8 | 2010-09-10 20:50:10 +0200 | [diff] [blame] | 1863 | static inline void blk_queue_max_integrity_segments(struct request_queue *q, |
| 1864 | unsigned int segs) |
| 1865 | { |
| 1866 | q->limits.max_integrity_segments = segs; |
| 1867 | } |
| 1868 | |
| 1869 | static inline unsigned short |
| 1870 | queue_max_integrity_segments(struct request_queue *q) |
| 1871 | { |
| 1872 | return q->limits.max_integrity_segments; |
| 1873 | } |
| 1874 | |
Sagi Grimberg | 7f39add | 2015-09-11 09:03:04 -0600 | [diff] [blame] | 1875 | static inline bool integrity_req_gap_back_merge(struct request *req, |
| 1876 | struct bio *next) |
| 1877 | { |
| 1878 | struct bio_integrity_payload *bip = bio_integrity(req->bio); |
| 1879 | struct bio_integrity_payload *bip_next = bio_integrity(next); |
| 1880 | |
| 1881 | return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1], |
| 1882 | bip_next->bip_vec[0].bv_offset); |
| 1883 | } |
| 1884 | |
| 1885 | static inline bool integrity_req_gap_front_merge(struct request *req, |
| 1886 | struct bio *bio) |
| 1887 | { |
| 1888 | struct bio_integrity_payload *bip = bio_integrity(bio); |
| 1889 | struct bio_integrity_payload *bip_next = bio_integrity(req->bio); |
| 1890 | |
| 1891 | return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1], |
| 1892 | bip_next->bip_vec[0].bv_offset); |
| 1893 | } |
| 1894 | |
Martin K. Petersen | 7ba1ba1 | 2008-06-30 20:04:41 +0200 | [diff] [blame] | 1895 | #else /* CONFIG_BLK_DEV_INTEGRITY */ |
| 1896 | |
Stephen Rothwell | fd83240 | 2012-01-12 09:17:30 +0100 | [diff] [blame] | 1897 | struct bio; |
| 1898 | struct block_device; |
| 1899 | struct gendisk; |
| 1900 | struct blk_integrity; |
| 1901 | |
| 1902 | static inline int blk_integrity_rq(struct request *rq) |
| 1903 | { |
| 1904 | return 0; |
| 1905 | } |
| 1906 | static inline int blk_rq_count_integrity_sg(struct request_queue *q, |
| 1907 | struct bio *b) |
| 1908 | { |
| 1909 | return 0; |
| 1910 | } |
| 1911 | static inline int blk_rq_map_integrity_sg(struct request_queue *q, |
| 1912 | struct bio *b, |
| 1913 | struct scatterlist *s) |
| 1914 | { |
| 1915 | return 0; |
| 1916 | } |
| 1917 | static inline struct blk_integrity *bdev_get_integrity(struct block_device *b) |
| 1918 | { |
Michele Curti | 61a04e5 | 2014-10-09 15:30:17 -0700 | [diff] [blame] | 1919 | return NULL; |
Stephen Rothwell | fd83240 | 2012-01-12 09:17:30 +0100 | [diff] [blame] | 1920 | } |
| 1921 | static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk) |
| 1922 | { |
| 1923 | return NULL; |
| 1924 | } |
| 1925 | static inline int blk_integrity_compare(struct gendisk *a, struct gendisk *b) |
| 1926 | { |
| 1927 | return 0; |
| 1928 | } |
Martin K. Petersen | 25520d5 | 2015-10-21 13:19:49 -0400 | [diff] [blame] | 1929 | static inline void blk_integrity_register(struct gendisk *d, |
Stephen Rothwell | fd83240 | 2012-01-12 09:17:30 +0100 | [diff] [blame] | 1930 | struct blk_integrity *b) |
| 1931 | { |
Stephen Rothwell | fd83240 | 2012-01-12 09:17:30 +0100 | [diff] [blame] | 1932 | } |
| 1933 | static inline void blk_integrity_unregister(struct gendisk *d) |
| 1934 | { |
| 1935 | } |
| 1936 | static inline void blk_queue_max_integrity_segments(struct request_queue *q, |
| 1937 | unsigned int segs) |
| 1938 | { |
| 1939 | } |
| 1940 | static inline unsigned short queue_max_integrity_segments(struct request_queue *q) |
| 1941 | { |
| 1942 | return 0; |
| 1943 | } |
Martin K. Petersen | 4eaf99b | 2014-09-26 19:20:06 -0400 | [diff] [blame] | 1944 | static inline bool blk_integrity_merge_rq(struct request_queue *rq, |
| 1945 | struct request *r1, |
| 1946 | struct request *r2) |
Stephen Rothwell | fd83240 | 2012-01-12 09:17:30 +0100 | [diff] [blame] | 1947 | { |
Martin K. Petersen | cb1a5ab | 2014-10-28 20:27:43 -0600 | [diff] [blame] | 1948 | return true; |
Stephen Rothwell | fd83240 | 2012-01-12 09:17:30 +0100 | [diff] [blame] | 1949 | } |
Martin K. Petersen | 4eaf99b | 2014-09-26 19:20:06 -0400 | [diff] [blame] | 1950 | static inline bool blk_integrity_merge_bio(struct request_queue *rq, |
| 1951 | struct request *r, |
| 1952 | struct bio *b) |
Stephen Rothwell | fd83240 | 2012-01-12 09:17:30 +0100 | [diff] [blame] | 1953 | { |
Martin K. Petersen | cb1a5ab | 2014-10-28 20:27:43 -0600 | [diff] [blame] | 1954 | return true; |
Stephen Rothwell | fd83240 | 2012-01-12 09:17:30 +0100 | [diff] [blame] | 1955 | } |
Martin K. Petersen | 25520d5 | 2015-10-21 13:19:49 -0400 | [diff] [blame] | 1956 | |
Sagi Grimberg | 7f39add | 2015-09-11 09:03:04 -0600 | [diff] [blame] | 1957 | static inline bool integrity_req_gap_back_merge(struct request *req, |
| 1958 | struct bio *next) |
| 1959 | { |
| 1960 | return false; |
| 1961 | } |
| 1962 | static inline bool integrity_req_gap_front_merge(struct request *req, |
| 1963 | struct bio *bio) |
| 1964 | { |
| 1965 | return false; |
| 1966 | } |
Martin K. Petersen | 7ba1ba1 | 2008-06-30 20:04:41 +0200 | [diff] [blame] | 1967 | |
| 1968 | #endif /* CONFIG_BLK_DEV_INTEGRITY */ |
| 1969 | |
Al Viro | 08f8585 | 2007-10-08 13:26:20 -0400 | [diff] [blame] | 1970 | struct block_device_operations { |
Al Viro | d4430d6 | 2008-03-02 09:09:22 -0500 | [diff] [blame] | 1971 | int (*open) (struct block_device *, fmode_t); |
Al Viro | db2a144 | 2013-05-05 21:52:57 -0400 | [diff] [blame] | 1972 | void (*release) (struct gendisk *, fmode_t); |
Jens Axboe | c11f0c0 | 2016-08-05 08:11:04 -0600 | [diff] [blame] | 1973 | int (*rw_page)(struct block_device *, sector_t, struct page *, bool); |
Al Viro | d4430d6 | 2008-03-02 09:09:22 -0500 | [diff] [blame] | 1974 | int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long); |
| 1975 | int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long); |
Tejun Heo | 77ea887 | 2010-12-08 20:57:37 +0100 | [diff] [blame] | 1976 | unsigned int (*check_events) (struct gendisk *disk, |
| 1977 | unsigned int clearing); |
| 1978 | /* ->media_changed() is DEPRECATED, use ->check_events() instead */ |
Al Viro | 08f8585 | 2007-10-08 13:26:20 -0400 | [diff] [blame] | 1979 | int (*media_changed) (struct gendisk *); |
Tejun Heo | c3e33e0 | 2010-05-15 20:09:29 +0200 | [diff] [blame] | 1980 | void (*unlock_native_capacity) (struct gendisk *); |
Al Viro | 08f8585 | 2007-10-08 13:26:20 -0400 | [diff] [blame] | 1981 | int (*revalidate_disk) (struct gendisk *); |
| 1982 | int (*getgeo)(struct block_device *, struct hd_geometry *); |
Nitin Gupta | b3a27d0 | 2010-05-17 11:02:43 +0530 | [diff] [blame] | 1983 | /* this callback is called with swap_lock and sometimes the page table lock held */
| 1984 | void (*swap_slot_free_notify) (struct block_device *, unsigned long); |
Al Viro | 08f8585 | 2007-10-08 13:26:20 -0400 | [diff] [blame] | 1985 | struct module *owner; |
Christoph Hellwig | bbd3e06 | 2015-10-15 14:10:48 +0200 | [diff] [blame] | 1986 | const struct pr_ops *pr_ops; |
Al Viro | 08f8585 | 2007-10-08 13:26:20 -0400 | [diff] [blame] | 1987 | }; |
| 1988 | |
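/*
 * Minimal sketch (all names hypothetical): a simple driver usually
 * implements only a handful of these methods and sets ->owner so the
 * module cannot be unloaded while the device is open:
 *
 *	static const struct block_device_operations example_fops = {
 *		.owner		= THIS_MODULE,
 *		.open		= example_open,
 *		.release	= example_release,
 *		.getgeo		= example_getgeo,
 *	};
 */
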
Al Viro | 633a08b | 2007-08-29 20:34:12 -0400 | [diff] [blame] | 1989 | extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int, |
| 1990 | unsigned long); |
Matthew Wilcox | 47a191f | 2014-06-04 16:07:46 -0700 | [diff] [blame] | 1991 | extern int bdev_read_page(struct block_device *, sector_t, struct page *); |
| 1992 | extern int bdev_write_page(struct block_device *, sector_t, struct page *, |
| 1993 | struct writeback_control *); |
Christoph Hellwig | 6cc77e9 | 2017-12-21 15:43:38 +0900 | [diff] [blame] | 1994 | |
| 1995 | #ifdef CONFIG_BLK_DEV_ZONED |
| 1996 | bool blk_req_needs_zone_write_lock(struct request *rq); |
| 1997 | void __blk_req_zone_write_lock(struct request *rq); |
| 1998 | void __blk_req_zone_write_unlock(struct request *rq); |
| 1999 | |
| 2000 | static inline void blk_req_zone_write_lock(struct request *rq) |
| 2001 | { |
| 2002 | if (blk_req_needs_zone_write_lock(rq)) |
| 2003 | __blk_req_zone_write_lock(rq); |
| 2004 | } |
| 2005 | |
| 2006 | static inline void blk_req_zone_write_unlock(struct request *rq) |
| 2007 | { |
| 2008 | if (rq->rq_flags & RQF_ZONE_WRITE_LOCKED) |
| 2009 | __blk_req_zone_write_unlock(rq); |
| 2010 | } |
| 2011 | |
| 2012 | static inline bool blk_req_zone_is_write_locked(struct request *rq) |
| 2013 | { |
| 2014 | return rq->q->seq_zones_wlock && |
| 2015 | test_bit(blk_rq_zone_no(rq), rq->q->seq_zones_wlock); |
| 2016 | } |
| 2017 | |
| 2018 | static inline bool blk_req_can_dispatch_to_zone(struct request *rq) |
| 2019 | { |
| 2020 | if (!blk_req_needs_zone_write_lock(rq)) |
| 2021 | return true; |
| 2022 | return !blk_req_zone_is_write_locked(rq); |
| 2023 | } |
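
/*
 * Illustrative dispatch-side usage (hedged sketch of how a zone-aware
 * I/O scheduler serializes writes): skip a request whose target zone is
 * already write-locked, and take the lock before dispatching:
 *
 *	if (!blk_req_can_dispatch_to_zone(rq))
 *		return NULL;		(defer; pick another request)
 *	blk_req_zone_write_lock(rq);
 */
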
| 2024 | #else |
| 2025 | static inline bool blk_req_needs_zone_write_lock(struct request *rq) |
| 2026 | { |
| 2027 | return false; |
| 2028 | } |
| 2029 | |
| 2030 | static inline void blk_req_zone_write_lock(struct request *rq) |
| 2031 | { |
| 2032 | } |
| 2033 | |
| 2034 | static inline void blk_req_zone_write_unlock(struct request *rq) |
| 2035 | { |
| 2036 | } |
| 2037 | static inline bool blk_req_zone_is_write_locked(struct request *rq) |
| 2038 | { |
| 2039 | return false; |
| 2040 | } |
| 2041 | |
| 2042 | static inline bool blk_req_can_dispatch_to_zone(struct request *rq) |
| 2043 | { |
| 2044 | return true; |
| 2045 | } |
| 2046 | #endif /* CONFIG_BLK_DEV_ZONED */ |
| 2047 | |
David Howells | 9361401 | 2006-09-30 20:45:40 +0200 | [diff] [blame] | 2048 | #else /* CONFIG_BLOCK */ |
Fabian Frederick | ac13a82 | 2014-06-04 16:06:27 -0700 | [diff] [blame] | 2049 | |
| 2050 | struct block_device; |
| 2051 | |
David Howells | 9361401 | 2006-09-30 20:45:40 +0200 | [diff] [blame] | 2052 | /* |
| 2053 | * stubs for when the block layer is configured out |
| 2054 | */ |
| 2055 | #define buffer_heads_over_limit 0 |
| 2056 | |
David Howells | 9361401 | 2006-09-30 20:45:40 +0200 | [diff] [blame] | 2057 | static inline long nr_blockdev_pages(void) |
| 2058 | { |
| 2059 | return 0; |
| 2060 | } |
| 2061 | |
Jens Axboe | 1f940bd | 2011-03-11 20:17:08 +0100 | [diff] [blame] | 2062 | struct blk_plug { |
| 2063 | }; |
| 2064 | |
| 2065 | static inline void blk_start_plug(struct blk_plug *plug) |
Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 2066 | { |
| 2067 | } |
| 2068 | |
Jens Axboe | 1f940bd | 2011-03-11 20:17:08 +0100 | [diff] [blame] | 2069 | static inline void blk_finish_plug(struct blk_plug *plug) |
Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 2070 | { |
| 2071 | } |
| 2072 | |
Jens Axboe | 1f940bd | 2011-03-11 20:17:08 +0100 | [diff] [blame] | 2073 | static inline void blk_flush_plug(struct task_struct *task) |
Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 2074 | { |
| 2075 | } |
| 2076 | |
Jens Axboe | a237c1c | 2011-04-16 13:27:55 +0200 | [diff] [blame] | 2077 | static inline void blk_schedule_flush_plug(struct task_struct *task) |
| 2078 | { |
| 2079 | } |
| 2080 | 
Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 2082 | static inline bool blk_needs_flush_plug(struct task_struct *tsk) |
| 2083 | { |
| 2084 | return false; |
| 2085 | } |
| 2086 | |
Fabian Frederick | ac13a82 | 2014-06-04 16:06:27 -0700 | [diff] [blame] | 2087 | static inline int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask, |
| 2088 | sector_t *error_sector) |
| 2089 | { |
| 2090 | return 0; |
| 2091 | } |
| 2092 | |
David Howells | 9361401 | 2006-09-30 20:45:40 +0200 | [diff] [blame] | 2093 | #endif /* CONFIG_BLOCK */ |
| 2094 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2095 | #endif |