// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to sysfs handling
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/blktrace_api.h>
#include <linux/blk-mq.h>
#include <linux/blk-cgroup.h>
#include <linux/debugfs.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-wbt.h"

struct queue_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct request_queue *, char *);
	ssize_t (*store)(struct request_queue *, const char *, size_t);
};

static ssize_t
queue_var_show(unsigned long var, char *page)
{
	return sprintf(page, "%lu\n", var);
}

static ssize_t
queue_var_store(unsigned long *var, const char *page, size_t count)
{
	int err;
	unsigned long v;

	err = kstrtoul(page, 10, &v);
	if (err || v > UINT_MAX)
		return -EINVAL;

	*var = v;

	return count;
}
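
/*
 * Illustrative example (not part of the original source): a store handler
 * built on queue_var_store() consumes a decimal string written from user
 * space, e.g. "echo 2 > /sys/block/<disk>/queue/nomerges", and returns the
 * byte count on success so sysfs reports the whole write as consumed.
 */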

static ssize_t queue_var_store64(s64 *var, const char *page)
{
	int err;
	s64 v;

	err = kstrtos64(page, 10, &v);
	if (err < 0)
		return err;

	*var = v;
	return 0;
}

static ssize_t queue_requests_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->nr_requests, page);
}

static ssize_t
queue_requests_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long nr;
	int ret, err;

	if (!queue_is_mq(q))
		return -EINVAL;

	ret = queue_var_store(&nr, page, count);
	if (ret < 0)
		return ret;

	if (nr < BLKDEV_MIN_RQ)
		nr = BLKDEV_MIN_RQ;

	err = blk_mq_update_nr_requests(q, nr);
	if (err)
		return err;

	return ret;
}

static ssize_t queue_ra_show(struct request_queue *q, char *page)
{
	unsigned long ra_kb = q->backing_dev_info->ra_pages <<
					(PAGE_SHIFT - 10);

	return queue_var_show(ra_kb, (page));
}

static ssize_t
queue_ra_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long ra_kb;
	ssize_t ret = queue_var_store(&ra_kb, page, count);

	if (ret < 0)
		return ret;

	q->backing_dev_info->ra_pages = ra_kb >> (PAGE_SHIFT - 10);

	return ret;
}
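
/*
 * Illustrative arithmetic (assuming 4 KiB pages, PAGE_SHIFT == 12): the
 * shift by (PAGE_SHIFT - 10) converts between KiB and pages, so writing
 * 128 to read_ahead_kb stores 128 >> 2 == 32 pages in ra_pages, and
 * reading the file back shows 32 << 2 == 128 KiB.
 */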

static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
{
	int max_sectors_kb = queue_max_sectors(q) >> 1;

	return queue_var_show(max_sectors_kb, (page));
}

static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_segments(q), (page));
}

static ssize_t queue_max_discard_segments_show(struct request_queue *q,
		char *page)
{
	return queue_var_show(queue_max_discard_segments(q), (page));
}

static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.max_integrity_segments, (page));
}

static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_segment_size(q), (page));
}

static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_logical_block_size(q), page);
}

static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_physical_block_size(q), page);
}

static ssize_t queue_chunk_sectors_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.chunk_sectors, page);
}

static ssize_t queue_io_min_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_min(q), page);
}

static ssize_t queue_io_opt_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_opt(q), page);
}

static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.discard_granularity, page);
}

static ssize_t queue_discard_max_hw_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		(unsigned long long)q->limits.max_hw_discard_sectors << 9);
}

static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long)q->limits.max_discard_sectors << 9);
}

static ssize_t queue_discard_max_store(struct request_queue *q,
				       const char *page, size_t count)
{
	unsigned long max_discard;
	ssize_t ret = queue_var_store(&max_discard, page, count);

	if (ret < 0)
		return ret;

	if (max_discard & (q->limits.discard_granularity - 1))
		return -EINVAL;

	max_discard >>= 9;
	if (max_discard > UINT_MAX)
		return -EINVAL;

	if (max_discard > q->limits.max_hw_discard_sectors)
		max_discard = q->limits.max_hw_discard_sectors;

	q->limits.max_discard_sectors = max_discard;
	return ret;
}
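
/*
 * Worked example (illustrative, assuming a power-of-two discard_granularity
 * of 4096 bytes): writing 1048576 to discard_max_bytes passes the alignment
 * check (1048576 & 4095 == 0), is converted to 2048 sectors by the shift,
 * and is then clamped to max_hw_discard_sectors before being stored.
 */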

static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
{
	return queue_var_show(0, page);
}

static ssize_t queue_write_same_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		(unsigned long long)q->limits.max_write_same_sectors << 9);
}

static ssize_t queue_write_zeroes_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		(unsigned long long)q->limits.max_write_zeroes_sectors << 9);
}

static ssize_t queue_zone_write_granularity_show(struct request_queue *q,
						 char *page)
{
	return queue_var_show(queue_zone_write_granularity(q), page);
}

static ssize_t queue_zone_append_max_show(struct request_queue *q, char *page)
{
	unsigned long long max_sectors = q->limits.max_zone_append_sectors;

	return sprintf(page, "%llu\n", max_sectors << SECTOR_SHIFT);
}

static ssize_t
queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long max_sectors_kb,
		max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
			page_kb = 1 << (PAGE_SHIFT - 10);
	ssize_t ret = queue_var_store(&max_sectors_kb, page, count);

	if (ret < 0)
		return ret;

	max_hw_sectors_kb = min_not_zero(max_hw_sectors_kb, (unsigned long)
					 q->limits.max_dev_sectors >> 1);

	if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
		return -EINVAL;

	spin_lock_irq(&q->queue_lock);
	q->limits.max_sectors = max_sectors_kb << 1;
	q->backing_dev_info->io_pages = max_sectors_kb >> (PAGE_SHIFT - 10);
	spin_unlock_irq(&q->queue_lock);

	return ret;
}

static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
{
	int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;

	return queue_var_show(max_hw_sectors_kb, (page));
}

static ssize_t queue_virt_boundary_mask_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.virt_boundary_mask, (page));
}

#define QUEUE_SYSFS_BIT_FNS(name, flag, neg)				\
static ssize_t								\
queue_##name##_show(struct request_queue *q, char *page)		\
{									\
	int bit;							\
	bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags);		\
	return queue_var_show(neg ? !bit : bit, page);			\
}									\
static ssize_t								\
queue_##name##_store(struct request_queue *q, const char *page, size_t count) \
{									\
	unsigned long val;						\
	ssize_t ret;							\
	ret = queue_var_store(&val, page, count);			\
	if (ret < 0)							\
		return ret;						\
	if (neg)							\
		val = !val;						\
									\
	if (val)							\
		blk_queue_flag_set(QUEUE_FLAG_##flag, q);		\
	else								\
		blk_queue_flag_clear(QUEUE_FLAG_##flag, q);		\
	return ret;							\
}

QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1);
QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);
QUEUE_SYSFS_BIT_FNS(stable_writes, STABLE_WRITES, 0);
#undef QUEUE_SYSFS_BIT_FNS
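
/*
 * Expansion sketch (illustrative): the QUEUE_SYSFS_BIT_FNS() invocations
 * above generate paired show/store handlers. QUEUE_SYSFS_BIT_FNS(nonrot,
 * NONROT, 1) produces queue_nonrot_show() and queue_nonrot_store(); because
 * neg == 1, the "rotational" attribute reads back the inverse of
 * QUEUE_FLAG_NONROT, roughly:
 *
 *	static ssize_t queue_nonrot_show(struct request_queue *q, char *page)
 *	{
 *		int bit = test_bit(QUEUE_FLAG_NONROT, &q->queue_flags);
 *		return queue_var_show(!bit, page);
 *	}
 */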

static ssize_t queue_zoned_show(struct request_queue *q, char *page)
{
	switch (blk_queue_zoned_model(q)) {
	case BLK_ZONED_HA:
		return sprintf(page, "host-aware\n");
	case BLK_ZONED_HM:
		return sprintf(page, "host-managed\n");
	default:
		return sprintf(page, "none\n");
	}
}

static ssize_t queue_nr_zones_show(struct request_queue *q, char *page)
{
	return queue_var_show(blk_queue_nr_zones(q), page);
}

static ssize_t queue_max_open_zones_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_open_zones(q), page);
}

static ssize_t queue_max_active_zones_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_active_zones(q), page);
}

static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
{
	return queue_var_show((blk_queue_nomerges(q) << 1) |
			       blk_queue_noxmerges(q), page);
}

static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
				    size_t count)
{
	unsigned long nm;
	ssize_t ret = queue_var_store(&nm, page, count);

	if (ret < 0)
		return ret;

	blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
	blk_queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
	if (nm == 2)
		blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	else if (nm)
		blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);

	return ret;
}

static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
{
	bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);
	bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags);

	return queue_var_show(set << force, page);
}

static ssize_t
queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
{
	ssize_t ret = -EINVAL;
#ifdef CONFIG_SMP
	unsigned long val;

	ret = queue_var_store(&val, page, count);
	if (ret < 0)
		return ret;

	if (val == 2) {
		blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 1) {
		blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 0) {
		blk_queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	}
#endif
	return ret;
}
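
/*
 * Illustrative semantics of the three accepted rq_affinity values: 0
 * disables completion-CPU steering, 1 completes requests on a CPU in the
 * same group as the submitter (QUEUE_FLAG_SAME_COMP only), and 2 forces
 * completion on the exact submitting CPU (QUEUE_FLAG_SAME_FORCE as well),
 * e.g. "echo 2 > /sys/block/<disk>/queue/rq_affinity".
 */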

static ssize_t queue_poll_delay_show(struct request_queue *q, char *page)
{
	int val;

	if (q->poll_nsec == BLK_MQ_POLL_CLASSIC)
		val = BLK_MQ_POLL_CLASSIC;
	else
		val = q->poll_nsec / 1000;

	return sprintf(page, "%d\n", val);
}

static ssize_t queue_poll_delay_store(struct request_queue *q, const char *page,
				size_t count)
{
	int err, val;

	if (!q->mq_ops || !q->mq_ops->poll)
		return -EINVAL;

	err = kstrtoint(page, 10, &val);
	if (err < 0)
		return err;

	if (val == BLK_MQ_POLL_CLASSIC)
		q->poll_nsec = BLK_MQ_POLL_CLASSIC;
	else if (val >= 0)
		q->poll_nsec = val * 1000;
	else
		return -EINVAL;

	return count;
}

static ssize_t queue_poll_show(struct request_queue *q, char *page)
{
	return queue_var_show(test_bit(QUEUE_FLAG_POLL, &q->queue_flags), page);
}

static ssize_t queue_poll_store(struct request_queue *q, const char *page,
				size_t count)
{
	unsigned long poll_on;
	ssize_t ret;

	if (!q->tag_set || q->tag_set->nr_maps <= HCTX_TYPE_POLL ||
	    !q->tag_set->map[HCTX_TYPE_POLL].nr_queues)
		return -EINVAL;

	ret = queue_var_store(&poll_on, page, count);
	if (ret < 0)
		return ret;

	if (poll_on) {
		blk_queue_flag_set(QUEUE_FLAG_POLL, q);
	} else {
		blk_mq_freeze_queue(q);
		blk_queue_flag_clear(QUEUE_FLAG_POLL, q);
		blk_mq_unfreeze_queue(q);
	}

	return ret;
}

static ssize_t queue_io_timeout_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%u\n", jiffies_to_msecs(q->rq_timeout));
}

static ssize_t queue_io_timeout_store(struct request_queue *q, const char *page,
				  size_t count)
{
	unsigned int val;
	int err;

	err = kstrtou32(page, 10, &val);
	if (err || val == 0)
		return -EINVAL;

	blk_queue_rq_timeout(q, msecs_to_jiffies(val));

	return count;
}

static ssize_t queue_wb_lat_show(struct request_queue *q, char *page)
{
	if (!wbt_rq_qos(q))
		return -EINVAL;

	return sprintf(page, "%llu\n", div_u64(wbt_get_min_lat(q), 1000));
}

static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
				  size_t count)
{
	struct rq_qos *rqos;
	ssize_t ret;
	s64 val;

	ret = queue_var_store64(&val, page);
	if (ret < 0)
		return ret;
	if (val < -1)
		return -EINVAL;

	rqos = wbt_rq_qos(q);
	if (!rqos) {
		ret = wbt_init(q);
		if (ret)
			return ret;
	}

	if (val == -1)
		val = wbt_default_latency_nsec(q);
	else if (val >= 0)
		val *= 1000ULL;

	if (wbt_get_min_lat(q) == val)
		return count;

	/*
	 * Ensure that the queue is idled, in case the latency update
	 * ends up either enabling or disabling wbt completely. We can't
	 * have IO inflight if that happens.
	 */
	blk_mq_freeze_queue(q);
	blk_mq_quiesce_queue(q);

	wbt_set_min_lat(q, val);

	blk_mq_unquiesce_queue(q);
	blk_mq_unfreeze_queue(q);

	return count;
}
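
/*
 * Units example (illustrative): wbt_lat_usec is exposed in microseconds but
 * stored in nanoseconds, so writing 75 stores a minimum latency target of
 * 75000 ns, while writing -1 restores the device-type default via
 * wbt_default_latency_nsec(). Reading divides back down by 1000.
 */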

static ssize_t queue_wc_show(struct request_queue *q, char *page)
{
	if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
		return sprintf(page, "write back\n");

	return sprintf(page, "write through\n");
}

static ssize_t queue_wc_store(struct request_queue *q, const char *page,
			      size_t count)
{
	int set = -1;

	if (!strncmp(page, "write back", 10))
		set = 1;
	else if (!strncmp(page, "write through", 13) ||
		 !strncmp(page, "none", 4))
		set = 0;

	if (set == -1)
		return -EINVAL;

	if (set)
		blk_queue_flag_set(QUEUE_FLAG_WC, q);
	else
		blk_queue_flag_clear(QUEUE_FLAG_WC, q);

	return count;
}

static ssize_t queue_fua_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%u\n", test_bit(QUEUE_FLAG_FUA, &q->queue_flags));
}

static ssize_t queue_dax_show(struct request_queue *q, char *page)
{
	return queue_var_show(blk_queue_dax(q), page);
}

#define QUEUE_RO_ENTRY(_prefix, _name)				\
static struct queue_sysfs_entry _prefix##_entry = {		\
	.attr	= { .name = _name, .mode = 0444 },		\
	.show	= _prefix##_show,				\
};

#define QUEUE_RW_ENTRY(_prefix, _name)				\
static struct queue_sysfs_entry _prefix##_entry = {		\
	.attr	= { .name = _name, .mode = 0644 },		\
	.show	= _prefix##_show,				\
	.store	= _prefix##_store,				\
};
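
/*
 * Expansion sketch (illustrative): QUEUE_RW_ENTRY(queue_ra, "read_ahead_kb")
 * roughly produces
 *
 *	static struct queue_sysfs_entry queue_ra_entry = {
 *		.attr	= { .name = "read_ahead_kb", .mode = 0644 },
 *		.show	= queue_ra_show,
 *		.store	= queue_ra_store,
 *	};
 *
 * tying the sysfs file name to the show/store pair defined above.
 */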

QUEUE_RW_ENTRY(queue_requests, "nr_requests");
QUEUE_RW_ENTRY(queue_ra, "read_ahead_kb");
QUEUE_RW_ENTRY(queue_max_sectors, "max_sectors_kb");
QUEUE_RO_ENTRY(queue_max_hw_sectors, "max_hw_sectors_kb");
QUEUE_RO_ENTRY(queue_max_segments, "max_segments");
QUEUE_RO_ENTRY(queue_max_integrity_segments, "max_integrity_segments");
QUEUE_RO_ENTRY(queue_max_segment_size, "max_segment_size");
QUEUE_RW_ENTRY(elv_iosched, "scheduler");

QUEUE_RO_ENTRY(queue_logical_block_size, "logical_block_size");
QUEUE_RO_ENTRY(queue_physical_block_size, "physical_block_size");
QUEUE_RO_ENTRY(queue_chunk_sectors, "chunk_sectors");
QUEUE_RO_ENTRY(queue_io_min, "minimum_io_size");
QUEUE_RO_ENTRY(queue_io_opt, "optimal_io_size");

QUEUE_RO_ENTRY(queue_max_discard_segments, "max_discard_segments");
QUEUE_RO_ENTRY(queue_discard_granularity, "discard_granularity");
QUEUE_RO_ENTRY(queue_discard_max_hw, "discard_max_hw_bytes");
QUEUE_RW_ENTRY(queue_discard_max, "discard_max_bytes");
QUEUE_RO_ENTRY(queue_discard_zeroes_data, "discard_zeroes_data");

QUEUE_RO_ENTRY(queue_write_same_max, "write_same_max_bytes");
QUEUE_RO_ENTRY(queue_write_zeroes_max, "write_zeroes_max_bytes");
QUEUE_RO_ENTRY(queue_zone_append_max, "zone_append_max_bytes");
QUEUE_RO_ENTRY(queue_zone_write_granularity, "zone_write_granularity");

QUEUE_RO_ENTRY(queue_zoned, "zoned");
QUEUE_RO_ENTRY(queue_nr_zones, "nr_zones");
QUEUE_RO_ENTRY(queue_max_open_zones, "max_open_zones");
QUEUE_RO_ENTRY(queue_max_active_zones, "max_active_zones");

QUEUE_RW_ENTRY(queue_nomerges, "nomerges");
QUEUE_RW_ENTRY(queue_rq_affinity, "rq_affinity");
QUEUE_RW_ENTRY(queue_poll, "io_poll");
QUEUE_RW_ENTRY(queue_poll_delay, "io_poll_delay");
QUEUE_RW_ENTRY(queue_wc, "write_cache");
QUEUE_RO_ENTRY(queue_fua, "fua");
QUEUE_RO_ENTRY(queue_dax, "dax");
QUEUE_RW_ENTRY(queue_io_timeout, "io_timeout");
QUEUE_RW_ENTRY(queue_wb_lat, "wbt_lat_usec");
QUEUE_RO_ENTRY(queue_virt_boundary_mask, "virt_boundary_mask");

#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
QUEUE_RW_ENTRY(blk_throtl_sample_time, "throttle_sample_time");
#endif

/* legacy alias for logical_block_size: */
static struct queue_sysfs_entry queue_hw_sector_size_entry = {
	.attr = {.name = "hw_sector_size", .mode = 0444 },
	.show = queue_logical_block_size_show,
};

QUEUE_RW_ENTRY(queue_nonrot, "rotational");
QUEUE_RW_ENTRY(queue_iostats, "iostats");
QUEUE_RW_ENTRY(queue_random, "add_random");
QUEUE_RW_ENTRY(queue_stable_writes, "stable_writes");

static struct attribute *queue_attrs[] = {
	&queue_requests_entry.attr,
	&queue_ra_entry.attr,
	&queue_max_hw_sectors_entry.attr,
	&queue_max_sectors_entry.attr,
	&queue_max_segments_entry.attr,
	&queue_max_discard_segments_entry.attr,
	&queue_max_integrity_segments_entry.attr,
	&queue_max_segment_size_entry.attr,
	&elv_iosched_entry.attr,
	&queue_hw_sector_size_entry.attr,
	&queue_logical_block_size_entry.attr,
	&queue_physical_block_size_entry.attr,
	&queue_chunk_sectors_entry.attr,
	&queue_io_min_entry.attr,
	&queue_io_opt_entry.attr,
	&queue_discard_granularity_entry.attr,
	&queue_discard_max_entry.attr,
	&queue_discard_max_hw_entry.attr,
	&queue_discard_zeroes_data_entry.attr,
	&queue_write_same_max_entry.attr,
	&queue_write_zeroes_max_entry.attr,
	&queue_zone_append_max_entry.attr,
	&queue_zone_write_granularity_entry.attr,
	&queue_nonrot_entry.attr,
	&queue_zoned_entry.attr,
	&queue_nr_zones_entry.attr,
	&queue_max_open_zones_entry.attr,
	&queue_max_active_zones_entry.attr,
	&queue_nomerges_entry.attr,
	&queue_rq_affinity_entry.attr,
	&queue_iostats_entry.attr,
	&queue_stable_writes_entry.attr,
	&queue_random_entry.attr,
	&queue_poll_entry.attr,
	&queue_wc_entry.attr,
	&queue_fua_entry.attr,
	&queue_dax_entry.attr,
	&queue_wb_lat_entry.attr,
	&queue_poll_delay_entry.attr,
	&queue_io_timeout_entry.attr,
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
	&blk_throtl_sample_time_entry.attr,
#endif
	&queue_virt_boundary_mask_entry.attr,
	NULL,
};

static umode_t queue_attr_visible(struct kobject *kobj, struct attribute *attr,
				int n)
{
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);

	if (attr == &queue_io_timeout_entry.attr &&
		(!q->mq_ops || !q->mq_ops->timeout))
		return 0;

	if ((attr == &queue_max_open_zones_entry.attr ||
	     attr == &queue_max_active_zones_entry.attr) &&
	    !blk_queue_is_zoned(q))
		return 0;

	return attr->mode;
}

static struct attribute_group queue_attr_group = {
	.attrs = queue_attrs,
	.is_visible = queue_attr_visible,
};
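
/*
 * Usage example (illustrative): once registered, these attributes appear
 * under /sys/block/<disk>/queue/, so from a shell one can do e.g.
 *
 *	$ cat /sys/block/sda/queue/max_sectors_kb
 *	$ echo 0 > /sys/block/sda/queue/add_random
 *
 * with queue_attr_visible() hiding entries that do not apply to the device.
 */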


#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)

static ssize_t
queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);
	ssize_t res;

	if (!entry->show)
		return -EIO;
	mutex_lock(&q->sysfs_lock);
	res = entry->show(q, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t
queue_attr_store(struct kobject *kobj, struct attribute *attr,
		    const char *page, size_t length)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct request_queue *q;
	ssize_t res;

	if (!entry->store)
		return -EIO;

	q = container_of(kobj, struct request_queue, kobj);
	mutex_lock(&q->sysfs_lock);
	res = entry->store(q, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static void blk_free_queue_rcu(struct rcu_head *rcu_head)
{
	struct request_queue *q = container_of(rcu_head, struct request_queue,
					       rcu_head);
	kmem_cache_free(blk_requestq_cachep, q);
}

/* Unconfigure the I/O scheduler and dissociate from the cgroup controller. */
static void blk_exit_queue(struct request_queue *q)
{
	/*
	 * Since the I/O scheduler exit code may access cgroup information,
	 * perform I/O scheduler exit before disassociating from the block
	 * cgroup controller.
	 */
	if (q->elevator) {
		ioc_clear_queue(q);
		__elevator_exit(q, q->elevator);
	}

	/*
	 * Remove all references to @q from the block cgroup controller before
	 * restoring @q->queue_lock, so that restoring this pointer does not
	 * cause e.g. blkcg_print_blkgs() to crash.
	 */
	blkcg_exit_queue(q);

	/*
	 * Since the cgroup code may dereference the @q->backing_dev_info
	 * pointer, only decrease its reference count after having removed the
	 * association with the block cgroup controller.
	 */
	bdi_put(q->backing_dev_info);
}

/**
 * blk_release_queue - releases all allocated resources of the request_queue
 * @kobj: pointer to a kobject, whose container is a request_queue
 *
 * This function releases all allocated resources of the request queue.
 *
 * The struct request_queue refcount is incremented with blk_get_queue() and
 * decremented with blk_put_queue(). Once the refcount reaches 0 this function
 * is called.
 *
 * For drivers that have a request_queue on a gendisk and added with
 * __device_add_disk() the refcount to request_queue will reach 0 with
 * the last put_disk() called by the driver. For drivers which don't use
 * __device_add_disk() this happens with blk_cleanup_queue().
 *
 * Drivers exist which depend on the release of the request_queue to be
 * synchronous, it should not be deferred.
 *
 * Context: can sleep
 */
static void blk_release_queue(struct kobject *kobj)
{
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);

	might_sleep();

	if (test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags))
		blk_stat_remove_callback(q, q->poll_cb);
	blk_stat_free_callback(q->poll_cb);

	blk_free_queue_stats(q->stats);

	if (queue_is_mq(q)) {
		struct blk_mq_hw_ctx *hctx;
		int i;

		cancel_delayed_work_sync(&q->requeue_work);

		queue_for_each_hw_ctx(q, hctx, i)
			cancel_delayed_work_sync(&hctx->run_work);
	}

	blk_exit_queue(q);

	blk_queue_free_zone_bitmaps(q);

	if (queue_is_mq(q))
		blk_mq_release(q);

	blk_trace_shutdown(q);
	mutex_lock(&q->debugfs_mutex);
	debugfs_remove_recursive(q->debugfs_dir);
	mutex_unlock(&q->debugfs_mutex);

	if (queue_is_mq(q))
		blk_mq_debugfs_unregister(q);

	bioset_exit(&q->bio_split);

	ida_simple_remove(&blk_queue_ida, q->id);
	call_rcu(&q->rcu_head, blk_free_queue_rcu);
}

static const struct sysfs_ops queue_sysfs_ops = {
	.show	= queue_attr_show,
	.store	= queue_attr_store,
};

struct kobj_type blk_queue_ktype = {
	.sysfs_ops	= &queue_sysfs_ops,
	.release	= blk_release_queue,
};

/**
 * blk_register_queue - register a block layer queue with sysfs
 * @disk: Disk of which the request queue should be registered with sysfs.
 */
int blk_register_queue(struct gendisk *disk)
{
	int ret;
	struct device *dev = disk_to_dev(disk);
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return -ENXIO;

	WARN_ONCE(blk_queue_registered(q),
		  "%s is registering an already registered queue\n",
		  kobject_name(&dev->kobj));

	/*
	 * SCSI probing may synchronously create and destroy a lot of
	 * request_queues for non-existent devices.  Shutting down a fully
	 * functional queue takes measurable wallclock time as RCU grace
	 * periods are involved.  To avoid excessive latency in these
	 * cases, a request_queue starts out in a degraded mode which is
	 * faster to shut down and is made fully functional here as
	 * request_queues for non-existent devices never get registered.
	 */
	if (!blk_queue_init_done(q)) {
		blk_queue_flag_set(QUEUE_FLAG_INIT_DONE, q);
		percpu_ref_switch_to_percpu(&q->q_usage_counter);
	}

	blk_queue_update_readahead(q);

	ret = blk_trace_init_sysfs(dev);
	if (ret)
		return ret;

	mutex_lock(&q->sysfs_dir_lock);

	ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
	if (ret < 0) {
		blk_trace_remove_sysfs(dev);
		goto unlock;
	}

	ret = sysfs_create_group(&q->kobj, &queue_attr_group);
	if (ret) {
		blk_trace_remove_sysfs(dev);
		kobject_del(&q->kobj);
		kobject_put(&dev->kobj);
		goto unlock;
	}

	mutex_lock(&q->debugfs_mutex);
	q->debugfs_dir = debugfs_create_dir(kobject_name(q->kobj.parent),
					    blk_debugfs_root);
	mutex_unlock(&q->debugfs_mutex);

	if (queue_is_mq(q)) {
		__blk_mq_register_dev(dev, q);
		blk_mq_debugfs_register(q);
	}

	mutex_lock(&q->sysfs_lock);
	if (q->elevator) {
		ret = elv_register_queue(q, false);
		if (ret) {
			mutex_unlock(&q->sysfs_lock);
			mutex_unlock(&q->sysfs_dir_lock);
			kobject_del(&q->kobj);
			blk_trace_remove_sysfs(dev);
			kobject_put(&dev->kobj);
			return ret;
		}
	}

	blk_queue_flag_set(QUEUE_FLAG_REGISTERED, q);
	wbt_enable_default(q);
	blk_throtl_register_queue(q);

	/* Now everything is ready and send out KOBJ_ADD uevent */
	kobject_uevent(&q->kobj, KOBJ_ADD);
	if (q->elevator)
		kobject_uevent(&q->elevator->kobj, KOBJ_ADD);
	mutex_unlock(&q->sysfs_lock);

	ret = 0;
unlock:
	mutex_unlock(&q->sysfs_dir_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(blk_register_queue);

/**
 * blk_unregister_queue - counterpart of blk_register_queue()
 * @disk: Disk of which the request queue should be unregistered from sysfs.
 *
 * Note: the caller is responsible for guaranteeing that this function is
 * called after blk_register_queue() has finished.
 */
void blk_unregister_queue(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return;

	/* Return early if disk->queue was never registered. */
	if (!blk_queue_registered(q))
		return;

	/*
	 * Since sysfs_remove_dir() prevents adding new directory entries
	 * before removal of existing entries starts, protect against
	 * concurrent elv_iosched_store() calls.
	 */
	mutex_lock(&q->sysfs_lock);
	blk_queue_flag_clear(QUEUE_FLAG_REGISTERED, q);
	mutex_unlock(&q->sysfs_lock);

	mutex_lock(&q->sysfs_dir_lock);
	/*
	 * Remove the sysfs attributes before unregistering the queue data
	 * structures that can be modified through sysfs.
	 */
	if (queue_is_mq(q))
		blk_mq_unregister_dev(disk_to_dev(disk), q);

	kobject_uevent(&q->kobj, KOBJ_REMOVE);
	kobject_del(&q->kobj);
	blk_trace_remove_sysfs(disk_to_dev(disk));

	mutex_lock(&q->sysfs_lock);
	if (q->elevator)
		elv_unregister_queue(q);
	mutex_unlock(&q->sysfs_lock);
	mutex_unlock(&q->sysfs_dir_lock);

	kobject_put(&disk_to_dev(disk)->kobj);
}