/*
 * Functions related to sysfs handling
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/blktrace_api.h>
#include <linux/blk-mq.h>
#include <linux/blk-cgroup.h>

#include "blk.h"
#include "blk-mq.h"

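/*
 * One entry per attribute in /sys/block/<disk>/queue/; ->show and
 * ->store operate on the request_queue that owns the kobject.
 */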
struct queue_sysfs_entry {
        struct attribute attr;
        ssize_t (*show)(struct request_queue *, char *);
        ssize_t (*store)(struct request_queue *, const char *, size_t);
};

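/*
 * Helpers shared by the attribute methods below: format a value for
 * sysfs, and parse user input (decimal, capped at UINT_MAX).
 */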
static ssize_t
queue_var_show(unsigned long var, char *page)
{
        return sprintf(page, "%lu\n", var);
}

static ssize_t
queue_var_store(unsigned long *var, const char *page, size_t count)
{
        int err;
        unsigned long v;

        err = kstrtoul(page, 10, &v);
        if (err || v > UINT_MAX)
                return -EINVAL;

        *var = v;

        return count;
}

static ssize_t queue_requests_show(struct request_queue *q, char *page)
{
        return queue_var_show(q->nr_requests, (page));
}

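/*
 * nr_requests is clamped to at least BLKDEV_MIN_RQ and applied through
 * the legacy or blk-mq path, depending on how the queue was set up.
 */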
static ssize_t
queue_requests_store(struct request_queue *q, const char *page, size_t count)
{
        unsigned long nr;
        int ret, err;

        if (!q->request_fn && !q->mq_ops)
                return -EINVAL;

        ret = queue_var_store(&nr, page, count);
        if (ret < 0)
                return ret;

        if (nr < BLKDEV_MIN_RQ)
                nr = BLKDEV_MIN_RQ;

        if (q->request_fn)
                err = blk_update_nr_requests(q, nr);
        else
                err = blk_mq_update_nr_requests(q, nr);

        if (err)
                return err;

        return ret;
}

static ssize_t queue_ra_show(struct request_queue *q, char *page)
{
        unsigned long ra_kb = q->backing_dev_info.ra_pages <<
                                        (PAGE_CACHE_SHIFT - 10);

        return queue_var_show(ra_kb, (page));
}

static ssize_t
queue_ra_store(struct request_queue *q, const char *page, size_t count)
{
        unsigned long ra_kb;
        ssize_t ret = queue_var_store(&ra_kb, page, count);

        if (ret < 0)
                return ret;

        q->backing_dev_info.ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10);

        return ret;
}

static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
{
        int max_sectors_kb = queue_max_sectors(q) >> 1;

        return queue_var_show(max_sectors_kb, (page));
}

static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_max_segments(q), (page));
}

static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page)
{
        return queue_var_show(q->limits.max_integrity_segments, (page));
}

static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
{
        if (blk_queue_cluster(q))
                return queue_var_show(queue_max_segment_size(q), (page));

        return queue_var_show(PAGE_CACHE_SIZE, (page));
}

static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_logical_block_size(q), page);
}

static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_physical_block_size(q), page);
}

static ssize_t queue_io_min_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_io_min(q), page);
}

static ssize_t queue_io_opt_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_io_opt(q), page);
}

static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page)
{
        return queue_var_show(q->limits.discard_granularity, page);
}

static ssize_t queue_discard_max_hw_show(struct request_queue *q, char *page)
{
        unsigned long long val;

        val = q->limits.max_hw_discard_sectors << 9;
        return sprintf(page, "%llu\n", val);
}

static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
{
        return sprintf(page, "%llu\n",
                       (unsigned long long)q->limits.max_discard_sectors << 9);
}

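/*
 * discard_max_bytes is writable: the value must be aligned to the
 * discard granularity and is capped at the hardware limit.
 */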
static ssize_t queue_discard_max_store(struct request_queue *q,
                                       const char *page, size_t count)
{
        unsigned long max_discard;
        ssize_t ret = queue_var_store(&max_discard, page, count);

        if (ret < 0)
                return ret;

        if (max_discard & (q->limits.discard_granularity - 1))
                return -EINVAL;

        max_discard >>= 9;
        if (max_discard > UINT_MAX)
                return -EINVAL;

        if (max_discard > q->limits.max_hw_discard_sectors)
                max_discard = q->limits.max_hw_discard_sectors;

        q->limits.max_discard_sectors = max_discard;
        return ret;
}

static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_discard_zeroes_data(q), page);
}

static ssize_t queue_write_same_max_show(struct request_queue *q, char *page)
{
        return sprintf(page, "%llu\n",
                       (unsigned long long)q->limits.max_write_same_sectors << 9);
}

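/*
 * max_sectors_kb must lie between one page and the smaller of the
 * hardware and device limits.
 */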
static ssize_t
queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
{
        unsigned long max_sectors_kb,
                max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
                        page_kb = 1 << (PAGE_CACHE_SHIFT - 10);
        ssize_t ret = queue_var_store(&max_sectors_kb, page, count);

        if (ret < 0)
                return ret;

        max_hw_sectors_kb = min_not_zero(max_hw_sectors_kb, (unsigned long)
                                         q->limits.max_dev_sectors >> 1);

        if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
                return -EINVAL;

        spin_lock_irq(q->queue_lock);
        q->limits.max_sectors = max_sectors_kb << 1;
        spin_unlock_irq(q->queue_lock);

        return ret;
}

static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
{
        int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;

        return queue_var_show(max_hw_sectors_kb, (page));
}

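/*
 * Generate show/store methods for a simple queue flag.  With @neg set,
 * the exported value is inverted, e.g. "rotational" is the inverse of
 * QUEUE_FLAG_NONROT.
 */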
#define QUEUE_SYSFS_BIT_FNS(name, flag, neg)                            \
static ssize_t                                                          \
queue_show_##name(struct request_queue *q, char *page)                  \
{                                                                       \
        int bit;                                                        \
        bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags);             \
        return queue_var_show(neg ? !bit : bit, page);                  \
}                                                                       \
static ssize_t                                                          \
queue_store_##name(struct request_queue *q, const char *page, size_t count) \
{                                                                       \
        unsigned long val;                                              \
        ssize_t ret;                                                    \
        ret = queue_var_store(&val, page, count);                       \
        if (ret < 0)                                                    \
                return ret;                                             \
        if (neg)                                                        \
                val = !val;                                             \
                                                                        \
        spin_lock_irq(q->queue_lock);                                   \
        if (val)                                                        \
                queue_flag_set(QUEUE_FLAG_##flag, q);                   \
        else                                                            \
                queue_flag_clear(QUEUE_FLAG_##flag, q);                 \
        spin_unlock_irq(q->queue_lock);                                 \
        return ret;                                                     \
}

QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1);
QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);
#undef QUEUE_SYSFS_BIT_FNS

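/*
 * nomerges encodes two flags: 0 = all merging enabled, 1 = only simple
 * one-hit cache merges, 2 = no merging at all.
 */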
static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
{
        return queue_var_show((blk_queue_nomerges(q) << 1) |
                               blk_queue_noxmerges(q), page);
}

static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
                                    size_t count)
{
        unsigned long nm;
        ssize_t ret = queue_var_store(&nm, page, count);

        if (ret < 0)
                return ret;

        spin_lock_irq(q->queue_lock);
        queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
        queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
        if (nm == 2)
                queue_flag_set(QUEUE_FLAG_NOMERGES, q);
        else if (nm)
                queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
        spin_unlock_irq(q->queue_lock);

        return ret;
}

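/*
 * rq_affinity: 0 = complete anywhere, 1 = complete on the submitting
 * CPU group, 2 = force completion on the exact submitting CPU.
 */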
static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
{
        bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);
        bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags);

        return queue_var_show(set << force, page);
}

static ssize_t
queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
{
        ssize_t ret = -EINVAL;
#ifdef CONFIG_SMP
        unsigned long val;

        ret = queue_var_store(&val, page, count);
        if (ret < 0)
                return ret;

        spin_lock_irq(q->queue_lock);
        if (val == 2) {
                queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
                queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
        } else if (val == 1) {
                queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
                queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
        } else if (val == 0) {
                queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
                queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
        }
        spin_unlock_irq(q->queue_lock);
#endif
        return ret;
}

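/*
 * io_poll can only be enabled on blk-mq queues whose driver
 * implements ->poll().
 */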
static ssize_t queue_poll_show(struct request_queue *q, char *page)
{
        return queue_var_show(test_bit(QUEUE_FLAG_POLL, &q->queue_flags), page);
}

static ssize_t queue_poll_store(struct request_queue *q, const char *page,
                                size_t count)
{
        unsigned long poll_on;
        ssize_t ret;

        if (!q->mq_ops || !q->mq_ops->poll)
                return -EINVAL;

        ret = queue_var_store(&poll_on, page, count);
        if (ret < 0)
                return ret;

        spin_lock_irq(q->queue_lock);
        if (poll_on)
                queue_flag_set(QUEUE_FLAG_POLL, q);
        else
                queue_flag_clear(QUEUE_FLAG_POLL, q);
        spin_unlock_irq(q->queue_lock);

        return ret;
}

static struct queue_sysfs_entry queue_requests_entry = {
        .attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
        .show = queue_requests_show,
        .store = queue_requests_store,
};

static struct queue_sysfs_entry queue_ra_entry = {
        .attr = {.name = "read_ahead_kb", .mode = S_IRUGO | S_IWUSR },
        .show = queue_ra_show,
        .store = queue_ra_store,
};

static struct queue_sysfs_entry queue_max_sectors_entry = {
        .attr = {.name = "max_sectors_kb", .mode = S_IRUGO | S_IWUSR },
        .show = queue_max_sectors_show,
        .store = queue_max_sectors_store,
};

static struct queue_sysfs_entry queue_max_hw_sectors_entry = {
        .attr = {.name = "max_hw_sectors_kb", .mode = S_IRUGO },
        .show = queue_max_hw_sectors_show,
};

static struct queue_sysfs_entry queue_max_segments_entry = {
        .attr = {.name = "max_segments", .mode = S_IRUGO },
        .show = queue_max_segments_show,
};

static struct queue_sysfs_entry queue_max_integrity_segments_entry = {
        .attr = {.name = "max_integrity_segments", .mode = S_IRUGO },
        .show = queue_max_integrity_segments_show,
};

static struct queue_sysfs_entry queue_max_segment_size_entry = {
        .attr = {.name = "max_segment_size", .mode = S_IRUGO },
        .show = queue_max_segment_size_show,
};

static struct queue_sysfs_entry queue_iosched_entry = {
        .attr = {.name = "scheduler", .mode = S_IRUGO | S_IWUSR },
        .show = elv_iosched_show,
        .store = elv_iosched_store,
};

static struct queue_sysfs_entry queue_hw_sector_size_entry = {
        .attr = {.name = "hw_sector_size", .mode = S_IRUGO },
        .show = queue_logical_block_size_show,
};

static struct queue_sysfs_entry queue_logical_block_size_entry = {
        .attr = {.name = "logical_block_size", .mode = S_IRUGO },
        .show = queue_logical_block_size_show,
};

static struct queue_sysfs_entry queue_physical_block_size_entry = {
        .attr = {.name = "physical_block_size", .mode = S_IRUGO },
        .show = queue_physical_block_size_show,
};

static struct queue_sysfs_entry queue_io_min_entry = {
        .attr = {.name = "minimum_io_size", .mode = S_IRUGO },
        .show = queue_io_min_show,
};

static struct queue_sysfs_entry queue_io_opt_entry = {
        .attr = {.name = "optimal_io_size", .mode = S_IRUGO },
        .show = queue_io_opt_show,
};

static struct queue_sysfs_entry queue_discard_granularity_entry = {
        .attr = {.name = "discard_granularity", .mode = S_IRUGO },
        .show = queue_discard_granularity_show,
};

static struct queue_sysfs_entry queue_discard_max_hw_entry = {
        .attr = {.name = "discard_max_hw_bytes", .mode = S_IRUGO },
        .show = queue_discard_max_hw_show,
};

static struct queue_sysfs_entry queue_discard_max_entry = {
        .attr = {.name = "discard_max_bytes", .mode = S_IRUGO | S_IWUSR },
        .show = queue_discard_max_show,
        .store = queue_discard_max_store,
};

static struct queue_sysfs_entry queue_discard_zeroes_data_entry = {
        .attr = {.name = "discard_zeroes_data", .mode = S_IRUGO },
        .show = queue_discard_zeroes_data_show,
};

static struct queue_sysfs_entry queue_write_same_max_entry = {
        .attr = {.name = "write_same_max_bytes", .mode = S_IRUGO },
        .show = queue_write_same_max_show,
};

static struct queue_sysfs_entry queue_nonrot_entry = {
        .attr = {.name = "rotational", .mode = S_IRUGO | S_IWUSR },
        .show = queue_show_nonrot,
        .store = queue_store_nonrot,
};

static struct queue_sysfs_entry queue_nomerges_entry = {
        .attr = {.name = "nomerges", .mode = S_IRUGO | S_IWUSR },
        .show = queue_nomerges_show,
        .store = queue_nomerges_store,
};

static struct queue_sysfs_entry queue_rq_affinity_entry = {
        .attr = {.name = "rq_affinity", .mode = S_IRUGO | S_IWUSR },
        .show = queue_rq_affinity_show,
        .store = queue_rq_affinity_store,
};

static struct queue_sysfs_entry queue_iostats_entry = {
        .attr = {.name = "iostats", .mode = S_IRUGO | S_IWUSR },
        .show = queue_show_iostats,
        .store = queue_store_iostats,
};

static struct queue_sysfs_entry queue_random_entry = {
        .attr = {.name = "add_random", .mode = S_IRUGO | S_IWUSR },
        .show = queue_show_random,
        .store = queue_store_random,
};

static struct queue_sysfs_entry queue_poll_entry = {
        .attr = {.name = "io_poll", .mode = S_IRUGO | S_IWUSR },
        .show = queue_poll_show,
        .store = queue_poll_store,
};

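/* Attributes exposed under /sys/block/<disk>/queue/. */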
static struct attribute *default_attrs[] = {
        &queue_requests_entry.attr,
        &queue_ra_entry.attr,
        &queue_max_hw_sectors_entry.attr,
        &queue_max_sectors_entry.attr,
        &queue_max_segments_entry.attr,
        &queue_max_integrity_segments_entry.attr,
        &queue_max_segment_size_entry.attr,
        &queue_iosched_entry.attr,
        &queue_hw_sector_size_entry.attr,
        &queue_logical_block_size_entry.attr,
        &queue_physical_block_size_entry.attr,
        &queue_io_min_entry.attr,
        &queue_io_opt_entry.attr,
        &queue_discard_granularity_entry.attr,
        &queue_discard_max_entry.attr,
        &queue_discard_max_hw_entry.attr,
        &queue_discard_zeroes_data_entry.attr,
        &queue_write_same_max_entry.attr,
        &queue_nonrot_entry.attr,
        &queue_nomerges_entry.attr,
        &queue_rq_affinity_entry.attr,
        &queue_iostats_entry.attr,
        &queue_random_entry.attr,
        &queue_poll_entry.attr,
        NULL,
};

#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)

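/*
 * Attribute reads and writes are serialized by q->sysfs_lock and are
 * rejected with -ENOENT once the queue is dying.
 */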
static ssize_t
queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
        struct queue_sysfs_entry *entry = to_queue(attr);
        struct request_queue *q =
                container_of(kobj, struct request_queue, kobj);
        ssize_t res;

        if (!entry->show)
                return -EIO;
        mutex_lock(&q->sysfs_lock);
        if (blk_queue_dying(q)) {
                mutex_unlock(&q->sysfs_lock);
                return -ENOENT;
        }
        res = entry->show(q, page);
        mutex_unlock(&q->sysfs_lock);
        return res;
}

static ssize_t
queue_attr_store(struct kobject *kobj, struct attribute *attr,
                 const char *page, size_t length)
{
        struct queue_sysfs_entry *entry = to_queue(attr);
        struct request_queue *q;
        ssize_t res;

        if (!entry->store)
                return -EIO;

        q = container_of(kobj, struct request_queue, kobj);
        mutex_lock(&q->sysfs_lock);
        if (blk_queue_dying(q)) {
                mutex_unlock(&q->sysfs_lock);
                return -ENOENT;
        }
        res = entry->store(q, page, length);
        mutex_unlock(&q->sysfs_lock);
        return res;
}

static void blk_free_queue_rcu(struct rcu_head *rcu_head)
{
        struct request_queue *q = container_of(rcu_head, struct request_queue,
                                               rcu_head);
        kmem_cache_free(blk_requestq_cachep, q);
}

/**
 * blk_release_queue - release a &struct request_queue when it is no longer needed
 * @kobj: the kobj belonging to the request queue to be released
 *
 * Description:
 *     blk_release_queue is the pair to blk_init_queue() or
 *     blk_queue_make_request().  It should be called when a request queue is
 *     being released; typically when a block device is being de-registered.
 *     Currently, its primary task is to free all the &struct request
 *     structures that were allocated to the queue and the queue itself.
 *
 * Note:
 *     The low level driver must have finished any outstanding requests first
 *     via blk_cleanup_queue().
 **/
static void blk_release_queue(struct kobject *kobj)
{
        struct request_queue *q =
                container_of(kobj, struct request_queue, kobj);

        bdi_exit(&q->backing_dev_info);
        blkcg_exit_queue(q);

        if (q->elevator) {
                spin_lock_irq(q->queue_lock);
                ioc_clear_queue(q);
                spin_unlock_irq(q->queue_lock);
                elevator_exit(q->elevator);
        }

        blk_exit_rl(&q->root_rl);

        if (q->queue_tags)
                __blk_queue_free_tags(q);

        if (!q->mq_ops)
                blk_free_flush_queue(q->fq);
        else
                blk_mq_release(q);

        blk_trace_shutdown(q);

        if (q->bio_split)
                bioset_free(q->bio_split);

        ida_simple_remove(&blk_queue_ida, q->id);
        call_rcu(&q->rcu_head, blk_free_queue_rcu);
}

static const struct sysfs_ops queue_sysfs_ops = {
        .show = queue_attr_show,
        .store = queue_attr_store,
};

struct kobj_type blk_queue_ktype = {
        .sysfs_ops = &queue_sysfs_ops,
        .default_attrs = default_attrs,
        .release = blk_release_queue,
};

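/*
 * Register the "queue" kobject (and, for legacy request_fn queues, the
 * elevator) under the disk's sysfs directory.
 */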
int blk_register_queue(struct gendisk *disk)
{
        int ret;
        struct device *dev = disk_to_dev(disk);
        struct request_queue *q = disk->queue;

        if (WARN_ON(!q))
                return -ENXIO;

        /*
         * SCSI probing may synchronously create and destroy a lot of
         * request_queues for non-existent devices.  Shutting down a fully
         * functional queue takes measurable wallclock time as RCU grace
         * periods are involved.  To avoid excessive latency in these
         * cases, a request_queue starts out in a degraded mode which is
         * faster to shut down and is made fully functional here as
         * request_queues for non-existent devices never get registered.
         */
        if (!blk_queue_init_done(q)) {
                queue_flag_set_unlocked(QUEUE_FLAG_INIT_DONE, q);
                percpu_ref_switch_to_percpu(&q->q_usage_counter);
                blk_queue_bypass_end(q);
        }

        ret = blk_trace_init_sysfs(dev);
        if (ret)
                return ret;

        ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
        if (ret < 0) {
                blk_trace_remove_sysfs(dev);
                return ret;
        }

        kobject_uevent(&q->kobj, KOBJ_ADD);

        if (q->mq_ops)
                blk_mq_register_disk(disk);

        if (!q->request_fn)
                return 0;

        ret = elv_register_queue(q);
        if (ret) {
                kobject_uevent(&q->kobj, KOBJ_REMOVE);
                kobject_del(&q->kobj);
                blk_trace_remove_sysfs(dev);
                kobject_put(&dev->kobj);
                return ret;
        }

        return 0;
}

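/* Undo blk_register_queue(); called when the disk is being torn down. */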
void blk_unregister_queue(struct gendisk *disk)
{
        struct request_queue *q = disk->queue;

        if (WARN_ON(!q))
                return;

        if (q->mq_ops)
                blk_mq_unregister_disk(disk);

        if (q->request_fn)
                elv_unregister_queue(q);

        kobject_uevent(&q->kobj, KOBJ_REMOVE);
        kobject_del(&q->kobj);
        blk_trace_remove_sysfs(disk_to_dev(disk));
        kobject_put(&disk_to_dev(disk)->kobj);
}