/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2011-2014, Intel Corporation.
 */

#ifndef _NVME_H
#define _NVME_H

#include <linux/nvme.h>
#include <linux/cdev.h>
#include <linux/pci.h>
#include <linux/kref.h>
#include <linux/blk-mq.h>
#include <linux/sed-opal.h>
#include <linux/fault-inject.h>
#include <linux/rcupdate.h>
#include <linux/wait.h>
#include <linux/t10-pi.h>

#include <trace/events/block.h>

extern unsigned int nvme_io_timeout;
#define NVME_IO_TIMEOUT	(nvme_io_timeout * HZ)

extern unsigned int admin_timeout;
#define NVME_ADMIN_TIMEOUT	(admin_timeout * HZ)

#define NVME_DEFAULT_KATO	5

#ifdef CONFIG_ARCH_NO_SG_CHAIN
#define NVME_INLINE_SG_CNT		0
#define NVME_INLINE_METADATA_SG_CNT	0
#else
#define NVME_INLINE_SG_CNT		2
#define NVME_INLINE_METADATA_SG_CNT	1
#endif

/*
 * Default to a 4K page size, with the intention to update this
 * path in the future to accommodate architectures with differing
 * kernel and IO page sizes.
 */
#define NVME_CTRL_PAGE_SHIFT	12
#define NVME_CTRL_PAGE_SIZE	(1 << NVME_CTRL_PAGE_SHIFT)
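
/*
 * Illustrative sketch (not a driver code path): data-transfer accounting
 * against the controller is done in units of NVME_CTRL_PAGE_SIZE rather
 * than the kernel PAGE_SIZE, so a page-aligned 32K transfer needs:
 *
 *	DIV_ROUND_UP(32768, NVME_CTRL_PAGE_SIZE)	-> 8 entries
 */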

extern struct workqueue_struct *nvme_wq;
extern struct workqueue_struct *nvme_reset_wq;
extern struct workqueue_struct *nvme_delete_wq;

/*
 * List of workarounds for devices that require behavior not specified in
 * the standard.
 */
enum nvme_quirks {
	/*
	 * Prefers I/O aligned to a stripe size specified in a vendor
	 * specific Identify field.
	 */
	NVME_QUIRK_STRIPE_SIZE			= (1 << 0),

	/*
	 * The controller doesn't handle Identify values other than 0 or 1
	 * correctly.
	 */
	NVME_QUIRK_IDENTIFY_CNS			= (1 << 1),

	/*
	 * The controller deterministically returns 0's on reads to
	 * logical blocks that deallocate was called on.
	 */
	NVME_QUIRK_DEALLOCATE_ZEROES		= (1 << 2),

	/*
	 * The controller needs a delay before it starts checking device
	 * readiness, which is done by reading the NVME_CSTS_RDY bit.
	 */
	NVME_QUIRK_DELAY_BEFORE_CHK_RDY		= (1 << 3),

	/*
	 * APST should not be used.
	 */
	NVME_QUIRK_NO_APST			= (1 << 4),

	/*
	 * The deepest sleep state should not be used.
	 */
	NVME_QUIRK_NO_DEEPEST_PS		= (1 << 5),

	/*
	 * Set MEDIUM priority on SQ creation
	 */
	NVME_QUIRK_MEDIUM_PRIO_SQ		= (1 << 7),

	/*
	 * Ignore device provided subnqn.
	 */
	NVME_QUIRK_IGNORE_DEV_SUBNQN		= (1 << 8),

	/*
	 * Broken Write Zeroes.
	 */
	NVME_QUIRK_DISABLE_WRITE_ZEROES		= (1 << 9),

	/*
	 * Force simple suspend/resume path.
	 */
	NVME_QUIRK_SIMPLE_SUSPEND		= (1 << 10),

	/*
	 * Use only one interrupt vector for all queues
	 */
	NVME_QUIRK_SINGLE_VECTOR		= (1 << 11),

	/*
	 * Use non-standard 128-byte SQEs.
	 */
	NVME_QUIRK_128_BYTES_SQES		= (1 << 12),

	/*
	 * Prevent tag overlap between queues
	 */
	NVME_QUIRK_SHARED_TAGS			= (1 << 13),

	/*
	 * Don't change the value of the temperature threshold feature
	 */
	NVME_QUIRK_NO_TEMP_THRESH_CHANGE	= (1 << 14),

	/*
	 * The controller doesn't handle the Identify Namespace
	 * Identification Descriptor list subcommand despite claiming
	 * NVMe 1.3 compliance.
	 */
	NVME_QUIRK_NO_NS_DESC_LIST		= (1 << 15),

	/*
	 * The controller does not properly handle DMA addresses over
	 * 48 bits.
	 */
	NVME_QUIRK_DMA_ADDRESS_BITS_48		= (1 << 16),

	/*
	 * The controller requires the command_id value to be limited, so
	 * skip encoding the generation sequence number.
	 */
	NVME_QUIRK_SKIP_CID_GEN			= (1 << 17),
};
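
/*
 * Quirks are typically matched at probe time (e.g. from the PCI id
 * table) into ctrl->quirks and then tested as plain bit flags. An
 * illustrative check, not a real code path from this header:
 *
 *	if (ctrl->quirks & NVME_QUIRK_DISABLE_WRITE_ZEROES)
 *		ctrl->max_zeroes_sectors = 0;
 */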

/*
 * Common request structure for NVMe passthrough. All drivers must have
 * this structure as the first member of their request-private data.
 */
struct nvme_request {
	struct nvme_command	*cmd;
	union nvme_result	result;
	u8			genctr;
	u8			retries;
	u8			flags;
	u16			status;
	struct nvme_ctrl	*ctrl;
};

/*
 * Mark a bio as coming in through the mpath node.
 */
#define REQ_NVME_MPATH		REQ_DRV

enum {
	NVME_REQ_CANCELLED		= (1 << 0),
	NVME_REQ_USERCMD		= (1 << 1),
};

static inline struct nvme_request *nvme_req(struct request *req)
{
	return blk_mq_rq_to_pdu(req);
}

static inline u16 nvme_req_qid(struct request *req)
{
	if (!req->q->queuedata)
		return 0;

	return req->mq_hctx->queue_num + 1;
}
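
/*
 * Worked example for nvme_req_qid(): the admin queue (whose
 * request_queue has no queuedata) is qid 0; I/O queues are numbered
 * from 1, so a request on hardware context 3 yields:
 *
 *	nvme_req_qid(req) == req->mq_hctx->queue_num + 1 == 4
 */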

/*
 * The below value is the specific amount of delay needed before checking
 * readiness in case of the PCI_DEVICE(0x1c58, 0x0003), which needs the
 * NVME_QUIRK_DELAY_BEFORE_CHK_RDY quirk enabled. The value (in ms) was
 * found empirically.
 */
#define NVME_QUIRK_DELAY_AMOUNT		2300

/*
 * enum nvme_ctrl_state: Controller state
 *
 * @NVME_CTRL_NEW:		New controller just allocated, initial state
 * @NVME_CTRL_LIVE:		Controller is connected and I/O capable
 * @NVME_CTRL_RESETTING:	Controller is resetting (or scheduled reset)
 * @NVME_CTRL_CONNECTING:	Controller is disconnected, now connecting the
 *				transport
 * @NVME_CTRL_DELETING:		Controller is deleting (or scheduled deletion)
 * @NVME_CTRL_DELETING_NOIO:	Controller is deleting and I/O is not
 *				disabled/failed immediately. This state comes
 *				after all async event processing has taken
 *				place and before namespace removal and
 *				controller deletion proceed
 * @NVME_CTRL_DEAD:		Controller is non-present/unresponsive during
 *				shutdown or removal. In this case we forcibly
 *				kill all inflight I/O as they have no chance
 *				to complete
 */
enum nvme_ctrl_state {
	NVME_CTRL_NEW,
	NVME_CTRL_LIVE,
	NVME_CTRL_RESETTING,
	NVME_CTRL_CONNECTING,
	NVME_CTRL_DELETING,
	NVME_CTRL_DELETING_NOIO,
	NVME_CTRL_DEAD,
};
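
/*
 * State transitions are requested through nvme_change_ctrl_state()
 * (declared below), which validates the transition under ctrl->lock and
 * wakes ctrl->state_wq. A typical reset flow, sketched:
 *
 *	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
 *		return -EBUSY;
 *	... tear down and re-establish queues ...
 *	nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE);
 */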

struct nvme_fault_inject {
#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
	struct fault_attr attr;
	struct dentry *parent;
	bool dont_retry;	/* DNR, do not retry */
	u16 status;		/* status code */
#endif
};

struct nvme_ctrl {
	bool comp_seen;
	enum nvme_ctrl_state state;
	bool identified;
	spinlock_t lock;
	struct mutex scan_lock;
	const struct nvme_ctrl_ops *ops;
	struct request_queue *admin_q;
	struct request_queue *connect_q;
	struct request_queue *fabrics_q;
	struct device *dev;
	int instance;
	int numa_node;
	struct blk_mq_tag_set *tagset;
	struct blk_mq_tag_set *admin_tagset;
	struct list_head namespaces;
	struct rw_semaphore namespaces_rwsem;
	struct device ctrl_device;
	struct device *device;	/* char device */
#ifdef CONFIG_NVME_HWMON
	struct device *hwmon_device;
#endif
	struct cdev cdev;
	struct work_struct reset_work;
	struct work_struct delete_work;
	wait_queue_head_t state_wq;

	struct nvme_subsystem *subsys;
	struct list_head subsys_entry;

	struct opal_dev *opal_dev;

	char name[12];
	u16 cntlid;

	u32 ctrl_config;
	u16 mtfa;
	u32 queue_count;

	u64 cap;
	u32 max_hw_sectors;
	u32 max_segments;
	u32 max_integrity_segments;
	u32 max_discard_sectors;
	u32 max_discard_segments;
	u32 max_zeroes_sectors;
#ifdef CONFIG_BLK_DEV_ZONED
	u32 max_zone_append;
#endif
	u16 crdt[3];
	u16 oncs;
	u16 oacs;
	u16 nssa;
	u16 nr_streams;
	u16 sqsize;
	u32 max_namespaces;
	atomic_t abort_limit;
	u8 vwc;
	u32 vs;
	u32 sgls;
	u16 kas;
	u8 npss;
	u8 apsta;
	u16 wctemp;
	u16 cctemp;
	u32 oaes;
	u32 aen_result;
	u32 ctratt;
	unsigned int shutdown_timeout;
	unsigned int kato;
	bool subsystem;
	unsigned long quirks;
	struct nvme_id_power_state psd[32];
	struct nvme_effects_log *effects;
	struct xarray cels;
	struct work_struct scan_work;
	struct work_struct async_event_work;
	struct delayed_work ka_work;
	struct delayed_work failfast_work;
	struct nvme_command ka_cmd;
	struct work_struct fw_act_work;
	unsigned long events;

#ifdef CONFIG_NVME_MULTIPATH
	/* asymmetric namespace access: */
	u8 anacap;
	u8 anatt;
	u32 anagrpmax;
	u32 nanagrpid;
	struct mutex ana_lock;
	struct nvme_ana_rsp_hdr *ana_log_buf;
	size_t ana_log_size;
	struct timer_list anatt_timer;
	struct work_struct ana_work;
#endif

	/* Power saving configuration */
	u64 ps_max_latency_us;
	bool apst_enabled;

	/* PCIe only: */
	u32 hmpre;
	u32 hmmin;
	u32 hmminds;
	u16 hmmaxd;

	/* Fabrics only */
	u32 ioccsz;
	u32 iorcsz;
	u16 icdoff;
	u16 maxcmd;
	int nr_reconnects;
	unsigned long flags;
#define NVME_CTRL_FAILFAST_EXPIRED	0
#define NVME_CTRL_ADMIN_Q_STOPPED	1
	struct nvmf_ctrl_options *opts;

	struct page *discard_page;
	unsigned long discard_page_busy;

	struct nvme_fault_inject fault_inject;
};

enum nvme_iopolicy {
	NVME_IOPOLICY_NUMA,
	NVME_IOPOLICY_RR,
};

struct nvme_subsystem {
	int			instance;
	struct device		dev;
	/*
	 * Because we unregister the device on the last put we need
	 * a separate refcount.
	 */
	struct kref		ref;
	struct list_head	entry;
	struct mutex		lock;
	struct list_head	ctrls;
	struct list_head	nsheads;
	char			subnqn[NVMF_NQN_SIZE];
	char			serial[20];
	char			model[40];
	char			firmware_rev[8];
	u8			cmic;
	u16			vendor_id;
	u16			awupf;	/* 0's based awupf value. */
	struct ida		ns_ida;
#ifdef CONFIG_NVME_MULTIPATH
	enum nvme_iopolicy	iopolicy;
#endif
};

/*
 * Container structure for unique namespace identifiers.
 */
struct nvme_ns_ids {
	u8	eui64[8];
	u8	nguid[16];
	uuid_t	uuid;
	u8	csi;
};

/*
 * Anchor structure for namespaces. There is one for each namespace in an
 * NVMe subsystem that any of our controllers can see, and the namespace
 * structure for each controller is chained off of it. For private
 * namespaces there is a 1:1 relation to our namespace structures, that is
 * ->list only ever has a single entry for private namespaces.
 */
struct nvme_ns_head {
	struct list_head	list;
	struct srcu_struct	srcu;
	struct nvme_subsystem	*subsys;
	unsigned		ns_id;
	struct nvme_ns_ids	ids;
	struct list_head	entry;
	struct kref		ref;
	bool			shared;
	int			instance;
	struct nvme_effects_log *effects;

	struct cdev		cdev;
	struct device		cdev_device;

	struct gendisk		*disk;
#ifdef CONFIG_NVME_MULTIPATH
	struct bio_list		requeue_list;
	spinlock_t		requeue_lock;
	struct work_struct	requeue_work;
	struct mutex		lock;
	unsigned long		flags;
#define NVME_NSHEAD_DISK_LIVE	0
	struct nvme_ns __rcu	*current_path[];
#endif
};

static inline bool nvme_ns_head_multipath(struct nvme_ns_head *head)
{
	return IS_ENABLED(CONFIG_NVME_MULTIPATH) && head->disk;
}

enum nvme_ns_features {
	NVME_NS_EXT_LBAS = 1 << 0, /* support extended LBA format */
	NVME_NS_METADATA_SUPPORTED = 1 << 1, /* support getting generated md */
};

struct nvme_ns {
	struct list_head list;

	struct nvme_ctrl *ctrl;
	struct request_queue *queue;
	struct gendisk *disk;
#ifdef CONFIG_NVME_MULTIPATH
	enum nvme_ana_state ana_state;
	u32 ana_grpid;
#endif
	struct list_head siblings;
	struct kref kref;
	struct nvme_ns_head *head;

	int lba_shift;
	u16 ms;
	u16 sgs;
	u32 sws;
	u8 pi_type;
#ifdef CONFIG_BLK_DEV_ZONED
	u64 zsze;
#endif
	unsigned long features;
	unsigned long flags;
#define NVME_NS_REMOVING	0
#define NVME_NS_DEAD		1
#define NVME_NS_ANA_PENDING	2
#define NVME_NS_FORCE_RO	3
#define NVME_NS_READY		4
#define NVME_NS_STOPPED		5

	struct cdev		cdev;
	struct device		cdev_device;

	struct nvme_fault_inject fault_inject;
};

/* NVMe ns supports metadata actions by the controller (generate/strip) */
static inline bool nvme_ns_has_pi(struct nvme_ns *ns)
{
	return ns->pi_type && ns->ms == sizeof(struct t10_pi_tuple);
}
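
/*
 * Worked example for nvme_ns_has_pi(): a namespace formatted with
 * Protection Type 1 and 8 bytes of metadata per block (one struct
 * t10_pi_tuple) qualifies:
 *
 *	ns->pi_type == NVME_NS_DPS_PI_TYPE1 && ns->ms == 8	-> true
 */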

struct nvme_ctrl_ops {
	const char *name;
	struct module *module;
	unsigned int flags;
#define NVME_F_FABRICS			(1 << 0)
#define NVME_F_METADATA_SUPPORTED	(1 << 1)
#define NVME_F_PCI_P2PDMA		(1 << 2)
	int (*reg_read32)(struct nvme_ctrl *ctrl, u32 off, u32 *val);
	int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val);
	int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);
	void (*free_ctrl)(struct nvme_ctrl *ctrl);
	void (*submit_async_event)(struct nvme_ctrl *ctrl);
	void (*delete_ctrl)(struct nvme_ctrl *ctrl);
	int (*get_address)(struct nvme_ctrl *ctrl, char *buf, int size);
};

/*
 * nvme command_id is constructed as such:
 * | xxxx | xxxxxxxxxxxx |
 *   gen    request tag
 */
#define nvme_genctr_mask(gen)		(gen & 0xf)
#define nvme_cid_install_genctr(gen)	(nvme_genctr_mask(gen) << 12)
#define nvme_genctr_from_cid(cid)	((cid & 0xf000) >> 12)
#define nvme_tag_from_cid(cid)		(cid & 0xfff)
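
/*
 * Worked example for the macros above: blk-mq tag 0x123 with generation
 * counter 5 round-trips as:
 *
 *	nvme_cid_install_genctr(5) | 0x123	-> 0x5123
 *	nvme_genctr_from_cid(0x5123)		-> 0x5
 *	nvme_tag_from_cid(0x5123)		-> 0x123
 */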

static inline u16 nvme_cid(struct request *rq)
{
	return nvme_cid_install_genctr(nvme_req(rq)->genctr) | rq->tag;
}

static inline struct request *nvme_find_rq(struct blk_mq_tags *tags,
		u16 command_id)
{
	u8 genctr = nvme_genctr_from_cid(command_id);
	u16 tag = nvme_tag_from_cid(command_id);
	struct request *rq;

	rq = blk_mq_tag_to_rq(tags, tag);
	if (unlikely(!rq)) {
		pr_err("could not locate request for tag %#x\n", tag);
		return NULL;
	}
	if (unlikely(nvme_genctr_mask(nvme_req(rq)->genctr) != genctr)) {
		dev_err(nvme_req(rq)->ctrl->device,
			"request %#x genctr mismatch (got %#x expected %#x)\n",
			tag, genctr, nvme_genctr_mask(nvme_req(rq)->genctr));
		return NULL;
	}
	return rq;
}

static inline struct request *nvme_cid_to_rq(struct blk_mq_tags *tags,
		u16 command_id)
{
	return blk_mq_tag_to_rq(tags, nvme_tag_from_cid(command_id));
}

#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
void nvme_fault_inject_init(struct nvme_fault_inject *fault_inj,
			    const char *dev_name);
void nvme_fault_inject_fini(struct nvme_fault_inject *fault_inject);
void nvme_should_fail(struct request *req);
#else
static inline void nvme_fault_inject_init(struct nvme_fault_inject *fault_inj,
					  const char *dev_name)
{
}
static inline void nvme_fault_inject_fini(struct nvme_fault_inject *fault_inj)
{
}
static inline void nvme_should_fail(struct request *req) {}
#endif

static inline int nvme_reset_subsystem(struct nvme_ctrl *ctrl)
{
	if (!ctrl->subsystem)
		return -ENOTTY;
	/* 0x4E564D65 is the ASCII string "NVMe", required to trigger NSSR */
	return ctrl->ops->reg_write32(ctrl, NVME_REG_NSSR, 0x4E564D65);
}

/*
 * Convert a 512B sector number to a device logical block number.
 */
static inline u64 nvme_sect_to_lba(struct nvme_ns *ns, sector_t sector)
{
	return sector >> (ns->lba_shift - SECTOR_SHIFT);
}

/*
 * Convert a device logical block number to a 512B sector number.
 */
static inline sector_t nvme_lba_to_sect(struct nvme_ns *ns, u64 lba)
{
	return lba << (ns->lba_shift - SECTOR_SHIFT);
}
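
/*
 * Worked example for the two converters above, for a namespace with 4K
 * logical blocks (ns->lba_shift == 12, SECTOR_SHIFT == 9):
 *
 *	nvme_sect_to_lba(ns, 24)	-> 24 >> 3 == 3
 *	nvme_lba_to_sect(ns, 3)		-> 3 << 3  == 24
 */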

/*
 * Convert byte length to nvme's 0-based num dwords
 */
static inline u32 nvme_bytes_to_numd(size_t len)
{
	return (len >> 2) - 1;
}
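
/*
 * Worked example: a 4096-byte buffer is 1024 dwords, which NVMe encodes
 * as the 0-based value 1023:
 *
 *	nvme_bytes_to_numd(4096)	-> (4096 >> 2) - 1 == 1023
 */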

static inline bool nvme_is_ana_error(u16 status)
{
	switch (status & 0x7ff) {
	case NVME_SC_ANA_TRANSITION:
	case NVME_SC_ANA_INACCESSIBLE:
	case NVME_SC_ANA_PERSISTENT_LOSS:
		return true;
	default:
		return false;
	}
}

static inline bool nvme_is_path_error(u16 status)
{
	/* check for a status code type of 'path related status' */
	return (status & 0x700) == 0x300;
}
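
/*
 * Both helpers above key off the Status Code Type field in bits 10:8 of
 * the NVMe status word. Worked examples:
 *
 *	NVME_SC_ANA_INACCESSIBLE (0x302): path error and ANA error
 *	NVME_SC_HOST_PATH_ERROR  (0x370): path error, but not an ANA error
 */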

/*
 * Fill in the status and result information from the CQE, and then figure out
 * if blk-mq will need to use IPI magic to complete the request, and if yes do
 * so. If not let the caller complete the request without an indirect function
 * call.
 */
static inline bool nvme_try_complete_req(struct request *req, __le16 status,
		union nvme_result result)
{
	struct nvme_request *rq = nvme_req(req);

	rq->status = le16_to_cpu(status) >> 1;
	rq->result = result;
	/* inject error when permitted by fault injection framework */
	nvme_should_fail(req);
	if (unlikely(blk_should_fake_timeout(req->q)))
		return true;
	return blk_mq_complete_request_remote(req);
}

static inline void nvme_get_ctrl(struct nvme_ctrl *ctrl)
{
	get_device(ctrl->device);
}

static inline void nvme_put_ctrl(struct nvme_ctrl *ctrl)
{
	put_device(ctrl->device);
}

static inline bool nvme_is_aen_req(u16 qid, __u16 command_id)
{
	return !qid &&
		nvme_tag_from_cid(command_id) >= NVME_AQ_BLK_MQ_DEPTH;
}

void nvme_complete_rq(struct request *req);
void nvme_complete_batch_req(struct request *req);

static __always_inline void nvme_complete_batch(struct io_comp_batch *iob,
						void (*fn)(struct request *rq))
{
	struct request *req;

	rq_list_for_each(&iob->req_list, req) {
		fn(req);
		nvme_complete_batch_req(req);
	}
	blk_mq_end_request_batch(iob);
}
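
/*
 * Illustrative use of nvme_complete_batch() from a transport completion
 * path (example_unmap_rq here is hypothetical, not declared anywhere):
 *
 *	static void example_unmap_rq(struct request *req)
 *	{
 *		// DMA-unmap and release per-request resources
 *	}
 *
 *	nvme_complete_batch(iob, example_unmap_rq);
 */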

blk_status_t nvme_host_path_error(struct request *req);
bool nvme_cancel_request(struct request *req, void *data, bool reserved);
void nvme_cancel_tagset(struct nvme_ctrl *ctrl);
void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl);
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
		enum nvme_ctrl_state new_state);
bool nvme_wait_reset(struct nvme_ctrl *ctrl);
int nvme_disable_ctrl(struct nvme_ctrl *ctrl);
int nvme_enable_ctrl(struct nvme_ctrl *ctrl);
int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
		const struct nvme_ctrl_ops *ops, unsigned long quirks);
void nvme_uninit_ctrl(struct nvme_ctrl *ctrl);
void nvme_start_ctrl(struct nvme_ctrl *ctrl);
void nvme_stop_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_ctrl_finish(struct nvme_ctrl *ctrl);

void nvme_remove_namespaces(struct nvme_ctrl *ctrl);

int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
		bool send);

void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
		volatile union nvme_result *res);

void nvme_stop_queues(struct nvme_ctrl *ctrl);
void nvme_start_queues(struct nvme_ctrl *ctrl);
void nvme_stop_admin_queue(struct nvme_ctrl *ctrl);
void nvme_start_admin_queue(struct nvme_ctrl *ctrl);
void nvme_kill_queues(struct nvme_ctrl *ctrl);
void nvme_sync_queues(struct nvme_ctrl *ctrl);
void nvme_sync_io_queues(struct nvme_ctrl *ctrl);
void nvme_unfreeze(struct nvme_ctrl *ctrl);
void nvme_wait_freeze(struct nvme_ctrl *ctrl);
int nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout);
void nvme_start_freeze(struct nvme_ctrl *ctrl);

#define NVME_QID_ANY -1
struct request *nvme_alloc_request(struct request_queue *q,
		struct nvme_command *cmd, blk_mq_req_flags_t flags);
void nvme_cleanup_cmd(struct request *req);
blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req);
blk_status_t nvme_fail_nonready_command(struct nvme_ctrl *ctrl,
		struct request *req);
bool __nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
		bool queue_live);

static inline bool nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
		bool queue_live)
{
	if (likely(ctrl->state == NVME_CTRL_LIVE))
		return true;
	if (ctrl->ops->flags & NVME_F_FABRICS &&
	    ctrl->state == NVME_CTRL_DELETING)
		return true;
	return __nvme_check_ready(ctrl, rq, queue_live);
}
int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		void *buf, unsigned bufflen);
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		union nvme_result *result, void *buffer, unsigned bufflen,
		unsigned timeout, int qid, int at_head,
		blk_mq_req_flags_t flags);
int nvme_set_features(struct nvme_ctrl *dev, unsigned int fid,
		      unsigned int dword11, void *buffer, size_t buflen,
		      u32 *result);
int nvme_get_features(struct nvme_ctrl *dev, unsigned int fid,
		      unsigned int dword11, void *buffer, size_t buflen,
		      u32 *result);
int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
void nvme_stop_keep_alive(struct nvme_ctrl *ctrl);
int nvme_reset_ctrl(struct nvme_ctrl *ctrl);
int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl);
int nvme_try_sched_reset(struct nvme_ctrl *ctrl);
int nvme_delete_ctrl(struct nvme_ctrl *ctrl);
void nvme_queue_scan(struct nvme_ctrl *ctrl);
int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi,
		void *log, size_t size, u64 offset);
bool nvme_tryget_ns_head(struct nvme_ns_head *head);
void nvme_put_ns_head(struct nvme_ns_head *head);
int nvme_cdev_add(struct cdev *cdev, struct device *cdev_device,
		const struct file_operations *fops, struct module *owner);
void nvme_cdev_del(struct cdev *cdev, struct device *cdev_device);
int nvme_ioctl(struct block_device *bdev, fmode_t mode,
		unsigned int cmd, unsigned long arg);
long nvme_ns_chr_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
int nvme_ns_head_ioctl(struct block_device *bdev, fmode_t mode,
		unsigned int cmd, unsigned long arg);
long nvme_ns_head_chr_ioctl(struct file *file, unsigned int cmd,
		unsigned long arg);
long nvme_dev_ioctl(struct file *file, unsigned int cmd,
		unsigned long arg);
int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo);

extern const struct attribute_group *nvme_ns_id_attr_groups[];
extern const struct pr_ops nvme_pr_ops;
extern const struct block_device_operations nvme_ns_head_ops;

struct nvme_ns *nvme_find_path(struct nvme_ns_head *head);
#ifdef CONFIG_NVME_MULTIPATH
static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
{
	return ctrl->ana_log_buf != NULL;
}

void nvme_mpath_unfreeze(struct nvme_subsystem *subsys);
void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys);
void nvme_mpath_start_freeze(struct nvme_subsystem *subsys);
bool nvme_mpath_set_disk_name(struct nvme_ns *ns, char *disk_name, int *flags);
void nvme_failover_req(struct request *req);
void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl);
int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head);
void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id);
void nvme_mpath_remove_disk(struct nvme_ns_head *head);
int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id);
void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl);
void nvme_mpath_uninit(struct nvme_ctrl *ctrl);
void nvme_mpath_stop(struct nvme_ctrl *ctrl);
bool nvme_mpath_clear_current_path(struct nvme_ns *ns);
void nvme_mpath_revalidate_paths(struct nvme_ns *ns);
void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl);
void nvme_mpath_shutdown_disk(struct nvme_ns_head *head);

static inline void nvme_trace_bio_complete(struct request *req)
{
	struct nvme_ns *ns = req->q->queuedata;

	if (req->cmd_flags & REQ_NVME_MPATH)
		trace_block_bio_complete(ns->head->disk->queue, req->bio);
}

extern struct device_attribute dev_attr_ana_grpid;
extern struct device_attribute dev_attr_ana_state;
extern struct device_attribute subsys_attr_iopolicy;

#else
static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
{
	return false;
}
static inline bool nvme_mpath_set_disk_name(struct nvme_ns *ns, char *disk_name,
		int *flags)
{
	return false;
}
static inline void nvme_failover_req(struct request *req)
{
}
static inline void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
{
}
static inline int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,
		struct nvme_ns_head *head)
{
	return 0;
}
static inline void nvme_mpath_add_disk(struct nvme_ns *ns,
		struct nvme_id_ns *id)
{
}
static inline void nvme_mpath_remove_disk(struct nvme_ns_head *head)
{
}
static inline bool nvme_mpath_clear_current_path(struct nvme_ns *ns)
{
	return false;
}
static inline void nvme_mpath_revalidate_paths(struct nvme_ns *ns)
{
}
static inline void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl)
{
}
static inline void nvme_mpath_shutdown_disk(struct nvme_ns_head *head)
{
}
static inline void nvme_trace_bio_complete(struct request *req)
{
}
static inline void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl)
{
}
static inline int nvme_mpath_init_identify(struct nvme_ctrl *ctrl,
		struct nvme_id_ctrl *id)
{
	if (ctrl->subsys->cmic & NVME_CTRL_CMIC_ANA)
		dev_warn(ctrl->device,
"Please enable CONFIG_NVME_MULTIPATH for full support of multi-port devices.\n");
	return 0;
}
static inline void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
{
}
static inline void nvme_mpath_stop(struct nvme_ctrl *ctrl)
{
}
static inline void nvme_mpath_unfreeze(struct nvme_subsystem *subsys)
{
}
static inline void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys)
{
}
static inline void nvme_mpath_start_freeze(struct nvme_subsystem *subsys)
{
}
#endif /* CONFIG_NVME_MULTIPATH */

int nvme_revalidate_zones(struct nvme_ns *ns);
int nvme_ns_report_zones(struct nvme_ns *ns, sector_t sector,
		unsigned int nr_zones, report_zones_cb cb, void *data);
#ifdef CONFIG_BLK_DEV_ZONED
int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf);
blk_status_t nvme_setup_zone_mgmt_send(struct nvme_ns *ns, struct request *req,
				       struct nvme_command *cmnd,
				       enum nvme_zone_mgmt_action action);
#else
static inline blk_status_t nvme_setup_zone_mgmt_send(struct nvme_ns *ns,
		struct request *req, struct nvme_command *cmnd,
		enum nvme_zone_mgmt_action action)
{
	return BLK_STS_NOTSUPP;
}

static inline int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf)
{
	dev_warn(ns->ctrl->device,
		 "Please enable CONFIG_BLK_DEV_ZONED to support ZNS devices\n");
	return -EPROTONOSUPPORT;
}
#endif

static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev)
{
	return dev_to_disk(dev)->private_data;
}

#ifdef CONFIG_NVME_HWMON
int nvme_hwmon_init(struct nvme_ctrl *ctrl);
void nvme_hwmon_exit(struct nvme_ctrl *ctrl);
#else
static inline int nvme_hwmon_init(struct nvme_ctrl *ctrl)
{
	return 0;
}

static inline void nvme_hwmon_exit(struct nvme_ctrl *ctrl)
{
}
#endif

/* bits 1:0 of the Identify Controller SGLS field advertise SGL support */
static inline bool nvme_ctrl_sgl_supported(struct nvme_ctrl *ctrl)
{
	return ctrl->sgls & ((1 << 0) | (1 << 1));
}

u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
			 u8 opcode);
int nvme_execute_passthru_rq(struct request *rq);
struct nvme_ctrl *nvme_ctrl_from_file(struct file *file);
struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid);
void nvme_put_ns(struct nvme_ns *ns);

static inline bool nvme_multi_css(struct nvme_ctrl *ctrl)
{
	return (ctrl->ctrl_config & NVME_CC_CSS_MASK) == NVME_CC_CSS_CSI;
}

#endif /* _NVME_H */