/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */

#ifndef _NVMET_H
#define _NVMET_H

#include <linux/dma-mapping.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/kref.h>
#include <linux/percpu-refcount.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/uuid.h>
#include <linux/nvme.h>
#include <linux/configfs.h>
#include <linux/rcupdate.h>
#include <linux/blkdev.h>
#include <linux/radix-tree.h>
#include <linux/t10-pi.h>

#define NVMET_DEFAULT_VS		NVME_VS(1, 3, 0)

#define NVMET_ASYNC_EVENTS		4
#define NVMET_ERROR_LOG_SLOTS		128
#define NVMET_NO_ERROR_LOC		((u16)-1)
#define NVMET_DEFAULT_CTRL_MODEL	"Linux"

/*
 * Supported optional AENs:
 */
#define NVMET_AEN_CFG_OPTIONAL \
	(NVME_AEN_CFG_NS_ATTR | NVME_AEN_CFG_ANA_CHANGE)
#define NVMET_DISC_AEN_CFG_OPTIONAL \
	(NVME_AEN_CFG_DISC_CHANGE)

/*
 * Plus mandatory SMART AENs (we'll never send them, but allow enabling them):
 */
#define NVMET_AEN_CFG_ALL \
	(NVME_SMART_CRIT_SPARE | NVME_SMART_CRIT_TEMPERATURE | \
	 NVME_SMART_CRIT_RELIABILITY | NVME_SMART_CRIT_MEDIA | \
	 NVME_SMART_CRIT_VOLATILE_MEMORY | NVMET_AEN_CFG_OPTIONAL)

/*
 * Helper macros for when the NVMe error is NVME_SC_CONNECT_INVALID_PARAM.
 * The 16-bit shift sets the IATTR bit to 1, which means the offending
 * offset starts in the data portion of the Connect command.
 */
#define IPO_IATTR_CONNECT_DATA(x)	\
	(cpu_to_le32((1 << 16) | (offsetof(struct nvmf_connect_data, x))))
#define IPO_IATTR_CONNECT_SQE(x)	\
	(cpu_to_le32(offsetof(struct nvmf_connect_command, x)))
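
/*
 * Illustrative sketch, not part of this header: a Connect handler that
 * rejects an invalid field in the connect data would typically return
 * NVME_SC_CONNECT_INVALID_PARAM and report the offending offset through
 * the completion result dword, e.g. (the field name is chosen only for
 * illustration):
 *
 *	req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
 *	nvmet_req_complete(req, NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR);
 */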

struct nvmet_ns {
	struct percpu_ref	ref;
	struct block_device	*bdev;
	struct file		*file;
	bool			readonly;
	u32			nsid;
	u32			blksize_shift;
	loff_t			size;
	u8			nguid[16];
	uuid_t			uuid;
	u32			anagrpid;

	bool			buffered_io;
	bool			enabled;
	struct nvmet_subsys	*subsys;
	const char		*device_path;

	struct config_group	device_group;
	struct config_group	group;

	struct completion	disable_done;
	mempool_t		*bvec_pool;
	struct kmem_cache	*bvec_cache;

	int			use_p2pmem;
	struct pci_dev		*p2p_dev;
	int			pi_type;
	int			metadata_size;
};

static inline struct nvmet_ns *to_nvmet_ns(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_ns, group);
}

static inline struct device *nvmet_ns_dev(struct nvmet_ns *ns)
{
	return ns->bdev ? disk_to_dev(ns->bdev->bd_disk) : NULL;
}

struct nvmet_cq {
	u16			qid;
	u16			size;
};

struct nvmet_sq {
	struct nvmet_ctrl	*ctrl;
	struct percpu_ref	ref;
	u16			qid;
	u16			size;
	u32			sqhd;
	bool			sqhd_disabled;
	struct completion	free_done;
	struct completion	confirm_done;
};

struct nvmet_ana_group {
	struct config_group	group;
	struct nvmet_port	*port;
	u32			grpid;
};

static inline struct nvmet_ana_group *to_ana_group(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_ana_group,
			group);
}

/**
 * struct nvmet_port -	Common structure to keep port
 *				information for the target.
 * @entry:		Entry into referrals or transport list.
 * @disc_addr:		Address information is stored in a format defined
 *				for a discovery log page entry.
 * @group:		ConfigFS group for this element's folder.
 * @priv:		Private data for the transport.
 */
struct nvmet_port {
	struct list_head		entry;
	struct nvmf_disc_rsp_page_entry disc_addr;
	struct config_group		group;
	struct config_group		subsys_group;
	struct list_head		subsystems;
	struct config_group		referrals_group;
	struct list_head		referrals;
	struct list_head		global_entry;
	struct config_group		ana_groups_group;
	struct nvmet_ana_group		ana_default_group;
	enum nvme_ana_state		*ana_state;
	void				*priv;
	bool				enabled;
	int				inline_data_size;
	const struct nvmet_fabrics_ops	*tr_ops;
	bool				pi_enable;
};

static inline struct nvmet_port *to_nvmet_port(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_port,
			group);
}

static inline struct nvmet_port *ana_groups_to_port(
		struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_port,
			ana_groups_group);
}

struct nvmet_ctrl {
	struct nvmet_subsys	*subsys;
	struct nvmet_sq		**sqs;

	bool			cmd_seen;

	struct mutex		lock;
	u64			cap;
	u32			cc;
	u32			csts;

	uuid_t			hostid;
	u16			cntlid;
	u32			kato;

	struct nvmet_port	*port;

	u32			aen_enabled;
	unsigned long		aen_masked;
	struct nvmet_req	*async_event_cmds[NVMET_ASYNC_EVENTS];
	unsigned int		nr_async_event_cmds;
	struct list_head	async_events;
	struct work_struct	async_event_work;

	struct list_head	subsys_entry;
	struct kref		ref;
	struct delayed_work	ka_work;
	struct work_struct	fatal_err_work;

	const struct nvmet_fabrics_ops *ops;

	__le32			*changed_ns_list;
	u32			nr_changed_ns;

	char			subsysnqn[NVMF_NQN_FIELD_LEN];
	char			hostnqn[NVMF_NQN_FIELD_LEN];

	struct device		*p2p_client;
	struct radix_tree_root	p2p_ns_map;

	spinlock_t		error_lock;
	u64			err_counter;
	struct nvme_error_slot	slots[NVMET_ERROR_LOG_SLOTS];
	bool			pi_support;
};

struct nvmet_subsys_model {
	struct rcu_head		rcuhead;
	char			number[];
};

struct nvmet_subsys {
	enum nvme_subsys_type	type;

	struct mutex		lock;
	struct kref		ref;

	struct xarray		namespaces;
	unsigned int		nr_namespaces;
	unsigned int		max_nsid;
	u16			cntlid_min;
	u16			cntlid_max;

	struct list_head	ctrls;

	struct list_head	hosts;
	bool			allow_any_host;

	u16			max_qid;

	u64			ver;
	u64			serial;
	char			*subsysnqn;
	bool			pi_support;

	struct config_group	group;

	struct config_group	namespaces_group;
	struct config_group	allowed_hosts_group;

	struct nvmet_subsys_model	__rcu *model;

#ifdef CONFIG_NVME_TARGET_PASSTHRU
	struct nvme_ctrl	*passthru_ctrl;
	char			*passthru_ctrl_path;
	struct config_group	passthru_group;
	unsigned int		admin_timeout;
	unsigned int		io_timeout;
#endif /* CONFIG_NVME_TARGET_PASSTHRU */
};

static inline struct nvmet_subsys *to_subsys(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_subsys, group);
}

static inline struct nvmet_subsys *namespaces_to_subsys(
		struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_subsys,
			namespaces_group);
}

struct nvmet_host {
	struct config_group	group;
};

static inline struct nvmet_host *to_host(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_host, group);
}

static inline char *nvmet_host_name(struct nvmet_host *host)
{
	return config_item_name(&host->group.cg_item);
}

struct nvmet_host_link {
	struct list_head	entry;
	struct nvmet_host	*host;
};

struct nvmet_subsys_link {
	struct list_head	entry;
	struct nvmet_subsys	*subsys;
};

struct nvmet_req;
struct nvmet_fabrics_ops {
	struct module *owner;
	unsigned int type;
	unsigned int msdbd;
	unsigned int flags;
#define NVMF_KEYED_SGLS			(1 << 0)
#define NVMF_METADATA_SUPPORTED		(1 << 1)
	void (*queue_response)(struct nvmet_req *req);
	int (*add_port)(struct nvmet_port *port);
	void (*remove_port)(struct nvmet_port *port);
	void (*delete_ctrl)(struct nvmet_ctrl *ctrl);
	void (*disc_traddr)(struct nvmet_req *req,
			struct nvmet_port *port, char *traddr);
	u16 (*install_queue)(struct nvmet_sq *nvme_sq);
	void (*discovery_chg)(struct nvmet_port *port);
	u8 (*get_mdts)(const struct nvmet_ctrl *ctrl);
};

#define NVMET_MAX_INLINE_BIOVEC	8
#define NVMET_MAX_INLINE_DATA_LEN	(NVMET_MAX_INLINE_BIOVEC * PAGE_SIZE)

struct nvmet_req {
	struct nvme_command	*cmd;
	struct nvme_completion	*cqe;
	struct nvmet_sq		*sq;
	struct nvmet_cq		*cq;
	struct nvmet_ns		*ns;
	struct scatterlist	*sg;
	struct scatterlist	*metadata_sg;
	struct bio_vec		inline_bvec[NVMET_MAX_INLINE_BIOVEC];
	union {
		struct {
			struct bio	inline_bio;
		} b;
		struct {
			bool		mpool_alloc;
			struct kiocb	iocb;
			struct bio_vec	*bvec;
			struct work_struct work;
		} f;
		struct {
			struct bio	inline_bio;
			struct request	*rq;
			struct work_struct work;
			bool		use_workqueue;
		} p;
	};
	int			sg_cnt;
	int			metadata_sg_cnt;
	/* data length as parsed from the SGL descriptor: */
	size_t			transfer_len;
	size_t			metadata_len;

	struct nvmet_port	*port;

	void (*execute)(struct nvmet_req *req);
	const struct nvmet_fabrics_ops *ops;

	struct pci_dev		*p2p_dev;
	struct device		*p2p_client;
	u16			error_loc;
	u64			error_slba;
};

extern struct workqueue_struct *buffered_io_wq;

static inline void nvmet_set_result(struct nvmet_req *req, u32 result)
{
	req->cqe->result.u32 = cpu_to_le32(result);
}

/*
 * NVMe command writes are actually DMA reads for us on the target side.
 */
static inline enum dma_data_direction
nvmet_data_dir(struct nvmet_req *req)
{
	return nvme_is_write(req->cmd) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
}
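
/*
 * Illustrative sketch, not part of this header: a DMA-capable transport
 * would map the request's scatterlist in the direction returned above,
 * e.g.:
 *
 *	count = dma_map_sg(dev, req->sg, req->sg_cnt, nvmet_data_dir(req));
 *	if (!count)
 *		return NVME_SC_INTERNAL;
 */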

struct nvmet_async_event {
	struct list_head	entry;
	u8			event_type;
	u8			event_info;
	u8			log_page;
};

static inline void nvmet_clear_aen_bit(struct nvmet_req *req, u32 bn)
{
	int rae = le32_to_cpu(req->cmd->common.cdw10) & (1 << 15);

	if (!rae)
		clear_bit(bn, &req->sq->ctrl->aen_masked);
}

static inline bool nvmet_aen_bit_disabled(struct nvmet_ctrl *ctrl, u32 bn)
{
	if (!(READ_ONCE(ctrl->aen_enabled) & (1 << bn)))
		return true;
	return test_and_set_bit(bn, &ctrl->aen_masked);
}
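
/*
 * Illustrative sketch, not part of this header: code that raises an AEN
 * first checks that the event is enabled and not already outstanding.
 * The event arguments below are placeholders:
 *
 *	if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_NS_ATTR))
 *		return;
 *	nvmet_add_async_event(ctrl, event_type, event_info, log_page);
 */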

void nvmet_get_feat_kato(struct nvmet_req *req);
void nvmet_get_feat_async_event(struct nvmet_req *req);
u16 nvmet_set_feat_kato(struct nvmet_req *req);
u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask);
void nvmet_execute_async_event(struct nvmet_req *req);
void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl);
void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl);

u16 nvmet_parse_connect_cmd(struct nvmet_req *req);
void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id);
u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req);
u16 nvmet_file_parse_io_cmd(struct nvmet_req *req);
u16 nvmet_parse_admin_cmd(struct nvmet_req *req);
u16 nvmet_parse_discovery_cmd(struct nvmet_req *req);
u16 nvmet_parse_fabrics_cmd(struct nvmet_req *req);

bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
		struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops);
void nvmet_req_uninit(struct nvmet_req *req);
bool nvmet_check_transfer_len(struct nvmet_req *req, size_t len);
bool nvmet_check_data_len_lte(struct nvmet_req *req, size_t data_len);
void nvmet_req_complete(struct nvmet_req *req, u16 status);
int nvmet_req_alloc_sgls(struct nvmet_req *req);
void nvmet_req_free_sgls(struct nvmet_req *req);

void nvmet_execute_set_features(struct nvmet_req *req);
void nvmet_execute_get_features(struct nvmet_req *req);
void nvmet_execute_keep_alive(struct nvmet_req *req);

void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, u16 qid,
		u16 size);
void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq, u16 qid,
		u16 size);
void nvmet_sq_destroy(struct nvmet_sq *sq);
int nvmet_sq_init(struct nvmet_sq *sq);

void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl);

void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new);
u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
		struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp);
u16 nvmet_ctrl_find_get(const char *subsysnqn, const char *hostnqn, u16 cntlid,
		struct nvmet_req *req, struct nvmet_ctrl **ret);
void nvmet_ctrl_put(struct nvmet_ctrl *ctrl);
u16 nvmet_check_ctrl_status(struct nvmet_req *req, struct nvme_command *cmd);

struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
		enum nvme_subsys_type type);
void nvmet_subsys_put(struct nvmet_subsys *subsys);
void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys);

struct nvmet_ns *nvmet_find_namespace(struct nvmet_ctrl *ctrl, __le32 nsid);
void nvmet_put_namespace(struct nvmet_ns *ns);
int nvmet_ns_enable(struct nvmet_ns *ns);
void nvmet_ns_disable(struct nvmet_ns *ns);
struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid);
void nvmet_ns_free(struct nvmet_ns *ns);

void nvmet_send_ana_event(struct nvmet_subsys *subsys,
		struct nvmet_port *port);
void nvmet_port_send_ana_event(struct nvmet_port *port);

int nvmet_register_transport(const struct nvmet_fabrics_ops *ops);
void nvmet_unregister_transport(const struct nvmet_fabrics_ops *ops);

void nvmet_port_del_ctrls(struct nvmet_port *port,
			  struct nvmet_subsys *subsys);

int nvmet_enable_port(struct nvmet_port *port);
void nvmet_disable_port(struct nvmet_port *port);

void nvmet_referral_enable(struct nvmet_port *parent, struct nvmet_port *port);
void nvmet_referral_disable(struct nvmet_port *parent, struct nvmet_port *port);

u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf,
		size_t len);
u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf,
		size_t len);
u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len);

u32 nvmet_get_log_page_len(struct nvme_command *cmd);
u64 nvmet_get_log_page_offset(struct nvme_command *cmd);

extern struct list_head *nvmet_ports;
void nvmet_port_disc_changed(struct nvmet_port *port,
		struct nvmet_subsys *subsys);
void nvmet_subsys_disc_changed(struct nvmet_subsys *subsys,
		struct nvmet_host *host);
void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
		u8 event_info, u8 log_page);

#define NVMET_QUEUE_SIZE		1024
#define NVMET_NR_QUEUES			128
#define NVMET_MAX_CMD			NVMET_QUEUE_SIZE

/*
 * Nice round number that makes a list of nsids fit into a page
 * (1024 nsids * sizeof(__le32) = 4096 bytes, i.e. one 4 KiB page).
 * Should become tunable at some point in the future.
 */
#define NVMET_MAX_NAMESPACES		1024

/*
 * 0 is not a valid ANA group ID, so we start numbering at 1.
 *
 * ANA Group 1 exists without manual intervention, has namespaces assigned to
 * it by default, and is available in an optimized state through all ports.
 */
#define NVMET_MAX_ANAGRPS		128
#define NVMET_DEFAULT_ANA_GRPID		1

#define NVMET_KAS			10
#define NVMET_DISC_KATO_MS		120000

int __init nvmet_init_configfs(void);
void __exit nvmet_exit_configfs(void);

int __init nvmet_init_discovery(void);
void nvmet_exit_discovery(void);

extern struct nvmet_subsys *nvmet_disc_subsys;
extern struct rw_semaphore nvmet_config_sem;

extern u32 nvmet_ana_group_enabled[NVMET_MAX_ANAGRPS + 1];
extern u64 nvmet_ana_chgcnt;
extern struct rw_semaphore nvmet_ana_sem;

bool nvmet_host_allowed(struct nvmet_subsys *subsys, const char *hostnqn);

int nvmet_bdev_ns_enable(struct nvmet_ns *ns);
int nvmet_file_ns_enable(struct nvmet_ns *ns);
void nvmet_bdev_ns_disable(struct nvmet_ns *ns);
void nvmet_file_ns_disable(struct nvmet_ns *ns);
u16 nvmet_bdev_flush(struct nvmet_req *req);
u16 nvmet_file_flush(struct nvmet_req *req);
void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid);
void nvmet_bdev_ns_revalidate(struct nvmet_ns *ns);
int nvmet_file_ns_revalidate(struct nvmet_ns *ns);
void nvmet_ns_revalidate(struct nvmet_ns *ns);

static inline u32 nvmet_rw_data_len(struct nvmet_req *req)
{
	return ((u32)le16_to_cpu(req->cmd->rw.length) + 1) <<
			req->ns->blksize_shift;
}
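
/*
 * Worked example: a Read/Write command with rw.length == 7 (a 0's based
 * count, i.e. 8 blocks) on a namespace with blksize_shift == 12 (4 KiB
 * blocks) transfers (7 + 1) << 12 = 32768 bytes.
 */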

static inline u32 nvmet_rw_metadata_len(struct nvmet_req *req)
{
	if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY))
		return 0;
	return ((u32)le16_to_cpu(req->cmd->rw.length) + 1) *
			req->ns->metadata_size;
}

static inline u32 nvmet_dsm_len(struct nvmet_req *req)
{
	return (le32_to_cpu(req->cmd->dsm.nr) + 1) *
		sizeof(struct nvme_dsm_range);
}
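
/*
 * Worked example: a Dataset Management command with dsm.nr == 3 (0's based)
 * describes 4 ranges, so the payload is 4 * sizeof(struct nvme_dsm_range).
 */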

#ifdef CONFIG_NVME_TARGET_PASSTHRU
void nvmet_passthru_subsys_free(struct nvmet_subsys *subsys);
int nvmet_passthru_ctrl_enable(struct nvmet_subsys *subsys);
void nvmet_passthru_ctrl_disable(struct nvmet_subsys *subsys);
u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req);
u16 nvmet_parse_passthru_io_cmd(struct nvmet_req *req);
static inline struct nvme_ctrl *nvmet_passthru_ctrl(struct nvmet_subsys *subsys)
{
	return subsys->passthru_ctrl;
}
#else /* CONFIG_NVME_TARGET_PASSTHRU */
static inline void nvmet_passthru_subsys_free(struct nvmet_subsys *subsys)
{
}
static inline void nvmet_passthru_ctrl_disable(struct nvmet_subsys *subsys)
{
}
static inline u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req)
{
	return 0;
}
static inline u16 nvmet_parse_passthru_io_cmd(struct nvmet_req *req)
{
	return 0;
}
static inline struct nvme_ctrl *nvmet_passthru_ctrl(struct nvmet_subsys *subsys)
{
	return NULL;
}
#endif /* CONFIG_NVME_TARGET_PASSTHRU */

static inline struct nvme_ctrl *
nvmet_req_passthru_ctrl(struct nvmet_req *req)
{
	return nvmet_passthru_ctrl(req->sq->ctrl->subsys);
}

u16 errno_to_nvme_status(struct nvmet_req *req, int errno);

/* Convert a 32-bit number to a 16-bit 0's based number */
static inline __le16 to0based(u32 a)
{
	return cpu_to_le16(max(1U, min(1U << 16, a)) - 1);
}
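
/*
 * Worked examples: to0based(0) and to0based(1) both yield 0; values above
 * 1 << 16 are clamped, so to0based(100000) yields cpu_to_le16(65535).
 */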

static inline bool nvmet_ns_has_pi(struct nvmet_ns *ns)
{
	if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY))
		return false;
	return ns->pi_type && ns->metadata_size == sizeof(struct t10_pi_tuple);
}

#endif /* _NVMET_H */