/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */

#ifndef _NVMET_H
#define _NVMET_H

#include <linux/dma-mapping.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/kref.h>
#include <linux/percpu-refcount.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/uuid.h>
#include <linux/nvme.h>
#include <linux/configfs.h>
#include <linux/rcupdate.h>
#include <linux/blkdev.h>
#include <linux/radix-tree.h>

#define NVMET_ASYNC_EVENTS		4
#define NVMET_ERROR_LOG_SLOTS		128
#define NVMET_NO_ERROR_LOC		((u16)-1)

/*
 * Supported optional AENs:
 */
#define NVMET_AEN_CFG_OPTIONAL \
	(NVME_AEN_CFG_NS_ATTR | NVME_AEN_CFG_ANA_CHANGE)
#define NVMET_DISC_AEN_CFG_OPTIONAL \
	(NVME_AEN_CFG_DISC_CHANGE)

/*
 * Plus mandatory SMART AENs (we'll never send them, but allow enabling them):
 */
#define NVMET_AEN_CFG_ALL \
	(NVME_SMART_CRIT_SPARE | NVME_SMART_CRIT_TEMPERATURE | \
	 NVME_SMART_CRIT_RELIABILITY | NVME_SMART_CRIT_MEDIA | \
	 NVME_SMART_CRIT_VOLATILE_MEMORY | NVMET_AEN_CFG_OPTIONAL)

/*
 * Helper macros for when the NVMe error is NVME_SC_CONNECT_INVALID_PARAM.
 * The 16-bit shift sets the IATTR bit to 1, which indicates that the
 * offending offset starts in the data section of the connect command.
 */
#define IPO_IATTR_CONNECT_DATA(x)	\
	(cpu_to_le32((1 << 16) | (offsetof(struct nvmf_connect_data, x))))
#define IPO_IATTR_CONNECT_SQE(x)	\
	(cpu_to_le32(offsetof(struct nvmf_connect_command, x)))
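/*
 * For example (illustrative only), a connect handler that rejects an invalid
 * cntlid in the connect data could report the offending offset as:
 *
 *	req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
 *	nvmet_req_complete(req, NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR);
 */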

struct nvmet_ns {
	struct list_head	dev_link;
	struct percpu_ref	ref;
	struct block_device	*bdev;
	struct file		*file;
	bool			readonly;
	u32			nsid;
	u32			blksize_shift;
	loff_t			size;
	u8			nguid[16];
	uuid_t			uuid;
	u32			anagrpid;

	bool			buffered_io;
	bool			enabled;
	struct nvmet_subsys	*subsys;
	const char		*device_path;

	struct config_group	device_group;
	struct config_group	group;

	struct completion	disable_done;
	mempool_t		*bvec_pool;
	struct kmem_cache	*bvec_cache;

	int			use_p2pmem;
	struct pci_dev		*p2p_dev;
};

static inline struct nvmet_ns *to_nvmet_ns(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_ns, group);
}

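/*
 * Note: file-backed namespaces have no block device, so this returns NULL
 * for them; callers that need a struct device must handle that case.
 */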
static inline struct device *nvmet_ns_dev(struct nvmet_ns *ns)
{
	return ns->bdev ? disk_to_dev(ns->bdev->bd_disk) : NULL;
}

struct nvmet_cq {
	u16			qid;
	u16			size;
};

struct nvmet_sq {
	struct nvmet_ctrl	*ctrl;
	struct percpu_ref	ref;
	u16			qid;
	u16			size;
	u32			sqhd;
	bool			sqhd_disabled;
	struct completion	free_done;
	struct completion	confirm_done;
};

struct nvmet_ana_group {
	struct config_group	group;
	struct nvmet_port	*port;
	u32			grpid;
};

static inline struct nvmet_ana_group *to_ana_group(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_ana_group,
			group);
}

/**
 * struct nvmet_port -	Common structure to keep port
 *				information for the target.
 * @entry:		Entry into referrals or transport list.
 * @disc_addr:		Address information is stored in a format defined
 *				for a discovery log page entry.
 * @group:		ConfigFS group for this element's folder.
 * @priv:		Private data for the transport.
 */
struct nvmet_port {
	struct list_head		entry;
	struct nvmf_disc_rsp_page_entry disc_addr;
	struct config_group		group;
	struct config_group		subsys_group;
	struct list_head		subsystems;
	struct config_group		referrals_group;
	struct list_head		referrals;
	struct list_head		global_entry;
	struct config_group		ana_groups_group;
	struct nvmet_ana_group		ana_default_group;
	enum nvme_ana_state		*ana_state;
	void				*priv;
	bool				enabled;
	int				inline_data_size;
	const struct nvmet_fabrics_ops	*tr_ops;
};

static inline struct nvmet_port *to_nvmet_port(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_port,
			group);
}

static inline struct nvmet_port *ana_groups_to_port(
		struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_port,
			ana_groups_group);
}

struct nvmet_ctrl {
	struct nvmet_subsys	*subsys;
	struct nvmet_cq		**cqs;
	struct nvmet_sq		**sqs;

	bool			cmd_seen;

	struct mutex		lock;
	u64			cap;
	u32			cc;
	u32			csts;

	uuid_t			hostid;
	u16			cntlid;
	u32			kato;

	struct nvmet_port	*port;

	u32			aen_enabled;
	unsigned long		aen_masked;
	struct nvmet_req	*async_event_cmds[NVMET_ASYNC_EVENTS];
	unsigned int		nr_async_event_cmds;
	struct list_head	async_events;
	struct work_struct	async_event_work;

	struct list_head	subsys_entry;
	struct kref		ref;
	struct delayed_work	ka_work;
	struct work_struct	fatal_err_work;

	const struct nvmet_fabrics_ops *ops;

	__le32			*changed_ns_list;
	u32			nr_changed_ns;

	char			subsysnqn[NVMF_NQN_FIELD_LEN];
	char			hostnqn[NVMF_NQN_FIELD_LEN];

	struct device		*p2p_client;
	struct radix_tree_root	p2p_ns_map;

	spinlock_t		error_lock;
	u64			err_counter;
	struct nvme_error_slot	slots[NVMET_ERROR_LOG_SLOTS];
};

struct nvmet_subsys {
	enum nvme_subsys_type	type;

	struct mutex		lock;
	struct kref		ref;

	struct list_head	namespaces;
	unsigned int		nr_namespaces;
	unsigned int		max_nsid;

	struct list_head	ctrls;

	struct list_head	hosts;
	bool			allow_any_host;

	u16			max_qid;

	u64			ver;
	u64			serial;
	char			*subsysnqn;

	struct config_group	group;

	struct config_group	namespaces_group;
	struct config_group	allowed_hosts_group;
};

static inline struct nvmet_subsys *to_subsys(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_subsys, group);
}

static inline struct nvmet_subsys *namespaces_to_subsys(
		struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_subsys,
			namespaces_group);
}

struct nvmet_host {
	struct config_group	group;
};

static inline struct nvmet_host *to_host(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_host, group);
}

static inline char *nvmet_host_name(struct nvmet_host *host)
{
	return config_item_name(&host->group.cg_item);
}

struct nvmet_host_link {
	struct list_head	entry;
	struct nvmet_host	*host;
};

struct nvmet_subsys_link {
	struct list_head	entry;
	struct nvmet_subsys	*subsys;
};

struct nvmet_req;
struct nvmet_fabrics_ops {
	struct module *owner;
	unsigned int type;
	unsigned int msdbd;
	bool has_keyed_sgls : 1;
	void (*queue_response)(struct nvmet_req *req);
	int (*add_port)(struct nvmet_port *port);
	void (*remove_port)(struct nvmet_port *port);
	void (*delete_ctrl)(struct nvmet_ctrl *ctrl);
	void (*disc_traddr)(struct nvmet_req *req,
			struct nvmet_port *port, char *traddr);
	u16 (*install_queue)(struct nvmet_sq *nvme_sq);
	void (*discovery_chg)(struct nvmet_port *port);
};

#define NVMET_MAX_INLINE_BIOVEC	8
#define NVMET_MAX_INLINE_DATA_LEN	(NVMET_MAX_INLINE_BIOVEC * PAGE_SIZE)

struct nvmet_req {
	struct nvme_command	*cmd;
	struct nvme_completion	*cqe;
	struct nvmet_sq		*sq;
	struct nvmet_cq		*cq;
	struct nvmet_ns		*ns;
	struct scatterlist	*sg;
	struct bio_vec		inline_bvec[NVMET_MAX_INLINE_BIOVEC];
	union {
		struct {
			struct bio	inline_bio;
		} b;
		struct {
			bool		mpool_alloc;
			struct kiocb	iocb;
			struct bio_vec	*bvec;
			struct work_struct work;
		} f;
	};
	int			sg_cnt;
	/* data length as parsed from the SGL descriptor: */
	size_t			transfer_len;

	struct nvmet_port	*port;

	void (*execute)(struct nvmet_req *req);
	const struct nvmet_fabrics_ops *ops;

	struct pci_dev		*p2p_dev;
	struct device		*p2p_client;
	u16			error_loc;
	u64			error_slba;
};

extern struct workqueue_struct *buffered_io_wq;

static inline void nvmet_set_result(struct nvmet_req *req, u32 result)
{
	req->cqe->result.u32 = cpu_to_le32(result);
}

/*
 * NVMe command writes actually are DMA reads for us on the target side.
 */
static inline enum dma_data_direction
nvmet_data_dir(struct nvmet_req *req)
{
	return nvme_is_write(req->cmd) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
}

struct nvmet_async_event {
	struct list_head	entry;
	u8			event_type;
	u8			event_info;
	u8			log_page;
};

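/*
 * AEN masking: once an event has been reported on a given AEN bit it stays
 * masked until the host reads the matching log page.  Get Log Page carries a
 * Retain Asynchronous Event (RAE) flag in bit 15 of CDW10; when RAE is clear
 * the bit is unmasked again so the event can be reported anew.
 */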
static inline void nvmet_clear_aen_bit(struct nvmet_req *req, u32 bn)
{
	int rae = le32_to_cpu(req->cmd->common.cdw10) & 1 << 15;

	if (!rae)
		clear_bit(bn, &req->sq->ctrl->aen_masked);
}

static inline bool nvmet_aen_bit_disabled(struct nvmet_ctrl *ctrl, u32 bn)
{
	if (!(READ_ONCE(ctrl->aen_enabled) & (1 << bn)))
		return true;
	return test_and_set_bit(bn, &ctrl->aen_masked);
}

void nvmet_get_feat_kato(struct nvmet_req *req);
void nvmet_get_feat_async_event(struct nvmet_req *req);
u16 nvmet_set_feat_kato(struct nvmet_req *req);
u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask);
void nvmet_execute_async_event(struct nvmet_req *req);

u16 nvmet_parse_connect_cmd(struct nvmet_req *req);
void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id);
u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req);
u16 nvmet_file_parse_io_cmd(struct nvmet_req *req);
u16 nvmet_parse_admin_cmd(struct nvmet_req *req);
u16 nvmet_parse_discovery_cmd(struct nvmet_req *req);
u16 nvmet_parse_fabrics_cmd(struct nvmet_req *req);

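/*
 * Rough request life cycle as seen from a transport driver (a sketch; see
 * core.c and the individual transports for the authoritative details):
 * the transport calls nvmet_req_init() to parse the command (this sets up
 * req->execute), fills in req->transfer_len from the SGL descriptor and
 * performs any required data transfer, then calls nvmet_req_execute().
 * The command handler finishes with nvmet_req_complete(), which ends up in
 * the transport's ->queue_response() callback.
 */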
bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
		struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops);
void nvmet_req_uninit(struct nvmet_req *req);
bool nvmet_check_data_len(struct nvmet_req *req, size_t data_len);
void nvmet_req_execute(struct nvmet_req *req);
void nvmet_req_complete(struct nvmet_req *req, u16 status);
int nvmet_req_alloc_sgl(struct nvmet_req *req);
void nvmet_req_free_sgl(struct nvmet_req *req);

void nvmet_execute_keep_alive(struct nvmet_req *req);

void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, u16 qid,
		u16 size);
void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq, u16 qid,
		u16 size);
void nvmet_sq_destroy(struct nvmet_sq *sq);
int nvmet_sq_init(struct nvmet_sq *sq);

void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl);

void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new);
u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
		struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp);
u16 nvmet_ctrl_find_get(const char *subsysnqn, const char *hostnqn, u16 cntlid,
		struct nvmet_req *req, struct nvmet_ctrl **ret);
void nvmet_ctrl_put(struct nvmet_ctrl *ctrl);
u16 nvmet_check_ctrl_status(struct nvmet_req *req, struct nvme_command *cmd);

struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
		enum nvme_subsys_type type);
void nvmet_subsys_put(struct nvmet_subsys *subsys);
void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys);

struct nvmet_ns *nvmet_find_namespace(struct nvmet_ctrl *ctrl, __le32 nsid);
void nvmet_put_namespace(struct nvmet_ns *ns);
int nvmet_ns_enable(struct nvmet_ns *ns);
void nvmet_ns_disable(struct nvmet_ns *ns);
struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid);
void nvmet_ns_free(struct nvmet_ns *ns);

void nvmet_send_ana_event(struct nvmet_subsys *subsys,
		struct nvmet_port *port);
void nvmet_port_send_ana_event(struct nvmet_port *port);

int nvmet_register_transport(const struct nvmet_fabrics_ops *ops);
void nvmet_unregister_transport(const struct nvmet_fabrics_ops *ops);

void nvmet_port_del_ctrls(struct nvmet_port *port,
			  struct nvmet_subsys *subsys);

int nvmet_enable_port(struct nvmet_port *port);
void nvmet_disable_port(struct nvmet_port *port);

void nvmet_referral_enable(struct nvmet_port *parent, struct nvmet_port *port);
void nvmet_referral_disable(struct nvmet_port *parent, struct nvmet_port *port);

u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf,
		size_t len);
u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf,
		size_t len);
u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len);

u32 nvmet_get_log_page_len(struct nvme_command *cmd);
u64 nvmet_get_log_page_offset(struct nvme_command *cmd);

extern struct list_head *nvmet_ports;
void nvmet_port_disc_changed(struct nvmet_port *port,
		struct nvmet_subsys *subsys);
void nvmet_subsys_disc_changed(struct nvmet_subsys *subsys,
		struct nvmet_host *host);
void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
		u8 event_info, u8 log_page);

#define NVMET_QUEUE_SIZE	1024
#define NVMET_NR_QUEUES		128
#define NVMET_MAX_CMD		NVMET_QUEUE_SIZE

/*
 * Nice round number that makes a list of nsids fit into a page.
 * Should become tunable at some point in the future.
 */
#define NVMET_MAX_NAMESPACES	1024

/*
 * 0 is not a valid ANA group ID, so we start numbering at 1.
 *
 * ANA Group 1 exists without manual intervention, has namespaces assigned to
 * it by default, and is available in an optimized state through all ports.
 */
#define NVMET_MAX_ANAGRPS	128
#define NVMET_DEFAULT_ANA_GRPID	1

#define NVMET_KAS	10
#define NVMET_DISC_KATO_MS	120000

int __init nvmet_init_configfs(void);
void __exit nvmet_exit_configfs(void);

int __init nvmet_init_discovery(void);
void nvmet_exit_discovery(void);

extern struct nvmet_subsys *nvmet_disc_subsys;
extern struct rw_semaphore nvmet_config_sem;

extern u32 nvmet_ana_group_enabled[NVMET_MAX_ANAGRPS + 1];
extern u64 nvmet_ana_chgcnt;
extern struct rw_semaphore nvmet_ana_sem;

bool nvmet_host_allowed(struct nvmet_subsys *subsys, const char *hostnqn);

int nvmet_bdev_ns_enable(struct nvmet_ns *ns);
int nvmet_file_ns_enable(struct nvmet_ns *ns);
void nvmet_bdev_ns_disable(struct nvmet_ns *ns);
void nvmet_file_ns_disable(struct nvmet_ns *ns);
u16 nvmet_bdev_flush(struct nvmet_req *req);
u16 nvmet_file_flush(struct nvmet_req *req);
void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid);

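/* The rw.length (NLB) field is a 0's based value, hence the + 1 below. */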
static inline u32 nvmet_rw_len(struct nvmet_req *req)
{
	return ((u32)le16_to_cpu(req->cmd->rw.length) + 1) <<
			req->ns->blksize_shift;
}

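/* Likewise, dsm.nr is a 0's based count of DSM ranges, hence the + 1. */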
static inline u32 nvmet_dsm_len(struct nvmet_req *req)
{
	return (le32_to_cpu(req->cmd->dsm.nr) + 1) *
		sizeof(struct nvme_dsm_range);
}

u16 errno_to_nvme_status(struct nvmet_req *req, int errno);

/* Convert a 32-bit number to a 16-bit 0's based number */
static inline __le16 to0based(u32 a)
{
	return cpu_to_le16(max(1U, min(1U << 16, a)) - 1);
}

#endif /* _NVMET_H */