/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */

#ifndef _NVMET_H
#define _NVMET_H

#include <linux/dma-mapping.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/kref.h>
#include <linux/percpu-refcount.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/uuid.h>
#include <linux/nvme.h>
#include <linux/configfs.h>
#include <linux/rcupdate.h>
#include <linux/blkdev.h>
#include <linux/radix-tree.h>

#define NVMET_ASYNC_EVENTS		4
#define NVMET_ERROR_LOG_SLOTS		128
#define NVMET_NO_ERROR_LOC		((u16)-1)

/*
 * Supported optional AENs:
 */
#define NVMET_AEN_CFG_OPTIONAL \
	(NVME_AEN_CFG_NS_ATTR | NVME_AEN_CFG_ANA_CHANGE)
#define NVMET_DISC_AEN_CFG_OPTIONAL \
	(NVME_AEN_CFG_DISC_CHANGE)

/*
 * Plus mandatory SMART AENs (we'll never send them, but allow enabling them):
 */
#define NVMET_AEN_CFG_ALL \
	(NVME_SMART_CRIT_SPARE | NVME_SMART_CRIT_TEMPERATURE | \
	 NVME_SMART_CRIT_RELIABILITY | NVME_SMART_CRIT_MEDIA | \
	 NVME_SMART_CRIT_VOLATILE_MEMORY | NVMET_AEN_CFG_OPTIONAL)

/* Helper macros for when the NVMe error is NVME_SC_CONNECT_INVALID_PARAM.
 * The 16-bit shift sets the IATTR bit to 1, which means the offending
 * offset starts in the data section of the connect command.
 */
#define IPO_IATTR_CONNECT_DATA(x)	\
	(cpu_to_le32((1 << 16) | (offsetof(struct nvmf_connect_data, x))))
#define IPO_IATTR_CONNECT_SQE(x)	\
	(cpu_to_le32(offsetof(struct nvmf_connect_command, x)))
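
/*
 * Illustrative sketch (not part of this header): this is roughly how the
 * connect handler reports which field of the connect data was invalid,
 * e.g. a bad cntlid:
 *
 *	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
 *	req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
 */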

struct nvmet_ns {
	struct list_head	dev_link;
	struct percpu_ref	ref;
	struct block_device	*bdev;
	struct file		*file;
	bool			readonly;
	u32			nsid;
	u32			blksize_shift;
	loff_t			size;
	u8			nguid[16];
	uuid_t			uuid;
	u32			anagrpid;

	bool			buffered_io;
	bool			enabled;
	struct nvmet_subsys	*subsys;
	const char		*device_path;

	struct config_group	device_group;
	struct config_group	group;

	struct completion	disable_done;
	mempool_t		*bvec_pool;
	struct kmem_cache	*bvec_cache;

	int			use_p2pmem;
	struct pci_dev		*p2p_dev;
};

static inline struct nvmet_ns *to_nvmet_ns(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_ns, group);
}

static inline struct device *nvmet_ns_dev(struct nvmet_ns *ns)
{
	return ns->bdev ? disk_to_dev(ns->bdev->bd_disk) : NULL;
}

struct nvmet_cq {
	u16			qid;
	u16			size;
};

struct nvmet_sq {
	struct nvmet_ctrl	*ctrl;
	struct percpu_ref	ref;
	u16			qid;
	u16			size;
	u32			sqhd;
	bool			sqhd_disabled;
	struct completion	free_done;
	struct completion	confirm_done;
};

struct nvmet_ana_group {
	struct config_group	group;
	struct nvmet_port	*port;
	u32			grpid;
};

static inline struct nvmet_ana_group *to_ana_group(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_ana_group,
			group);
}

/**
 * struct nvmet_port -	Common structure to keep port
 *				information for the target.
 * @entry:		Entry into referrals or transport list.
 * @disc_addr:		Address information is stored in a format defined
 *				for a discovery log page entry.
 * @group:		ConfigFS group for this element's folder.
 * @priv:		Private data for the transport.
 */
struct nvmet_port {
	struct list_head		entry;
	struct nvmf_disc_rsp_page_entry	disc_addr;
	struct config_group		group;
	struct config_group		subsys_group;
	struct list_head		subsystems;
	struct config_group		referrals_group;
	struct list_head		referrals;
	struct list_head		global_entry;
	struct config_group		ana_groups_group;
	struct nvmet_ana_group		ana_default_group;
	enum nvme_ana_state		*ana_state;
	void				*priv;
	bool				enabled;
	int				inline_data_size;
};

static inline struct nvmet_port *to_nvmet_port(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_port,
			group);
}

static inline struct nvmet_port *ana_groups_to_port(
		struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_port,
			ana_groups_group);
}
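
/*
 * Roughly, these config groups map onto the nvmet configfs tree under
 * /sys/kernel/config/nvmet/ (sketch only, attribute files omitted):
 *
 *	ports/<id>/			<- struct nvmet_port (group)
 *	ports/<id>/subsystems/		<- subsys_group
 *	ports/<id>/referrals/		<- referrals_group
 *	ports/<id>/ana_groups/<grpid>/	<- ana_groups_group / struct nvmet_ana_group
 */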

struct nvmet_ctrl {
	struct nvmet_subsys	*subsys;
	struct nvmet_cq		**cqs;
	struct nvmet_sq		**sqs;

	bool			cmd_seen;

	struct mutex		lock;
	u64			cap;
	u32			cc;
	u32			csts;

	uuid_t			hostid;
	u16			cntlid;
	u32			kato;

	struct nvmet_port	*port;

	u32			aen_enabled;
	unsigned long		aen_masked;
	struct nvmet_req	*async_event_cmds[NVMET_ASYNC_EVENTS];
	unsigned int		nr_async_event_cmds;
	struct list_head	async_events;
	struct work_struct	async_event_work;

	struct list_head	subsys_entry;
	struct kref		ref;
	struct delayed_work	ka_work;
	struct work_struct	fatal_err_work;

	const struct nvmet_fabrics_ops *ops;

	__le32			*changed_ns_list;
	u32			nr_changed_ns;

	char			subsysnqn[NVMF_NQN_FIELD_LEN];
	char			hostnqn[NVMF_NQN_FIELD_LEN];

	struct device		*p2p_client;
	struct radix_tree_root	p2p_ns_map;

	spinlock_t		error_lock;
	u64			err_counter;
	struct nvme_error_slot	slots[NVMET_ERROR_LOG_SLOTS];
};

struct nvmet_subsys {
	enum nvme_subsys_type	type;

	struct mutex		lock;
	struct kref		ref;

	struct list_head	namespaces;
	unsigned int		nr_namespaces;
	unsigned int		max_nsid;

	struct list_head	ctrls;

	struct list_head	hosts;
	bool			allow_any_host;

	u16			max_qid;

	u64			ver;
	u64			serial;
	char			*subsysnqn;

	struct config_group	group;

	struct config_group	namespaces_group;
	struct config_group	allowed_hosts_group;
};

static inline struct nvmet_subsys *to_subsys(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_subsys, group);
}

static inline struct nvmet_subsys *namespaces_to_subsys(
		struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_subsys,
			namespaces_group);
}

struct nvmet_host {
	struct config_group	group;
};

static inline struct nvmet_host *to_host(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_host, group);
}

static inline char *nvmet_host_name(struct nvmet_host *host)
{
	return config_item_name(&host->group.cg_item);
}

struct nvmet_host_link {
	struct list_head	entry;
	struct nvmet_host	*host;
};

struct nvmet_subsys_link {
	struct list_head	entry;
	struct nvmet_subsys	*subsys;
};

struct nvmet_req;
struct nvmet_fabrics_ops {
	struct module *owner;
	unsigned int type;
	unsigned int msdbd;
	bool has_keyed_sgls : 1;
	void (*queue_response)(struct nvmet_req *req);
	int (*add_port)(struct nvmet_port *port);
	void (*remove_port)(struct nvmet_port *port);
	void (*delete_ctrl)(struct nvmet_ctrl *ctrl);
	void (*disc_traddr)(struct nvmet_req *req,
			struct nvmet_port *port, char *traddr);
	u16 (*install_queue)(struct nvmet_sq *nvme_sq);
};

#define NVMET_MAX_INLINE_BIOVEC	8
#define NVMET_MAX_INLINE_DATA_LEN	(NVMET_MAX_INLINE_BIOVEC * PAGE_SIZE)

struct nvmet_req {
	struct nvme_command	*cmd;
	struct nvme_completion	*cqe;
	struct nvmet_sq		*sq;
	struct nvmet_cq		*cq;
	struct nvmet_ns		*ns;
	struct scatterlist	*sg;
	struct bio_vec		inline_bvec[NVMET_MAX_INLINE_BIOVEC];
	union {
		struct {
			struct bio		inline_bio;
		} b;
		struct {
			bool			mpool_alloc;
			struct kiocb		iocb;
			struct bio_vec		*bvec;
			struct work_struct	work;
		} f;
	};
	int			sg_cnt;
	/* data length as parsed from the command: */
	size_t			data_len;
	/* data length as parsed from the SGL descriptor: */
	size_t			transfer_len;

	struct nvmet_port	*port;

	void (*execute)(struct nvmet_req *req);
	const struct nvmet_fabrics_ops *ops;

	struct pci_dev		*p2p_dev;
	struct device		*p2p_client;
	u16			error_loc;
	u64			error_slba;
};

extern struct workqueue_struct *buffered_io_wq;

static inline void nvmet_set_result(struct nvmet_req *req, u32 result)
{
	req->cqe->result.u32 = cpu_to_le32(result);
}
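
/*
 * Illustrative sketch (hypothetical handler, not part of this header): a
 * command handler that returns a 32-bit value in CQE dword 0 would typically
 * set the result and then complete the request:
 *
 *	nvmet_set_result(req, val);
 *	nvmet_req_complete(req, NVME_SC_SUCCESS);
 */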

/*
 * NVMe command writes actually are DMA reads for us on the target side.
 */
static inline enum dma_data_direction
nvmet_data_dir(struct nvmet_req *req)
{
	return nvme_is_write(req->cmd) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
}
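
/*
 * Illustrative sketch (not part of this header): a DMA-capable transport
 * would typically map the request's scatterlist with this direction, e.g.:
 *
 *	nents = dma_map_sg(dmadev, req->sg, req->sg_cnt, nvmet_data_dir(req));
 *
 * where dmadev is the transport's DMA device (hypothetical name).
 */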

struct nvmet_async_event {
	struct list_head	entry;
	u8			event_type;
	u8			event_info;
	u8			log_page;
};

static inline void nvmet_clear_aen_bit(struct nvmet_req *req, u32 bn)
{
	int rae = le32_to_cpu(req->cmd->common.cdw10) & 1 << 15;

	if (!rae)
		clear_bit(bn, &req->sq->ctrl->aen_masked);
}

static inline bool nvmet_aen_bit_disabled(struct nvmet_ctrl *ctrl, u32 bn)
{
	if (!(READ_ONCE(ctrl->aen_enabled) & (1 << bn)))
		return true;
	return test_and_set_bit(bn, &ctrl->aen_masked);
}
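
/*
 * Illustrative sketch (not part of this header): the AEN producer checks
 * nvmet_aen_bit_disabled() before emitting an event, so each event class is
 * sent at most once until re-armed, and the Get Log Page handler re-arms it
 * via nvmet_clear_aen_bit() unless the host set the RAE bit, roughly:
 *
 *	if (!nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_NS_ATTR))
 *		nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE,
 *				NVME_AER_NOTICE_NS_CHANGED, NVME_LOG_CHANGED_NS);
 *	...
 *	nvmet_clear_aen_bit(req, NVME_AEN_BIT_NS_ATTR);	// in the log handler
 */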

void nvmet_get_feat_kato(struct nvmet_req *req);
void nvmet_get_feat_async_event(struct nvmet_req *req);
u16 nvmet_set_feat_kato(struct nvmet_req *req);
u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask);
void nvmet_execute_async_event(struct nvmet_req *req);

u16 nvmet_parse_connect_cmd(struct nvmet_req *req);
u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req);
u16 nvmet_file_parse_io_cmd(struct nvmet_req *req);
u16 nvmet_parse_admin_cmd(struct nvmet_req *req);
u16 nvmet_parse_discovery_cmd(struct nvmet_req *req);
u16 nvmet_parse_fabrics_cmd(struct nvmet_req *req);

bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
		struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops);
void nvmet_req_uninit(struct nvmet_req *req);
void nvmet_req_execute(struct nvmet_req *req);
void nvmet_req_complete(struct nvmet_req *req, u16 status);
int nvmet_req_alloc_sgl(struct nvmet_req *req);
void nvmet_req_free_sgl(struct nvmet_req *req);
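
/*
 * Illustrative sketch of the request lifecycle as driven by a fabrics
 * transport (hypothetical names, error paths and transport plumbing omitted;
 * see the rdma and loop transports for real users):
 *
 *	if (!nvmet_req_init(req, &queue->nvme_cq, &queue->nvme_sq, &my_ops))
 *		return;		// the core already queued an error response
 *	if (req->transfer_len && nvmet_req_alloc_sgl(req))
 *		goto out_err;	// fail the command, e.g. NVME_SC_INTERNAL
 *	// ... DMA-map / receive data into req->sg ...
 *	nvmet_req_execute(req);	// the backend later calls nvmet_req_complete(),
 *				// which invokes ops->queue_response()
 */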

void nvmet_execute_keep_alive(struct nvmet_req *req);

void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, u16 qid,
		u16 size);
void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq, u16 qid,
		u16 size);
void nvmet_sq_destroy(struct nvmet_sq *sq);
int nvmet_sq_init(struct nvmet_sq *sq);

void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl);

void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new);
u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
		struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp);
u16 nvmet_ctrl_find_get(const char *subsysnqn, const char *hostnqn, u16 cntlid,
		struct nvmet_req *req, struct nvmet_ctrl **ret);
void nvmet_ctrl_put(struct nvmet_ctrl *ctrl);
u16 nvmet_check_ctrl_status(struct nvmet_req *req, struct nvme_command *cmd);

struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
		enum nvme_subsys_type type);
void nvmet_subsys_put(struct nvmet_subsys *subsys);
void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys);

struct nvmet_ns *nvmet_find_namespace(struct nvmet_ctrl *ctrl, __le32 nsid);
void nvmet_put_namespace(struct nvmet_ns *ns);
int nvmet_ns_enable(struct nvmet_ns *ns);
void nvmet_ns_disable(struct nvmet_ns *ns);
struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid);
void nvmet_ns_free(struct nvmet_ns *ns);

void nvmet_send_ana_event(struct nvmet_subsys *subsys,
		struct nvmet_port *port);
void nvmet_port_send_ana_event(struct nvmet_port *port);

int nvmet_register_transport(const struct nvmet_fabrics_ops *ops);
void nvmet_unregister_transport(const struct nvmet_fabrics_ops *ops);

int nvmet_enable_port(struct nvmet_port *port);
void nvmet_disable_port(struct nvmet_port *port);

void nvmet_referral_enable(struct nvmet_port *parent, struct nvmet_port *port);
void nvmet_referral_disable(struct nvmet_port *parent, struct nvmet_port *port);

u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf,
		size_t len);
u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf,
		size_t len);
u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len);

u32 nvmet_get_log_page_len(struct nvme_command *cmd);
u64 nvmet_get_log_page_offset(struct nvme_command *cmd);

extern struct list_head *nvmet_ports;
void nvmet_port_disc_changed(struct nvmet_port *port,
		struct nvmet_subsys *subsys);
void nvmet_subsys_disc_changed(struct nvmet_subsys *subsys,
		struct nvmet_host *host);
void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
		u8 event_info, u8 log_page);

#define NVMET_QUEUE_SIZE	1024
#define NVMET_NR_QUEUES		128
#define NVMET_MAX_CMD		NVMET_QUEUE_SIZE

/*
 * Nice round number that makes a list of nsids fit into a page.
 * Should become tunable at some point in the future.
 */
#define NVMET_MAX_NAMESPACES	1024

/*
 * 0 is not a valid ANA group ID, so we start numbering at 1.
 *
 * ANA Group 1 exists without manual intervention, has namespaces assigned to
 * it by default, and is available in an optimized state through all ports.
 */
#define NVMET_MAX_ANAGRPS	128
#define NVMET_DEFAULT_ANA_GRPID	1

#define NVMET_KAS		10
#define NVMET_DISC_KATO_MS	120000

int __init nvmet_init_configfs(void);
void __exit nvmet_exit_configfs(void);

int __init nvmet_init_discovery(void);
void nvmet_exit_discovery(void);

extern struct nvmet_subsys *nvmet_disc_subsys;
extern struct rw_semaphore nvmet_config_sem;

extern u32 nvmet_ana_group_enabled[NVMET_MAX_ANAGRPS + 1];
extern u64 nvmet_ana_chgcnt;
extern struct rw_semaphore nvmet_ana_sem;

bool nvmet_host_allowed(struct nvmet_subsys *subsys, const char *hostnqn);

int nvmet_bdev_ns_enable(struct nvmet_ns *ns);
int nvmet_file_ns_enable(struct nvmet_ns *ns);
void nvmet_bdev_ns_disable(struct nvmet_ns *ns);
void nvmet_file_ns_disable(struct nvmet_ns *ns);
u16 nvmet_bdev_flush(struct nvmet_req *req);
u16 nvmet_file_flush(struct nvmet_req *req);
void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid);

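/*
 * Note: the NVMe NLB field (rw.length) is a 0's based value, hence the +1
 * below.  For example, rw.length == 7 on a namespace with a 4K block size
 * (blksize_shift == 12) means (7 + 1) << 12 = 32768 bytes of data.
 */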
static inline u32 nvmet_rw_len(struct nvmet_req *req)
{
	return ((u32)le16_to_cpu(req->cmd->rw.length) + 1) <<
			req->ns->blksize_shift;
}

u16 errno_to_nvme_status(struct nvmet_req *req, int errno);
#endif /* _NVMET_H */