/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */

#ifndef _NVMET_H
#define _NVMET_H

#include <linux/dma-mapping.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/kref.h>
#include <linux/percpu-refcount.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/uuid.h>
#include <linux/nvme.h>
#include <linux/configfs.h>
#include <linux/rcupdate.h>
#include <linux/blkdev.h>
#include <linux/radix-tree.h>

#define NVMET_ASYNC_EVENTS		4
#define NVMET_ERROR_LOG_SLOTS		128
#define NVMET_NO_ERROR_LOC		((u16)-1)
#define NVMET_DEFAULT_CTRL_MODEL	"Linux"

/*
 * Supported optional AENs:
 */
#define NVMET_AEN_CFG_OPTIONAL \
	(NVME_AEN_CFG_NS_ATTR | NVME_AEN_CFG_ANA_CHANGE)
#define NVMET_DISC_AEN_CFG_OPTIONAL \
	(NVME_AEN_CFG_DISC_CHANGE)

/*
 * Plus mandatory SMART AENs (we'll never send them, but allow enabling them):
 */
#define NVMET_AEN_CFG_ALL \
	(NVME_SMART_CRIT_SPARE | NVME_SMART_CRIT_TEMPERATURE | \
	 NVME_SMART_CRIT_RELIABILITY | NVME_SMART_CRIT_MEDIA | \
	 NVME_SMART_CRIT_VOLATILE_MEMORY | NVMET_AEN_CFG_OPTIONAL)

/*
 * Helper macros for when the NVMe error is NVME_SC_CONNECT_INVALID_PARAM.
 * The 16-bit shift sets the IATTR bit to 1, which means the offending
 * offset starts in the data section of connect().
 */
#define IPO_IATTR_CONNECT_DATA(x)	\
	(cpu_to_le32((1 << 16) | (offsetof(struct nvmf_connect_data, x))))
#define IPO_IATTR_CONNECT_SQE(x)	\
	(cpu_to_le32(offsetof(struct nvmf_connect_command, x)))
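
/*
 * Illustrative sketch (not part of the original header): a connect handler
 * that rejects a bad cntlid in the connect data could report the offending
 * offset like this. The function name below is hypothetical.
 *
 *	static u16 example_connect_check(struct nvmet_req *req)
 *	{
 *		req->error_loc = offsetof(struct nvmf_connect_data, cntlid);
 *		req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
 *		return NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
 *	}
 */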

struct nvmet_ns {
	struct list_head	dev_link;
	struct percpu_ref	ref;
	struct block_device	*bdev;
	struct file		*file;
	bool			readonly;
	u32			nsid;
	u32			blksize_shift;
	loff_t			size;
	u8			nguid[16];
	uuid_t			uuid;
	u32			anagrpid;

	bool			buffered_io;
	bool			enabled;
	struct nvmet_subsys	*subsys;
	const char		*device_path;

	struct config_group	device_group;
	struct config_group	group;

	struct completion	disable_done;
	mempool_t		*bvec_pool;
	struct kmem_cache	*bvec_cache;

	int			use_p2pmem;
	struct pci_dev		*p2p_dev;
};

static inline struct nvmet_ns *to_nvmet_ns(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_ns, group);
}

static inline struct device *nvmet_ns_dev(struct nvmet_ns *ns)
{
	return ns->bdev ? disk_to_dev(ns->bdev->bd_disk) : NULL;
}

struct nvmet_cq {
	u16			qid;
	u16			size;
};

struct nvmet_sq {
	struct nvmet_ctrl	*ctrl;
	struct percpu_ref	ref;
	u16			qid;
	u16			size;
	u32			sqhd;
	bool			sqhd_disabled;
	struct completion	free_done;
	struct completion	confirm_done;
};

struct nvmet_ana_group {
	struct config_group	group;
	struct nvmet_port	*port;
	u32			grpid;
};

static inline struct nvmet_ana_group *to_ana_group(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_ana_group,
			group);
}

/**
 * struct nvmet_port -	Common structure to keep port
 *				information for the target.
 * @entry:		Entry into referrals or transport list.
 * @disc_addr:		Address information is stored in a format defined
 *				for a discovery log page entry.
 * @group:		ConfigFS group for this element's folder.
 * @priv:		Private data for the transport.
 */
struct nvmet_port {
	struct list_head		entry;
	struct nvmf_disc_rsp_page_entry	disc_addr;
	struct config_group		group;
	struct config_group		subsys_group;
	struct list_head		subsystems;
	struct config_group		referrals_group;
	struct list_head		referrals;
	struct list_head		global_entry;
	struct config_group		ana_groups_group;
	struct nvmet_ana_group		ana_default_group;
	enum nvme_ana_state		*ana_state;
	void				*priv;
	bool				enabled;
	int				inline_data_size;
	const struct nvmet_fabrics_ops	*tr_ops;
};

static inline struct nvmet_port *to_nvmet_port(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_port,
			group);
}

static inline struct nvmet_port *ana_groups_to_port(
		struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_port,
			ana_groups_group);
}

struct nvmet_ctrl {
	struct nvmet_subsys	*subsys;
	struct nvmet_cq		**cqs;
	struct nvmet_sq		**sqs;

	bool			cmd_seen;

	struct mutex		lock;
	u64			cap;
	u32			cc;
	u32			csts;

	uuid_t			hostid;
	u16			cntlid;
	u32			kato;

	struct nvmet_port	*port;

	u32			aen_enabled;
	unsigned long		aen_masked;
	struct nvmet_req	*async_event_cmds[NVMET_ASYNC_EVENTS];
	unsigned int		nr_async_event_cmds;
	struct list_head	async_events;
	struct work_struct	async_event_work;

	struct list_head	subsys_entry;
	struct kref		ref;
	struct delayed_work	ka_work;
	struct work_struct	fatal_err_work;

	const struct nvmet_fabrics_ops *ops;

	__le32			*changed_ns_list;
	u32			nr_changed_ns;

	char			subsysnqn[NVMF_NQN_FIELD_LEN];
	char			hostnqn[NVMF_NQN_FIELD_LEN];

	struct device		*p2p_client;
	struct radix_tree_root	p2p_ns_map;

	spinlock_t		error_lock;
	u64			err_counter;
	struct nvme_error_slot	slots[NVMET_ERROR_LOG_SLOTS];
};

struct nvmet_subsys_model {
	struct rcu_head		rcuhead;
	char			number[];
};

struct nvmet_subsys {
	enum nvme_subsys_type	type;

	struct mutex		lock;
	struct kref		ref;

	struct list_head	namespaces;
	unsigned int		nr_namespaces;
	unsigned int		max_nsid;
	u16			cntlid_min;
	u16			cntlid_max;

	struct list_head	ctrls;

	struct list_head	hosts;
	bool			allow_any_host;

	u16			max_qid;

	u64			ver;
	u64			serial;
	char			*subsysnqn;

	struct config_group	group;

	struct config_group	namespaces_group;
	struct config_group	allowed_hosts_group;

	struct nvmet_subsys_model	__rcu *model;
};

static inline struct nvmet_subsys *to_subsys(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_subsys, group);
}

static inline struct nvmet_subsys *namespaces_to_subsys(
		struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_subsys,
			namespaces_group);
}

struct nvmet_host {
	struct config_group	group;
};

static inline struct nvmet_host *to_host(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_host, group);
}

static inline char *nvmet_host_name(struct nvmet_host *host)
{
	return config_item_name(&host->group.cg_item);
}

struct nvmet_host_link {
	struct list_head	entry;
	struct nvmet_host	*host;
};

struct nvmet_subsys_link {
	struct list_head	entry;
	struct nvmet_subsys	*subsys;
};

struct nvmet_req;
struct nvmet_fabrics_ops {
	struct module *owner;
	unsigned int type;
	unsigned int msdbd;
	bool has_keyed_sgls : 1;
	void (*queue_response)(struct nvmet_req *req);
	int (*add_port)(struct nvmet_port *port);
	void (*remove_port)(struct nvmet_port *port);
	void (*delete_ctrl)(struct nvmet_ctrl *ctrl);
	void (*disc_traddr)(struct nvmet_req *req,
			struct nvmet_port *port, char *traddr);
	u16 (*install_queue)(struct nvmet_sq *nvme_sq);
	void (*discovery_chg)(struct nvmet_port *port);
	u8 (*get_mdts)(const struct nvmet_ctrl *ctrl);
};

#define NVMET_MAX_INLINE_BIOVEC	8
#define NVMET_MAX_INLINE_DATA_LEN	(NVMET_MAX_INLINE_BIOVEC * PAGE_SIZE)

struct nvmet_req {
	struct nvme_command	*cmd;
	struct nvme_completion	*cqe;
	struct nvmet_sq		*sq;
	struct nvmet_cq		*cq;
	struct nvmet_ns		*ns;
	struct scatterlist	*sg;
	struct bio_vec		inline_bvec[NVMET_MAX_INLINE_BIOVEC];
	union {
		struct {
			struct bio	inline_bio;
		} b;
		struct {
			bool			mpool_alloc;
			struct kiocb		iocb;
			struct bio_vec		*bvec;
			struct work_struct	work;
		} f;
	};
	int			sg_cnt;
	/* data length as parsed from the SGL descriptor: */
	size_t			transfer_len;

	struct nvmet_port	*port;

	void (*execute)(struct nvmet_req *req);
	const struct nvmet_fabrics_ops *ops;

	struct pci_dev		*p2p_dev;
	struct device		*p2p_client;
	u16			error_loc;
	u64			error_slba;
};

extern struct workqueue_struct *buffered_io_wq;

static inline void nvmet_set_result(struct nvmet_req *req, u32 result)
{
	req->cqe->result.u32 = cpu_to_le32(result);
}

/*
 * NVMe command writes actually are DMA reads for us on the target side.
 */
static inline enum dma_data_direction
nvmet_data_dir(struct nvmet_req *req)
{
	return nvme_is_write(req->cmd) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
}
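
/*
 * Illustrative sketch (not in the original header): a DMA-capable transport
 * would typically map the request's scatterlist with the helper above, e.g.:
 *
 *	count = dma_map_sg(dev, req->sg, req->sg_cnt, nvmet_data_dir(req));
 *
 * so that a host write lands in target memory via a DMA_FROM_DEVICE mapping.
 */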

struct nvmet_async_event {
	struct list_head	entry;
	u8			event_type;
	u8			event_info;
	u8			log_page;
};

static inline void nvmet_clear_aen_bit(struct nvmet_req *req, u32 bn)
{
	/* RAE (Retain Asynchronous Event) is bit 15 of Get Log Page cdw10 */
	int rae = le32_to_cpu(req->cmd->common.cdw10) & (1 << 15);

	if (!rae)
		clear_bit(bn, &req->sq->ctrl->aen_masked);
}

static inline bool nvmet_aen_bit_disabled(struct nvmet_ctrl *ctrl, u32 bn)
{
	if (!(READ_ONCE(ctrl->aen_enabled) & (1 << bn)))
		return true;
	return test_and_set_bit(bn, &ctrl->aen_masked);
}
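
/*
 * Illustrative sketch (not in the original header): event generators are
 * expected to check the mask before queueing an AEN, e.g.:
 *
 *	if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_NS_ATTR))
 *		return;
 *	nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE, 0,
 *			NVME_LOG_CHANGED_NS);
 */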

void nvmet_get_feat_kato(struct nvmet_req *req);
void nvmet_get_feat_async_event(struct nvmet_req *req);
u16 nvmet_set_feat_kato(struct nvmet_req *req);
u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask);
void nvmet_execute_async_event(struct nvmet_req *req);

u16 nvmet_parse_connect_cmd(struct nvmet_req *req);
void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id);
u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req);
u16 nvmet_file_parse_io_cmd(struct nvmet_req *req);
u16 nvmet_parse_admin_cmd(struct nvmet_req *req);
u16 nvmet_parse_discovery_cmd(struct nvmet_req *req);
u16 nvmet_parse_fabrics_cmd(struct nvmet_req *req);

bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
		struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops);
void nvmet_req_uninit(struct nvmet_req *req);
bool nvmet_check_data_len(struct nvmet_req *req, size_t data_len);
bool nvmet_check_data_len_lte(struct nvmet_req *req, size_t data_len);
void nvmet_req_complete(struct nvmet_req *req, u16 status);
int nvmet_req_alloc_sgl(struct nvmet_req *req);
void nvmet_req_free_sgl(struct nvmet_req *req);

void nvmet_execute_keep_alive(struct nvmet_req *req);

void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, u16 qid,
		u16 size);
void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq, u16 qid,
		u16 size);
void nvmet_sq_destroy(struct nvmet_sq *sq);
int nvmet_sq_init(struct nvmet_sq *sq);

void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl);

void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new);
u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
		struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp);
u16 nvmet_ctrl_find_get(const char *subsysnqn, const char *hostnqn, u16 cntlid,
		struct nvmet_req *req, struct nvmet_ctrl **ret);
void nvmet_ctrl_put(struct nvmet_ctrl *ctrl);
u16 nvmet_check_ctrl_status(struct nvmet_req *req, struct nvme_command *cmd);

struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
		enum nvme_subsys_type type);
void nvmet_subsys_put(struct nvmet_subsys *subsys);
void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys);

struct nvmet_ns *nvmet_find_namespace(struct nvmet_ctrl *ctrl, __le32 nsid);
void nvmet_put_namespace(struct nvmet_ns *ns);
int nvmet_ns_enable(struct nvmet_ns *ns);
void nvmet_ns_disable(struct nvmet_ns *ns);
struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid);
void nvmet_ns_free(struct nvmet_ns *ns);

void nvmet_send_ana_event(struct nvmet_subsys *subsys,
		struct nvmet_port *port);
void nvmet_port_send_ana_event(struct nvmet_port *port);

int nvmet_register_transport(const struct nvmet_fabrics_ops *ops);
void nvmet_unregister_transport(const struct nvmet_fabrics_ops *ops);

void nvmet_port_del_ctrls(struct nvmet_port *port,
			  struct nvmet_subsys *subsys);

int nvmet_enable_port(struct nvmet_port *port);
void nvmet_disable_port(struct nvmet_port *port);

void nvmet_referral_enable(struct nvmet_port *parent, struct nvmet_port *port);
void nvmet_referral_disable(struct nvmet_port *parent, struct nvmet_port *port);

u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf,
		size_t len);
u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf,
		size_t len);
u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len);

u32 nvmet_get_log_page_len(struct nvme_command *cmd);
u64 nvmet_get_log_page_offset(struct nvme_command *cmd);

extern struct list_head *nvmet_ports;
void nvmet_port_disc_changed(struct nvmet_port *port,
		struct nvmet_subsys *subsys);
void nvmet_subsys_disc_changed(struct nvmet_subsys *subsys,
		struct nvmet_host *host);
void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
		u8 event_info, u8 log_page);

#define NVMET_QUEUE_SIZE	1024
#define NVMET_NR_QUEUES		128
#define NVMET_MAX_CMD		NVMET_QUEUE_SIZE

/*
 * Nice round number that makes a list of nsids fit into a page:
 * 1024 nsids * sizeof(__le32) = 4096 bytes, i.e. one 4K page.
 * Should become tunable at some point in the future.
 */
#define NVMET_MAX_NAMESPACES	1024

/*
 * 0 is not a valid ANA group ID, so we start numbering at 1.
 *
 * ANA Group 1 exists without manual intervention, has namespaces assigned to it
 * by default, and is available in an optimized state through all ports.
 */
#define NVMET_MAX_ANAGRPS	128
#define NVMET_DEFAULT_ANA_GRPID	1

#define NVMET_KAS		10
#define NVMET_DISC_KATO_MS	120000

int __init nvmet_init_configfs(void);
void __exit nvmet_exit_configfs(void);

int __init nvmet_init_discovery(void);
void nvmet_exit_discovery(void);

extern struct nvmet_subsys *nvmet_disc_subsys;
extern struct rw_semaphore nvmet_config_sem;

extern u32 nvmet_ana_group_enabled[NVMET_MAX_ANAGRPS + 1];
extern u64 nvmet_ana_chgcnt;
extern struct rw_semaphore nvmet_ana_sem;

bool nvmet_host_allowed(struct nvmet_subsys *subsys, const char *hostnqn);

int nvmet_bdev_ns_enable(struct nvmet_ns *ns);
int nvmet_file_ns_enable(struct nvmet_ns *ns);
void nvmet_bdev_ns_disable(struct nvmet_ns *ns);
void nvmet_file_ns_disable(struct nvmet_ns *ns);
u16 nvmet_bdev_flush(struct nvmet_req *req);
u16 nvmet_file_flush(struct nvmet_req *req);
void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid);
void nvmet_bdev_ns_revalidate(struct nvmet_ns *ns);
int nvmet_file_ns_revalidate(struct nvmet_ns *ns);
void nvmet_ns_revalidate(struct nvmet_ns *ns);

static inline u32 nvmet_rw_len(struct nvmet_req *req)
{
	return ((u32)le16_to_cpu(req->cmd->rw.length) + 1) <<
			req->ns->blksize_shift;
}
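
/*
 * Illustrative note (not in the original header): rw.length is 0's based, so
 * for an 8-LBA read of a namespace with 512-byte blocks (blksize_shift == 9)
 * the host sends length == 7 and nvmet_rw_len() returns (7 + 1) << 9 == 4096.
 */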

static inline u32 nvmet_dsm_len(struct nvmet_req *req)
{
	return (le32_to_cpu(req->cmd->dsm.nr) + 1) *
		sizeof(struct nvme_dsm_range);
}
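
/*
 * Illustrative note (not in the original header): dsm.nr is likewise 0's
 * based, so nr == 0 means one range and the expected transfer length is a
 * single struct nvme_dsm_range.
 */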

u16 errno_to_nvme_status(struct nvmet_req *req, int errno);

/* Convert a 32-bit number to a 16-bit 0's based number */
static inline __le16 to0based(u32 a)
{
	return cpu_to_le16(max(1U, min(1U << 16, a)) - 1);
}
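
/*
 * Illustrative note (not in the original header): to0based() clamps before
 * converting, so to0based(0) == 0, to0based(1) == 0, to0based(256) == 255,
 * and anything above 65536 saturates at cpu_to_le16(0xffff).
 */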

#endif /* _NVMET_H */