// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe over Fabrics common host code.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/init.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/parser.h>
#include <linux/seq_file.h>
#include "nvme.h"
#include "fabrics.h"

static LIST_HEAD(nvmf_transports);
static DECLARE_RWSEM(nvmf_transports_rwsem);

static LIST_HEAD(nvmf_hosts);
static DEFINE_MUTEX(nvmf_hosts_mutex);

static struct nvmf_host *nvmf_default_host;

static struct nvmf_host *__nvmf_host_find(const char *hostnqn)
{
	struct nvmf_host *host;

	list_for_each_entry(host, &nvmf_hosts, list) {
		if (!strcmp(host->nqn, hostnqn))
			return host;
	}

	return NULL;
}

static struct nvmf_host *nvmf_host_add(const char *hostnqn)
{
	struct nvmf_host *host;

	mutex_lock(&nvmf_hosts_mutex);
	host = __nvmf_host_find(hostnqn);
	if (host) {
		kref_get(&host->ref);
		goto out_unlock;
	}

	host = kmalloc(sizeof(*host), GFP_KERNEL);
	if (!host)
		goto out_unlock;

	kref_init(&host->ref);
	strlcpy(host->nqn, hostnqn, NVMF_NQN_SIZE);

	list_add_tail(&host->list, &nvmf_hosts);
out_unlock:
	mutex_unlock(&nvmf_hosts_mutex);
	return host;
}

static struct nvmf_host *nvmf_host_default(void)
{
	struct nvmf_host *host;

	host = kmalloc(sizeof(*host), GFP_KERNEL);
	if (!host)
		return NULL;

	kref_init(&host->ref);
	uuid_gen(&host->id);
	snprintf(host->nqn, NVMF_NQN_SIZE,
		"nqn.2014-08.org.nvmexpress:uuid:%pUb", &host->id);

	mutex_lock(&nvmf_hosts_mutex);
	list_add_tail(&host->list, &nvmf_hosts);
	mutex_unlock(&nvmf_hosts_mutex);

	return host;
}

static void nvmf_host_destroy(struct kref *ref)
{
	struct nvmf_host *host = container_of(ref, struct nvmf_host, ref);

	mutex_lock(&nvmf_hosts_mutex);
	list_del(&host->list);
	mutex_unlock(&nvmf_hosts_mutex);

	kfree(host);
}

static void nvmf_host_put(struct nvmf_host *host)
{
	if (host)
		kref_put(&host->ref, nvmf_host_destroy);
}

/**
 * nvmf_get_address() - Get address/port
 * @ctrl:	Host NVMe controller instance from which to get the address/port
 * @buf:	OUTPUT parameter that will contain the address/port
 * @size:	buffer size
 */
int nvmf_get_address(struct nvme_ctrl *ctrl, char *buf, int size)
{
	int len = 0;

	if (ctrl->opts->mask & NVMF_OPT_TRADDR)
		len += scnprintf(buf, size, "traddr=%s", ctrl->opts->traddr);
	if (ctrl->opts->mask & NVMF_OPT_TRSVCID)
		len += scnprintf(buf + len, size - len, "%strsvcid=%s",
				(len) ? "," : "", ctrl->opts->trsvcid);
	if (ctrl->opts->mask & NVMF_OPT_HOST_TRADDR)
		len += scnprintf(buf + len, size - len, "%shost_traddr=%s",
				(len) ? "," : "", ctrl->opts->host_traddr);
	if (ctrl->opts->mask & NVMF_OPT_HOST_IFACE)
		len += scnprintf(buf + len, size - len, "%shost_iface=%s",
				(len) ? "," : "", ctrl->opts->host_iface);
	len += scnprintf(buf + len, size - len, "\n");

	return len;
}
EXPORT_SYMBOL_GPL(nvmf_get_address);

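/*
 * Illustrative sketch (not part of the original file): a sysfs "address"
 * attribute handler could use nvmf_get_address() as below.  The controller
 * pointer and buffer size are assumptions for the example only.
 *
 *	char buf[256];
 *	int len = nvmf_get_address(ctrl, buf, sizeof(buf));
 *
 * For a TCP controller created with traddr and trsvcid set, buf would then
 * contain something like "traddr=192.168.1.10,trsvcid=4420\n".
 */
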
/**
 * nvmf_reg_read32() - NVMe Fabrics "Property Get" API function.
 * @ctrl:	Host NVMe controller instance maintaining the admin
 *		queue used to submit the property read command to
 *		the allocated NVMe controller resource on the target system.
 * @off:	Starting offset value of the targeted property
 *		register (see the fabrics section of the NVMe standard).
 * @val:	OUTPUT parameter that will contain the value of
 *		the property after a successful read.
 *
 * Used by the host system to retrieve a 32-bit capsule property value
 * from an NVMe controller on the target system.
 *
 * ("Capsule property" is a "PCIe register concept" applied to the
 *  NVMe fabrics space.)
 *
 * Return:
 *	0: successful read
 *	> 0: NVMe error status code
 *	< 0: Linux errno error code
 */
int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
{
	struct nvme_command cmd;
	union nvme_result res;
	int ret;

	memset(&cmd, 0, sizeof(cmd));
	cmd.prop_get.opcode = nvme_fabrics_command;
	cmd.prop_get.fctype = nvme_fabrics_type_property_get;
	cmd.prop_get.offset = cpu_to_le32(off);

	ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0, 0,
			NVME_QID_ANY, 0, 0, false);

	if (ret >= 0)
		*val = le64_to_cpu(res.u64);
	if (unlikely(ret != 0))
		dev_err(ctrl->device,
			"Property Get error: %d, offset %#x\n",
			ret > 0 ? ret & ~NVME_SC_DNR : ret, off);

	return ret;
}
EXPORT_SYMBOL_GPL(nvmf_reg_read32);

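/*
 * Illustrative sketch (not part of the original file): transport drivers wire
 * this up as the ->reg_read32 controller op, so a read of the controller
 * status property could look like the following.  The local variables are
 * assumptions for the example only.
 *
 *	u32 csts;
 *
 *	if (!nvmf_reg_read32(ctrl, NVME_REG_CSTS, &csts))
 *		ready = csts & NVME_CSTS_RDY;
 */
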
/**
 * nvmf_reg_read64() - NVMe Fabrics "Property Get" API function.
 * @ctrl:	Host NVMe controller instance maintaining the admin
 *		queue used to submit the property read command to
 *		the allocated controller resource on the target system.
 * @off:	Starting offset value of the targeted property
 *		register (see the fabrics section of the NVMe standard).
 * @val:	OUTPUT parameter that will contain the value of
 *		the property after a successful read.
 *
 * Used by the host system to retrieve a 64-bit capsule property value
 * from an NVMe controller on the target system.
 *
 * ("Capsule property" is a "PCIe register concept" applied to the
 *  NVMe fabrics space.)
 *
 * Return:
 *	0: successful read
 *	> 0: NVMe error status code
 *	< 0: Linux errno error code
 */
int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
{
	struct nvme_command cmd = { };
	union nvme_result res;
	int ret;

	cmd.prop_get.opcode = nvme_fabrics_command;
	cmd.prop_get.fctype = nvme_fabrics_type_property_get;
	cmd.prop_get.attrib = 1;
	cmd.prop_get.offset = cpu_to_le32(off);

	ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0, 0,
			NVME_QID_ANY, 0, 0, false);

	if (ret >= 0)
		*val = le64_to_cpu(res.u64);
	if (unlikely(ret != 0))
		dev_err(ctrl->device,
			"Property Get error: %d, offset %#x\n",
			ret > 0 ? ret & ~NVME_SC_DNR : ret, off);
	return ret;
}
EXPORT_SYMBOL_GPL(nvmf_reg_read64);

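/*
 * Illustrative sketch (not part of the original file): the core uses the
 * ->reg_read64 op to fetch the 64-bit CAP property during controller enable,
 * roughly as below.  Error handling is omitted for brevity.
 *
 *	u64 cap;
 *
 *	ret = nvmf_reg_read64(ctrl, NVME_REG_CAP, &cap);
 *	// queue depth and timeout limits are then derived from cap
 */
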
/**
 * nvmf_reg_write32() - NVMe Fabrics "Property Write" API function.
 * @ctrl:	Host NVMe controller instance maintaining the admin
 *		queue used to submit the property write command to
 *		the allocated NVMe controller resource on the target system.
 * @off:	Starting offset value of the targeted property
 *		register (see the fabrics section of the NVMe standard).
 * @val:	Input parameter that contains the value to be
 *		written to the property.
 *
 * Used by the NVMe host system to write a 32-bit capsule property value
 * to an NVMe controller on the target system.
 *
 * ("Capsule property" is a "PCIe register concept" applied to the
 *  NVMe fabrics space.)
 *
 * Return:
 *	0: successful write
 *	> 0: NVMe error status code
 *	< 0: Linux errno error code
 */
int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
{
	struct nvme_command cmd = { };
	int ret;

	cmd.prop_set.opcode = nvme_fabrics_command;
	cmd.prop_set.fctype = nvme_fabrics_type_property_set;
	cmd.prop_set.attrib = 0;
	cmd.prop_set.offset = cpu_to_le32(off);
	cmd.prop_set.value = cpu_to_le64(val);

	ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, NULL, NULL, 0, 0,
			NVME_QID_ANY, 0, 0, false);
	if (unlikely(ret))
		dev_err(ctrl->device,
			"Property Set error: %d, offset %#x\n",
			ret > 0 ? ret & ~NVME_SC_DNR : ret, off);
	return ret;
}
EXPORT_SYMBOL_GPL(nvmf_reg_write32);

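/*
 * Illustrative sketch (not part of the original file): transport drivers use
 * this as the ->reg_write32 op, e.g. when the core updates the controller
 * configuration property.  The use of ctrl_config here is an assumption for
 * the example only.
 *
 *	ret = nvmf_reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
 */
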
/**
 * nvmf_log_connect_error() - Error-parsing-diagnostic print out function for
 *				connect() errors.
 * @ctrl:	The specific /dev/nvmeX device that had the error.
 * @errval:	Error code to be decoded in a more human-friendly
 *		printout.
 * @offset:	For use with the NVMe error code
 *		NVME_SC_CONNECT_INVALID_PARAM.
 * @cmd:	This is the SQE portion of a submission capsule.
 * @data:	This is the "Data" portion of a submission capsule.
 */
static void nvmf_log_connect_error(struct nvme_ctrl *ctrl,
		int errval, int offset, struct nvme_command *cmd,
		struct nvmf_connect_data *data)
{
	int err_sctype = errval & ~NVME_SC_DNR;

	switch (err_sctype) {
	case (NVME_SC_CONNECT_INVALID_PARAM):
		if (offset >> 16) {
			char *inv_data = "Connect Invalid Data Parameter";

			switch (offset & 0xffff) {
			case (offsetof(struct nvmf_connect_data, cntlid)):
				dev_err(ctrl->device,
					"%s, cntlid: %d\n",
					inv_data, data->cntlid);
				break;
			case (offsetof(struct nvmf_connect_data, hostnqn)):
				dev_err(ctrl->device,
					"%s, hostnqn \"%s\"\n",
					inv_data, data->hostnqn);
				break;
			case (offsetof(struct nvmf_connect_data, subsysnqn)):
				dev_err(ctrl->device,
					"%s, subsysnqn \"%s\"\n",
					inv_data, data->subsysnqn);
				break;
			default:
				dev_err(ctrl->device,
					"%s, starting byte offset: %d\n",
					inv_data, offset & 0xffff);
				break;
			}
		} else {
			char *inv_sqe = "Connect Invalid SQE Parameter";

			switch (offset) {
			case (offsetof(struct nvmf_connect_command, qid)):
				dev_err(ctrl->device,
					"%s, qid %d\n",
					inv_sqe, cmd->connect.qid);
				break;
			default:
				dev_err(ctrl->device,
					"%s, starting byte offset: %d\n",
					inv_sqe, offset);
			}
		}
		break;
	case NVME_SC_CONNECT_INVALID_HOST:
		dev_err(ctrl->device,
			"Connect for subsystem %s is not allowed, hostnqn: %s\n",
			data->subsysnqn, data->hostnqn);
		break;
	case NVME_SC_CONNECT_CTRL_BUSY:
		dev_err(ctrl->device,
			"Connect command failed: controller is busy or not available\n");
		break;
	case NVME_SC_CONNECT_FORMAT:
		dev_err(ctrl->device,
			"Connect incompatible format: %d",
			cmd->connect.recfmt);
		break;
	case NVME_SC_HOST_PATH_ERROR:
		dev_err(ctrl->device,
			"Connect command failed: host path error\n");
		break;
	default:
		dev_err(ctrl->device,
			"Connect command failed, error wo/DNR bit: %d\n",
			err_sctype);
		break;
	}
}

/**
 * nvmf_connect_admin_queue() - NVMe Fabrics Admin Queue "Connect"
 *				API function.
 * @ctrl:	Host nvme controller instance used to request
 *		a new NVMe controller allocation on the target
 *		system and establish an NVMe Admin connection to
 *		that controller.
 *
 * This function enables an NVMe host device to request a new allocation of
 * an NVMe controller resource on a target system as well as establish a
 * fabrics-protocol connection of the NVMe Admin queue between the
 * host system device and the allocated NVMe controller on the
 * target system via an NVMe Fabrics "Connect" command.
 *
 * Return:
 *	0: success
 *	> 0: NVMe error status code
 *	< 0: Linux errno error code
 *
 */
int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
{
	struct nvme_command cmd = { };
	union nvme_result res;
	struct nvmf_connect_data *data;
	int ret;

	cmd.connect.opcode = nvme_fabrics_command;
	cmd.connect.fctype = nvme_fabrics_type_connect;
	cmd.connect.qid = 0;
	cmd.connect.sqsize = cpu_to_le16(NVME_AQ_DEPTH - 1);

	/*
	 * Set keep-alive timeout in seconds granularity (ms * 1000)
	 */
	cmd.connect.kato = cpu_to_le32(ctrl->kato * 1000);

	if (ctrl->opts->disable_sqflow)
		cmd.connect.cattr |= NVME_CONNECT_DISABLE_SQFLOW;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	uuid_copy(&data->hostid, &ctrl->opts->host->id);
	data->cntlid = cpu_to_le16(0xffff);
	strncpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE);
	strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);

	ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res,
			data, sizeof(*data), 0, NVME_QID_ANY, 1,
			BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT, false);
	if (ret) {
		nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32),
				       &cmd, data);
		goto out_free_data;
	}

	ctrl->cntlid = le16_to_cpu(res.u16);

out_free_data:
	kfree(data);
	return ret;
}
EXPORT_SYMBOL_GPL(nvmf_connect_admin_queue);

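/*
 * Illustrative sketch (not part of the original file): a fabrics transport
 * calls this once per controller after setting up its admin queue, roughly
 * in the order below.  The label name around the call is an assumption for
 * the example only.
 *
 *	// admin tag set and admin queue already allocated by the transport
 *	ret = nvmf_connect_admin_queue(ctrl);
 *	if (ret)
 *		goto out_cleanup_queue;
 *	// ctrl->cntlid is now valid and controller init can continue
 */
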
/**
 * nvmf_connect_io_queue() - NVMe Fabrics I/O Queue "Connect"
 *			     API function.
 * @ctrl:	Host nvme controller instance used to establish an
 *		NVMe I/O queue connection to the already allocated NVMe
 *		controller on the target system.
 * @qid:	NVMe I/O queue number for the new I/O connection between
 *		host and target (note qid == 0 is illegal as this is
 *		the Admin queue, per NVMe standard).
 * @poll:	Whether or not to poll for the completion of the connect cmd.
 *
 * This function issues a fabrics-protocol connection
 * of an NVMe I/O queue (via NVMe Fabrics "Connect" command)
 * between the host system device and the allocated NVMe controller
 * on the target system.
 *
 * Return:
 *	0: success
 *	> 0: NVMe error status code
 *	< 0: Linux errno error code
 */
int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid, bool poll)
{
	struct nvme_command cmd;
	struct nvmf_connect_data *data;
	union nvme_result res;
	int ret;

	memset(&cmd, 0, sizeof(cmd));
	cmd.connect.opcode = nvme_fabrics_command;
	cmd.connect.fctype = nvme_fabrics_type_connect;
	cmd.connect.qid = cpu_to_le16(qid);
	cmd.connect.sqsize = cpu_to_le16(ctrl->sqsize);

	if (ctrl->opts->disable_sqflow)
		cmd.connect.cattr |= NVME_CONNECT_DISABLE_SQFLOW;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	uuid_copy(&data->hostid, &ctrl->opts->host->id);
	data->cntlid = cpu_to_le16(ctrl->cntlid);
	strncpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE);
	strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);

	ret = __nvme_submit_sync_cmd(ctrl->connect_q, &cmd, &res,
			data, sizeof(*data), 0, qid, 1,
			BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT, poll);
	if (ret) {
		nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32),
				       &cmd, data);
	}
	kfree(data);
	return ret;
}
EXPORT_SYMBOL_GPL(nvmf_connect_io_queue);

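/*
 * Illustrative sketch (not part of the original file): transports connect
 * their I/O queues in a loop after creating them, along these lines.  The
 * queue_count field is real, but the surrounding loop and label are
 * assumptions for the example only.
 *
 *	for (i = 1; i < ctrl->queue_count; i++) {
 *		ret = nvmf_connect_io_queue(ctrl, i, false);
 *		if (ret)
 *			goto out_stop_queues;
 *	}
 */
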
bool nvmf_should_reconnect(struct nvme_ctrl *ctrl)
{
	if (ctrl->opts->max_reconnects == -1 ||
	    ctrl->nr_reconnects < ctrl->opts->max_reconnects)
		return true;

	return false;
}
EXPORT_SYMBOL_GPL(nvmf_should_reconnect);

/**
 * nvmf_register_transport() - NVMe Fabrics Library registration function.
 * @ops:	Transport ops instance to be registered to the
 *		common fabrics library.
 *
 * API function that registers the type of specific transport fabric
 * being implemented to the common NVMe fabrics library. Part of
 * the overall init sequence of starting up a fabrics driver.
 */
int nvmf_register_transport(struct nvmf_transport_ops *ops)
{
	if (!ops->create_ctrl)
		return -EINVAL;

	down_write(&nvmf_transports_rwsem);
	list_add_tail(&ops->entry, &nvmf_transports);
	up_write(&nvmf_transports_rwsem);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmf_register_transport);

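/*
 * Illustrative sketch (not part of the original file): a transport driver
 * (nvme-rdma, nvme-tcp, nvme-fc, ...) registers itself from its module init
 * roughly as below.  The transport name, option masks and create_ctrl helper
 * shown are assumptions for the example only; each transport picks its own.
 *
 *	static struct nvmf_transport_ops my_transport_ops = {
 *		.name		= "mytransport",
 *		.module		= THIS_MODULE,
 *		.required_opts	= NVMF_OPT_TRADDR,
 *		.allowed_opts	= NVMF_OPT_TRSVCID | NVMF_OPT_HOST_TRADDR,
 *		.create_ctrl	= my_transport_create_ctrl,
 *	};
 *
 *	ret = nvmf_register_transport(&my_transport_ops);
 */
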
/**
 * nvmf_unregister_transport() - NVMe Fabrics Library unregistration function.
 * @ops:	Transport ops instance to be unregistered from the
 *		common fabrics library.
 *
 * Fabrics API function that unregisters the type of specific transport
 * fabric being implemented from the common NVMe fabrics library.
 * Part of the overall exit sequence of unloading the implemented driver.
 */
void nvmf_unregister_transport(struct nvmf_transport_ops *ops)
{
	down_write(&nvmf_transports_rwsem);
	list_del(&ops->entry);
	up_write(&nvmf_transports_rwsem);
}
EXPORT_SYMBOL_GPL(nvmf_unregister_transport);

static struct nvmf_transport_ops *nvmf_lookup_transport(
		struct nvmf_ctrl_options *opts)
{
	struct nvmf_transport_ops *ops;

	lockdep_assert_held(&nvmf_transports_rwsem);

	list_for_each_entry(ops, &nvmf_transports, entry) {
		if (strcmp(ops->name, opts->transport) == 0)
			return ops;
	}

	return NULL;
}

static const match_table_t opt_tokens = {
	{ NVMF_OPT_TRANSPORT,		"transport=%s"		},
	{ NVMF_OPT_TRADDR,		"traddr=%s"		},
	{ NVMF_OPT_TRSVCID,		"trsvcid=%s"		},
	{ NVMF_OPT_NQN,			"nqn=%s"		},
	{ NVMF_OPT_QUEUE_SIZE,		"queue_size=%d"		},
	{ NVMF_OPT_NR_IO_QUEUES,	"nr_io_queues=%d"	},
	{ NVMF_OPT_RECONNECT_DELAY,	"reconnect_delay=%d"	},
	{ NVMF_OPT_CTRL_LOSS_TMO,	"ctrl_loss_tmo=%d"	},
	{ NVMF_OPT_KATO,		"keep_alive_tmo=%d"	},
	{ NVMF_OPT_HOSTNQN,		"hostnqn=%s"		},
	{ NVMF_OPT_HOST_TRADDR,		"host_traddr=%s"	},
	{ NVMF_OPT_HOST_IFACE,		"host_iface=%s"		},
	{ NVMF_OPT_HOST_ID,		"hostid=%s"		},
	{ NVMF_OPT_DUP_CONNECT,		"duplicate_connect"	},
	{ NVMF_OPT_DISABLE_SQFLOW,	"disable_sqflow"	},
	{ NVMF_OPT_HDR_DIGEST,		"hdr_digest"		},
	{ NVMF_OPT_DATA_DIGEST,		"data_digest"		},
	{ NVMF_OPT_NR_WRITE_QUEUES,	"nr_write_queues=%d"	},
	{ NVMF_OPT_NR_POLL_QUEUES,	"nr_poll_queues=%d"	},
	{ NVMF_OPT_TOS,			"tos=%d"		},
	{ NVMF_OPT_FAIL_FAST_TMO,	"fast_io_fail_tmo=%d"	},
	{ NVMF_OPT_ERR,			NULL			}
};

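/*
 * Illustrative sketch (not part of the original file): user space (typically
 * nvme-cli) creates a controller by writing a comma-separated option string
 * built from the tokens above to /dev/nvme-fabrics, for example:
 *
 *	# connect to the well-known discovery subsystem over TCP
 *	echo "transport=tcp,traddr=192.168.1.10,trsvcid=4420,nqn=nqn.2014-08.org.nvmexpress.discovery" \
 *		> /dev/nvme-fabrics
 *
 * The address and port shown are assumptions for the example only.
 */
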
static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
		const char *buf)
{
	substring_t args[MAX_OPT_ARGS];
	char *options, *o, *p;
	int token, ret = 0;
	size_t nqnlen = 0;
	int ctrl_loss_tmo = NVMF_DEF_CTRL_LOSS_TMO;
	uuid_t hostid;

	/* Set defaults */
	opts->queue_size = NVMF_DEF_QUEUE_SIZE;
	opts->nr_io_queues = num_online_cpus();
	opts->reconnect_delay = NVMF_DEF_RECONNECT_DELAY;
	opts->kato = 0;
	opts->duplicate_connect = false;
	opts->fast_io_fail_tmo = NVMF_DEF_FAIL_FAST_TMO;
	opts->hdr_digest = false;
	opts->data_digest = false;
	opts->tos = -1; /* < 0 == use transport default */

	options = o = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	uuid_gen(&hostid);

	while ((p = strsep(&o, ",\n")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, opt_tokens, args);
		opts->mask |= token;
		switch (token) {
		case NVMF_OPT_TRANSPORT:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			kfree(opts->transport);
			opts->transport = p;
			break;
		case NVMF_OPT_NQN:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			kfree(opts->subsysnqn);
			opts->subsysnqn = p;
			nqnlen = strlen(opts->subsysnqn);
			if (nqnlen >= NVMF_NQN_SIZE) {
				pr_err("%s needs to be < %d bytes\n",
					opts->subsysnqn, NVMF_NQN_SIZE);
				ret = -EINVAL;
				goto out;
			}
			opts->discovery_nqn =
				!(strcmp(opts->subsysnqn,
					 NVME_DISC_SUBSYS_NAME));
			break;
		case NVMF_OPT_TRADDR:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			kfree(opts->traddr);
			opts->traddr = p;
			break;
		case NVMF_OPT_TRSVCID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			kfree(opts->trsvcid);
			opts->trsvcid = p;
			break;
		case NVMF_OPT_QUEUE_SIZE:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out;
			}
			if (token < NVMF_MIN_QUEUE_SIZE ||
			    token > NVMF_MAX_QUEUE_SIZE) {
				pr_err("Invalid queue_size %d\n", token);
				ret = -EINVAL;
				goto out;
			}
			opts->queue_size = token;
			break;
		case NVMF_OPT_NR_IO_QUEUES:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out;
			}
			if (token <= 0) {
				pr_err("Invalid number of IOQs %d\n", token);
				ret = -EINVAL;
				goto out;
			}
			if (opts->discovery_nqn) {
				pr_debug("Ignoring nr_io_queues value for discovery controller\n");
				break;
			}

			opts->nr_io_queues = min_t(unsigned int,
					num_online_cpus(), token);
			break;
		case NVMF_OPT_KATO:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out;
			}

			if (token < 0) {
				pr_err("Invalid keep_alive_tmo %d\n", token);
				ret = -EINVAL;
				goto out;
			} else if (token == 0 && !opts->discovery_nqn) {
				/* Allowed for debug */
				pr_warn("keep_alive_tmo 0 won't execute keep alives!!!\n");
			}
			opts->kato = token;
			break;
		case NVMF_OPT_CTRL_LOSS_TMO:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out;
			}

			if (token < 0)
				pr_warn("ctrl_loss_tmo < 0 will reconnect forever\n");
			ctrl_loss_tmo = token;
			break;
		case NVMF_OPT_FAIL_FAST_TMO:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out;
			}

			if (token >= 0)
				pr_warn("I/O fail on reconnect controller after %d sec\n",
					token);
			opts->fast_io_fail_tmo = token;
			break;
		case NVMF_OPT_HOSTNQN:
			if (opts->host) {
				pr_err("hostnqn already user-assigned: %s\n",
				       opts->host->nqn);
				ret = -EADDRINUSE;
				goto out;
			}
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			nqnlen = strlen(p);
			if (nqnlen >= NVMF_NQN_SIZE) {
				pr_err("%s needs to be < %d bytes\n",
					p, NVMF_NQN_SIZE);
				kfree(p);
				ret = -EINVAL;
				goto out;
			}
			nvmf_host_put(opts->host);
			opts->host = nvmf_host_add(p);
			kfree(p);
			if (!opts->host) {
				ret = -ENOMEM;
				goto out;
			}
			break;
		case NVMF_OPT_RECONNECT_DELAY:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out;
			}
			if (token <= 0) {
				pr_err("Invalid reconnect_delay %d\n", token);
				ret = -EINVAL;
				goto out;
			}
			opts->reconnect_delay = token;
			break;
		case NVMF_OPT_HOST_TRADDR:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			kfree(opts->host_traddr);
			opts->host_traddr = p;
			break;
		case NVMF_OPT_HOST_IFACE:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			kfree(opts->host_iface);
			opts->host_iface = p;
			break;
		case NVMF_OPT_HOST_ID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			ret = uuid_parse(p, &hostid);
			if (ret) {
				pr_err("Invalid hostid %s\n", p);
				ret = -EINVAL;
				kfree(p);
				goto out;
			}
			kfree(p);
			break;
		case NVMF_OPT_DUP_CONNECT:
			opts->duplicate_connect = true;
			break;
		case NVMF_OPT_DISABLE_SQFLOW:
			opts->disable_sqflow = true;
			break;
		case NVMF_OPT_HDR_DIGEST:
			opts->hdr_digest = true;
			break;
		case NVMF_OPT_DATA_DIGEST:
			opts->data_digest = true;
			break;
		case NVMF_OPT_NR_WRITE_QUEUES:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out;
			}
			if (token <= 0) {
				pr_err("Invalid nr_write_queues %d\n", token);
				ret = -EINVAL;
				goto out;
			}
			opts->nr_write_queues = token;
			break;
		case NVMF_OPT_NR_POLL_QUEUES:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out;
			}
			if (token <= 0) {
				pr_err("Invalid nr_poll_queues %d\n", token);
				ret = -EINVAL;
				goto out;
			}
			opts->nr_poll_queues = token;
			break;
		case NVMF_OPT_TOS:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out;
			}
			if (token < 0) {
				pr_err("Invalid type of service %d\n", token);
				ret = -EINVAL;
				goto out;
			}
			if (token > 255) {
				pr_warn("Clamping type of service to 255\n");
				token = 255;
			}
			opts->tos = token;
			break;
		default:
			pr_warn("unknown parameter or missing value '%s' in ctrl creation request\n",
				p);
			ret = -EINVAL;
			goto out;
		}
	}

	if (opts->discovery_nqn) {
		opts->nr_io_queues = 0;
		opts->nr_write_queues = 0;
		opts->nr_poll_queues = 0;
		opts->duplicate_connect = true;
	} else {
		if (!opts->kato)
			opts->kato = NVME_DEFAULT_KATO;
	}
	if (ctrl_loss_tmo < 0) {
		opts->max_reconnects = -1;
	} else {
		opts->max_reconnects = DIV_ROUND_UP(ctrl_loss_tmo,
						opts->reconnect_delay);
		if (ctrl_loss_tmo < opts->fast_io_fail_tmo)
			pr_warn("failfast tmo (%d) larger than controller loss tmo (%d)\n",
				opts->fast_io_fail_tmo, ctrl_loss_tmo);
	}

	if (!opts->host) {
		kref_get(&nvmf_default_host->ref);
		opts->host = nvmf_default_host;
	}

	uuid_copy(&opts->host->id, &hostid);

out:
	kfree(options);
	return ret;
}

static int nvmf_check_required_opts(struct nvmf_ctrl_options *opts,
		unsigned int required_opts)
{
	if ((opts->mask & required_opts) != required_opts) {
		int i;

		for (i = 0; i < ARRAY_SIZE(opt_tokens); i++) {
			if ((opt_tokens[i].token & required_opts) &&
			    !(opt_tokens[i].token & opts->mask)) {
				pr_warn("missing parameter '%s'\n",
					opt_tokens[i].pattern);
			}
		}

		return -EINVAL;
	}

	return 0;
}

bool nvmf_ip_options_match(struct nvme_ctrl *ctrl,
		struct nvmf_ctrl_options *opts)
{
	if (!nvmf_ctlr_matches_baseopts(ctrl, opts) ||
	    strcmp(opts->traddr, ctrl->opts->traddr) ||
	    strcmp(opts->trsvcid, ctrl->opts->trsvcid))
		return false;

	/*
	 * Checking the local address is rough. In most cases, none is
	 * specified and the host port is selected by the stack.
	 *
	 * Assume no match if:
	 * -  local address is specified and address is not the same
	 * -  local address is not specified but remote is, or vice versa
	 *    (admin using specific host_traddr when it matters).
	 */
	if ((opts->mask & NVMF_OPT_HOST_TRADDR) &&
	    (ctrl->opts->mask & NVMF_OPT_HOST_TRADDR)) {
		if (strcmp(opts->host_traddr, ctrl->opts->host_traddr))
			return false;
	} else if ((opts->mask & NVMF_OPT_HOST_TRADDR) ||
		   (ctrl->opts->mask & NVMF_OPT_HOST_TRADDR)) {
		return false;
	}

	return true;
}
EXPORT_SYMBOL_GPL(nvmf_ip_options_match);

static int nvmf_check_allowed_opts(struct nvmf_ctrl_options *opts,
		unsigned int allowed_opts)
{
	if (opts->mask & ~allowed_opts) {
		int i;

		for (i = 0; i < ARRAY_SIZE(opt_tokens); i++) {
			if ((opt_tokens[i].token & opts->mask) &&
			    (opt_tokens[i].token & ~allowed_opts)) {
				pr_warn("invalid parameter '%s'\n",
					opt_tokens[i].pattern);
			}
		}

		return -EINVAL;
	}

	return 0;
}

void nvmf_free_options(struct nvmf_ctrl_options *opts)
{
	nvmf_host_put(opts->host);
	kfree(opts->transport);
	kfree(opts->traddr);
	kfree(opts->trsvcid);
	kfree(opts->subsysnqn);
	kfree(opts->host_traddr);
	kfree(opts->host_iface);
	kfree(opts);
}
EXPORT_SYMBOL_GPL(nvmf_free_options);

#define NVMF_REQUIRED_OPTS	(NVMF_OPT_TRANSPORT | NVMF_OPT_NQN)
#define NVMF_ALLOWED_OPTS	(NVMF_OPT_QUEUE_SIZE | NVMF_OPT_NR_IO_QUEUES | \
				 NVMF_OPT_KATO | NVMF_OPT_HOSTNQN | \
				 NVMF_OPT_HOST_ID | NVMF_OPT_DUP_CONNECT |\
				 NVMF_OPT_DISABLE_SQFLOW |\
				 NVMF_OPT_FAIL_FAST_TMO)

static struct nvme_ctrl *
nvmf_create_ctrl(struct device *dev, const char *buf)
{
	struct nvmf_ctrl_options *opts;
	struct nvmf_transport_ops *ops;
	struct nvme_ctrl *ctrl;
	int ret;

	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
	if (!opts)
		return ERR_PTR(-ENOMEM);

	ret = nvmf_parse_options(opts, buf);
	if (ret)
		goto out_free_opts;

	request_module("nvme-%s", opts->transport);

	/*
	 * Check the generic options first as we need a valid transport for
	 * the lookup below.  Then clear the generic flags so that transport
	 * drivers don't have to care about them.
	 */
	ret = nvmf_check_required_opts(opts, NVMF_REQUIRED_OPTS);
	if (ret)
		goto out_free_opts;
	opts->mask &= ~NVMF_REQUIRED_OPTS;

	down_read(&nvmf_transports_rwsem);
	ops = nvmf_lookup_transport(opts);
	if (!ops) {
		pr_info("no handler found for transport %s.\n",
			opts->transport);
		ret = -EINVAL;
		goto out_unlock;
	}

	if (!try_module_get(ops->module)) {
		ret = -EBUSY;
		goto out_unlock;
	}
	up_read(&nvmf_transports_rwsem);

	ret = nvmf_check_required_opts(opts, ops->required_opts);
	if (ret)
		goto out_module_put;
	ret = nvmf_check_allowed_opts(opts, NVMF_ALLOWED_OPTS |
				ops->allowed_opts | ops->required_opts);
	if (ret)
		goto out_module_put;

	ctrl = ops->create_ctrl(dev, opts);
	if (IS_ERR(ctrl)) {
		ret = PTR_ERR(ctrl);
		goto out_module_put;
	}

	module_put(ops->module);
	return ctrl;

out_module_put:
	module_put(ops->module);
	goto out_free_opts;
out_unlock:
	up_read(&nvmf_transports_rwsem);
out_free_opts:
	nvmf_free_options(opts);
	return ERR_PTR(ret);
}

static struct class *nvmf_class;
static struct device *nvmf_device;
static DEFINE_MUTEX(nvmf_dev_mutex);

static ssize_t nvmf_dev_write(struct file *file, const char __user *ubuf,
		size_t count, loff_t *pos)
{
	struct seq_file *seq_file = file->private_data;
	struct nvme_ctrl *ctrl;
	const char *buf;
	int ret = 0;

	if (count > PAGE_SIZE)
		return -ENOMEM;

	buf = memdup_user_nul(ubuf, count);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	mutex_lock(&nvmf_dev_mutex);
	if (seq_file->private) {
		ret = -EINVAL;
		goto out_unlock;
	}

	ctrl = nvmf_create_ctrl(nvmf_device, buf);
	if (IS_ERR(ctrl)) {
		ret = PTR_ERR(ctrl);
		goto out_unlock;
	}

	seq_file->private = ctrl;

out_unlock:
	mutex_unlock(&nvmf_dev_mutex);
	kfree(buf);
	return ret ? ret : count;
}

static int nvmf_dev_show(struct seq_file *seq_file, void *private)
{
	struct nvme_ctrl *ctrl;
	int ret = 0;

	mutex_lock(&nvmf_dev_mutex);
	ctrl = seq_file->private;
	if (!ctrl) {
		ret = -EINVAL;
		goto out_unlock;
	}

	seq_printf(seq_file, "instance=%d,cntlid=%d\n",
			ctrl->instance, ctrl->cntlid);

out_unlock:
	mutex_unlock(&nvmf_dev_mutex);
	return ret;
}

static int nvmf_dev_open(struct inode *inode, struct file *file)
{
	/*
	 * The miscdevice code initializes file->private_data, but doesn't
	 * make use of it later.
	 */
	file->private_data = NULL;
	return single_open(file, nvmf_dev_show, NULL);
}

static int nvmf_dev_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq_file = file->private_data;
	struct nvme_ctrl *ctrl = seq_file->private;

	if (ctrl)
		nvme_put_ctrl(ctrl);
	return single_release(inode, file);
}

static const struct file_operations nvmf_dev_fops = {
	.owner		= THIS_MODULE,
	.write		= nvmf_dev_write,
	.read		= seq_read,
	.open		= nvmf_dev_open,
	.release	= nvmf_dev_release,
};

static struct miscdevice nvmf_misc = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= "nvme-fabrics",
	.fops		= &nvmf_dev_fops,
};

static int __init nvmf_init(void)
{
	int ret;

	nvmf_default_host = nvmf_host_default();
	if (!nvmf_default_host)
		return -ENOMEM;

	nvmf_class = class_create(THIS_MODULE, "nvme-fabrics");
	if (IS_ERR(nvmf_class)) {
		pr_err("couldn't register class nvme-fabrics\n");
		ret = PTR_ERR(nvmf_class);
		goto out_free_host;
	}

	nvmf_device =
		device_create(nvmf_class, NULL, MKDEV(0, 0), NULL, "ctl");
	if (IS_ERR(nvmf_device)) {
		pr_err("couldn't create nvme-fabrics device!\n");
		ret = PTR_ERR(nvmf_device);
		goto out_destroy_class;
	}

	ret = misc_register(&nvmf_misc);
	if (ret) {
		pr_err("couldn't register misc device: %d\n", ret);
		goto out_destroy_device;
	}

	return 0;

out_destroy_device:
	device_destroy(nvmf_class, MKDEV(0, 0));
out_destroy_class:
	class_destroy(nvmf_class);
out_free_host:
	nvmf_host_put(nvmf_default_host);
	return ret;
}

static void __exit nvmf_exit(void)
{
	misc_deregister(&nvmf_misc);
	device_destroy(nvmf_class, MKDEV(0, 0));
	class_destroy(nvmf_class);
	nvmf_host_put(nvmf_default_host);

	BUILD_BUG_ON(sizeof(struct nvmf_common_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvmf_connect_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvmf_property_get_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvmf_property_set_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvmf_connect_data) != 1024);
}

MODULE_LICENSE("GPL v2");

module_init(nvmf_init);
module_exit(nvmf_exit);